ValidationError: Input should be a valid dictionary or instance of BaseTool

Updated from 0.70.1 to 0.80.0 with existing working code, and upon execution the output error is as below. Thoughts on resolution?

Traceback (most recent call last):
File “/home/lane/AI/crewAI-0.80.0/hedgefundmanager.py”, line 59, in
researcher = Agent(
File “/home/lane/AI/crewAI-0.80.0/crewAI-0.80.0/lib/python3.10/site-packages/pydantic/main.py”, line 214, in init
validated_self = self.__pydantic_validator.validate_python(data, self_instance=self)
pydantic_core._pydantic_core.ValidationError: 1 validation error for Agent
tools.0
Input should be a valid dictionary or instance of BaseTool [type=model_type, input_value=DuckDuckGoSearchRun(api_w…d=‘api’, source=‘text’)), input_type=DuckDuckGoSearchRun]
For further information visit Redirecting...

Code snippet starting on line 59:

researcher = Agent(
    role='Information Researcher',
    goal=f'Search the internet for the latest and most recent news, legal actions and announcements about {research_topic} with accurate sourcing.',
    backstory="""You are a hedge fund manager who has vast experience in the stock market, investment options and vehicles, and trading. Your decisions are critical to the success of the hedge fund with billions of dollars on the line including potential jail time for incorrect data from SEC sanctions.""",
    verbose=agent_verbose,
    #planning=True,
    max_iter=max_iter,
    max_execution_time=None,
    full_output=True,
    output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-researcher.md',
    llm=llm_ollama,
    tools=[search]
)

Can you share all of your code, please, including your tool?

Tool is duckduckgo for search. Code below.

import logging
import os
import sys
import json
from textwrap import dedent
from datetime import datetime
from crewai import Agent, Task, Crew, Process
from langchain_community.tools import DuckDuckGoSearchRun
from crewai.tasks.task_output import TaskOutput
from langchain.agents import Tool
from langchain_community.llms import Ollama

#setting venv - crewAI-GitHub
#subprocess.run(["source", "crewAI-GitHub/bin/activate"])

#local variables needed for controlling configuration, logging levels, tools
#llm_ollama = Ollama(model="llama3.1")
llm_ollama = 'ollama/llama3.2'
search = DuckDuckGoSearchRun()
agent_verbose = True
task_verbose = True
crew_verbose = True
max_iter=5000000

# Key variables for use case automation needs
today = datetime.today().strftime('%Y-%m-%d-%H-%M')
log_date = datetime.today().strftime('%Y-%m-%d')
area="hedge_fund"

# create directories for logging and markdown output paths
os.makedirs(f"logs/{log_date}/{area}", exist_ok=True)
os.makedirs(f"logs/{log_date}/{area}/{today}", exist_ok=True)
os.makedirs(f"markdown/{log_date}/{area}", exist_ok=True)
os.makedirs(f"markdown/{log_date}/{area}/{today}", exist_ok=True)
os.makedirs(f"memory/{area}", exist_ok=True)

# create directories for memory paths
os.environ["CHROMA_DB_FOLDER"] = "/home/lane/AI/crewAI-0.80.0/memory/hedge_fund"
os.environ["CREWAI_STORAGE_DIR"] = "/home/lane/AI/crewAI-0.80.0/memory/hedge_fund"

# Research topics to be used for automation
#research_topics = ['Apple Inc.(Stock code :AAPL)']
#research_topics = ['Apple Inc.(Stock code :AAPL)', 'Microsoft Corp.(Stock code :MSFT)', 'JP Morgan Chase & Co.(Stock code :JPM)', 'Eli Lilly & Co. (Stock code :LLY)', 'UnitedHealth Group Incorporated. (Stock code: UNH)', 'NVIDIA (Stock code: NVDA)', 'Amazon (Stock code: AMZN)', 'Berkshire Hathaway Class B (Stock code: BRK.B)', 'Alphabet Class A (Stock code :GOOGL)', 'Alphabet Class C (Stock code :GOOG)', 'Tesla (Stock code :TSLA)', 'Proctor Gamble Company (Stock code :PG)', 'Disney (Stock code :DIS)', 'Morgan Stanley (Stock code :MS)', 'Bank of America (Stock code :BAC)', 'Abbvie (Stock code :ABBV)', 'Goldman Sachs (Stock code :GS)', 'Pfizer (Stock code :PFE)', 'Northrop Grumman (Stock code :NOC)', 'ResMed Inc (Stock code :RMD)', 'Lockheed Martin (Stock code :LMT)', 'CVS (Stock code :CVS)', 'Merck (Stock code :MRK)', 'Boeing (Stock code :BA)', 'Novo Nordisk (Stock code :NVO)', 'Mastercard (Stock code :MA)', 'Walmart (Stock code :WMT)', 'Dow (Stock code :DOW)', 'Blackrock (Stock code :BLK)', 'Visa (Stock code :V)', 'Abbott Labs (Stock code :ABT)', 'AMD (Stock code :AMD)']
research_topics = ['Apple', 'NVIDIA', 'MSFT']

for research_topic in research_topics:
    #sys.stdout = open(f"logs/{log_date}/{area}/{today}/{research_topic}.log", 'w')

    result=''

    def search_result_callback(output: TaskOutput):
        if output.result is not None:
            print("Search task completed successfully!")
            print("Search Results:\n", output.result)
        else:
            print("Search task failed to produce output.")

    # Define agents with specific roles and tools
    researcher = Agent(
        role='Information Researcher',
        goal=f'Search the internet for the latest and most recent news, legal actions and announcements about {research_topic} with accurate sourcing.',
        backstory="""You are a hedge fund manager who has vast experience in the stock market, investment options and vehicles, and trading. Your decisions are critical to the success of the hedge fund with billions of dollars on the line including potential jail time for incorrect data from SEC sanctions.""",
        verbose=agent_verbose,
        #planning=True,
        max_iter=max_iter,
        max_execution_time=None,
        full_output=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-researcher.md',
        llm=llm_ollama,
        tools=[search],
    )

    fact_checker = Agent(
        role='Information Fact Checker',
        goal="Provide judgement to AI Information Researcher findings on the credibility of the information by 'True' or 'False' or 'unknown'.",
        backstory="""As the guardian of truth, you critically assess the AI Information Researcher's data for accuracy by cross check from internet. Your expertise ensures the integrity and credibility of information shared, maintaining a standard of trustworthiness in a data-saturated world.""",
        verbose=agent_verbose,
        #planning=True,
        max_iter=max_iter,
        max_execution_time=None,
        full_output=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-fact_checker.md',
        llm=llm_ollama,
        tools=[search]
    )

    # Create tasks for the agents
    research_task = Task(
        # description=f'Search 3 topic for the latest news {research_topic} within February of 2024, ensuring all data is backed by credible sources. Compile a report detailing each finding and its source for verification.', 
        description=dedent(
            f'''<role>
                You are a hedge fund manager who has vast experience in the stock market, investment options and vehicles, and trading. Your decisions are critical to the success of the hedge fund with billions of dollars on the line including potential jail time for incorrect data from SEC sanctions.
                </role>

                <task>
                Provide a detailed financial analysis of {research_topic} in regards to their investment likelihood.
                </task>
            ''',
        ),
        expected_output=dedent(
            f'''Provide a detailed financial analysis of {research_topic} in regards to their investment likelihood.  
            
            <example>
                Current Date:  
                Company Name:  
                Company Stock Ticker:  
                Company Stock Ticker Value:  
                Market Valuation:  
                Investment Areas:  
                Decision to Invest:  
            </example>
            ''',
        ),
        tools=[search],
        verbose=task_verbose,
        max_iter=max_iter,
        max_execution_time=None,
        #planning=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-research_task.md',
        output_log_file=f'logs/{log_date}/{area}/{today}/{research_topic}-research_task.log',
        agent=researcher
        #callback=search_result_callback
    )
    fact_checker_task = Task(
        description=dedent(
            f'''Verify the accuracy of the AI Information Researcher report for {research_topic}. 
            Cross-check all cited sources against authoritative references to confirm factual correctness, highlighting any inaccuracies 
            or areas needing further verification.',
            ''',
        ),
        expected_output=dedent(
            f'''Verification report on {research_topic} with accuracy assessment and source validation.  Generate a report of the latest news worthy details regarding {research_topic} as of {log_date} with credible sources.  
            Template for this output should be this example xml tag below:

            Provide a detailed financial analysis of {research_topic} in regards to their investment likelihood.  
            
            <example>
                Current Date:  
                Company Name:  
                Company Stock Ticker:  
                Company Stock Ticker Value:  
                Market Valuation:  
                Investment Areas:  
                Decision to Invest:  
            </example>
            ''',
        ),
        tools=[search],
        verbose=task_verbose,
        max_iter=max_iter,
        max_execution_time=None,
        #planning=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-fact_checker_task.md',
        output_log_file=f'logs/{log_date}/{area}/{today}/{research_topic}-fact_checker_task.log',
        agent=fact_checker
    )

    # Assemble the crew with a sequential process
    my_crew = Crew(
        agents=[researcher, fact_checker],
        tasks=[research_task, fact_checker_task],
        verbose=crew_verbose,
        full_output=True,
        # usage_metrics=True,
        # config=dict[str],
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-crew.md',
        output_log_file=f'logs/{log_date}/{area}/{today}/{research_topic}-crew.log',
        memory=True,
        embedder={
            "provider": "ollama",
            "config":{
                "model": 'llama3.2'
            }
        },
        # planning=True,
        # planning_llm=llm_ollama,
        # planning_file=f'logs/{log_date}/{area}/{today}/{research_topic}-{today}-crew-planning.log',
        manager_llm=llm_ollama,
        manager_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-crew-manager.md',
        #process=Process.hierarchical
        process=Process.sequential
    )

    # Start the crew's task execution
    result = my_crew.kickoff()
    print(result)

    #testing crew.output
    #crew_output = my_crew.kickoff()
    # print(f"Raw Output: {crew_output.raw}")
    # if crew_output.json_dict:
    #     print(f"JSON Output: {json.dumps(crew_output.json_dict, indent=2)}")
    # if crew_output.pydantic:
    #     print(f"Pydantic Output: {crew_output.pydantic}")
    print('\n')
    print("----------Metrics Usage----------")
    #print(f"Tasks Output: {crew_output.tasks_output}")
    #print(f"Tasks Output: {result.tasks_output}")
    #print('\n')
    #print(f"Token Usage: {crew_output.token_usage}")
    print(f"Token Usage: {result.token_usage}")
    print("----------Metrics Usage----------")

You need to extend the BaseTool class by defining a subclass (e.g., MyCustomDuckDuckGoTool). See the docs.

from crewai.tools import BaseTool
from langchain_community.tools import DuckDuckGoSearchRun

class MyCustomDuckDuckGoTool(BaseTool):
    name: str = "DuckDuckGo Search Tool"
    description: str = "Search the web for a given query."

    def _run(self, query: str) -> str:
        # Implement DuckDuckGoSearchRun here

        return "Result from custom tool"

Thanks. I’ve added the that to my code but still seeing the same output error. Any recommendations on how to resolve or if I added your fix incorrectly?

Traceback (most recent call last):
File “/home/lane/AI/crewAI-0.80.0/hedge_fund_manager.py”, line 79, in
researcher = Agent(
File “/home/lane/AI/crewAI-0.80.0/crewAI-0.80.0/lib/python3.10/site-packages/pydantic/main.py”, line 214, in init
validated_self = self.pydantic_validator.validate_python(data, self_instance=self)
pydantic_core._pydantic_core.ValidationError: 1 validation error for Agent
tools.0
Input should be a valid dictionary or instance of BaseTool [type=model_type, input_value=DuckDuckGoSearchRun(api_w…d=‘api’, source=‘text’)), input_type=DuckDuckGoSearchRun]
For further information visit Redirecting...

import logging
import os
import sys
import json
from textwrap import dedent
from datetime import datetime
from crewai import Agent, Task, Crew, Process
from langchain_community.tools import DuckDuckGoSearchRun
from crewai.tasks.task_output import TaskOutput
from langchain.agents import Tool
from langchain_community.llms import Ollama
from crewai.tools import BaseTool
from langchain_community.tools import DuckDuckGoSearchRun

class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."

    def _run(self, argument: str) -> str:
        # Implement DuckDuckGoSearchRun here

        return "Result from custom tool"

class MyCustomTool(BaseTool):
    name: str = "duckduckgosearchrun"
    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."

    def _run(self, argument: str) -> str:
        # Implement DuckDuckGoSearchRun here

        return "Result from custom tool"

#setting venv - crewAI-GitHub
#subprocess.run(["source", "crewAI-GitHub/bin/activate"])

#local variables needed for controlling configuration, logging levels, tools
#llm_ollama = Ollama(model="llama3.1")
llm_ollama = 'ollama/llama3.2'
search = DuckDuckGoSearchRun()
agent_verbose = True
task_verbose = True
crew_verbose = True
max_iter=5000000

# Key variables for use case automation needs
today = datetime.today().strftime('%Y-%m-%d-%H-%M')
log_date = datetime.today().strftime('%Y-%m-%d')
area="hedge_fund"

# create directories for logging and markdown output paths
os.makedirs(f"logs/{log_date}/{area}", exist_ok=True)
os.makedirs(f"logs/{log_date}/{area}/{today}", exist_ok=True)
os.makedirs(f"markdown/{log_date}/{area}", exist_ok=True)
os.makedirs(f"markdown/{log_date}/{area}/{today}", exist_ok=True)
os.makedirs(f"memory/{area}", exist_ok=True)

# create directories for memory paths
os.environ["CHROMA_DB_FOLDER"] = "/home/lane/AI/crewAI-0.80.0/memory/hedge_fund"
os.environ["CREWAI_STORAGE_DIR"] = "/home/lane/AI/crewAI-0.80.0/memory/hedge_fund"

# Research topics to be used for automation
#research_topics = ['Apple Inc.(Stock code :AAPL)']
#research_topics = ['Apple Inc.(Stock code :AAPL)', 'Microsoft Corp.(Stock code :MSFT)', 'JP Morgan Chase & Co.(Stock code :JPM)', 'Eli Lilly & Co. (Stock code :LLY)', 'UnitedHealth Group Incorporated. (Stock code: UNH)', 'NVIDIA (Stock code: NVDA)', 'Amazon (Stock code: AMZN)', 'Berkshire Hathaway Class B (Stock code: BRK.B)', 'Alphabet Class A (Stock code :GOOGL)', 'Alphabet Class C (Stock code :GOOG)', 'Tesla (Stock code :TSLA)', 'Proctor Gamble Company (Stock code :PG)', 'Disney (Stock code :DIS)', 'Morgan Stanley (Stock code :MS)', 'Bank of America (Stock code :BAC)', 'Abbvie (Stock code :ABBV)', 'Goldman Sachs (Stock code :GS)', 'Pfizer (Stock code :PFE)', 'Northrop Grumman (Stock code :NOC)', 'ResMed Inc (Stock code :RMD)', 'Lockheed Martin (Stock code :LMT)', 'CVS (Stock code :CVS)', 'Merck (Stock code :MRK)', 'Boeing (Stock code :BA)', 'Novo Nordisk (Stock code :NVO)', 'Mastercard (Stock code :MA)', 'Walmart (Stock code :WMT)', 'Dow (Stock code :DOW)', 'Blackrock (Stock code :BLK)', 'Visa (Stock code :V)', 'Abbott Labs (Stock code :ABT)', 'AMD (Stock code :AMD)']
research_topics = ['Apple', 'NVIDIA', 'MSFT']

for research_topic in research_topics:
    #sys.stdout = open(f"logs/{log_date}/{area}/{today}/{research_topic}.log", 'w')

    result=''

    def search_result_callback(output: TaskOutput):
        if output.result is not None:
            print("Search task completed successfully!")
            print("Search Results:\n", output.result)
        else:
            print("Search task failed to produce output.")

    # Define agents with specific roles and tools
    researcher = Agent(
        role='Information Researcher',
        goal=f'Search the internet for the latest and most recent news, legal actions and announcements about {research_topic} with accurate sourcing.',
        backstory="""You are a hedge fund manager who has vast experience in the stock market, investment options and vehicles, and trading. Your decisions are critical to the success of the hedge fund with billions of dollars on the line including potential jail time for incorrect data from SEC sanctions.""",
        verbose=agent_verbose,
        #planning=True,
        max_iter=max_iter,
        max_execution_time=None,
        full_output=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-researcher.md',
        llm=llm_ollama,
        tools=[search],
    )

    fact_checker = Agent(
        role='Information Fact Checker',
        goal="Provide judgement to AI Information Researcher findings on the credibility of the information by 'True' or 'False' or 'unknown'.",
        backstory="""As the guardian of truth, you critically assess the AI Information Researcher's data for accuracy by cross check from internet. Your expertise ensures the integrity and credibility of information shared, maintaining a standard of trustworthiness in a data-saturated world.""",
        verbose=agent_verbose,
        #planning=True,
        max_iter=max_iter,
        max_execution_time=None,
        full_output=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-fact_checker.md',
        llm=llm_ollama,
        tools=[search]
    )

    # Create tasks for the agents
    research_task = Task(
        # description=f'Search 3 topic for the latest news {research_topic} within February of 2024, ensuring all data is backed by credible sources. Compile a report detailing each finding and its source for verification.', 
        description=dedent(
            f'''<role>
                You are a hedge fund manager who has vast experience in the stock market, investment options and vehicles, and trading. Your decisions are critical to the success of the hedge fund with billions of dollars on the line including potential jail time for incorrect data from SEC sanctions.
                </role>

                <task>
                Provide a detailed financial analysis of {research_topic} in regards to their investment likelihood.
                </task>
            ''',
        ),
        expected_output=dedent(
            f'''Provide a detailed financial analysis of {research_topic} in regards to their investment likelihood.  
            
            <example>
                Current Date:  
                Company Name:  
                Company Stock Ticker:  
                Company Stock Ticker Value:  
                Market Valuation:  
                Investment Areas:  
                Decision to Invest:  
            </example>
            ''',
        ),
        tools=[search],
        verbose=task_verbose,
        max_iter=max_iter,
        max_execution_time=None,
        #planning=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-research_task.md',
        output_log_file=f'logs/{log_date}/{area}/{today}/{research_topic}-research_task.log',
        agent=researcher
        #callback=search_result_callback
    )
    fact_checker_task = Task(
        description=dedent(
            f'''Verify the accuracy of the AI Information Researcher report for {research_topic}. 
            Cross-check all cited sources against authoritative references to confirm factual correctness, highlighting any inaccuracies 
            or areas needing further verification.',
            ''',
        ),
        expected_output=dedent(
            f'''Verification report on {research_topic} with accuracy assessment and source validation.  Generate a report of the latest news worthy details regarding {research_topic} as of {log_date} with credible sources.  
            Template for this output should be this example xml tag below:

            Provide a detailed financial analysis of {research_topic} in regards to their investment likelihood.  
            
            <example>
                Current Date:  
                Company Name:  
                Company Stock Ticker:  
                Company Stock Ticker Value:  
                Market Valuation:  
                Investment Areas:  
                Decision to Invest:  
            </example>
            ''',
        ),
        tools=[search],
        verbose=task_verbose,
        max_iter=max_iter,
        max_execution_time=None,
        #planning=True,
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-fact_checker_task.md',
        output_log_file=f'logs/{log_date}/{area}/{today}/{research_topic}-fact_checker_task.log',
        agent=fact_checker
    )

    # Assemble the crew with a sequential process
    my_crew = Crew(
        agents=[researcher, fact_checker],
        tasks=[research_task, fact_checker_task],
        verbose=crew_verbose,
        full_output=True,
        # usage_metrics=True,
        # config=dict[str],
        output_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-crew.md',
        output_log_file=f'logs/{log_date}/{area}/{today}/{research_topic}-crew.log',
        memory=True,
        embedder={
            "provider": "ollama",
            "config":{
                "model": 'llama3.2'
            }
        },
        # planning=True,
        # planning_llm=llm_ollama,
        # planning_file=f'logs/{log_date}/{area}/{today}/{research_topic}-{today}-crew-planning.log',
        manager_llm=llm_ollama,
        manager_file=f'markdown/{log_date}/{area}/{today}/{research_topic}-crew-manager.md',
        #process=Process.hierarchical
        process=Process.sequential
    )

    # Start the crew's task execution
    result = my_crew.kickoff()
    print(result)

    #testing crew.output
    #crew_output = my_crew.kickoff()
    # print(f"Raw Output: {crew_output.raw}")
    # if crew_output.json_dict:
    #     print(f"JSON Output: {json.dumps(crew_output.json_dict, indent=2)}")
    # if crew_output.pydantic:
    #     print(f"Pydantic Output: {crew_output.pydantic}")
    print('\n')
    print("----------Metrics Usage----------")
    #print(f"Tasks Output: {crew_output.tasks_output}")
    #print(f"Tasks Output: {result.tasks_output}")
    #print('\n')
    #print(f"Token Usage: {crew_output.token_usage}")
    print(f"Token Usage: {result.token_usage}")
    print("----------Metrics Usage----------")

You’re still initializing the tool without extending the BaseTool class by defining a subclass. Remove this line completely.

You’re still setting the tool parameter incorrectly.

Also, you didn’t implement the tool. You just copy-pasted my code from the answer above.

Try the following:

from crewai.tools import BaseTool
from langchain_community.tools import DuckDuckGoSearchRun

class MyCustomDuckDuckGoTool(BaseTool):
    name: str = "DuckDuckGo Search Tool"
    description: str = "Search the web for a given query."

    def _run(self, query: str) -> str:
        duckduckgo_tool = DuckDuckGoSearchRun()
        
        response = duckduckgo_tool.invoke(query)

        return response

Then set the tools parameter as follows:

tools=[MyCustomDuckDuckGoTool()]

Bingo Bango!! Thanks I will begin to update my codebase on this change. Thanks for the detailed help here, really appreciate it.