Custom tool throws error `ValueError: Invalid response from LLM call - None or empty`

Hi, I added a custom tool to validate URLs, but I keep getting the following error even after replacing it with a dummy implementation. Strangely, my other custom tool works just fine. The error occurs whenever the quality_control_specialist invokes url_validator_tool.

For context, I’m using MODEL=claude-3-5-sonnet-20241022.

from crewai.tools import BaseTool
from pydantic import BaseModel, Field

class URLValidatorToolInput(BaseModel):
    url: str = Field(
        ..., description="A URL to a website, be it a company website or a job posting"
    )


class URLValidatorTool(BaseTool):
    name: str = "URL Validator Tool"
    description: str = "Detect any broken links on job listings"
    args_schema: type[BaseModel] = URLValidatorToolInput

    def _run(self, url: str) -> str:
        # Dummy implementation for debugging: always report the URL as valid.
        return "URL is valid"
Here is the full output when the quality_control_specialist runs:

Received None or empty response from LLM call.
<frieda> debugging::: Invalid response from LLM call - None or empty.
🖇 AgentOps: Session Stats - Duration: 3m 18.7s | Cost: $0.215157 | LLMs: 21 | Tools: 12 | Actions: 0 | Errors: 0
Traceback (most recent call last):
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agent.py", line 297, in execute_task
    result = self.agent_executor.invoke(
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 93, in invoke
    formatted_answer = self._invoke_loop()
                       ^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 189, in _invoke_loop
    raise e
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 128, in _invoke_loop
    raise ValueError(
ValueError: Invalid response from LLM call - None or empty.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agent.py", line 297, in execute_task
    result = self.agent_executor.invoke(
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 93, in invoke
    formatted_answer = self._invoke_loop()
                       ^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 189, in _invoke_loop
    raise e
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 128, in _invoke_loop
    raise ValueError(
ValueError: Invalid response from LLM call - None or empty.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/bin/run_crew", line 8, in <module>
    sys.exit(run())
             ^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/src/wonder_multiagent/main.py", line 49, in run
    crew_output = WonderMultiagent().crew().kickoff(inputs=inputs)
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/crew.py", line 540, in kickoff
    result = self._run_sequential_process()
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/crew.py", line 647, in _run_sequential_process
    return self._execute_tasks(self.tasks)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/crew.py", line 745, in _execute_tasks
    task_output = task.execute_sync(
                  ^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/task.py", line 192, in execute_sync
    return self._execute_core(agent, context, tools)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/task.py", line 250, in _execute_core
    result = agent.execute_task(
             ^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agent.py", line 309, in execute_task
    result = self.execute_task(task, context, tools)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agent.py", line 309, in execute_task
    result = self.execute_task(task, context, tools)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agent.py", line 308, in execute_task
    raise e
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agent.py", line 297, in execute_task
    result = self.agent_executor.invoke(
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 93, in invoke
    formatted_answer = self._invoke_loop()
                       ^^^^^^^^^^^^^^^^^^^
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 189, in _invoke_loop
    raise e
  File "/Users/friedahuang/Documents/wonder/wonder_multiagent/.venv/lib/python3.12/site-packages/crewai/agents/crew_agent_executor.py", line 128, in _invoke_loop
    raise ValueError(
ValueError: Invalid response from LLM call - None or empty.
An error occurred while running the crew: Command '['uv', 'run', 'run_crew']' returned non-zero exit status 1.
Here is my crew.py for reference:

from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import EXASearchTool
from wonder_multiagent.tools.resume_read_tool import ResumeReadTool
from wonder_multiagent.tools.url_validator_tool import URLValidatorTool

# Instantiate tools
exa_search_tool = EXASearchTool(n_results=2)
resume_read_tool = ResumeReadTool()
url_validator_tool = URLValidatorTool()


@CrewBase
class WonderMultiagent:
    """WonderMultiagent crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    # Create agents
    @agent
    def job_finder(self) -> Agent:
        return Agent(
            config=self.agents_config["job_finder"],
            tools=[exa_search_tool],
            verbose=True,
        )

    @agent
    def resume_reader(self) -> Agent:
        return Agent(
            config=self.agents_config["resume_reader"],
            tools=[resume_read_tool],
            verbose=True,
        )

    @agent
    def matcher(self) -> Agent:
        return Agent(
            config=self.agents_config["matcher"],
            verbose=True,
        )

    @agent
    def quality_control_specialist(self) -> Agent:
        return Agent(
            config=self.agents_config["quality_control_specialist"],
            tools=[url_validator_tool],
            verbose=True,
        )

    # Define tasks
    @task
    def job_finding_task(self) -> Task:
        return Task(config=self.tasks_config["job_finding_task"])

    @task
    def resume_reading_task(self) -> Task:
        return Task(config=self.tasks_config["resume_reading_task"])

    @task
    def job_matching_task(self) -> Task:
        return Task(
            config=self.tasks_config["job_matching_task"],
            context=[self.resume_reading_task(), self.job_finding_task()],
        )

    @task
    def job_quality_control_task(self) -> Task:
        return Task(
            config=self.tasks_config["job_quality_control_task"],
            context=[self.job_matching_task()],
            output_file="output/jobs.md",
        )

    @crew
    def crew(self) -> Crew:
        """Creates the WonderMultiagent crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
            memory=True,
            memory_config={"provider": "mem0", "config": {"user_id": "friedahuang"}},
        )

I switched from claude-3-5-sonnet-20241022 to gpt-4o and it now works!
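Concretely, the switch was just the model setting (MODEL=gpt-4o instead of MODEL=claude-3-5-sonnet-20241022); none of the code above changed. If you only want a different model for a single agent, something along these lines should also work in crew.py; this is an untested sketch using crewai's LLM class:

from crewai import LLM

    @agent
    def quality_control_specialist(self) -> Agent:
        return Agent(
            config=self.agents_config["quality_control_specialist"],
            tools=[url_validator_tool],
            llm=LLM(model="gpt-4o"),  # pin this agent to gpt-4o instead of the crew-wide MODEL
            verbose=True,
        )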
Is there a way to configure a fallback, so that if one model fails to accomplish the task, another equally or more capable model can pick it up? I believe LiteLLM has this feature (Reliability - Retries, Fallbacks | liteLLM).
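In case it helps anyone experimenting in the meantime, this is roughly what the LiteLLM Router fallback setup looks like as I understand it from that page. The model group names are just examples, and as far as I can tell CrewAI doesn't expose this directly yet:

from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "primary",  # arbitrary group name for the main model
            "litellm_params": {"model": "anthropic/claude-3-5-sonnet-20241022"},
        },
        {
            "model_name": "backup",  # arbitrary group name for the fallback model
            "litellm_params": {"model": "openai/gpt-4o"},
        },
    ],
    fallbacks=[{"primary": ["backup"]}],  # if a call to "primary" fails, retry against "backup"
    num_retries=2,
)

response = router.completion(
    model="primary",
    messages=[{"role": "user", "content": "Validate this URL: https://example.com"}],
)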


Will let the CrewAI staff know. Thanks for the suggestion!
