I created a MCP server that exposes basic mathematical operations. The crew must solve an operation whose name is unknown to the LLM (a “globul” operation) to force a tool call. The correct tool is called and the return value is received. After the call, the crew status indicates “X LLM Failed” in the terminal log. The server doesn’t crash.
System: Windows 11
python version: 3.12.2
crewai version: 0.141.0
LLM: ollama/deepseek-r1:14b
Server:
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("Math", stateless_http=True, host="127.0.0.1", port=8000)
@mcp.tool()
def add(a: float, b: float) -> float:
"""Add two numbers (ints or floats)"""
return a + b
@mcp.tool()
def subtract(a: float, b: float) -> float:
"""Subtract b from a (ints or floats)"""
return a - b
@mcp.tool()
def globul(a: float, b: float) -> float:
"""
Returns the globul of a and b
:param a: First number
:param b: Second number
:return: The globul of a and b
"""
return a * b
if __name__ == "__main__":
mcp.run(transport="streamable-http")
Client:
import os
import warnings
from pydantic import PydanticDeprecatedSince20
warnings.filterwarnings("ignore", category=PydanticDeprecatedSince20)
from crewai import Agent, Task, Crew, LLM
from crewai_tools import MCPServerAdapter
from dotenv import load_dotenv
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s [%(levelname)s] %(message)s')
load_dotenv()
# Check for .env variables
ollama_model_name = os.getenv('OLLAMA_MODEL_NAME')
if not ollama_model_name:
raise EnvironmentError(f"OLLAMA_MODEL_NAME not found in the environment variables")
ollama_api_base = os.getenv('OLLAMA_API_BASE')
if not ollama_api_base:
raise EnvironmentError(f"OLLAMA_API_BASE not found in the environment variables")
crewai_disable_telemetry = os.getenv('CREWAI_DISABLE_TELEMETRY')
if not crewai_disable_telemetry:
raise EnvironmentError(f"CREWAI_DISABLE_TELEMETRY not found in the environment variables")
# Configure the LLM to use Ollama
llm = LLM(
model=ollama_model_name,
base_url=ollama_api_base,
temperature=0.1,
)
server_param = {
"url": "http://localhost:8000/mcp",
"transport": "streamable-http"
}
def main():
with MCPServerAdapter(server_param) as tools:
print(f"Available tools from streamable MCP server: {[tool.name for tool in tools]}")
agent = Agent(
role="Mathematician",
goal="Perform mathematical operations. The current goal description is: {problem}",
backstory="An experienced mathematician who can perform mathematical operations",
tools=tools,
verbose=True,
llm=llm,
allow_delegation=True,
)
task = Task(
description="Solve the following math problem: '{problem}'.",
expected_output="The correct answer to the math problem using the available tools. Formulate as a sentence. Ex.: 'The solution is -8.65'",
agent=agent,
llm=llm,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
result = crew.kickoff(inputs={"problem": "The globul of 2.1 and 3.4"})
logging.info(f"result: {result}")
if __name__ == '__main__':
main()
Server terminal log:
INFO: Started server process [36016]
INFO: Waiting for application startup.
[07/16/25 10:18:18] INFO StreamableHTTP session manager started streamable_http_manager.py:111
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:36108 - "POST /mcp HTTP/1.1" 307 Temporary Redirect
INFO: 127.0.0.1:36108 - "POST /mcp/ HTTP/1.1" 200 OK
INFO: 127.0.0.1:36110 - "POST /mcp HTTP/1.1" 307 Temporary Redirect
INFO: 127.0.0.1:36110 - "POST /mcp/ HTTP/1.1" 202 Accepted
INFO: 127.0.0.1:36112 - "POST /mcp HTTP/1.1" 307 Temporary Redirect
INFO: 127.0.0.1:36112 - "POST /mcp/ HTTP/1.1" 200 OK
[07/16/25 10:18:46] INFO Processing request of type ListToolsRequest server.py:625
INFO: 127.0.0.1:36114 - "POST /mcp HTTP/1.1" 307 Temporary Redirect
INFO: 127.0.0.1:36114 - "POST /mcp/ HTTP/1.1" 200 OK
INFO Processing request of type ListToolsRequest server.py:625
INFO: 127.0.0.1:36120 - "POST /mcp HTTP/1.1" 307 Temporary Redirect
INFO: 127.0.0.1:36120 - "POST /mcp/ HTTP/1.1" 200 OK
[07/16/25 10:18:57] INFO Processing request of type CallToolRequest server.py:625
Client terminal log:
C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\pydantic\fields.py:1093: PydanticDeprecatedSince20: Using extra keyword arguments on `Field` is deprecated and will be removed. Use `json_schema_extra` instead. (Extra keys: 'required'). Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.11/migration/
warn(
C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\pydantic\fields.py:1093: PydanticDeprecatedSince20: Using extra keyword arguments on `Field` is deprecated and will be removed. Use `json_schema_extra` instead. (Extra keys: 'items', 'anyOf', 'enum', 'properties'). Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.11/migration/
warn(
Available tools from streamable MCP server: ['add', 'subtract', 'globul']
╭─────────────────────────────────────── Crew Execution Started ───────────────────────────────────────╮
│ │
│ Crew Execution Started │
│ Name: crew │
│ ID: b38ec0c9-94ea-4b3f-b7f4-98ddb1df8c5a │
│ Tool Args: │
│ │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
🚀 Crew: crew
└── 📋 Task: bbebd16f-df4a-468d-aedb-3580733f6d17
Status: Executing Task...
╭────────────────────────────────────────── 🤖 Agent Started ──────────────────────────────────────────╮
│ │
│ Agent: Mathematician │
│ │
│ Task: Solve the following math problem: 'The globul of 2.1 and 3.4'. │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
🚀 Crew: crew
└── 📋 Task: bbebd16f-df4a-468d-aedb-3580733f6d17
Status: Executing Task...
└── 🔧 Used globul (1)
╭────────────────────────────────────── 🔧 Agent Tool Execution ───────────────────────────────────────╮
│ │
│ Agent: Mathematician │
│ │
│ Thought: <think> │
│ Okay, so I need to solve this math problem: 'The globul of 2.1 and 3.4'. Hmm, I'm not exactly sure │
│ what "globul" means in a mathematical context. Let me check the tools available. │
│ Looking at the tools provided, there's add, subtract, and globul. The add tool adds two numbers, │
│ subtract subtracts one from another, but the globul tool is described as returning the globul of a │
│ and b without any further explanation. Since I don't have a definition for "globul", maybe it's a │
│ custom operation defined by these tools. │
│ I think the best approach is to use the globul tool directly with the given numbers 2.1 and 3.4. │
│ So, I'll call the globul function with a=2.1 and b=3.4. Let me do that. │
│ </think> │
│ Thought: The problem requires finding the globul of 2.1 and 3.4 using the available tools. │
│ │
│ Using Tool: globul │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
╭───────────────────────────────────────────── Tool Input ─────────────────────────────────────────────╮
│ │
│ "{\"a\": 2.1, \"b\": 3.4}" │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
╭──────────────────────────────────────────── Tool Output ─────────────────────────────────────────────╮
│ │
│ 7.14 │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
🚀 Crew: crew
└── 📋 Task: bbebd16f-df4a-468d-aedb-3580733f6d17
Status: Executing Task...
├── 🔧 Used globul (1)
└── ❌ LLM Failed
An unknown error occurred. Please check the details below.
🚀 Crew: crew
└── 📋 Task: bbebd16f-df4a-468d-aedb-3580733f6d17
Assigned to: Mathematician
Status: ❌ Failed
├── 🔧 Used globul (1)
└── ❌ LLM Failed
╭──────────────────────────────────────────── Task Failure ────────────────────────────────────────────╮
│ │
│ Task Failed │
│ Name: bbebd16f-df4a-468d-aedb-3580733f6d17 │
│ Agent: Mathematician │
│ Tool Args: │
│ │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
╭──────────────────────────────────────────── Crew Failure ────────────────────────────────────────────╮
│ │
│ Crew Execution Failed │
│ Name: crew │
│ ID: b38ec0c9-94ea-4b3f-b7f4-98ddb1df8c5a │
│ Tool Args: │
│ Final Output: │
│ │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
Traceback (most recent call last):
response = base_llm_http_handler.completion(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
data = provider_config.transform_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
modified_prompt = ollama_pt(model=model, messages=messages)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
tool_calls = messages[msg_i].get("tool_calls")
~~~~~~~~^^^^^^^
IndexError: list index out of range
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\streamable_http_client.py", line 67, in <module>
main()
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\streamable_http_client.py", line 63, in main
result = crew.kickoff(inputs={"problem": "The globul of 2.1 and 3.4"})
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\crew.py", line 669, in kickoff
result = self._run_sequential_process()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\crew.py", line 780, in _run_sequential_process
return self._execute_tasks(self.tasks)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\crew.py", line 883, in _execute_tasks
task_output = task.execute_sync(
^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\task.py", line 351, in execute_sync
return self._execute_core(agent, context, tools)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\task.py", line 499, in _execute_core
raise e # Re-raise the exception after emitting the event
^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\task.py", line 415, in _execute_core
result = agent.execute_task(
^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agent.py", line 459, in execute_task
raise e
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agent.py", line 435, in execute_task
result = self._execute_without_timeout(task_prompt, task)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agent.py", line 531, in _execute_without_timeout
return self.agent_executor.invoke(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 125, in invoke
raise e
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 114, in invoke
formatted_answer = self._invoke_loop()
^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 211, in _invoke_loop
raise e
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\agents\crew_agent_executor.py", line 157, in _invoke_loop
answer = get_llm_response(
^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\utilities\agent_utils.py", line 164, in get_llm_response
raise e
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\utilities\agent_utils.py", line 153, in get_llm_response
answer = llm.call(
^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\llm.py", line 976, in call
return self._handle_non_streaming_response(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\070038649\Documents\projects\tutorial_agentic_ai\math_server\.venv\Lib\site-packages\crewai\llm.py", line 782, in _handle_non_streaming_response
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
raise e
result = original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
raise exception_type(
^^^^^^^^^^^^^^^
raise e
raise APIConnectionError(
.APIConnectionError: