Anyone managed to use mcp server with ollama model provider?

Hey, I have managed to get it working.
I found a patchy workaround, posted in this thread.

So basically it is not a crewai bug but a litellm bug
To fix this bug you need to go to this path in litellm package
.venv/lib/python3.11/site-packages/litellm/litellm_core_utils/prompt_templates/factory.py

Search for this comment

## MERGE CONSECUTIVE ASSISTANT CONTENT ##

And do these two code changes

After this you will be able to run your crews successfully using MCP as tools with Ollama as the LLM provider.

I have tested it on version

crewai                                   0.134.0
crewai-tools                             0.48.0

Here is my sample code snippet

from crewai import Agent, Task, Crew, LLM, Process
from crewai_tools import MCPServerAdapter
from mcp import StdioServerParameters
import os
# from langchain.llms import Ollama
from langchain_openai import ChatOpenAI

from crewai import LLM

# Launch configuration for the local stdio MCP server.
# "uv" can be swapped for your python3 executable, e.g. "python3".
_server_cmd = "uv"
_server_args = ["run", "/home/jon/doe/load_testing_stdio/server.py"]

server_params = StdioServerParameters(command=_server_cmd, args=_server_args)

with MCPServerAdapter(server_params) as tools:
    print(f"Available tools from Stdio MCP server: {[tool.name for tool in tools]}")

    # OpenAI-compatible client pointed at the local Ollama endpoint.
    # NOTE(review): the api_key is a dummy value — presumably Ollama ignores
    # it but the client requires one to be set.
    my_llm = ChatOpenAI(
        model="ollama/llama3.2",
        base_url="http://localhost:11434",
        api_key="sk-ollama",
        stream=True,
    )

    # Single agent wired up with the tools exposed by the MCP server.
    hash_agent = Agent(
        role="Hash Calculator",
        goal="compute hash of a given string",
        backstory="You are a hash calculator, you compute hash of a given string",
        tools=tools,
        llm=my_llm,
        verbose=True,
        allow_delegation=False,
    )

    # One task that exercises the hashing tool.
    hash_task = Task(
        description="Compute hash for the string 'hello world'",
        expected_output="return hash of specified string.",
        agent=hash_agent,
        verbose=True,
    )

    # Run everything sequentially and show the final answer.
    crew = Crew(
        agents=[hash_agent],
        tasks=[hash_task],
        process=Process.sequential,
        verbose=True,
    )
    print(crew.kickoff())

Here is screenshot of successful completion of my crew

In case you want to quickly test it, here is my MCP server

from typing import Any
import hashlib
import time
from loguru import logger
from mcp.server.fastmcp import FastMCP

# Initialize FastMCP server.
# "mcp-1" is the server name reported to connecting MCP clients.
mcp = FastMCP("mcp-1")

@mcp.tool()
def generate_md5_hash(input_str: str) -> str:
    """Return the hexadecimal MD5 digest of *input_str*.

    Sleeps for a fixed 20 seconds first — a deliberate delay so the tool
    behaves like a slow backend during load testing.
    """
    logger.info(f"Generating MD5 hash for: {input_str}")

    # Forced delay (intentional, see docstring).
    time.sleep(20)

    # Hash the UTF-8 bytes of the input in one shot.
    return hashlib.md5(input_str.encode('utf-8')).hexdigest()

@mcp.tool()
def count_characters(input_str: str) -> int:
    """Return how many characters *input_str* contains."""
    logger.info(f"Counting characters in: {input_str}")
    char_count = len(input_str)
    return char_count


@mcp.tool()
def get_first_half(input_str: str) -> str:
    """Return the first half of *input_str* (the shorter half when the length is odd)."""
    logger.info(f"Getting first half of: {input_str}")
    # Integer division floors the midpoint, dropping the middle character
    # of odd-length strings.
    return input_str[: len(input_str) // 2]


if __name__ == "__main__":
    # Initialize and run the server over stdio, so a parent process
    # (e.g. an MCP stdio client) can attach to it as a subprocess.
    mcp.run(transport='stdio')