MCP server tool - config, adapter example - in CrewAI CLI setup

Does anyone have an example of an MCP server tool configuration in a crew.py? You know, the standard crew.py, main.py (flow), tasks.yaml, agents.yaml setup.

Example:

import os
from typing import List

from crewai import Agent, Task
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.project import CrewBase, agent
from crewai_tools import MCPServerAdapter
from mcp import StdioServerParameters

server_params = StdioServerParameters(
    command="python",
    args=["scrape_mcp_server.py"],
    env={**os.environ},
)

with MCPServerAdapter(server_params) as tools:
    print(f"Available tools from Stdio MCP server: {[tool.name for tool in tools]}")

@CrewBase
class ScoutResearchCrew:
    """Scout Research Crew"""

    agents: List[BaseAgent]
    tasks: List[Task]

    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @agent
    def agent1(self) -> Agent:
        return Agent(
            config=self.agents_config["agent1"],
            verbose=True,
            tools=tools,
            embedder=ollama_embedder,
            llm=ollamallm,
        )
....
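
One thing worth flagging about the snippet above: the with block stops the MCP server as soon as the block exits, so by the time the agents actually run, the tools may already be disconnected. Below is a minimal, self-contained sketch (not from the original post, same server parameters as above) of keeping the adapter open for the crew's lifetime instead; it relies only on the MCPServerAdapter.tools and .stop() usage shown later in this thread.

import os

from crewai import Agent
from crewai.project import CrewBase, agent
from crewai_tools import MCPServerAdapter
from mcp import StdioServerParameters

server_params = StdioServerParameters(
    command="python",
    args=["scrape_mcp_server.py"],
    env={**os.environ},
)

# Open the adapter without a with-block so the stdio connection stays alive
# while the crew runs; call stop() yourself once kickoff() has finished.
mcp_adapter = MCPServerAdapter(server_params)
mcp_tools = mcp_adapter.tools

@CrewBase
class ScoutResearchCrew:
    """Scout Research Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def agent1(self) -> Agent:
        return Agent(
            config=self.agents_config["agent1"],
            tools=list(mcp_tools),  # live MCP tools, connection still open
            verbose=True,
        )

# ... define @task / @crew as usual, run kickoff(), and only then:
# mcp_adapter.stop()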

So, did you check out the examples @tonykipkemboi put out in the intro video? He goes over setting up local servers, and even shows how to link up with a remote Cloudflare docs server.

Hey Max, yep

but I don't want to use a math server; I want to use something exciting like an MCP scraping tool, a DuckDuckGo search MCP server, whatever Roo Code can dream up as an MCP server, etc.

Anyway, when I turn on debugging, the agents in the terminal say the tool doesn't have a search function or it's blocked, and then they fall back to simulated output, etc.

I have also given advanced LLM agents like Claude 4, GPT, and Gemini the info from the URLs below to analyze and use as a reference.

CrewAI MCP documentation: MCP Servers as Tools in CrewAI
Model Context Protocol: Introduction
CrewAI community: search results for "mcp"

No one can figure it out! :thinking: maybe Max can! :brain: :flexed_biceps:t2:

Oh, by the way, I'm keeping it cheap, just using Ollama with qwen3:4b.
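
Side note on the Ollama part: the first snippet references ollamallm and ollama_embedder without showing them. A rough sketch of what those usually look like with a local Ollama install follows; the base_url is Ollama's default endpoint and nomic-embed-text is just an example embedding model you'd have pulled, both assumptions rather than anything from the original post.

from crewai import LLM

# Local Ollama model via CrewAI's LLM wrapper (LiteLLM under the hood).
ollamallm = LLM(
    model="ollama/qwen3:4b",
    base_url="http://localhost:11434",  # default Ollama endpoint (assumption)
)

# CrewAI embedder config is a plain dict; swap in whatever embedding
# model you actually have pulled in Ollama.
ollama_embedder = {
    "provider": "ollama",
    "config": {"model": "nomic-embed-text"},
}

Also worth keeping in mind: very small local models like qwen3:4b often struggle to emit well-formed tool calls, which can look exactly like the "falls back to simulated output" behaviour described above.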

projects/scout/
├── main.py
└── scout_crews/
    └── research_crew/
        ├── __init__.py
        ├── crew.py
        ├── simple_duckduckgo_mcp.py
        └── config/
            ├── agents.yaml
            └── tasks.yaml

Here is one example of a DuckDuckGo MCP server I have tried;
I have many versions. :woozy_face:

# simple_duckduckgo_mcp.py - a simple DuckDuckGo MCP server
"""
A Simple (nothing is simple when it comes to this!) DuckDuckGo MCP Server for CrewAI.
Basic search functionality with minimal complexity.
"""

import json

from mcp.server.fastmcp import FastMCP
from duckduckgo_search import DDGS

# Initialize FastMCP server
mcp = FastMCP("DuckDuckGo-Search")

@mcp.tool()
def search_web(query: str, max_results: int = 5) -> str:
    """
    Search the web using DuckDuckGo.
    
    Args:
        query (str): Search query
        max_results (int): Maximum number of results (default: 5)
    
    Returns:
        str: JSON formatted search results
    """
    if not query or not query.strip():
        return json.dumps({
            "error": "Query cannot be empty",
            "results": []
        })
    
    try:
        # Simple search with basic timeout
        with DDGS() as ddgs:
            results = list(ddgs.text(
                keywords=query.strip(),
                max_results=min(max_results, 10)  # Cap at 10 results
            ))
        
        # Format results
        formatted_results = []
        for result in results:
            formatted_results.append({
                "title": result.get("title", ""),
                "url": result.get("href", ""),
                "snippet": result.get("body", "")
            })
        
        return json.dumps({
            "query": query,
            "total_results": len(formatted_results),
            "results": formatted_results
        }, indent=2)
    
    except Exception as e:
        return json.dumps({
            "error": f"Search failed: {str(e)}",
            "query": query,
            "results": []
        })

@mcp.tool()
def search_news(query: str, max_results: int = 3) -> str:
    """
    Search for news using DuckDuckGo.
    
    Args:
        query (str): Search query for news
        max_results (int): Maximum number of results (default: 3)
    
    Returns:
        str: JSON formatted news results
    """
    if not query or not query.strip():
        return json.dumps({
            "error": "Query cannot be empty",
            "results": []
        })
    
    try:
        with DDGS() as ddgs:
            results = list(ddgs.news(
                keywords=query.strip(),
                max_results=min(max_results, 8)  # Cap at 8 results
            ))
        
        # Format news results
        formatted_results = []
        for result in results:
            formatted_results.append({
                "title": result.get("title", ""),
                "url": result.get("url", ""),
                "snippet": result.get("body", ""),
                "date": result.get("date", ""),
                "source": result.get("source", "")
            })
        
        return json.dumps({
            "query": query,
            "total_results": len(formatted_results),
            "results": formatted_results
        }, indent=2)
    
    except Exception as e:
        return json.dumps({
            "error": f"News search failed: {str(e)}",
            "query": query,
            "results": []
        })

@mcp.tool()
def quick_answer(query: str) -> str:
    """
    Get a quick answer for a query.
    
    Args:
        query (str): Query to get answer for
    
    Returns:
        str: Quick answer or search result
    """
    if not query or not query.strip():
        return "Error: Query cannot be empty"
    
    try:
        with DDGS() as ddgs:
            results = list(ddgs.text(
                keywords=query.strip(),
                max_results=1
            ))
        
        if results:
            result = results[0]
            answer = f"Title: {result.get('title', 'No title')}\n"
            answer += f"Answer: {result.get('body', 'No description available')}\n"
            answer += f"Source: {result.get('href', 'No URL')}"
            return answer
        else:
            return f"No answer found for: {query}"
    
    except Exception as e:
        return f"Error getting answer: {str(e)}"

if __name__ == "__main__":
    # Run the server
    mcp.run(transport="stdio")
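
Before wiring a server like this into CrewAI, it can help to confirm that it actually exposes its tools over stdio. Here is a small test-client sketch using the official mcp Python SDK; the file name is taken from the directory tree above, and the query values are just placeholders.

import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main() -> None:
    params = StdioServerParameters(
        command="python",
        args=["simple_duckduckgo_mcp.py"],
    )
    # Spawn the server over stdio and open an MCP client session against it.
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print("Tools:", [t.name for t in tools.tools])
            result = await session.call_tool(
                "search_web", arguments={"query": "CrewAI MCP", "max_results": 2}
            )
            print(result.content)

if __name__ == "__main__":
    asyncio.run(main())

If list_tools() comes back empty here, the problem is in the server itself; if it looks fine, the issue is more likely on the adapter/agent side.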

Adding something else to this question:
Can there be an @MCP annotation in crew.py?
:backhand_index_pointing_right:t2: https://docs.crewai.com/how-to/using-annotations

Alright, folks. Here we go.

In this simple example I’ve whipped up to show you what’s what, the main idea is to have a local MCP server (that’s the server.py file) which basically just wraps the duckduckgo_search library. As a solid best practice, you’ll see a class used to standardize the tool’s output. You should definitely make it a habit when you’re building tools for LLMs.

For the client side, we’ve got a pretty straightforward crew that uses these tools. First off, it hits the internet for a search, and then it digs through the content of each result it finds. In this example, I’m bringing together both an MCP tool (that’s our server) and a traditional, out-of-the-box CrewAI tool. This is to show you how you can mix and match different kinds of tools to open up a whole bunch of possibilities.

And for a little something new, you’ll see I’m using a more souped-up version of ScrapeWebsiteTool. You can, and absolutely should, grab it here and save it in the same directory as your other files. This tool might even end up being a proposal to replace CrewAI’s original ScrapeWebsiteTool, 'cause it gives you options to limit context window usage (the original one just sends back the entire page content, no matter how huge it is). By the end of this week, @tonykipkemboi and I are gonna decide if it’s gonna make it into the next CrewAI version. For now, though, go ahead and download it to get a feel for it and give it a whirl.
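
The tool itself isn't reproduced in this thread, but to give a feel for the idea, here's a rough, hypothetical sketch of a scrape tool that caps its output at max_chars and supports a random_chunks mode, built on CrewAI's BaseTool. It is not the real VersatileScrapeWebsiteTool, just an illustration of the context-limiting concept; the parameter names mirror how the tool is configured in the crew.py below.

import random
from typing import Type

import requests
from bs4 import BeautifulSoup  # requires beautifulsoup4
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class ScrapeInput(BaseModel):
    website_url: str = Field(..., description="URL of the page to scrape")


class ChunkLimitedScrapeTool(BaseTool):
    """Illustrative stand-in for a context-limited scrape tool."""

    name: str = "Scrape website (context-limited)"
    description: str = "Fetches a web page and returns at most max_chars of its text."
    args_schema: Type[BaseModel] = ScrapeInput

    retrieval_mode: str = "random_chunks"  # or "head"
    max_chars: int = 6000
    chunk_size: int = 500

    def _run(self, website_url: str) -> str:
        html = requests.get(website_url, timeout=15).text
        text = BeautifulSoup(html, "html.parser").get_text(separator=" ", strip=True)

        if self.retrieval_mode == "head" or len(text) <= self.max_chars:
            return text[: self.max_chars]

        # random_chunks mode: sample fixed-size chunks until the character budget
        # is spent, so the agent sees a spread of the page, not just its beginning.
        chunks = [text[i:i + self.chunk_size] for i in range(0, len(text), self.chunk_size)]
        random.shuffle(chunks)
        picked, used = [], 0
        for chunk in chunks:
            if used + len(chunk) > self.max_chars:
                break
            picked.append(chunk)
            used += len(chunk)
        return "\n...\n".join(picked)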

One last new tidbit is the Task.markdown attribute, which was rolled out not too long ago.

To install the libraries you’ll need:

pip install --upgrade crewai "crewai-tools[mcp]" duckduckgo-search

Directory structure:

crewai_mcp_tool_example/
├── crew.py
├── server.py
└── versatile_scrape_website_tool.py

server.py file:

from typing import List, Optional, Dict

from pydantic import BaseModel, Field, ConfigDict
from mcp.server.fastmcp import FastMCP
from duckduckgo_search import DDGS
from duckduckgo_search.exceptions import DuckDuckGoSearchException


class DuckDuckGoSearchResultItem(BaseModel):
    """Represents a single search result item from DuckDuckGo."""

    href: str
    title: str
    body: str
    model_config = ConfigDict(extra="forbid")


class DuckDuckGoSearchToolOutput(BaseModel):
    """Standardized output for the duckduckgo_text_search tool."""

    query_used: Optional[str] = Field(
        default=None, description="The query string used for the search."
    )
    search_results: List[DuckDuckGoSearchResultItem] = Field(
        default_factory=list,
        description=(
            "A list of search results. Empty if no results were found "
            "or an error occurred."
        ),
    )
    error_message: Optional[str] = Field(
        default=None,
        description=(
            "An error message if the search process failed, "
            "no query was provided, or no results were found."
        ),
    )
    model_config = ConfigDict(extra="forbid", validate_assignment=True)

    def to_llm_response(self) -> str:
        """Converts the output to a JSON string for the LLM."""
        return self.model_dump_json(exclude_none=True, indent=2)


mcp_service = FastMCP(
    name="DuckDuckGoSearchService",
    description="Provides DuckDuckGo web search functionality.",
)


@mcp_service.tool()
def duckduckgo_text_search(query: Optional[str] = None) -> str:
    """
    Performs a DuckDuckGo web search for the given query.
    If no query is provided, an error message is returned.
    The results are returned as a JSON string based on the
    DuckDuckGoSearchToolOutput model.

    Args:
        query: The search query string. This is optional. If not
               provided or empty, an error will be indicated in the
               output.

    Returns:
        A JSON string representing the search outcome, including
        results or an error message.
    """
    if not query or not query.strip():
        error_msg = "Query parameter is missing or empty."
        print(f"[MCP Server] Error: {error_msg}")
        output = DuckDuckGoSearchToolOutput(
            query_used=query if query else None, error_message=error_msg
        )
        return output.to_llm_response()

    print(f"[MCP Server] Received text_search request: query='{query}'")

    # Hardcoded parameters for simplicity
    region: str = "wt-wt"
    safesearch: str = "moderate"
    timelimit: Optional[str] = None
    max_results: int = 5  # Fetch top 5 results

    try:
        ddgs_client = DDGS(timeout=10)
        # ddgs.text returns List[Dict] or None
        raw_results: Optional[List[Dict[str, str]]] = ddgs_client.text(
            keywords=query,
            region=region,
            safesearch=safesearch,
            timelimit=timelimit,
            max_results=max_results,
        )

        if raw_results:
            parsed_results = [
                DuckDuckGoSearchResultItem(**item) for item in raw_results
            ]
            output = DuckDuckGoSearchToolOutput(
                query_used=query, search_results=parsed_results
            )
            print(
                f"[MCP Server] Found {len(parsed_results)} results "
                f"for query: '{query}'"
            )
        else:
            # No results found by DDGS, or DDGS returned None
            msg = "No results found for the query."
            print(f"[MCP Server] {msg} Query: '{query}'")
            output = DuckDuckGoSearchToolOutput(
                query_used=query,
                error_message=msg,
                # search_results is an empty list by default
            )
    except DuckDuckGoSearchException as e:
        error_msg = f"DuckDuckGo search API error: {str(e)}"
        print(f"[MCP Server] Error: {error_msg}")
        output = DuckDuckGoSearchToolOutput(
            query_used=query, error_message=error_msg
        )
    except Exception as e:
        # Catch any other unexpected errors during the process
        error_msg = (
            f"An unexpected error occurred in "
            f"duckduckgo_text_search: {str(e)}"
        )
        print(f"[MCP Server] Error: {error_msg}")
        output = DuckDuckGoSearchToolOutput(
            query_used=query, error_message=error_msg
        )

    return output.to_llm_response()


if __name__ == "__main__":
    print(
        "Starting DuckDuckGo Search MCP Server via Stdio...\n"
        "This server will listen for MCP messages on stdin/stdout."
    )
    mcp_service.run(transport="stdio")

crew.py file:

from crewai import Agent, Task, Crew, LLM, Process
from crewai_tools import MCPServerAdapter
from mcp import StdioServerParameters
from versatile_scrape_website_tool import VersatileScrapeWebsiteTool as ScrapeWebsiteTool
import os
import warnings
from pydantic.warnings import PydanticDeprecatedSince20

warnings.filterwarnings("ignore", category=PydanticDeprecatedSince20)

os.environ["GEMINI_API_KEY"] = "<YOUR_GEMINI_API_KEY>"

ddgs_stdio_server_params = StdioServerParameters(
    command="python3",
    args=["server.py"],
    env={"UV_PYTHON": "3.12", **os.environ},
)

ddgs_mcp_server_adapter = MCPServerAdapter(
    serverparams=ddgs_stdio_server_params
)

# The adapter is created before the try-block so the finally clause below
# can always call stop() safely, even if later setup fails.
try:
    ddgs_mcp_tools = ddgs_mcp_server_adapter.tools

    scrape_website_tool = ScrapeWebsiteTool(
        retrieval_mode="random_chunks",
        max_chars=6000
    )

    all_tools = [*ddgs_mcp_tools, scrape_website_tool]

    gemini_llm = LLM(
        model="gemini/gemini-2.5-flash-preview-04-17",
        temperature=0.7,
        timeout=30,
    )

    study_guide_agent = Agent(
        role="Expert Study Guide Creator",
        goal=(
            "To research topics online and create structured learning "
            "paths that guide students from basic to advanced concepts."
        ),
        backstory=(
            "You are a seasoned educator with expertise in curriculum "
            "design. You excel at breaking down complex topics into "
            "digestible learning steps, finding the best online resources, "
            "and creating progressive study plans that build knowledge "
            "systematically from fundamentals to advanced concepts."
        ),
        tools=all_tools,
        llm=gemini_llm,
        allow_delegation=False,
        verbose=True,
    )

    learning_path_task = Task(
        description=(
            "The user has interest in better learning about:\n\n"
            "{user_interest}\n\n"
            "Using EXCLUSIVELY the available tools, you must:\n\n"
            "1. Search the internet for interesting sites about it\n"
            "2. Scrape EVERY searched site to evaluate the knowledge "
            "offered there\n"
            "3. Create a study plan for the user with 3 to 5 steps "
            "for learning, from least to most complex, including a "
            "brief summary of what they'll learn at each step and "
            "recommended URLs from those you visited."
        ),
        expected_output=(
            "A comprehensive study plan with 3-5 progressive learning "
            "steps, each containing: step title, learning objectives, "
            "content summary, and recommended website URL. Steps should "
            "flow from basic concepts to advanced applications."
        ),
        markdown=True,
        agent=study_guide_agent,
    )

    study_guide_crew = Crew(
        agents=[study_guide_agent],
        tasks=[learning_path_task],
        process=Process.sequential,
        verbose=True,
    )

    user_interest = "Quantum Physics for Beginners"

    learning_result = study_guide_crew.kickoff(
        inputs={"user_interest": user_interest}
    )
    
    print(learning_result.raw)
finally:
    ddgs_mcp_server_adapter.stop()

To get it running, just pop open your terminal and run python3 crew.py – it’ll take care of firing up the server. Hope this helps y’all out!

This will be a challenge to abstract out completely into an annotation. MCP is changing so much that it would keep breaking.
