G’day team,
Watching the way Cursor AI assists in writing code for CrewAI, it seems logical that we establish a best-practice .cursorrules file to reduce hallucinations and give a more streamlined coding experience.
I used a combination of Perplexity and GPT-4o to draft this version. I’d be interested to hear what other people use in their .cursorrules files. Maybe we can get this optimised so it always suggests real tools and always keeps the YAML config structure and details consistent!
“”"
CursorRules for CrewAI Development
Version: 1.0.0
“”"
RULES = {
    "file_structure": {
        "MUST": [
            "Place agent definitions in config/agents.yaml",
            "Place task definitions in config/tasks.yaml",
            "Define crew orchestration logic in crew.py",
            "Store custom tools in tools/ directory",
            "Keep main execution flow in main.py"
        ]
    },
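    # Illustrative project layout satisfying the file_structure rules above
    # (names other than the required files are hypothetical):
    #   my_crew/
    #     config/agents.yaml
    #     config/tasks.yaml
    #     tools/custom_search_tool.py
    #     crew.py
    #     main.py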
"model_selection": {
"MUST": [
"Use gpt-4o-mini for basic tasks and simple interactions",
"Use gpt-4o for complex agentic tasks requiring deep reasoning",
"Set temperature=0.7 for creative tasks",
"Set temperature=0.1 for analytical tasks"
]
},
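    # Illustrative wiring for the model rules above; assumes langchain-openai's
    # ChatOpenAI wrapper, which crewAI accepts as an agent llm:
    #   from langchain_openai import ChatOpenAI
    #   analytical_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.1)
    #   creative_llm = ChatOpenAI(model="gpt-4o", temperature=0.7)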
"agents_yaml": {
"MUST": """
researcher:
role: “Research Analyst”
goal: “Gather accurate and relevant information”
backstory: “Expert research analyst with extensive experience”
tools:
- SerperDevTool
- WebsiteSearchTool
allow_delegation: false
verbose: true
“”"
},
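    # Binding the YAML above inside crew.py (illustrative; uses the CrewBase
    # pattern, where agents_config is loaded from config/agents.yaml):
    #   @agent
    #   def researcher(self) -> Agent:
    #       return Agent(config=self.agents_config["researcher"])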
"tasks_yaml": {
"MUST": """
research_task:
description: “Conduct comprehensive research on the topic”
agent: researcher
expected_output: “Detailed research findings in markdown format”
tools:
- SerperDevTool
async_execution: false
output_file: “research_output.md”
“”"
},
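    # Matching task binding in crew.py (illustrative):
    #   @task
    #   def research_task(self) -> Task:
    #       return Task(config=self.tasks_config["research_task"])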
"tool_implementation": {
"MUST": """
from crewai.tools import BaseTool
class CustomSearchTool(BaseTool):
name: str = “search_tool”
description: str = “Performs web searches using specified parameters”
def _run(self, query: str) -> str:
# Implementation
return "Search results"
“”"
},
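    # Using the custom tool (illustrative): pass an instance in the agent's
    # tools list, e.g.:
    #   researcher = Agent(config=self.agents_config["researcher"],
    #                      tools=[CustomSearchTool()])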
"supported_tools": {
"MUST_USE_ONLY": [
"DirectoryReadTool",
"FileReadTool",
"SerperDevTool",
"WebsiteSearchTool",
"CodeInterpreterTool",
"BrowserbaseTool",
"FirecrawlTools (CrawlWebsite, ScrapeWebsite, Search)"
]
},
"crew_implementation": {
"MUST": """
from crewai.project import CrewBase, agent, task, crew
from crewai import Process
@CrewBase
class ResearchCrew:
"""Research crew for gathering and analyzing information"""
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
@crew
def crew(self) -> Crew:
return Crew(
agents=self.agents,
tasks=self.tasks,
process=Process.sequential,
verbose=2
)
“”"
},
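    # A matching main.py entry point (illustrative):
    #   from crew import ResearchCrew
    #
    #   if __name__ == "__main__":
    #       result = ResearchCrew().crew().kickoff(inputs={"topic": "AI agents"})
    #       print(result)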
"decorators": {
"MUST_USE": [
"@CrewBase for crew class definition",
"@crew for crew formation method",
"@agent for agent creation methods",
"@task for task definition methods",
"@before_kickoff for input preprocessing",
"@after_kickoff for output processing"
]
},
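    # Illustrative kickoff hook (available in recent crewAI releases):
    #   @before_kickoff
    #   def prepare_inputs(self, inputs):
    #       inputs["topic"] = inputs.get("topic", "default topic")
    #       return inputs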
"process_control": {
"MUST": [
"Use Process.sequential for linear task execution",
"Use Process.hierarchical for manager-delegated tasks",
"Set verbose=2 for detailed execution logging",
"Implement proper error handling with try/except blocks",
"Use callbacks for task completion monitoring"
]
},
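    # Hierarchical runs need a manager model (illustrative):
    #   Crew(agents=self.agents, tasks=self.tasks,
    #        process=Process.hierarchical, manager_llm="gpt-4o")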
"tool_integration": {
"MUST": """
from crewai_tools import (
DirectoryReadTool,
SerperDevTool,
WebsiteSearchTool
)
tools = [
DirectoryReadTool(directory=‘./data’),
SerperDevTool(),
WebsiteSearchTool()
]
“”"
},
"error_handling": {
"MUST": """
try:
crew_result = crew.kickoff()
except Exception as e:
logging.error(f"Crew execution failed: {str(e)}“)
raise CrewExecutionError(f"Failed to execute crew: {str(e)}”)
“”"
},
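    # CrewExecutionError is not part of crewAI; define it yourself (illustrative):
    #   class CrewExecutionError(Exception):
    #       '''Raised when a crew run fails.'''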
"output_handling": {
"MUST": """
@after_kickoff
def process_output(self, output):
"""Process crew output"""
if isinstance(output, str):
return {“result”: output}
return output
“”"
},
"dependencies": {
"MUST": [
"crewai>=0.11.0",
"openai>=1.3.0",
"python-dotenv>=1.0.0",
"langchain>=0.1.0",
"tavily-python>=0.3.0",
"wikisearch>=1.0.0",
"serper-python>=0.2.0",
"aiohttp>=3.8.0",
"tenacity>=8.2.0",
"pydantic>=2.0.0"
]
}
}