What LLMs does CrewAI support? Getting the error "LLM value is an unknown object"

I am building a stock, forex, and crypto research multi-agent application.
Let me know which LLMs CrewAI supports apart from OpenAI; I am looking for a free LLM.
code1:

ollama_llm = OllamaLLM(model="llama2")
agent = Agent(
    role=value["role"],
    goal=value["goal"],
    backstory=value["backstory"],
    tools=tools,
    llm=ollama_llm,  # ✅ Pass Ollama LLM here
    verbose=True
)
code2:
groq_llm = ChatGroq(
    model_name="mixtral-8x7b-32768",
    api_key=GROQ_API_KEY
)
agent = Agent(
    role=value["role"],
    goal=value["goal"],
    backstory=value["backstory"],
    tools=tools,
    llm=groq_llm,
    verbose=True
)

I tried multiple options and am still getting the error:

LLM value is an unknown object

Provider List: https://docs.litellm.ai/docs/providers

ERROR:root:Failed to get supported params: argument of type 'NoneType' is not iterable
Error in CrewAI analysis: 'str' object has no attribute 'get'

Here is the list of supported LLMs from the CrewAI docs: LLMs - CrewAI

This might help you.
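The "unknown object" error usually means CrewAI could not recognize what you passed as llm: under the hood it routes everything through LiteLLM, so it expects either a provider-prefixed model string or an instance of CrewAI's own LLM class, not a LangChain object. A minimal sketch of that pattern (assuming a recent CrewAI version; the role/goal values below are placeholders):

from crewai import LLM, Agent

# Hosted Groq (free tier available); the "groq/" prefix tells LiteLLM which provider to call
groq_llm = LLM(
    model="groq/mixtral-8x7b-32768",
    api_key=GROQ_API_KEY,  # assumes your Groq key is in this variable
)

# Local Ollama; the "ollama/" prefix plus the default local endpoint
ollama_llm = LLM(
    model="ollama/llama2",
    base_url="http://localhost:11434",
)

agent = Agent(
    role="Researcher",            # placeholder values
    goal="Analyze stocks",
    backstory="A market analyst",
    llm=groq_llm,                 # or ollama_llm
    verbose=True,
)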
Like it if it's of any help :innocent:

Thank you Mayank, I am not using local LLMs.

The CrewAI docs mention that most LLMs work; in reality that is not true. I have written a wrapper class and tried to create a handler, and nothing worked out. I am not sure whether they have tested it or not. I am still struggling to fix this error:

Provider List: Providers | liteLLM

ERROR:root:Failed to get supported params: argument of type 'NoneType' is not iterable
Error in CrewAI analysis: 'str' object has no attribute 'get'

Providing my code below. @crewai team, please help in fixing this issue, and thanks in advance.
code:
import os
import yaml
from crewai import Crew, Process, Agent, Task, LLM  # LLM added to the import; it is used below
from tools.pdf_report_tool import generate_stock_report
from tools.market_data_tool import fetch_market_data
from tools.fundamental_tool import analyze_fundamentals
from tools.technical_tool import analyze_technical_strength
from tools.risk_tool import assess_risk
from tools.trade_plan_tool import generate_trade_plan
#from langchain_ollama import OllamaLLM # :white_check_mark: Updated Ollama import
#from langchain_community.llms import Ollama
from langchain_ollama import OllamaLLM
#from langchain.llms import LLM
#from langchain_community.llms import LLM
from langchain_mistralai import ChatMistralAI
api_key = "111"
mistral_model = "codestral-latest"
llm_mstral = ChatMistralAI(model=mistral_model, temperature=0, api_key=api_key)
MISTRAL_API_KEY = "111"
#from litellm import LLM
from litellm import completion
from typing import List
import requests
import json

from langchain.schema import SystemMessage, HumanMessage

# ------------------- Utility Function -------------------

# Create a function wrapper for litellm completion

def litellm_handler(messages: List[str | SystemMessage | HumanMessage], **kwargs):
    try:
        formatted_messages = []
        for message in messages:
            if isinstance(message, SystemMessage):
                formatted_messages.append({"role": "system", "content": message.content})
            elif isinstance(message, HumanMessage):
                formatted_messages.append({"role": "user", "content": message.content})
            elif isinstance(message, str):
                formatted_messages.append({"role": "user", "content": message})

        response = completion(
            model="mistral/mistral-large-latest",
            messages=formatted_messages,
            temperature=0.7,
            api_key=MISTRAL_API_KEY
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"LiteLLM Error: {str(e)}")
        return "Error processing request"

class MistralWrapper:
    def __init__(self, api_key):
        self.api_key = api_key
        self.url = "https://api.mistral.ai/v1/chat/completions"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}"
        }

def __call__(self, prompt, **kwargs):
    try:
        if isinstance(prompt, list):
            messages = prompt
        else:
            messages = [{"role": "user", "content": prompt}]

        data = {
            "model": "mistral-large-latest",
            "messages": messages,
            "temperature": 0.7
        }

        response = requests.post(
            self.url,
            headers=self.headers,
            data=json.dumps(data)
        )
        
        if response.status_code == 200:
            return response.json()['choices'][0]['message']['content']
        else:
            return f"Error: {response.status_code}"
            
    except Exception as e:
        print(f"Error: {str(e)}")
        return "Error processing request"

def get_mistral_response(**kwargs):
    response = completion(
        model="mistral/mistral-large-latest",
        messages=[{"role": "user", "content": kwargs.get("prompt", "")}],
        temperature=0.7,
        api_key=MISTRAL_API_KEY
    )
    return response.choices[0].message.content

def load_yaml(filename):
    """Loads agents or tasks from a YAML file."""
    base_path = os.path.dirname(__file__)
    full_path = os.path.join(base_path, "config", filename)
    with open(full_path, "r") as file:
        return yaml.safe_load(file)

# ------------------- Crew Creation -------------------

def create_stock_advisor_crew(stock):
    """Create a Crew for stock analysis using Ollama LLM."""
    # Initialize Mistral wrapper
    mistral = MistralWrapper(api_key=MISTRAL_API_KEY)
    # Initialize Ollama LLM
    #ollama_llm = OllamaLLM(model="llama2")  # ✅ Updated Ollama usage

    mistral_llm = LLM(
        model="mistral/mistral-large-latest",
        temperature=0.7,
        api_key=MISTRAL_API_KEY
    )

    # Tool mapping
    tool_mapping = {
        "fetch_market_data": fetch_market_data,
        "analyze_fundamentals": analyze_fundamentals,
        "analyze_technical_strength": analyze_technical_strength,
        "assess_risk": assess_risk,
        "generate_trade_plan": generate_trade_plan
    }

    # Load configurations
    agents_config = load_yaml("agents.yaml")
    tasks_config = load_yaml("tasks.yaml")

    # Create agents
    # Earlier attempt, kept for reference: passing the raw litellm_handler
    # function as llm was one of the variants that failed.
    # agent = Agent(
    #     role=value["role"],
    #     goal=value["goal"],
    #     backstory=value["backstory"],
    #     tools=tools,
    #     llm=litellm_handler,
    #     allow_delegation=True,
    #     verbose=True
    # )

    agents = {}
    for key, value in agents_config.items():
        tools = [tool_mapping[tool] for tool in value.get("tools", [])]
        agent = Agent(
            role=value["role"],
            goal=value["goal"],
            backstory=value["backstory"],
            tools=tools,
            llm=mistral,  # also tried: litellm_handler, get_mistral_response, mistral_llm, llm_mstral, ollama_llm
            #allow_delegation=True,
            verbose=True
        )
        agents[value["role"]] = agent

    # Create tasks
    tasks = []
    for key, value in tasks_config.items():
        if value["agent"] in agents:
            task = Task(
                description=f"{value['description']} Stock: {stock}",
                expected_output=value['expected_output'],
                agent=agents[value["agent"]],
                context=[stock, key]  # NOTE: Task context normally expects other Task objects; strings here may be what raises "'str' object has no attribute 'get'"
            )
            tasks.append(task)

    # Create crew
    crew = Crew(
        agents=list(agents.values()),
        tasks=tasks,
        process=Process.sequential,
        verbose=True
    )

    return crew

# ------------------- Stock Analysis Function -------------------

def analyze_stock(stock):
    """Analyze a single stock and return results."""
    print(f"🔍 Analyzing {stock}…")

    # Run individual tool analyses
    market_data = fetch_market_data._run(stock, "stock")
    fundamentals = analyze_fundamentals._run(stock)
    technicals = analyze_technical_strength._run(stock)
    risk_analysis = assess_risk._run(stock)
    trade_plan = generate_trade_plan._run(stock)

    # Combine individual analyses
    stock_result = {
        "symbol": stock,
        "market_data": market_data,
        "fundamentals": fundamentals,
        "technicals": technicals,
        "risk_analysis": risk_analysis,
        "trade_plan": trade_plan
    }

    # Run CrewAI analysis
    try:
        crew = create_stock_advisor_crew(stock)
        crew_result = crew.kickoff()
        stock_result["crew_analysis"] = crew_result
        print(f"\n📊 CrewAI Final Report for {stock}:\n{crew_result}")
    except Exception as e:
        print(f"Error in CrewAI analysis: {str(e)}")
        stock_result["crew_analysis"] = f"Error: {str(e)}"

    return stock_result

# ------------------- Main Function -------------------

def main():
    """Main function to analyze stocks."""
    stock = "HDFCBANK.NS"
    results = []

    # Analyze single stock
    result = analyze_stock(stock)
    results.append(result)

if __name__ == "__main__":
    main()

What models are you using? It seems it is through Ollama, but you mention that it is not local?

I tried all the options. Please share any code snippet where I can use a free LLM with the CrewAI framework. I tried local Ollama, but due to a local space issue … I was not able to test it.
agent = Agent(
    role=value["role"],
    goal=value["goal"],
    backstory=value["backstory"],
    tools=tools,
    llm=mistral,  # also tried: litellm_handler, get_mistral_response, mistral_llm, llm_mstral, ollama_llm
    #allow_delegation=True,
    verbose=True
)

I think you're going about this the wrong way. You can't run an LLM locally for free without enough memory to do so. You will have to use a hosted LLM if you don't have enough memory. Also, please share a repo with your full code so we can better troubleshoot it for you.
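In the meantime, if memory is the blocker, a hosted free tier is the quickest route. A minimal sketch using CrewAI's built-in LLM class with Mistral's hosted API (assuming a recent CrewAI version and that MISTRAL_API_KEY holds a valid key):

from crewai import LLM

# Hosted Mistral via LiteLLM; the "mistral/" prefix selects the provider
mistral_llm = LLM(
    model="mistral/mistral-large-latest",
    temperature=0.7,
    api_key=MISTRAL_API_KEY,
)

# then pass it straight to your agents:
# agent = Agent(..., llm=mistral_llm, verbose=True)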

Thank you, Tony.
I am sharing the code. Initially I was using main.py and crew.py separately; all the code was merged into main.py to test it. You can help me create separate files.
I am looking for your help in getting this code working for ollama_llm. How can I share the code?
Let me know your email ID so that I can attach the code.
Thanks in advance, Tony.

@crewai staff Tony,
please provide me the email ID where I can share the code.
Thanks in advance for your support.