I want to use the local LLM via Ollama, but I'm facing some issues. It keeps asking me for the OpenAI API key

from crewai import Agent, Crew, Process, Task, LLM
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool, ScrapeWebsiteTool
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from models import JobRequirements, ResumeOptimization, CompanyResearch
from langchain_ollama import OllamaEmbeddings

import json
import os

# 🚀 Force CrewAI away from OpenAI: drop any stray key and pin both
# the LLM and embeddings providers to the local Ollama server.
os.environ.pop("OPENAI_API_KEY", None)
os.environ["CREWAI_LLM_PROVIDER"] = "ollama"
os.environ["CREWAI_EMBEDDINGS_PROVIDER"] = "ollama"

# Env vars worth printing when debugging provider selection.
_DEBUG_ENV_KEYS = ("OPENAI_API_KEY", "CREWAI_LLM_PROVIDER", "CREWAI_EMBEDDINGS_PROVIDER")


def _print_env_debug(title: str) -> None:
    """Print the provider-related environment variables under a heading."""
    print(f"\n🔍 DEBUG: {title}")
    for key in _DEBUG_ENV_KEYS:
        print(f"{key}: {os.environ.get(key)}")


_print_env_debug("Checking CrewAI Environment Variables")

# Use Ollama embeddings
ollama_embeddings = OllamaEmbeddings(model="nomic-embed-text:latest")

# Initialize Ollama LLM. The api_key is a placeholder — the local Ollama
# server does not validate it; it only keeps client libraries satisfied.
llm = LLM(
    model="ollama/deepseek-r1:32b",
    base_url="http://localhost:11434",
    api_key="ollama",
    temperature=0.3,
)

print("\n🔍 DEBUG: CrewAI LLM Configuration:")
# print(json.dumps(llm.dict(), indent=4))

_print_env_debug("Environment Variables (Ensure No OpenAI Keys)")

# Initialize the tools
search_tool = SerperDevTool()
scrape_tool = ScrapeWebsiteTool()

@CrewBase
class ResumeCrew:
    """Crew that analyzes a resume against a job posting, researches the
    company, and emits an optimized resume plus a final report."""

    def __init__(self, resume_text: str) -> None:
        self.resume_text = resume_text
        self.llm = llm  # module-level Ollama LLM shared by every agent
        self.agents_config = 'config/agents.yaml'
        self.tasks_config = 'config/tasks.yaml'

    @agent
    def resume_analyzer(self) -> Agent:
        """Agent that reads the raw resume text as a knowledge source."""
        resume_source = StringKnowledgeSource(content=self.resume_text)
        return Agent(
            config=self.agents_config['resume_analyzer'],
            llm=self.llm,
            verbose=True,
            knowledge_sources=[resume_source],
        )

    @agent
    def job_analyzer(self) -> Agent:
        """Agent that scrapes and analyzes the job posting."""
        return Agent(
            config=self.agents_config['job_analyzer'],
            llm=self.llm,
            verbose=True,
            tools=[scrape_tool],
        )

    @agent
    def company_researcher(self) -> Agent:
        """Agent that searches the web for company background."""
        resume_source = StringKnowledgeSource(content=self.resume_text)
        return Agent(
            config=self.agents_config['company_researcher'],
            llm=self.llm,
            verbose=True,
            tools=[search_tool],
            knowledge_sources=[resume_source],
        )

    @agent
    def resume_writer(self) -> Agent:
        """Agent that rewrites the resume based on the analysis."""
        return Agent(
            config=self.agents_config['resume_writer'],
            llm=self.llm,
            verbose=True,
        )

    @agent
    def report_generator(self) -> Agent:
        """Agent that compiles the final markdown report."""
        return Agent(
            config=self.agents_config['report_generator'],
            llm=self.llm,
            verbose=True,
        )

    @task
    def analyze_job_task(self) -> Task:
        """Structured job analysis, validated against JobRequirements."""
        return Task(
            config=self.tasks_config['analyze_job_task'],
            output_pydantic=JobRequirements,
            output_file='output/job_analysis.json',
        )

    @task
    def optimize_resume_task(self) -> Task:
        """Optimization suggestions, validated against ResumeOptimization."""
        return Task(
            config=self.tasks_config['optimize_resume_task'],
            output_pydantic=ResumeOptimization,
            output_file='output/resume_optimization.json',
        )

    @task
    def research_company_task(self) -> Task:
        """Company research, validated against CompanyResearch."""
        return Task(
            config=self.tasks_config['research_company_task'],
            output_pydantic=CompanyResearch,
            output_file='output/company_research.json',
        )

    @task
    def generate_resume_task(self) -> Task:
        """Writes the optimized resume as markdown."""
        return Task(
            config=self.tasks_config['generate_resume_task'],
            output_file='output/optimized_resume.md',
        )

    @task
    def generate_report_task(self) -> Task:
        """Writes the final summary report as markdown."""
        return Task(
            config=self.tasks_config['generate_report_task'],
            output_file='output/final_report.md',
        )

    @crew
    def crew(self) -> Crew:
        """Assemble the sequential crew with Ollama-backed planning and embeddings."""
        embedder_cfg = {
            "provider": "ollama",  # keep embeddings local as well
            "config": {
                "model": "nomic-embed-text"
            },
        }
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True,
            knowledge_sources=[StringKnowledgeSource(content=self.resume_text)],
            cache=False,
            planning=True,
            planning_llm=self.llm,
            memory=False,
            embedder=embedder_cfg,
        )

It was my understanding that if you rely on environment variables via os.environ, you typically load them from a .env file. Also, it looks like you are missing load_dotenv().

This syntax is working for me:

# Load environment variables from the .env file
load_dotenv()

# Model name comes from the environment (e.g. OLLAMA_MODEL=llama3.2)
ollama_model = os.getenv("OLLAMA_MODEL")

# Turn on LiteLLM debug logging
os.environ["LITELLM_LOG"] = "DEBUG"

# Register the Ollama model with LiteLLM using the "ollama/<model>" convention.
model_name = f"ollama/{ollama_model}"
ollama_entry = {
    "api_base": "http://host.docker.internal:11434",
    "api_key": "not-needed",
    "api_type": "ollama",
}
litellm.register_model({model_name: ollama_entry})

I am reading it in another python file:

import streamlit as st
from crews import ResumeCrew
from pdf_utils import extract_text_from_pdf
from tempfile import NamedTemporaryFile

from dotenv import load_dotenv
import os
import fitz  # PyMuPDF for reading PDFs
import litellm

# Disable CrewAI phone-home features.
os.environ["CREWAI_TELEMETRY"] = "false"
os.environ["CREWAI_ANALYTICS"] = "false"

# Verbose LiteLLM output helps debug which provider gets routed to.
litellm.set_verbose = True
os.environ['LITELLM_LOG'] = 'DEBUG'

# Credentials live in creds.env rather than the default .env.
load_dotenv('creds.env')

See the thread with the CrewAI team that looks the most advanced on this issue: "String Knowledge sources not working with Gemini".

Have you tried adding load_dotenv('creds.env', override=True) to override the standard .env implementation? Honestly, I'm not familiar with how you have that implemented.

Hi @Saeed_Kasmani !

Give a try on:

  • use dotenv to load environment variables from a .env file
  • ensure to set OPENAI_API_KEY=NA
  • there is no need for api_key in the LLM object if you are using a local model

Let us know if that worked or any issue.

Hi @Renato_Guilherme,
Thanks for your reply. I think there is an issue with using CrewAI, Ollama, and Pydantic together. I requested help and shared my code!

from crewai import Agent, Crew, Process, Task, LLM
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool, ScrapeWebsiteTool, PDFSearchTool
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource
from models import (
    JobRequirements,
    ResumeOptimization,
    CompanyResearch
)

from dotenv import load_dotenv

# Load provider settings (model names, API keys, etc.) from the local .env file.
load_dotenv()


@CrewBase
class Optimizer():
    """Optimizer crew"""

    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    def __init__(self) -> None:
        # The candidate's resume, indexed as a PDF knowledge source.
        self.resume_pdf = PDFKnowledgeSource(file_paths="CV_Mohan.pdf")

    @agent
    def resume_analyzer(self) -> Agent:
        """Agent that reads the resume PDF via a local Ollama embedder."""
        local_embedder = {
            "provider": "ollama",
            "config": {
                "model": "nomic-embed-text",
                "base_url": "http://localhost:11434",
            },
        }
        return Agent(
            config=self.agents_config['resume_analyzer'],
            verbose=True,
            knowledge_sources=[self.resume_pdf],
            embedder=local_embedder,
        )

    @agent
    def job_analyzer(self) -> Agent:
        """Agent that scrapes the job posting from the web."""
        return Agent(
            config=self.agents_config['job_analyzer'],
            verbose=True,
            tools=[ScrapeWebsiteTool()],
        )

    @agent
    def company_researcher(self) -> Agent:
        """Agent that searches for company background, with resume context."""
        local_embedder = {
            "provider": "ollama",
            "config": {
                "model": "nomic-embed-text",
                "base_url": "http://localhost:11434",
            },
        }
        return Agent(
            config=self.agents_config['company_researcher'],
            verbose=True,
            tools=[SerperDevTool()],
            knowledge_sources=[self.resume_pdf],
            embedder=local_embedder,
        )

    @agent
    def resume_writer(self) -> Agent:
        """Agent that drafts the optimized resume."""
        return Agent(
            config=self.agents_config['resume_writer'],
            verbose=True,
        )

    @agent
    def report_generator(self) -> Agent:
        """Agent that assembles the final report."""
        return Agent(
            config=self.agents_config['report_generator'],
            verbose=True,
        )

    @task
    def analyze_job_task(self) -> Task:
        """Job analysis validated against the JobRequirements model."""
        return Task(
            config=self.tasks_config['analyze_job_task'],
            output_file='output/job_analysis.json',
            output_pydantic=JobRequirements,
        )

    @task
    def optimize_resume_task(self) -> Task:
        """Optimization suggestions validated against ResumeOptimization."""
        return Task(
            config=self.tasks_config['optimize_resume_task'],
            output_file='output/resume_optimization.json',
            output_pydantic=ResumeOptimization,
        )

    @task
    def research_company_task(self) -> Task:
        """Company research validated against CompanyResearch."""
        return Task(
            config=self.tasks_config['research_company_task'],
            output_file='output/company_research.json',
            output_pydantic=CompanyResearch,
        )

    @task
    def generate_resume_task(self) -> Task:
        """Emits the optimized resume as markdown."""
        return Task(
            config=self.tasks_config['generate_resume_task'],
            output_file='output/optimized_resume.md',
        )

    @task
    def generate_report_task(self) -> Task:
        """Emits the final summary report as markdown."""
        return Task(
            config=self.tasks_config['generate_report_task'],
            output_file='output/final_report.md',
        )

    @crew
    def crew(self) -> Crew:
        """Run all agents and tasks sequentially with the resume as shared knowledge."""
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True,
            knowledge_sources=[self.resume_pdf],
        )

I managed to resolve the OpenAI API key issue… I'm using llama3.2, by the way… unfortunately I encountered an error with ScrapeWebsiteTool… if anyone manages to run this project locally, please share your valuable insights… thanks!