from crewai import Agent, Crew, Process, Task, LLM
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool, ScrapeWebsiteTool
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from models import JobRequirements, ResumeOptimization, CompanyResearch
from langchain_ollama import OllamaEmbeddings
import os
# --- Ollama-only environment setup -------------------------------------------
# 🚀 Explicitly remove the OpenAI API key so CrewAI/LiteLLM cannot silently
# fall back to OpenAI, then force both the LLM and embeddings providers to
# Ollama via environment variables.
os.environ.pop("OPENAI_API_KEY", None)
os.environ["CREWAI_LLM_PROVIDER"] = "ollama"
os.environ["CREWAI_EMBEDDINGS_PROVIDER"] = "ollama"

import json  # retained for compatibility; was used by a since-removed debug dump

# Provider-related variables inspected by the debug helper below.
_PROVIDER_ENV_VARS = (
    "OPENAI_API_KEY",
    "CREWAI_LLM_PROVIDER",
    "CREWAI_EMBEDDINGS_PROVIDER",
)


def _debug_env(header: str) -> None:
    """Print *header* followed by the current value of each provider env var.

    Consolidates the previously duplicated copy-pasted debug loops into one
    helper so the two call sites cannot drift apart.
    """
    print(header)
    for key in _PROVIDER_ENV_VARS:
        print(f"{key}: {os.environ.get(key)}")


_debug_env("\n🔍 DEBUG: Checking CrewAI Environment Variables")

# Embeddings served by the local Ollama daemon
# (requires `ollama pull nomic-embed-text` — TODO confirm the model is pulled).
ollama_embeddings = OllamaEmbeddings(model="nomic-embed-text:latest")

# Chat LLM served by the local Ollama daemon. The dummy api_key ensures the
# client never reaches for an OpenAI credential.
llm = LLM(
    model="ollama/deepseek-r1:32b",
    base_url="http://localhost:11434",
    api_key="ollama",  # This ensures it's not using OpenAI
    temperature=0.3,
)

print("\n🔍 DEBUG: CrewAI LLM Configuration:")
_debug_env("\n🔍 DEBUG: Environment Variables (Ensure No OpenAI Keys)")

# Shared web tools handed to the agents defined below.
search_tool = SerperDevTool()
scrape_tool = ScrapeWebsiteTool()
@CrewBase
class ResumeCrew:
    """ResumeCrew for resume optimization and interview preparation"""

    # NOTE: the @CrewBase decorator appears to materialize the YAML paths
    # assigned in __init__ into dict-like configs (which is why the agent/task
    # methods index self.agents_config / self.tasks_config like mappings), and
    # to collect the @agent/@task-decorated methods into self.agents /
    # self.tasks used by crew() — confirm against the installed crewai version.

    def __init__(self, resume_text: str) -> None:
        # Raw resume text; attached below as a knowledge source to two agents
        # and to the crew itself.
        self.resume_text = resume_text
        # Module-level Ollama LLM shared by every agent and the planner.
        self.llm = llm
        # YAML config paths, resolved/loaded by @CrewBase (see class NOTE).
        self.agents_config = 'config/agents.yaml'
        self.tasks_config = 'config/tasks.yaml'

    @agent
    def resume_analyzer(self) -> Agent:
        # Analyzes the candidate's resume; the resume text is attached as a
        # knowledge source so the agent can ground its analysis in it.
        return Agent(
            config=self.agents_config['resume_analyzer'],
            verbose=True,
            llm=self.llm,
            knowledge_sources=[StringKnowledgeSource(content=self.resume_text)]
        )

    @agent
    def job_analyzer(self) -> Agent:
        # Scrapes and analyzes the target job posting via ScrapeWebsiteTool.
        return Agent(
            config=self.agents_config['job_analyzer'],
            verbose=True,
            tools=[scrape_tool],
            llm=self.llm
        )

    @agent
    def company_researcher(self) -> Agent:
        # Researches the hiring company with web search; also sees the resume
        # text so it can tailor research to the candidate.
        return Agent(
            config=self.agents_config['company_researcher'],
            verbose=True,
            tools=[search_tool],
            llm=self.llm,
            knowledge_sources=[StringKnowledgeSource(content=self.resume_text)]
        )

    @agent
    def resume_writer(self) -> Agent:
        # Rewrites the resume based on upstream analysis tasks.
        return Agent(
            config=self.agents_config['resume_writer'],
            verbose=True,
            llm=self.llm
        )

    @agent
    def report_generator(self) -> Agent:
        # Produces the final summary report from all prior task outputs.
        return Agent(
            config=self.agents_config['report_generator'],
            verbose=True,
            llm=self.llm
        )

    @task
    def analyze_job_task(self) -> Task:
        # Structured job-requirements extraction, validated against the
        # JobRequirements pydantic model and written to JSON.
        return Task(
            config=self.tasks_config['analyze_job_task'],
            output_file='output/job_analysis.json',
            output_pydantic=JobRequirements
        )

    @task
    def optimize_resume_task(self) -> Task:
        # Resume-optimization suggestions, validated as ResumeOptimization.
        return Task(
            config=self.tasks_config['optimize_resume_task'],
            output_file='output/resume_optimization.json',
            output_pydantic=ResumeOptimization
        )

    @task
    def research_company_task(self) -> Task:
        # Company research findings, validated as CompanyResearch.
        return Task(
            config=self.tasks_config['research_company_task'],
            output_file='output/company_research.json',
            output_pydantic=CompanyResearch
        )

    @task
    def generate_resume_task(self) -> Task:
        # Final rewritten resume in Markdown (free-form, no pydantic schema).
        return Task(
            config=self.tasks_config['generate_resume_task'],
            output_file='output/optimized_resume.md'
        )

    @task
    def generate_report_task(self) -> Task:
        # Final consolidated report in Markdown.
        return Task(
            config=self.tasks_config['generate_report_task'],
            output_file='output/final_report.md'
        )

    @crew
    def crew(self) -> Crew:
        # Assembles all agents/tasks into one sequential pipeline.
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            verbose=True,
            process=Process.sequential,
            # Resume text available crew-wide, not just per-agent.
            knowledge_sources=[StringKnowledgeSource(content=self.resume_text)],
            cache=False,
            # Planning enabled with the local LLM as planner — presumably to
            # keep the planner off the default OpenAI model; confirm.
            planning=True,
            planning_llm=self.llm,
            memory=False,
            # Local Ollama embedder for the knowledge sources — explicitly set
            # so embedding does not fall back to OpenAI.
            embedder={
                "provider": "ollama",  # Ensure this is explicitly set
                "config": {
                    "model": "nomic-embed-text"
                }
            }
        )
It was my understanding that if you read settings via `os.environ`/`os.getenv`, those values need to be loaded from a .env file first. Also, it looks like you are missing a `load_dotenv()` call.
This syntax is working for me:
# Load environment variables
load_dotenv()

# Get environment variables
ollama_model = os.getenv("OLLAMA_MODEL")

# Enable LiteLLM debugging
os.environ["LITELLM_LOG"] = "DEBUG"

# Register Ollama model with correct format
model_name = f"ollama/{ollama_model}"
litellm.register_model({
    model_name: {
        "api_base": "http://host.docker.internal:11434",
        "api_key": "not-needed",
        "api_type": "ollama"
    }
})
I am reading it in another Python file:
import streamlit as st
from crews import ResumeCrew
from pdf_utils import extract_text_from_pdf
from tempfile import NamedTemporaryFile
from dotenv import load_dotenv
import os
import fitz  # PyMuPDF for reading PDFs
import litellm

os.environ["CREWAI_TELEMETRY"] = "false"
os.environ["CREWAI_ANALYTICS"] = "false"
litellm.set_verbose = True
os.environ.pop("OPENAI_API_KEY", None)
os.environ["LITELLM_LOG"] = "DEBUG"
load_dotenv("creds.env")