Hi @rokbenko
Please help me with a similar issue. I have tried the Together chat API both via `Together()` and via CrewAI's `LLM()`, and also running the model locally with LM Studio, accessing it with `lms.llm()` and `LLM()`. Nothing has worked so far.
The error:
Agent: AI Assistant
Task: Answer the following question: Which is bigger, 9 or 10?
Crew: crew
└──
Task: 36bd5b41-7b78-47b0-afe3-c4608e68514d
Status: Executing Task…
└──
LLM Failed
An unknown error occurred. Please check the details below.
Crew: crew
└──
Task: 36bd5b41-7b78-47b0-afe3-c4608e68514d
Assigned to: AI Assistant
Status:
Failed
└──
LLM Failed
╭────────────────────────────────────────────────── Task Failure ──────────────────────────────────────────────────╮
│ │
│ Task Failed │
│ Name: 36bd5b41-7b78-47b0-afe3-c4608e68514d │
│ Agent: AI Assistant │
│ │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
╭────────────────────────────────────────────────── Crew Failure ──────────────────────────────────────────────────╮
│ │
│ Crew Execution Failed │
│ Name: crew │
│ ID: b891d8b6-2232-464e-b7a0-6099fa709099 │
=============
CODE
# Standard library
import os

# Third-party
from crewai import Agent, Task, Crew, LLM
from dotenv import load_dotenv
from langchain_together import Together
import lmstudio as lms

# Load environment variables (e.g. TOGETHER_API_KEY) from a local .env file.
# NOTE: the original paste was missing the leading '#' on this comment line,
# which made the module fail before anything else ran.
load_dotenv()
def create_simple_agent():
    """Create a simple CrewAI assistant agent backed by a local LLM.

    Returns:
        Agent: the configured assistant agent.
    """
    # NOTE(review): CrewAI's LLM routes through litellm, which resolves the
    # provider from the model string. A bare id like "google/gemma-3-12b" is
    # interpreted as a hosted provider, not your LM Studio server — this is the
    # most likely cause of the "LLM Failed" error. For a local LM Studio
    # server you typically need something like
    #   LLM(model="lm_studio/google/gemma-3-12b",
    #       base_url="http://localhost:1234/v1", api_key="dummy")
    # — confirm against the CrewAI / litellm provider docs.
    llm = LLM(
        model="google/gemma-3-12b"
        # base_url="https://api.gemma.ai/v1",
    )
    print(f"✅ LLM initialized with model: {llm}")

    # Create a simple assistant agent that delegates nothing and logs verbosely.
    assistant = Agent(
        role='AI Assistant',
        goal='Provide helpful, accurate, and informative responses to user questions',
        backstory="""You are a knowledgeable AI assistant with expertise in various fields.
You provide clear, well-structured answers and are always helpful and friendly.""",
        verbose=True,
        allow_delegation=False,
        llm=llm,
    )
    print("✅ Agent created.")
    return assistant
def ask_question(agent, question):
    """Ask a single question to *agent* via a one-task, one-agent crew.

    Args:
        agent: the CrewAI Agent that will answer.
        question: the user's question text.

    Returns:
        The crew's kickoff result on success, or None if execution raised.
    """
    # Wrap the question in a Task so the crew can execute it.
    task = Task(
        description=f"Answer the following question: {question}",
        agent=agent,
        expected_output="A clear, helpful, and informative answer to the user's question."
    )
    # A minimal crew: just this agent and this task.
    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True
    )
    print(f"\n🤖 Agent is thinking about: {question}")
    print("=" * 50)
    # Keep the try body to the one call that can actually raise.
    try:
        result = crew.kickoff()
    except Exception as e:
        print(f"❌ Error during execution: {e}")
        print("❌ Didn't get a response from the agent")
        return None
    print(f"✅ Got Response: {result}")
    return result
def interactive_chat():
    """Run an interactive REPL-style chat loop with the AI agent.

    Reads questions from stdin until the user types quit/exit/bye.
    Returns early if the agent could not be created.
    """
    print("🤖 Simple CrewAI Agent with Together AI")
    print("=" * 50)

    # Create the agent; bail out if initialization failed.
    agent = create_simple_agent()
    if not agent:
        return

    print("\n💬 You can now chat with the AI agent!")
    print("Type 'quit' or 'exit' to end the conversation")
    print("-" * 50)

    while True:
        question = input("\n❓ Your question: ").strip()

        # Exit commands end the loop.
        if question.lower() in ['quit', 'exit', 'bye']:
            print("👋 Goodbye! Thanks for chatting!")
            break

        # Ignore empty input rather than sending a blank task.
        if not question:
            print("Please enter a question.")
            continue

        response = ask_question(agent, question)
        if response:
            print(f"\n🤖 Agent's response:")
            print("-" * 30)
            print(response)
            print("-" * 30)
        else:
            print("❌ Sorry, I couldn't process your question. Please try again.")
# Entry point guard: the forum paste rendered the dunders as italics
# ("if name == “main”"), which is a NameError at import time.
if __name__ == "__main__":
    interactive_chat()