Image analysis with local Ollama

import os
import ollama
from crewai import Agent, Task, Crew, Process, LLM
from crewai.tools import BaseTool

--- 1. LLM Configuration ---

We are back to using the multimodal model directly, as the dual-LLM approach was also foiled by the underlying bug. Our goal now is just to force a halt after the tool runs.

ollama_llm = LLM(
    model="ollama/qwen2.5vl:7b",
    base_url="http://localhost:11434"
)
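
As a quick sanity check, the endpoint can be verified outside CrewAI first. A minimal sketch, assuming the qwen2.5vl:7b tag is already pulled locally (this uses the same ollama client the tool below relies on):

    # One plain round-trip to the local server. If this fails, the problem is
    # the Ollama install or the model tag, not CrewAI/LiteLLM.
    import ollama
    smoke = ollama.chat(
        model="qwen2.5vl:7b",
        messages=[{"role": "user", "content": "Reply with one word: pong"}],
    )
    print(smoke["message"]["content"])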

--- 2. The Specialist Tool (with "Final Answer:" prefix) ---

class ImageAnalysisTool(BaseTool):
    name: str = "Image Analysis Tool"
    description: str = "Analyzes an image from a file path and returns a bullet list of objects and environment."

    def _run(self, image_path: str) -> str:
        if not os.path.exists(image_path):
            return f"Error: Image path not found at '{image_path}'."
        try:
            # Read the image as raw bytes; the ollama client accepts bytes
            # directly in the 'images' field.
            with open(image_path, "rb") as image_file:
                image_bytes = image_file.read()

            response = ollama.chat(
                model='qwen2.5vl:7b',
                messages=[{
                    'role': 'user',
                    'content': 'Describe this image in a plain text bullet list. Each bullet should be one key detail.',
                    'images': [image_bytes]
                }]
            )

            # FORCE HALT MECHANISM 1: The official signal to stop.
            raw_description = response['message']['content']
            return f"Final Answer: {raw_description}"

        except Exception as e:
            return f"An error occurred during image analysis: {e}"

image_tool = ImageAnalysisTool()
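
Because the tool is plain Python, it can also be exercised directly, with no agent loop involved. A minimal check, assuming a test image exists at the path you pass in:

    # Direct tool call; confirms the Ollama vision request works before any
    # CrewAI machinery runs. _run is the method defined on the class above.
    print(image_tool._run("/Users/skasmani/Desktop/x1.png"))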

--- 3. The Agent (with max_iter=1) ---

image_describer = Agent(
    role='Image Description Specialist',
    goal='Use the Image Analysis Tool exactly once to describe the image at the given path.',
    backstory='A single-purpose expert that executes its tool and immediately stops.',
    tools=[image_tool],
    llm=ollama_llm,
    verbose=True,
    allow_delegation=False,
    # FORCE HALT MECHANISM 2: Allow only one turn.
    max_iter=1
)

--- 4. The Task (with explicit tool assignment) ---

describe_task = Task(
    description="Analyze the image located at '{image_path}' using your tool. Your job is complete after the tool returns its output.",
    expected_output="The direct bullet-point output from the Image Analysis Tool.",
    agent=image_describer,
    # FORCE HALT MECHANISM 3: Explicitly bind the tool to the task.
    tools=[image_tool]
)
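
There is also a fourth halt mechanism worth knowing about: CrewAI tools carry a result_as_answer flag that makes the tool's raw output the agent's final answer, with no follow-up LLM turn at all. A hedged sketch (verify the flag exists in your installed CrewAI version):

    # FORCE HALT MECHANISM 4 (hedged: requires result_as_answer support in
    # your CrewAI version). The tool output is returned as the final answer
    # directly, so the "Final Answer:" prefix in _run becomes redundant.
    image_tool = ImageAnalysisTool(result_as_answer=True)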

def run_crew(image_path: str):
    if not os.path.exists(image_path):
        print(f"Error: File '{image_path}' does not exist.")
        return

    image_crew = Crew(
        agents=[image_describer],
        tasks=[describe_task],
        process=Process.sequential,
        verbose=True
    )

    print("🚀 Kicking off the Crew with a FORCED HALT configuration...")
    result = image_crew.kickoff(inputs={'image_path': image_path})
    print("\n\n########################")
    print("## Crew Analysis Complete!")
    print("########################\n")
    print("Final Report:")
    print(result)

if __name__ == "__main__":
    IMAGE_FILE_PATH = "/Users/skasmani/Desktop/x1.png"
    if os.path.exists(IMAGE_FILE_PATH):
        run_crew(IMAGE_FILE_PATH)
    else:
        print(f"ERROR: The image file was not found at the specified path: {IMAGE_FILE_PATH}")

Here is the output log:
🚀 Kicking off the Crew with a FORCED HALT configuration...

Crew Execution Started (Name: crew, ID: f7f17d79-7610-459b-a5c4-12d0139f9106)

🚀 Crew: crew
└── 📋 Task: 67fda6a4-409b-435f-b78c-e0437668298d  Status: Executing Task...
    └── 🤖 Agent: Image Description Specialist  Status: In Progress

# Agent: Image Description Specialist
## Task: Analyze the image located at '/Users/skasmani/Desktop/x1.png' using your tool. Your job is complete after the tool returns its output.
## Thought: I need to use the Image Analysis Tool to analyze the image at the given path.
## Using tool: Image Analysis Tool
## Tool Input: "{\"image_path\": \"/Users/skasmani/Desktop/x1.png\"}"
## Tool Output: Final Answer:
- A man in a suit is speaking into a microphone.
- He is surrounded by other men in suits.
- A woman is holding a microphone and appears to be recording.
- The setting appears to be outdoors, with trees and a building in the background.
- The man speaking is wearing glasses and has a beard.

Maximum iterations reached. Requesting final answer.

🤖 Agent: Image Description Specialist  Status: In Progress
└── 🧠 Thinking...

LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'. Provider List: Providers | liteLLM

🚀 Crew: crew
└── 📋 Task: 67fda6a4-409b-435f-b78c-e0437668298d  Status: Executing Task...
    └── 🤖 Agent: Image Description Specialist  Status: In Progress
        └── ❌ LLM Failed

❌ LLM Call Failed
Error: litellm.APIConnectionError: list index out of range
Traceback (most recent call last):
  File ".../site-packages/litellm/main.py", line 2870, in completion
  File ".../site-packages/litellm/llms/custom_httpx/llm_http_handler.py", line 269, in completion
  File ".../site-packages/litellm/llms/ollama/completion/transformation.py", line 322, in transform_request
  File ".../site-packages/litellm/litellm_core_utils/prompt_templates/factory.py", line 229, in ollama_pt
    tool_calls = messages[msg_i].get("tool_calls")
IndexError: list index out of range

ERROR:root:LiteLLM call failed: litellm.APIConnectionError: list index out of range

An unknown error occurred. Please check the details below. Error details: litellm.APIConnectionError: list index out of range

🚀 Crew: crew
└── 📋 Task: 67fda6a4-409b-435f-b78c-e0437668298d  Assigned to: Image Description Specialist  Status: ❌ Failed
    └── 🤖 Agent: Image Description Specialist  Status: In Progress
        └── ❌ LLM Failed

Task Failed (Name: 67fda6a4-409b-435f-b78c-e0437668298d, Agent: Image Description Specialist)

Crew Execution Failed (Name: crew, ID: f7f17d79-7610-459b-a5c4-12d0139f9106)

---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
File .../site-packages/litellm/main.py:2870, in completion(...)
→ 2870 response = base_llm_http_handler.completion(
            model=model, stream=stream, messages=messages, ...,
            custom_llm_provider="ollama", ...)

File .../site-packages/litellm/llms/custom_httpx/llm_http_handler.py:269, in BaseLLMHTTPHandler.completion(...)
→  269 data = provider_config.transform_request(
            model=model, messages=messages, optional_params=optional_params,
            litellm_params=litellm_params, headers=headers)

File .../site-packages/litellm/llms/ollama/completion/transformation.py:322, in OllamaConfig.transform_request(...)
→  322 modified_prompt = ollama_pt(model=model, messages=messages)

File .../site-packages/litellm/litellm_core_utils/prompt_templates/factory.py:229, in ollama_pt(model, messages)
→  229 tool_calls = messages[msg_i].get("tool_calls")

IndexError: list index out of range

During handling of the above exception, another exception occurred:

APIConnectionError                        Traceback (most recent call last)
Cell In[57], line 89 → run_crew(IMAGE_FILE_PATH)
Cell In[57], line 79 → result = image_crew.kickoff(inputs={'image_path': image_path})
.../site-packages/crewai/crew.py:649, in Crew.kickoff → self._run_sequential_process()
.../site-packages/crewai/crew.py:761, in Crew._run_sequential_process → self._execute_tasks(self.tasks)
.../site-packages/crewai/crew.py:864, in Crew._execute_tasks → task.execute_sync(agent=agent_to_use, context=context, tools=...)
.../site-packages/crewai/task.py:347, in Task.execute_sync → self._execute_core(agent, context, tools)
.../site-packages/crewai/task.py:411, in Task._execute_core → agent.execute_task(task=self, context=context, tools=tools)
.../site-packages/crewai/agent.py:295, in Agent.execute_task → self._execute_without_timeout(task_prompt, task)
.../site-packages/crewai/agent.py:399, in Agent._execute_without_timeout → self.agent_executor.invoke({...})["output"]
.../site-packages/crewai/agents/crew_agent_executor.py:112, in CrewAgentExecutor.invoke → self._invoke_loop()
.../site-packages/crewai/agents/crew_agent_executor.py:144, in _invoke_loop → handle_max_iterations_exceeded(formatted_answer, printer=self._printer, i18n=self._i18n, messages=self.messages, llm=self.llm, callbacks=self.callbacks)
.../site-packages/crewai/utilities/agent_utils.py:96, in handle_max_iterations_exceeded
.../site-packages/crewai/llm.py:888, in LLM.call → self._handle_non_streaming_response(params, callbacks, available_functions)
.../site-packages/crewai/llm.py:727, in LLM._handle_non_streaming_response → litellm.completion(**params)
.../site-packages/litellm/main.py:3182, in completion → raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e, ...)
.../site-packages/litellm/litellm_core_utils/exception_mapping_utils.py:2190, in exception_type → raise APIConnectionError(...)

APIConnectionError: litellm.APIConnectionError: list index out of range
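
Reading the trace bottom-up: the tool call itself succeeded, then max_iter=1 routed the run into handle_max_iterations_exceeded, which issues one more LLM call to compose the final answer, and that call dies inside LiteLLM's ollama_pt prompt factory (factory.py:229 indexes messages[msg_i] past the end of the message list). So every in-agent halt mechanism still costs one extra LLM round-trip through the buggy code path. One workaround worth trying, sketched under the assumption that the bug is confined to the ollama provider's /api/generate prompt templating: switch to LiteLLM's ollama_chat/ prefix, which sends the messages list to Ollama's /api/chat endpoint and skips ollama_pt entirely.

    # Hedged alternative config: same model, different LiteLLM route. The
    # ollama_chat/ prefix avoids the ollama_pt prompt-template path that
    # raises the IndexError above. Verify against your litellm version.
    ollama_llm = LLM(
        model="ollama_chat/qwen2.5vl:7b",
        base_url="http://localhost:11434"
    )

Combined with result_as_answer=True on the tool (see the sketch after the Task section), the agent should be able to finish without ever re-entering the failing path.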