Crew error when instantiating an Agent with a local NVIDIA NIM Llama model

My custom CrewAI LLM wrapper around ChatNVIDIA:

import logging
from typing import Any, Dict, List, Optional, Union

import litellm
from crewai import Agent, LLM
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
from langchain_nvidia_ai_endpoints import ChatNVIDIA

# scalableai_constants and advanced_rag_retrieval are project-local modules.

class nvllm(LLM):
    def __init__(
        self,
        llm: ChatNVIDIA,
        model_str: str,
        timeout: Optional[Union[float, int]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        n: Optional[int] = None,
        stop: Optional[Union[str, List[str]]] = None,
        max_completion_tokens: Optional[int] = None,
        max_tokens: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[Dict[int, float]] = None,
        response_format: Optional[Dict[str, Any]] = None,
        seed: Optional[int] = None,
        logprobs: Optional[bool] = None,
        top_logprobs: Optional[int] = None,
        base_url: Optional[str] = None,
        api_version: Optional[str] = None,
        api_key: Optional[str] = None,
        callbacks: Optional[List[Any]] = None,
        **kwargs,
    ):
        self.model = model_str
        self.timeout = timeout
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.stop = stop
        self.max_completion_tokens = max_completion_tokens
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias
        self.response_format = response_format
        self.seed = seed
        self.logprobs = logprobs
        self.top_logprobs = top_logprobs
        self.base_url = base_url
        self.api_version = api_version
        self.api_key = api_key
        self.callbacks = callbacks
        self.kwargs = kwargs
        self.llm = llm

        if callbacks is None:
            self.callbacks = callbacks = []

        self.set_callbacks(callbacks)

    def call(self, messages: List[Dict[str, str]], callbacks: Optional[List[Any]] = None) -> str:
        if callbacks is None:
            callbacks = []
        if callbacks and len(callbacks) > 0:
            self.set_callbacks(callbacks)

        try:
            params = {
                "model": self.llm.model,
                "input": messages,
                "timeout": self.timeout,
                "temperature": self.temperature,
                "top_p": self.top_p,
                "n": self.n,
                "stop": self.stop,
                "max_tokens": self.max_tokens or self.max_completion_tokens,
                "presence_penalty": self.presence_penalty,
                "frequency_penalty": self.frequency_penalty,
                "logit_bias": self.logit_bias,
                "response_format": self.response_format,
                "seed": self.seed,
                "logprobs": self.logprobs,
                "top_logprobs": self.top_logprobs,
                "api_key": self.api_key,
                **self.kwargs,
            }

            response = self.llm.invoke(**params)
            return response.content
        except Exception as e:
            # Only log errors that are not context-window overflows;
            # those are handled further up the stack by CrewAI.
            if not LLMContextLengthExceededException(str(e))._is_context_limit_error(
                str(e)
            ):
                logging.error(f"LiteLLM call failed: {str(e)}")

            raise  # Re-raise the exception after logging

    def set_callbacks(self, callbacks: List[Any]):
        # Drop any registered litellm callbacks of the same types,
        # then install the new list globally.
        callback_types = [type(callback) for callback in callbacks]
        for callback in litellm.success_callback[:]:
            if type(callback) in callback_types:
                litellm.success_callback.remove(callback)

        for callback in litellm._async_success_callback[:]:
            if type(callback) in callback_types:
                litellm._async_success_callback.remove(callback)

        litellm.callbacks = callbacks

llm = ChatNVIDIA(
    model="meta/llama-3.1-8b-instruct",
    base_url=scalableai_constants.NVIDIA_CHATBOT_MODEL_ENDPOINT,
    verify=scalableai_constants.SCALABLEAI_SECURE_SSL_KEY_CHAIN,
    api_key=scalableai_constants.NVIDIA_CHATBOT_MODEL_TOKEN,
)

default_llm = nvllm(model_str="nvidia_nim/meta/llama-3.1-8b-instruct", llm=llm)
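Both of these lines execute fine; the failure only shows up once the agent is created. To rule out the endpoint itself, a direct call to the underlying ChatNVIDIA client (a hypothetical smoke test on my side, outside the failing code path) confirms the NIM server responds:

# Hypothetical smoke test: checks the local NIM endpoint directly,
# before any CrewAI machinery is involved.
print(llm.invoke("Say hello.").content)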

Define the retriever agent. This agent constructs the response to the user query from the information returned by the vector store:

Retriever_Agent = Agent(
    role="RAG Specialist",
    goal="Provide accurate answers using retrieval-augmented generation",
    backstory="Expert in information retrieval and answer synthesis",
    verbose=True,
    allow_delegation=False,
    llm=default_llm,
    tools=[advanced_rag_retrieval]
)
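Note that the tool and the FastAPI layer seem irrelevant here; a stripped-down construction (a hypothetical repro sketch using the same default_llm) hits the same code path:

# Hypothetical minimal repro: the TypeError fires during Agent
# construction itself, before any task is run.
agent = Agent(
    role="test",
    goal="test",
    backstory="test",
    llm=default_llm,
)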

I get the following error:
Traceback (most recent call last):
  File "/home/ashish/venv310/lib/python3.10/site-packages/uvicorn/protocols/http/httptools_impl.py", line 409, in run_asgi
    result = await app(  # type: ignore[func-returns-value]
  File "/home/ashish/venv310/lib/python3.10/site-packages/uvicorn/middleware/proxy_headers.py", line 60, in __call__
    return await self.app(scope, receive, send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/fastapi/applications.py", line 1106, in __call__
    await super().__call__(scope, receive, send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/applications.py", line 122, in __call__
    await self.middleware_stack(scope, receive, send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/middleware/errors.py", line 184, in __call__
    raise exc
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/middleware/errors.py", line 162, in __call__
    await self.app(scope, receive, _send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/middleware/exceptions.py", line 79, in __call__
    raise exc
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/middleware/exceptions.py", line 68, in __call__
    await self.app(scope, receive, sender)
  File "/home/ashish/venv310/lib/python3.10/site-packages/fastapi/middleware/asyncexitstack.py", line 20, in __call__
    raise e
  File "/home/ashish/venv310/lib/python3.10/site-packages/fastapi/middleware/asyncexitstack.py", line 17, in __call__
    await self.app(scope, receive, send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/routing.py", line 718, in __call__
    await route.handle(scope, receive, send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/routing.py", line 276, in handle
    await self.app(scope, receive, send)
  File "/home/ashish/venv310/lib/python3.10/site-packages/starlette/routing.py", line 66, in app
    response = await func(request)
  File "/home/ashish/venv310/lib/python3.10/site-packages/fastapi/routing.py", line 274, in app
    raw_response = await run_endpoint_function(
  File "/home/ashish/venv310/lib/python3.10/site-packages/fastapi/routing.py", line 191, in run_endpoint_function
    return await dependant.call(**values)
  File "/home/ashish/conversational-ai-4.0/chatbot_v2/scalableai_app_chat.py", line 233, in kickoff_crew_endpoint
    result = kickoff_crew(input)
  File "/home/ashish/conversational-ai-4.0/chatbot_v2/scalableai_app_chat.py", line 220, in kickoff_crew
    crew = create_crew(inputs)
  File "/home/ashish/conversational-ai-4.0/chatbot_v2/scalableai_app_chat.py", line 186, in create_crew
    Retriever_Agent = Agent(
  File "/home/ashish/venv310/lib/python3.10/site-packages/pydantic/main.py", line 214, in __init__
    validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
  File "/home/ashish/venv310/lib/python3.10/site-packages/crewai/agent.py", line 141, in post_init_setup
    self._setup_agent_executor()
  File "/home/ashish/venv310/lib/python3.10/site-packages/crewai/agent.py", line 151, in _setup_agent_executor
    self.set_cache_handler(self.cache_handler)
  File "/home/ashish/venv310/lib/python3.10/site-packages/crewai/agents/agent_builder/base_agent.py", line 339, in set_cache_handler
    self.create_agent_executor()
  File "/home/ashish/venv310/lib/python3.10/site-packages/crewai/agent.py", line 305, in create_agent_executor
    self.agent_executor = CrewAgentExecutor(
  File "/home/ashish/venv310/lib/python3.10/site-packages/crewai/agents/crew_agent_executor.py", line 85, in __init__
    self.llm.stop = list(set(self.llm.stop + self.stop))
TypeError: unsupported operand type(s) for +: 'NoneType' and 'list'
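From the last frame, CrewAgentExecutor evaluates self.llm.stop + self.stop and assumes self.llm.stop is already a list, while the nvllm class above leaves it as None when no stop words are passed in. A minimal sketch of a possible workaround (my reading of the traceback, not a confirmed fix) is to normalize stop inside nvllm.__init__:

        # In nvllm.__init__, replacing `self.stop = stop`.
        # Hypothetical fix sketch: CrewAgentExecutor concatenates
        # `self.llm.stop + self.stop`, so `stop` must never be None.
        if stop is None:
            self.stop = []
        elif isinstance(stop, str):
            self.stop = [stop]
        else:
            self.stop = stop

With that change, the executor's set-union of stop words sees two lists and this particular TypeError should no longer be raised.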