diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 7d14fa450..32e63b957 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -678,17 +678,17 @@ class OpenAIChatCompletion(BaseLLM):
             if headers:
                 optional_params["extra_headers"] = headers
             if model is None or messages is None:
-                raise OpenAIError(status_code=422, message=f"Missing model or messages")
+                raise OpenAIError(status_code=422, message="Missing model or messages")
 
             if not isinstance(timeout, float) and not isinstance(
                 timeout, httpx.Timeout
             ):
                 raise OpenAIError(
                     status_code=422,
-                    message=f"Timeout needs to be a float or httpx.Timeout",
+                    message="Timeout needs to be a float or httpx.Timeout",
                 )
 
-            if custom_llm_provider != "openai":
+            if custom_llm_provider is not None and custom_llm_provider != "openai":
                 model_response.model = f"{custom_llm_provider}/{model}"
                 # process all OpenAI compatible provider logic here
                 if custom_llm_provider == "mistral":
diff --git a/litellm/main.py b/litellm/main.py
index 69ce61fab..9945e1b95 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1828,6 +1828,7 @@ def completion(
                 logging_obj=logging,
                 acompletion=acompletion,
                 timeout=timeout,  # type: ignore
+                custom_llm_provider="openrouter",
             )
             ## LOGGING
             logging.post_call(
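
The guard change in `litellm/llms/openai.py` matters because `custom_llm_provider` can legitimately be `None` when this OpenAI-compatible path is entered directly; the old check `custom_llm_provider != "openai"` was true in that case and would prefix the reported model as `"None/<model>"`. A minimal sketch of the corrected guard's behavior; `prefixed_model` is a hypothetical standalone helper for illustration, not a function in litellm itself (the real check lives inside `OpenAIChatCompletion.completion`):

```python
# Hypothetical helper mirroring the fixed guard above, for illustration only.
def prefixed_model(custom_llm_provider, model):
    # Old check: `custom_llm_provider != "openai"` was also True for None,
    # so the reported model name became "None/<model>".
    if custom_llm_provider is not None and custom_llm_provider != "openai":
        return f"{custom_llm_provider}/{model}"
    return model

assert prefixed_model(None, "gpt-4") == "gpt-4"        # no bogus "None/" prefix
assert prefixed_model("openai", "gpt-4") == "gpt-4"    # plain OpenAI stays bare
assert prefixed_model("mistral", "mistral-medium") == "mistral/mistral-medium"
```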
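
The `litellm/main.py` hunk threads `custom_llm_provider="openrouter"` into the OpenAI-compatible completion call on the OpenRouter route, so responses from that route pick up the provider prefix. A hedged usage sketch, assuming litellm's public `completion()` API; the model id below is an illustrative OpenRouter identifier, not one named in this diff:

```python
import litellm

# Illustrative OpenRouter model id (assumption, not taken from the diff).
response = litellm.completion(
    model="openrouter/mistralai/mistral-7b-instruct",
    messages=[{"role": "user", "content": "Hello"}],
)
# With custom_llm_provider="openrouter" passed through, the response should
# report a provider-prefixed model name rather than the bare model id.
print(response.model)
```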