fix(main.py): pass in openrouter as custom provider for openai client call

Fixes https://github.com/BerriAI/litellm/issues/4414
Krrish Dholakia, 2024-06-28 21:26:22 -07:00
commit d10912beeb (parent d0c89ddbe3)
2 changed files with 4 additions and 3 deletions

litellm/llms/openai.py

@@ -678,17 +678,17 @@ class OpenAIChatCompletion(BaseLLM):
         if headers:
             optional_params["extra_headers"] = headers
         if model is None or messages is None:
-            raise OpenAIError(status_code=422, message=f"Missing model or messages")
+            raise OpenAIError(status_code=422, message="Missing model or messages")

         if not isinstance(timeout, float) and not isinstance(
             timeout, httpx.Timeout
         ):
             raise OpenAIError(
                 status_code=422,
-                message=f"Timeout needs to be a float or httpx.Timeout",
+                message="Timeout needs to be a float or httpx.Timeout",
             )

-        if custom_llm_provider != "openai":
+        if custom_llm_provider is not None and custom_llm_provider != "openai":
             model_response.model = f"{custom_llm_provider}/{model}"
             # process all OpenAI compatible provider logic here
             if custom_llm_provider == "mistral":
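Why the `is not None` guard matters: before this change, a call where custom_llm_provider was None still entered the branch, since `None != "openai"` is truthy, and the response model was rewritten to a bogus "None/<model>". A minimal standalone sketch of the before/after behavior (variable values are illustrative, not from this commit):

custom_llm_provider = None
model = "gpt-3.5-turbo"

# Before: None != "openai" evaluates True, so the model name
# picks up a bogus "None/" prefix.
if custom_llm_provider != "openai":
    print(f"{custom_llm_provider}/{model}")  # -> None/gpt-3.5-turbo

# After: the explicit None check leaves the model name untouched
# when no custom provider is set.
if custom_llm_provider is not None and custom_llm_provider != "openai":
    print(f"{custom_llm_provider}/{model}")  # not reached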

litellm/main.py

@@ -1828,6 +1828,7 @@ def completion(
                 logging_obj=logging,
                 acompletion=acompletion,
                 timeout=timeout,  # type: ignore
+                custom_llm_provider="openrouter",
             )
             ## LOGGING
             logging.post_call(
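With the provider passed through to the OpenAI client call, an OpenRouter completion now reports a provider-prefixed model name. A hedged usage sketch (model name and prompt are illustrative, not from this commit):

import litellm

response = litellm.completion(
    model="openrouter/openai/gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
# With this fix, response.model carries the "openrouter/" prefix
# rather than being mangled by the missing provider argument.
print(response.model)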