add support for litellm proxy calls

Krrish Dholakia 2023-09-18 12:15:19 -07:00
parent 0f88b82c4f
commit 9067ec3b43
7 changed files with 110 additions and 16 deletions


@@ -18,6 +18,7 @@ from litellm.utils import (
     read_config_args,
     completion_with_fallbacks,
     get_llm_provider,
+    get_api_key,
     mock_completion_streaming_obj
 )
 from .llms import anthropic
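
The get_api_key helper imported above is used in the next hunk with a provider name and any caller-supplied key. Going only by the inline comment in that hunk ("get the api key from the environment if required for the model"), its behavior can be pictured roughly as follows; this is an illustrative sketch, not the actual litellm.utils implementation, and the environment-variable names are assumptions:

import os

def get_api_key(llm_provider, dynamic_api_key):
    # Illustrative sketch only: prefer a key passed in at call time,
    # otherwise fall back to a provider-specific environment variable.
    if dynamic_api_key:
        return dynamic_api_key
    if llm_provider == "openai":
        return os.environ.get("OPENAI_API_KEY")
    if llm_provider == "anthropic":
        return os.environ.get("ANTHROPIC_API_KEY")
    return None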
@@ -233,6 +234,10 @@ def completion(
             custom_llm_provider = model.split("/", 1)[0]
             model = model.split("/", 1)[1]
         model, custom_llm_provider = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider)
+        model_api_key = get_api_key(llm_provider=custom_llm_provider, dynamic_api_key=api_key) # get the api key from the environment if required for the model
+        if model_api_key and "sk-litellm" in model_api_key:
+            api_base = "https://proxy.litellm.ai"
+            custom_llm_provider = "openai"
         # check if user passed in any of the OpenAI optional params
         optional_params = get_optional_params(
             functions=functions,
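
Taken together, the added lines mean that when the resolved key contains "sk-litellm", the request is rerouted to the hosted proxy at https://proxy.litellm.ai and handled through the openai provider path. A minimal caller-side sketch of what would exercise this, assuming completion() accepts the api_key argument referenced in the hunk above (the model name and key value below are placeholders):

import litellm

# Placeholder key: anything containing "sk-litellm" should cause completion()
# to set api_base to https://proxy.litellm.ai and use the openai provider path.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    api_key="sk-litellm-placeholder",
)
print(response)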