Merge branch 'main' into litellm_response_cost_headers

commit 869275585a
Author: Krish Dholakia
Date:   2024-06-27 21:33:09 -07:00 (committed by GitHub)
124 changed files with 3705 additions and 150 deletions

litellm/utils.py

@@ -2435,6 +2435,7 @@ def get_optional_params(
         and custom_llm_provider != "together_ai"
         and custom_llm_provider != "groq"
         and custom_llm_provider != "nvidia_nim"
+        and custom_llm_provider != "volcengine"
         and custom_llm_provider != "deepseek"
         and custom_llm_provider != "codestral"
         and custom_llm_provider != "mistral"
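
The guard above keeps volcengine out of the generic unsupported-parameter path in get_optional_params, so requests fall through to the provider-specific branch added in the next hunk. A minimal sketch of that dispatch pattern (names simplified, not litellm's actual code):

OPENAI_COMPATIBLE = {
    "together_ai", "groq", "nvidia_nim", "volcengine",
    "deepseek", "codestral", "mistral",
}

def route_optional_params(custom_llm_provider: str, non_default_params: dict) -> dict:
    if custom_llm_provider not in OPENAI_COMPATIBLE:
        # generic path: unsupported params are dropped or raise an error
        return {}
    # otherwise fall through to a provider-specific elif branch,
    # like the volcengine branch added below
    return dict(non_default_params)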
@@ -3111,6 +3112,17 @@ def get_optional_params(
             optional_params=optional_params,
             model=model,
         )
+    elif custom_llm_provider == "volcengine":
+        supported_params = get_supported_openai_params(
+            model=model, custom_llm_provider=custom_llm_provider
+        )
+        _check_valid_arg(supported_params=supported_params)
+        optional_params = litellm.VolcEngineConfig().map_openai_params(
+            non_default_params=non_default_params,
+            optional_params=optional_params,
+            model=model,
+        )
     elif custom_llm_provider == "groq":
         supported_params = get_supported_openai_params(
             model=model, custom_llm_provider=custom_llm_provider
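
The new branch delegates to litellm.VolcEngineConfig(). Only the two method names appear in the diff; the bodies below are an illustrative sketch of how an OpenAI-compatible provider config typically maps params, not the actual implementation:

class VolcEngineConfigSketch:
    def get_supported_openai_params(self, model: str) -> list:
        # volcengine's Ark API is OpenAI-compatible, so most params pass through
        return [
            "temperature", "top_p", "n", "max_tokens", "stream",
            "stop", "frequency_penalty", "presence_penalty", "seed",
        ]

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict, model: str
    ) -> dict:
        # copy every supported OpenAI param through unchanged
        supported = self.get_supported_openai_params(model=model)
        for k, v in non_default_params.items():
            if k in supported:
                optional_params[k] = v
        return optional_params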
@@ -3681,6 +3693,8 @@ def get_supported_openai_params(
         return litellm.FireworksAIConfig().get_supported_openai_params()
     elif custom_llm_provider == "nvidia_nim":
         return litellm.NvidiaNimConfig().get_supported_openai_params()
+    elif custom_llm_provider == "volcengine":
+        return litellm.VolcEngineConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "groq":
         return [
             "temperature",
@@ -3692,6 +3706,8 @@ def get_supported_openai_params(
             "tool_choice",
             "response_format",
             "seed",
+            "extra_headers",
+            "extra_body",
         ]
     elif custom_llm_provider == "deepseek":
         return [
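
Adding "extra_headers" and "extra_body" to groq's supported-params list lets them pass through completion() instead of being rejected as unsupported. A usage sketch (the header and body values are placeholders, not documented groq fields):

import litellm

response = litellm.completion(
    model="groq/llama3-8b-8192",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={"X-Request-Tag": "demo"},  # forwarded to the groq endpoint
    extra_body={"custom_field": "demo"},      # placeholder extra body field
)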
@@ -4045,6 +4061,10 @@ def get_llm_provider(
             # nvidia_nim is openai compatible, we just need to set this to custom_openai and have the api_base be https://integrate.api.nvidia.com/v1
             api_base = "https://integrate.api.nvidia.com/v1"
             dynamic_api_key = get_secret("NVIDIA_NIM_API_KEY")
+        elif custom_llm_provider == "volcengine":
+            # volcengine is openai compatible, we just need to set this to custom_openai and have the api_base be https://ark.cn-beijing.volces.com/api/v3
+            api_base = "https://ark.cn-beijing.volces.com/api/v3"
+            dynamic_api_key = get_secret("VOLCENGINE_API_KEY")
         elif custom_llm_provider == "codestral":
             # codestral is openai compatible, we just need to set this to custom_openai and have the api_base be https://codestral.mistral.ai/v1
             api_base = "https://codestral.mistral.ai/v1"
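
This makes volcengine/<endpoint> model strings resolve to the Ark endpoint, with the key read from VOLCENGINE_API_KEY. A usage sketch (key and endpoint id are placeholders):

import os
import litellm

os.environ["VOLCENGINE_API_KEY"] = "sk-..."  # placeholder key

response = litellm.completion(
    model="volcengine/<your-endpoint-id>",  # placeholder Ark endpoint id
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)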
@@ -4967,6 +4987,11 @@ def validate_environment(model: Optional[str] = None) -> dict:
                 keys_in_environment = True
             else:
                 missing_keys.append("NVIDIA_NIM_API_KEY")
+        elif custom_llm_provider == "volcengine":
+            if "VOLCENGINE_API_KEY" in os.environ:
+                keys_in_environment = True
+            else:
+                missing_keys.append("VOLCENGINE_API_KEY")
         elif (
             custom_llm_provider == "codestral"
             or custom_llm_provider == "text-completion-codestral"
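
With this branch, validate_environment can flag a missing volcengine key before any request is made. A usage sketch (endpoint id is a placeholder):

import litellm

check = litellm.validate_environment(model="volcengine/<your-endpoint-id>")
# with no key set, expect something like:
# {"keys_in_environment": False, "missing_keys": ["VOLCENGINE_API_KEY"]}
print(check)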
@@ -7802,6 +7827,7 @@ class CustomStreamWrapper:
             "<s>",
             "</s>",
             "<|im_end|>",
+            "<|im_start|>",
         ]
         self.holding_chunk = ""
         self.complete_response = ""
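
The list above is the set of special tokens CustomStreamWrapper strips from streamed text; self.holding_chunk exists because a token like "<|im_start|>" can arrive split across chunks. A simplified sketch of that buffering idea (not litellm's actual implementation):

SPECIAL_TOKENS = ["<s>", "</s>", "<|im_end|>", "<|im_start|>"]

def strip_special_tokens(holding: str, chunk: str) -> tuple[str, str]:
    """Return (text safe to emit now, text to hold for the next chunk)."""
    text = holding + chunk
    for tok in SPECIAL_TOKENS:
        text = text.replace(tok, "")
    # hold back the longest suffix that could still grow into a special token
    for i in range(len(text), 0, -1):
        if any(tok.startswith(text[-i:]) for tok in SPECIAL_TOKENS):
            return text[:-i], text[-i:]
    return text, ""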