(feat) proxy - support dynamic timeout per request

This commit is contained in:
ishaan-jaff 2023-12-30 10:55:42 +05:30
parent 459ba5b45e
commit 2f4cd3b569
2 changed files with 22 additions and 10 deletions

View file

@@ -547,10 +547,6 @@ def completion(
model_api_key = get_api_key(
llm_provider=custom_llm_provider, dynamic_api_key=api_key
) # get the api key from the environment if required for the model
if model_api_key and "sk-litellm" in model_api_key:
api_base = "https://proxy.litellm.ai"
custom_llm_provider = "openai"
api_key = model_api_key
if dynamic_api_key is not None:
api_key = dynamic_api_key
@@ -578,6 +574,7 @@ def completion(
max_retries=max_retries,
logprobs=logprobs,
top_logprobs=top_logprobs,
timeout=timeout,
**non_default_params,
)