From 77be3e3114d62c4b1af9f90ba887b6ac62d48976 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 30 Dec 2023 11:46:51 +0530
Subject: [PATCH] fix(main.py): don't set timeout as an optional api param

---
 litellm/llms/openai.py         |  1 +
 litellm/main.py                |  1 -
 litellm/tests/test_async_fn.py |  2 +-
 litellm/utils.py               | 35 +++++++---------------------------
 4 files changed, 9 insertions(+), 30 deletions(-)

diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index b1947bad2..cc2a3889a 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -382,6 +382,7 @@ class OpenAIChatCompletion(BaseLLM):
                     "complete_input_dict": data,
                 },
             )
+
             response = await openai_aclient.chat.completions.create(**data)
             stringified_response = response.model_dump_json()
             logging_obj.post_call(
diff --git a/litellm/main.py b/litellm/main.py
index 131110481..3b13e717a 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -574,7 +574,6 @@ def completion(
             max_retries=max_retries,
             logprobs=logprobs,
             top_logprobs=top_logprobs,
-            timeout=timeout,
             **non_default_params,
         )
 
diff --git a/litellm/tests/test_async_fn.py b/litellm/tests/test_async_fn.py
index 81eecf3de..ecc862735 100644
--- a/litellm/tests/test_async_fn.py
+++ b/litellm/tests/test_async_fn.py
@@ -195,7 +195,7 @@ def test_get_cloudflare_response_streaming():
     asyncio.run(test_async_call())
 
 
-test_get_cloudflare_response_streaming()
+# test_get_cloudflare_response_streaming()
 
 
 def test_get_response_streaming():
diff --git a/litellm/utils.py b/litellm/utils.py
index 5b0fe8b1d..afd544c6b 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2910,7 +2910,6 @@ def get_optional_params(
     max_retries=None,
     logprobs=None,
     top_logprobs=None,
-    timeout=None,
     **kwargs,
 ):
     # retrieve all parameters passed to the function
@@ -2940,7 +2939,6 @@ def get_optional_params(
         "max_retries": None,
         "logprobs": None,
         "top_logprobs": None,
-        "timeout": 600,
     }
     # filter out those parameters that were passed with non-default values
     non_default_params = {
@@ -3734,7 +3732,6 @@ def get_optional_params(
             "max_retries",
             "logprobs",
             "top_logprobs",
-            "timeout",
         ]
         _check_valid_arg(supported_params=supported_params)
         if functions is not None:
@@ -3775,8 +3772,6 @@ def get_optional_params(
             optional_params["logprobs"] = logprobs
         if top_logprobs is not None:
             optional_params["top_logprobs"] = top_logprobs
-        if timeout is not None:
-            optional_params["timeout"] = timeout
     # if user passed in non-default kwargs for specific providers/models, pass them along
     for k in passed_params.keys():
         if k not in default_params.keys():
@@ -6539,12 +6534,14 @@ class CustomStreamWrapper:
         self.special_tokens = ["<|assistant|>", "<|system|>", "<|user|>", "<s>", "</s>"]
         self.holding_chunk = ""
         self.complete_response = ""
-        self._hidden_params = {
-            "model_id": (
-                self.logging_obj.model_call_details.get("litellm_params", {})
-                .get("model_info", {})
-                .get("id", None)
+        _model_info = (
+            self.logging_obj.model_call_details.get("litellm_params", {}).get(
+                "model_info", {}
             )
+            or {}
+        )
+        self._hidden_params = {
+            "model_id": (_model_info.get("id", None))
         }  # returned as x-litellm-model-id response header in proxy
 
     def __iter__(self):
@@ -7437,14 +7434,6 @@ class CustomStreamWrapper:
                         target=self.logging_obj.success_handler, args=(response,)
                     ).start()  # log response
                     # RETURN RESULT
-                    if hasattr(response, "_hidden_params"):
-                        response._hidden_params["model_id"] = (
-                            self.logging_obj.model_call_details.get(
-                                "litellm_params", {}
-                            )
-                            .get("model_info", {})
-                            .get("id", None)
-                        )
                     return response
         except StopIteration:
             raise  # Re-raise StopIteration
@@ -7495,16 +7484,6 @@ class CustomStreamWrapper:
                             processed_chunk,
                         )
                     )
-                    # RETURN RESULT
-                    if hasattr(processed_chunk, "_hidden_params"):
-                        model_id = (
-                            self.logging_obj.model_call_details.get(
-                                "litellm_params", {}
-                            )
-                            .get("model_info", {})
-                            .get("id", None)
-                        )
-                        processed_chunk._hidden_params["model_id"] = model_id
                     return processed_chunk
                 raise StopAsyncIteration
             else:  # temporary patch for non-aiohttp async calls