From efe81032f4268da5cab9fda8ca4467fc1e03528f Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 14 Nov 2023 21:51:39 -0800
Subject: [PATCH] fix: fix linting errors

---
 litellm/llms/azure.py  | 10 ++++++++--
 litellm/llms/openai.py |  8 +++++++-
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py
index fcc70fd6e..9ec140a50 100644
--- a/litellm/llms/azure.py
+++ b/litellm/llms/azure.py
@@ -171,7 +171,7 @@ class AzureChatCompletion(BaseLLM):
             ## RESPONSE OBJECT
             return convert_to_model_response_object(response_object=response_json, model_response_object=model_response)
         except Exception as e:
-            if httpx.TimeoutException:
+            if isinstance(e,httpx.TimeoutException):
                 raise AzureOpenAIError(status_code=500, message="Request Timeout Error")
             elif response and hasattr(response, "text"):
                 raise AzureOpenAIError(status_code=500, message=f"{str(e)}\n\nOriginal Response: {response.text}")
@@ -186,6 +186,8 @@ class AzureChatCompletion(BaseLLM):
                   model_response: ModelResponse,
                   model: str
     ):
+        if self._client_session is None:
+            self._client_session = self.create_client_session()
         with self._client_session.stream(
             url=f"{api_base}",
             json=data,
@@ -207,7 +209,9 @@ class AzureChatCompletion(BaseLLM):
                         headers: dict,
                         model_response: ModelResponse,
                         model: str):
-        client = httpx.AsyncClient()
+        if self._aclient_session is None:
+            self._aclient_session = self.create_aclient_session()
+        client = self._aclient_session
         async with client.stream(
             url=f"{api_base}",
             json=data,
@@ -233,6 +237,8 @@ class AzureChatCompletion(BaseLLM):
                   optional_params=None,):
         super().embedding()
         exception_mapping_worked = False
+        if self._client_session is None:
+            self._client_session = self.create_client_session()
         try:
             headers = self.validate_environment(api_key, azure_ad_token=azure_ad_token)
             # Ensure api_base ends with a trailing slash
diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index fc9ae1631..19ea18ce9 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -275,7 +275,7 @@ class OpenAIChatCompletion(BaseLLM):
             ## RESPONSE OBJECT
             return convert_to_model_response_object(response_object=response_json, model_response_object=model_response)
         except Exception as e:
-            if httpx.TimeoutException:
+            if isinstance(e, httpx.TimeoutException):
                 raise OpenAIError(status_code=500, message="Request Timeout Error")
             if response and hasattr(response, "text"):
                 raise OpenAIError(status_code=500, message=f"{str(e)}\n\nOriginal Response: {response.text}")
@@ -290,6 +290,8 @@ class OpenAIChatCompletion(BaseLLM):
                   model_response: ModelResponse,
                   model: str
     ):
+        if self._client_session is None:
+            self._client_session = self.create_client_session()
         with self._client_session.stream(
             url=f"{api_base}", # type: ignore
             json=data,
@@ -311,6 +313,8 @@ class OpenAIChatCompletion(BaseLLM):
                         headers: dict,
                         model_response: ModelResponse,
                         model: str):
+        if self._aclient_session is None:
+            self._aclient_session = self.create_aclient_session()
         async with self._aclient_session.stream(
             url=f"{api_base}",
             json=data,
@@ -334,6 +338,8 @@ class OpenAIChatCompletion(BaseLLM):
                   optional_params=None,):
         super().embedding()
         exception_mapping_worked = False
+        if self._client_session is None:
+            self._client_session = self.create_client_session()
         try:
             headers = self.validate_environment(api_key)
             api_base = f"{api_base}/embeddings"