From 18731cf42b38f1e6a51c66e121ddf2b287b6d04b Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 27 Aug 2024 12:14:23 -0700
Subject: [PATCH] fix: fix linting errors

---
 .pre-commit-config.yaml          | 12 ++++++------
 litellm/llms/azure.py            |  4 ++--
 litellm/llms/openai.py           | 10 +++++++---
 litellm/tests/test_exceptions.py |  2 +-
 4 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d429bc6b8..a33473b72 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,12 +1,12 @@
 repos:
   - repo: local
     hooks:
-      # - id: mypy
-      #   name: mypy
-      #   entry: python3 -m mypy --ignore-missing-imports
-      #   language: system
-      #   types: [python]
-      #   files: ^litellm/
+      - id: mypy
+        name: mypy
+        entry: python3 -m mypy --ignore-missing-imports
+        language: system
+        types: [python]
+        files: ^litellm/
       - id: isort
         name: isort
         entry: isort
diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py
index 57bc6f854..f1a882bb2 100644
--- a/litellm/llms/azure.py
+++ b/litellm/llms/azure.py
@@ -854,7 +854,6 @@ class AzureChatCompletion(BaseLLM):
                 additional_args={"complete_input_dict": data},
                 original_response=str(e),
             )
-            exception_mapping_worked = True
             raise e
         except asyncio.CancelledError as e:
             ## LOGGING
@@ -1029,9 +1028,10 @@ class AzureChatCompletion(BaseLLM):
                 openai_aclient = AsyncAzureOpenAI(**azure_client_params)
             else:
                 openai_aclient = client
-            response = await openai_aclient.embeddings.with_raw_response.create(
+            raw_response = await openai_aclient.embeddings.with_raw_response.create(
                 **data, timeout=timeout
             )
+            response = raw_response.parse()
             stringified_response = response.model_dump()
             ## LOGGING
             logging_obj.post_call(
diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 25e8c59aa..5c48eef4d 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -2027,8 +2027,8 @@ class OpenAITextCompletion(BaseLLM):
             else:
                 openai_client = client
 
-            response = openai_client.completions.with_raw_response.create(**data)  # type: ignore
-
+            raw_response = openai_client.completions.with_raw_response.create(**data)  # type: ignore
+            response = raw_response.parse()
             response_json = response.model_dump()
 
             ## LOGGING
@@ -2075,8 +2075,12 @@ class OpenAITextCompletion(BaseLLM):
             else:
                 openai_aclient = client
 
-            response = await openai_aclient.completions.with_raw_response.create(**data)
+            raw_response = await openai_aclient.completions.with_raw_response.create(
+                **data
+            )
+            response = raw_response.parse()
             response_json = response.model_dump()
+
             ## LOGGING
             logging_obj.post_call(
                 input=prompt,
diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py
index fbc1dd047..2bcc51382 100644
--- a/litellm/tests/test_exceptions.py
+++ b/litellm/tests/test_exceptions.py
@@ -855,7 +855,7 @@ def _pre_call_utils(
 ):
     if call_type == "embedding":
         data["input"] = "Hello world!"
-        mapped_target = client.embeddings.with_raw_response
+        mapped_target: Any = client.embeddings.with_raw_response
        if sync_mode:
             original_function = litellm.embedding
         else:
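
Note on the .pre-commit-config.yaml hunk: it re-enables the previously commented-out mypy hook, so type checking now runs over files matching ^litellm/ at commit time. As a usage sketch (assuming pre-commit is installed in the environment), the hook can also be run on demand:

    pre-commit run mypy --all-files

This invokes the configured entry, python3 -m mypy --ignore-missing-imports, against all tracked files matched by the hook's files pattern.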
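The azure.py and openai.py hunks all apply the same fix: in the openai Python SDK (v1+), a call through with_raw_response returns a raw-response wrapper exposing HTTP-level details, not the typed pydantic model, so calling .model_dump() on it fails until the wrapper is unwrapped with .parse(). A minimal standalone sketch of the pattern, assuming openai>=1.0 and an OPENAI_API_KEY in the environment (the model name and prompt are illustrative, not from the patch):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # with_raw_response yields a wrapper around the HTTP response,
    # not the typed Completion object itself.
    raw_response = client.completions.with_raw_response.create(
        model="gpt-3.5-turbo-instruct",  # illustrative model name
        prompt="Say hello",
    )
    print(raw_response.headers.get("x-request-id"))  # HTTP details live on the wrapper

    # .parse() unwraps into the typed Completion model, which is
    # where .model_dump() is defined.
    response = raw_response.parse()
    response_json = response.model_dump()

The async variants in the patch are identical except that the create(...) call is awaited; as the azure.py hunk shows, .parse() itself is called synchronously on the awaited result.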
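The test_exceptions.py hunk adds an explicit Any annotation, presumably because mypy pins a variable to the type of its first assignment, and other branches of _pre_call_utils assign mapped_target differently-typed with_raw_response objects. A toy reproduction of that mypy complaint (the names and values here are illustrative, not from the test):

    from typing import Any

    def demo(flag: bool) -> None:
        inferred = "embeddings"  # mypy infers str from the first assignment
        if flag:
            inferred = 123  # error: incompatible types in assignment

        declared: Any = "embeddings"  # Any admits any later reassignment
        if flag:
            declared = 123  # accepted by mypy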