fix: fix linting errors
commit 18731cf42b
parent 415abc86c6
4 changed files with 16 additions and 12 deletions
```diff
@@ -1,12 +1,12 @@
 repos:
 - repo: local
   hooks:
-  # - id: mypy
-  #   name: mypy
-  #   entry: python3 -m mypy --ignore-missing-imports
-  #   language: system
-  #   types: [python]
-  #   files: ^litellm/
+  - id: mypy
+    name: mypy
+    entry: python3 -m mypy --ignore-missing-imports
+    language: system
+    types: [python]
+    files: ^litellm/
   - id: isort
     name: isort
     entry: isort
```
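This hunk re-enables the mypy hook that had been commented out, so `pre-commit run --all-files` now type-checks everything matching `^litellm/` via `python3 -m mypy --ignore-missing-imports`. The same check can be reproduced outside the hook; a minimal sketch using mypy's documented Python API, assuming mypy is installed and the command runs from the repo root:

```python
# Programmatic equivalent of the hook's entry point.
from mypy import api

# api.run returns (normal_report, error_report, exit_status)
stdout, stderr, exit_code = api.run(["--ignore-missing-imports", "litellm/"])
print(stdout, end="")
if exit_code != 0:
    raise SystemExit(exit_code)
```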
```diff
@@ -854,7 +854,6 @@ class AzureChatCompletion(BaseLLM):
                     additional_args={"complete_input_dict": data},
                     original_response=str(e),
                 )
-                exception_mapping_worked = True
                 raise e
             except asyncio.CancelledError as e:
                 ## LOGGING
```
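The only change here deletes a dead store: the local is written and the very next statement re-raises, so the value can never be read in this frame. It was presumably flagged as an unused assignment in the linting pass this commit addresses. A minimal illustration of the pattern, with hypothetical names:

```python
def reraise(e: Exception) -> None:
    handled = True  # dead store: assigned, then the frame exits on the next line
    raise e
```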
```diff
@@ -1029,9 +1028,10 @@ class AzureChatCompletion(BaseLLM):
             openai_aclient = AsyncAzureOpenAI(**azure_client_params)
         else:
             openai_aclient = client
-        response = await openai_aclient.embeddings.with_raw_response.create(
+        raw_response = await openai_aclient.embeddings.with_raw_response.create(
             **data, timeout=timeout
         )
+        response = raw_response.parse()
         stringified_response = response.model_dump()
         ## LOGGING
         logging_obj.post_call(
```
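The rename fixes a type error: in the OpenAI Python SDK (v1+), `with_raw_response.create(...)` returns a raw-response wrapper rather than the typed model, so calling `model_dump()` on it directly does not type-check. `.parse()` recovers the typed object. A minimal sketch of the pattern, assuming an `AsyncAzureOpenAI` client constructed elsewhere and a hypothetical deployment name:

```python
from openai import AsyncAzureOpenAI


async def embed(client: AsyncAzureOpenAI, text: str) -> dict:
    # with_raw_response yields the raw HTTP layer, not the typed model
    raw_response = await client.embeddings.with_raw_response.create(
        model="my-embedding-deployment",  # assumption: your Azure deployment name
        input=text,
    )
    response = raw_response.parse()  # recover the typed CreateEmbeddingResponse
    return response.model_dump()  # plain dict, as the logging path expects
```

Run it with e.g. `asyncio.run(embed(client, "hello"))`.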
```diff
@@ -2027,8 +2027,8 @@ class OpenAITextCompletion(BaseLLM):
         else:
             openai_client = client

-        response = openai_client.completions.with_raw_response.create(**data)  # type: ignore
-
+        raw_response = openai_client.completions.with_raw_response.create(**data)  # type: ignore
+        response = raw_response.parse()
         response_json = response.model_dump()

         ## LOGGING
```
```diff
@@ -2075,8 +2075,12 @@ class OpenAITextCompletion(BaseLLM):
         else:
             openai_aclient = client

-        response = await openai_aclient.completions.with_raw_response.create(**data)
+        raw_response = await openai_aclient.completions.with_raw_response.create(
+            **data
+        )
+        response = raw_response.parse()
         response_json = response.model_dump()

         ## LOGGING
         logging_obj.post_call(
             input=prompt,
```
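Both `OpenAITextCompletion` hunks apply the same fix as the embeddings path, with the async call additionally re-wrapped across lines (consistent with Black-style formatting). Keeping the raw wrapper around is what `with_raw_response` is for: HTTP metadata stays accessible next to the parsed body. A standalone sync sketch (model name and header are illustrative; assumes `OPENAI_API_KEY` is set):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

raw_response = client.completions.with_raw_response.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say hello",
    max_tokens=5,
)
# The raw wrapper exposes HTTP metadata the typed model hides.
print(raw_response.headers.get("x-request-id"))
completion = raw_response.parse()  # typed Completion object
print(completion.choices[0].text)
```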
```diff
@@ -855,7 +855,7 @@ def _pre_call_utils(
 ):
     if call_type == "embedding":
         data["input"] = "Hello world!"
-        mapped_target = client.embeddings.with_raw_response
+        mapped_target: Any = client.embeddings.with_raw_response
         if sync_mode:
             original_function = litellm.embedding
         else:
```
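Annotating `mapped_target` as `Any` sidesteps mypy's inference from the first assignment; in a test helper like this the variable is plausibly reassigned to other `with_raw_response` namespaces in later branches, which mypy would otherwise reject. A self-contained illustration with hypothetical classes:

```python
from typing import Any


class Embeddings: ...


class Completions: ...


# Without the annotation, mypy infers `target: Embeddings` from the first
# assignment and reports on the second:
#   error: Incompatible types in assignment (expression has type "Completions",
#   variable has type "Embeddings")  [assignment]
target: Any = Embeddings()
target = Completions()
```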