build(pyproject.toml): add new dev dependencies - for type checking (#9631)

* build(pyproject.toml): add new dev dependencies - for type checking (see the pyproject sketch after this list)

* build: reformat files to fit black

* ci: reformat to fit black

* ci(test-litellm.yml): make test runs clearer

* build(pyproject.toml): add ruff

* fix: fix ruff checks

* build(mypy/): fix mypy linting errors

* fix(hashicorp_secret_manager.py): fix passing cert for tls auth (see the httpx sketch after this list)

* build(mypy/): resolve all mypy errors

* test: update test

* fix: fix black formatting

* build(pre-commit-config.yaml): use poetry run black (see the hook sketch after this list)

* fix(proxy_server.py): fix linting error

* fix: fix ruff safe representation error
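The pyproject.toml bullets above (dev dependencies for type checking, plus ruff) reduce to a small dependency group. A minimal sketch, assuming Poetry-style dependency groups; the group name, version pins, stub packages, and mypy settings here are assumptions, not the commit's exact contents:

```toml
# Hypothetical excerpt of pyproject.toml; pins and settings are assumptions.
[tool.poetry.group.dev.dependencies]
black = "*"
ruff = "*"
mypy = "*"
types-requests = "*"  # stub package so mypy can type-check calls into requests

[tool.mypy]
ignore_missing_imports = true  # assumption: silence missing third-party stubs
```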
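For the hashicorp_secret_manager.py fix: TLS client authentication against Vault comes down to handing httpx a (cert, key) pair, which it presents during the TLS handshake. A minimal sketch, assuming httpx as the transport; the env-var names and secret path are hypothetical, and this is not the commit's actual code:

```python
import os

import httpx

# Hypothetical env-var names; litellm's real config keys may differ.
cert_path = os.environ["HCP_VAULT_CLIENT_CERT"]  # client certificate (PEM)
key_path = os.environ["HCP_VAULT_CLIENT_KEY"]    # matching private key (PEM)

client = httpx.Client(
    base_url="https://vault.example.com:8200",  # hypothetical Vault address
    cert=(cert_path, key_path),  # httpx presents this cert during TLS auth
)
response = client.get("/v1/secret/data/litellm/openai-key")  # hypothetical path
response.raise_for_status()
```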
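Running black via `poetry run` from pre-commit means declaring a local, system-language hook instead of pulling black's upstream mirror, so the hook uses the black version pinned in pyproject.toml. A sketch of what such an entry can look like; the hook id and name are assumptions:

```yaml
# Hypothetical .pre-commit-config.yaml entry.
repos:
  - repo: local
    hooks:
      - id: black
        name: black (via poetry)
        entry: poetry run black
        language: system  # use the project's own environment, not an isolated one
        types: [python]   # only pass Python files to the hook
```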
Krish Dholakia, 2025-03-29 11:02:13 -07:00, committed by GitHub
parent 95e5dfae5a
commit 9b7ebb6a7d
214 changed files with 1553 additions and 1433 deletions

@@ -266,7 +266,6 @@ class OpenAIConfig(BaseConfig):
         api_key: Optional[str] = None,
         json_mode: Optional[bool] = None,
     ) -> ModelResponse:
-
         logging_obj.post_call(original_response=raw_response.text)
         logging_obj.model_call_details["response_headers"] = raw_response.headers
         final_response_obj = cast(
@@ -320,7 +319,6 @@ class OpenAIChatCompletionResponseIterator(BaseModelResponseIterator):
 class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
-
     def __init__(self) -> None:
         super().__init__()
@@ -513,7 +511,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
         custom_llm_provider: Optional[str] = None,
         drop_params: Optional[bool] = None,
     ):
-
         super().completion()
         try:
             fake_stream: bool = False
@@ -553,7 +550,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
             for _ in range(
                 2
             ):  # if call fails due to alternating messages, retry with reformatted message
-
                 if provider_config is not None:
                     data = provider_config.transform_request(
                         model=model,
@@ -649,13 +645,14 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
                     },
                 )
-                headers, response = (
-                    self.make_sync_openai_chat_completion_request(
-                        openai_client=openai_client,
-                        data=data,
-                        timeout=timeout,
-                        logging_obj=logging_obj,
-                    )
-                )
+                (
+                    headers,
+                    response,
+                ) = self.make_sync_openai_chat_completion_request(
+                    openai_client=openai_client,
+                    data=data,
+                    timeout=timeout,
+                    logging_obj=logging_obj,
+                )
                 logging_obj.model_call_details["response_headers"] = headers
@@ -763,7 +760,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
         for _ in range(
             2
         ):  # if call fails due to alternating messages, retry with reformatted message
-
             try:
                 openai_aclient: AsyncOpenAI = self._get_openai_client(  # type: ignore
                     is_async=True,
@@ -973,7 +969,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
         except (
             Exception
         ) as e:  # need to exception handle here. async exceptions don't get caught in sync functions.
-
             if isinstance(e, OpenAIError):
                 raise e
@@ -1246,7 +1241,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
     ):
         response = None
         try:
-
             openai_aclient = self._get_openai_client(
                 is_async=True,
                 api_key=api_key,
@@ -1333,7 +1327,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
             )
             return convert_to_model_response_object(response_object=response, model_response_object=model_response, response_type="image_generation")  # type: ignore
         except OpenAIError as e:
-
             ## LOGGING
             logging_obj.post_call(
                 input=prompt,
@@ -1372,7 +1365,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
         aspeech: Optional[bool] = None,
         client=None,
     ) -> HttpxBinaryResponseContent:
-
         if aspeech is not None and aspeech is True:
             return self.async_audio_speech(
                 model=model,
@@ -1419,7 +1411,6 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
         timeout: Union[float, httpx.Timeout],
         client=None,
     ) -> HttpxBinaryResponseContent:
-
         openai_client = cast(
             AsyncOpenAI,
             self._get_openai_client(