From b9e6989e41be01c937ea2c45fd421538e2513c22 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Thu, 9 Nov 2023 16:50:43 -0800
Subject: [PATCH] test: fix linting issues

---
 litellm/llms/azure.py         | 2 +-
 litellm/llms/openai.py        | 6 ++----
 litellm/main.py               | 5 +++--
 litellm/proxy/proxy_server.py | 2 +-
 4 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py
index 8553f931a..d817233d2 100644
--- a/litellm/llms/azure.py
+++ b/litellm/llms/azure.py
@@ -147,7 +147,7 @@ class AzureChatCompletion(BaseLLM):
             if optional_params.get("stream", False):
                 return self.async_streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model)
             else:
-                return self.acompletion(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model)
+                return self.acompletion(api_base=api_base, data=data, headers=headers, model_response=model_response)
         elif "stream" in optional_params and optional_params["stream"] == True:
             response = self._client_session.post(
                 url=api_base,
diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index c43e65bda..e56d199d9 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -222,7 +222,7 @@ class OpenAIChatCompletion(BaseLLM):
             if optional_params.get("stream", False):
                 return self.async_streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model)
             else:
-                return self.acompletion(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model)
+                return self.acompletion(api_base=api_base, data=data, headers=headers, model_response=model_response)
         elif "stream" in optional_params and optional_params["stream"] == True:
             response = self._client_session.post(
                 url=api_base,
@@ -276,11 +276,9 @@ class OpenAIChatCompletion(BaseLLM):
             raise OpenAIError(status_code=500, message=traceback.format_exc())

     async def acompletion(self,
-                          logging_obj,
                           api_base: str,
                           data: dict, headers: dict,
-                          model_response: ModelResponse,
-                          model: str):
+                          model_response: ModelResponse):
         async with aiohttp.ClientSession() as session:
             async with session.post(api_base, json=data, headers=headers) as response:
                 response_json = await response.json()
diff --git a/litellm/main.py b/litellm/main.py
index c78a03021..d5de2c81a 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -77,7 +77,7 @@ openai_text_completions = OpenAITextCompletion()
 azure_chat_completions = AzureChatCompletion()
 ####### COMPLETION ENDPOINTS ################

-async def acompletion(model: str, messages: List = [], *args, **kwargs):
+async def acompletion(*args, **kwargs):
     """
     Asynchronously executes a litellm.completion() call for any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly)

@@ -117,7 +117,8 @@ async def acompletion(model: str, messages: List = [], *args, **kwargs):
         - If `stream` is True, the function returns an async generator that yields completion lines.
     """
     loop = asyncio.get_event_loop()
-
+    model = args[0] if len(args) > 0 else kwargs["model"]
+    messages = args[1] if len(args) > 1 else kwargs["messages"]
     ### INITIALIZE LOGGING OBJECT ###
     kwargs["litellm_call_id"] = str(uuid.uuid4())
     start_time = datetime.datetime.now()
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 9af92a64a..dcea9de50 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -199,7 +199,7 @@ def save_params_to_config(data: dict):

 def load_router_config(router: Optional[litellm.Router], config_file_path: str):
     config = {}
-    server_settings = {}
+    server_settings: dict = {}
     try:
         if os.path.exists(config_file_path):
             with open(config_file_path, 'r') as file: