From f99a161d98016878886d280c12a8be92f53fa38b Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Thu, 16 Nov 2023 12:15:50 -0800
Subject: [PATCH] fix(azure.py): fix linting errors

---
 litellm/llms/azure.py | 11 +++--------
 litellm/main.py       |  2 +-
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py
index fbb3a65831..c59fb8ee33 100644
--- a/litellm/llms/azure.py
+++ b/litellm/llms/azure.py
@@ -131,11 +131,11 @@ class AzureChatCompletion(BaseLLM):
             )
         if acompletion is True:
             if optional_params.get("stream", False):
-                return self.async_streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
+                return self.async_streaming(logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
             else:
-                return self.acompletion(api_base=api_base, data=data, headers=headers, model_response=model_response, api_key=api_key, api_version=api_version, model=model, azure_ad_token=azure_ad_token)
+                return self.acompletion(api_base=api_base, data=data, model_response=model_response, api_key=api_key, api_version=api_version, model=model, azure_ad_token=azure_ad_token)
         elif "stream" in optional_params and optional_params["stream"] == True:
-            return self.streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
+            return self.streaming(logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
         else:
             azure_client = AzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=api_base, azure_deployment=model, azure_ad_token=azure_ad_token)
             response = azure_client.chat.completions.create(**data) # type: ignore
@@ -152,7 +152,6 @@ class AzureChatCompletion(BaseLLM):
             model: str,
             api_base: str,
             data: dict,
-            headers: dict,
             model_response: ModelResponse,
             azure_ad_token: Optional[str]=None, ):
         try:
@@ -173,8 +172,6 @@ class AzureChatCompletion(BaseLLM):
             api_key: str,
             api_version: str,
             data: dict,
-            headers: dict,
-            model_response: ModelResponse,
             model: str,
             azure_ad_token: Optional[str]=None,
     ):
@@ -190,8 +187,6 @@ class AzureChatCompletion(BaseLLM):
             api_key: str,
             api_version: str,
             data: dict,
-            headers: dict,
-            model_response: ModelResponse,
             model: str,
             azure_ad_token: Optional[str]=None):
         azure_client = AsyncAzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=api_base, azure_deployment=model, azure_ad_token=azure_ad_token)
diff --git a/litellm/main.py b/litellm/main.py
index 6d484224e5..f4eaae6c5e 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1819,7 +1819,7 @@ def embedding(
 ###### Text Completion ################
 def text_completion(
     prompt: Union[str, List[Union[str, List[Union[str, List[int]]]]]], # Required: The prompt(s) to generate completions for.
-    model: Optional[str] = None, # Required: ID of the model to use.
+    model: str, # Required: ID of the model to use.
     best_of: Optional[int] = None, # Optional: Generates best_of completions server-side.
     echo: Optional[bool] = None, # Optional: Echo back the prompt in addition to the completion.
     frequency_penalty: Optional[float] = None, # Optional: Penalize new tokens based on their existing frequency.
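
For context, a minimal standalone sketch (not part of the patch) of the client call pattern
the patched azure.py helpers reduce to once the unused headers/model_response parameters are
dropped. It assumes the openai v1 SDK that AsyncAzureOpenAI comes from; the endpoint, key,
and deployment values are illustrative placeholders, not values from this commit.

    import asyncio
    from openai import AsyncAzureOpenAI  # openai v1 SDK

    async def main():
        # Keyword arguments mirror those used in the patched azure.py;
        # all values below are placeholders.
        azure_client = AsyncAzureOpenAI(
            api_key="<your-azure-api-key>",
            api_version="2023-07-01-preview",
            azure_endpoint="https://<your-resource>.openai.azure.com",
            azure_deployment="<your-deployment>",
            azure_ad_token=None,  # alternatively, pass an Azure AD token instead of an API key
        )
        data = {
            "model": "<your-deployment>",
            "messages": [{"role": "user", "content": "Hello"}],
        }
        response = await azure_client.chat.completions.create(**data)
        print(response.choices[0].message.content)

    asyncio.run(main())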