forked from phoenix/litellm-mirror
fix(azure.py): fix linting errors
parent 63104f4194
commit f99a161d98
2 changed files with 4 additions and 9 deletions
@@ -131,11 +131,11 @@ class AzureChatCompletion(BaseLLM):
                 )
             if acompletion is True:
                 if optional_params.get("stream", False):
-                    return self.async_streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
+                    return self.async_streaming(logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
                 else:
-                    return self.acompletion(api_base=api_base, data=data, headers=headers, model_response=model_response, api_key=api_key, api_version=api_version, model=model, azure_ad_token=azure_ad_token)
+                    return self.acompletion(api_base=api_base, data=data, model_response=model_response, api_key=api_key, api_version=api_version, model=model, azure_ad_token=azure_ad_token)
             elif "stream" in optional_params and optional_params["stream"] == True:
-                return self.streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
+                return self.streaming(logging_obj=logging_obj, api_base=api_base, data=data, model=model, api_key=api_key, api_version=api_version, azure_ad_token=azure_ad_token)
             else:
                 azure_client = AzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=api_base, azure_deployment=model, azure_ad_token=azure_ad_token)
                 response = azure_client.chat.completions.create(**data) # type: ignore
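The unchanged else-branch above drives the synchronous, non-streaming path through the v1 OpenAI SDK's AzureOpenAI client. As a rough standalone sketch of that call pattern (not the project's code; the endpoint, API version, key, and deployment name below are placeholders):

    from openai import AzureOpenAI

    # Placeholders: substitute your own Azure OpenAI resource values.
    azure_client = AzureOpenAI(
        api_key="my-azure-api-key",
        api_version="2023-07-01-preview",
        azure_endpoint="https://my-resource.openai.azure.com",
        azure_deployment="my-gpt-35-deployment",
    )

    # Rough equivalent of `azure_client.chat.completions.create(**data)` above,
    # with the request fields spelled out instead of packed into `data`.
    response = azure_client.chat.completions.create(
        model="my-gpt-35-deployment",  # placeholder deployment/model id
        messages=[{"role": "user", "content": "Hello from Azure"}],
    )
    print(response.choices[0].message.content)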
@@ -152,7 +152,6 @@ class AzureChatCompletion(BaseLLM):
                     model: str,
                     api_base: str,
                     data: dict,
-                    headers: dict,
                     model_response: ModelResponse,
                     azure_ad_token: Optional[str]=None, ):
         try:
@@ -173,8 +172,6 @@ class AzureChatCompletion(BaseLLM):
                         api_key: str,
                         api_version: str,
                         data: dict,
-                        headers: dict,
-                        model_response: ModelResponse,
                         model: str,
                         azure_ad_token: Optional[str]=None,
                         ):
@@ -190,8 +187,6 @@ class AzureChatCompletion(BaseLLM):
                   api_key: str,
                   api_version: str,
                   data: dict,
-                  headers: dict,
-                  model_response: ModelResponse,
                   model: str,
                   azure_ad_token: Optional[str]=None):
         azure_client = AsyncAzureOpenAI(api_key=api_key, api_version=api_version, azure_endpoint=api_base, azure_deployment=model, azure_ad_token=azure_ad_token)
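The context line above constructs the async client, AsyncAzureOpenAI, from the v1 OpenAI SDK. A hedged sketch of how such a client is typically consumed for a streamed chat completion, again with placeholder credentials rather than litellm's own wiring:

    import asyncio
    from openai import AsyncAzureOpenAI

    async def stream_chat() -> None:
        # Placeholders: substitute your own Azure OpenAI resource values.
        azure_client = AsyncAzureOpenAI(
            api_key="my-azure-api-key",
            api_version="2023-07-01-preview",
            azure_endpoint="https://my-resource.openai.azure.com",
            azure_deployment="my-gpt-35-deployment",
        )
        stream = await azure_client.chat.completions.create(
            model="my-gpt-35-deployment",  # placeholder deployment/model id
            messages=[{"role": "user", "content": "Stream a short reply"}],
            stream=True,
        )
        async for chunk in stream:
            # Some chunks carry no choices or content; guard before printing.
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")

    asyncio.run(stream_chat())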
@@ -1819,7 +1819,7 @@ def embedding(
 ###### Text Completion ################
 def text_completion(
     prompt: Union[str, List[Union[str, List[Union[str, List[int]]]]]], # Required: The prompt(s) to generate completions for.
-    model: Optional[str] = None, # Required: ID of the model to use.
+    model: str, # Required: ID of the model to use.
     best_of: Optional[int] = None, # Optional: Generates best_of completions server-side.
     echo: Optional[bool] = None, # Optional: Echo back the prompt in addition to the completion.
     frequency_penalty: Optional[float] = None, # Optional: Penalize new tokens based on their existing frequency.
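For reference, a minimal call shape that satisfies the updated text_completion signature above, where model is now a plain required string instead of Optional[str] defaulting to None; the model id below is only a placeholder:

    import litellm

    response = litellm.text_completion(
        model="gpt-3.5-turbo-instruct",  # placeholder model id
        prompt="Say this is a test",
    )
    print(response)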