Mirror of https://github.com/BerriAI/litellm.git
test_azure_embedding_max_retries_0
parent b316911120
commit 80a5cfa01d

2 changed files with 0 additions and 22 deletions
@@ -652,7 +652,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
         model: str,
         data: dict,
         model_response: EmbeddingResponse,
-        azure_client_params: dict,
         input: list,
         logging_obj: LiteLLMLoggingObj,
         api_base: str,
@@ -743,14 +742,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
             data = {"model": model, "input": input, **optional_params}
             if max_retries is None:
                 max_retries = litellm.DEFAULT_MAX_RETRIES
-            azure_client_params = self.initialize_azure_sdk_client(
-                litellm_params=litellm_params or {},
-                api_key=api_key,
-                model_name=model,
-                api_version=api_version,
-                api_base=api_base,
-                is_async=False,
-            )
             ## LOGGING
             logging_obj.pre_call(
                 input=input,
@@ -769,7 +760,6 @@ class AzureChatCompletion(BaseAzureLLM, BaseLLM):
                     logging_obj=logging_obj,
                     api_key=api_key,
                     model_response=model_response,
-                    azure_client_params=azure_client_params,
                     timeout=timeout,
                     client=client,
                     litellm_params=litellm_params,
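The hunks above drop the azure_client_params parameter from the embedding path and the inline self.initialize_azure_sdk_client(...) call that built it; the commit does not show where that setup now happens, presumably in the shared base class. The commit title points at the max_retries=0 case. Below is a minimal, hypothetical usage sketch of the public entry point this path serves; the deployment name, endpoint, API version, and key are placeholders, not values from the commit.

# Hypothetical reproduction of the case named in the commit title:
# calling the Azure embedding route with retries disabled (max_retries=0).
# Deployment name, endpoint, API version, and key are placeholders.
import litellm

response = litellm.embedding(
    model="azure/my-embedding-deployment",
    input=["hello world"],
    api_base="https://my-resource.openai.azure.com",
    api_version="2024-02-01",
    api_key="***",
    max_retries=0,
)
print(response)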
@@ -12,18 +12,6 @@ from ..common_utils import AzureOpenAIError, BaseAzureLLM

 openai_text_completion_config = OpenAITextCompletionConfig()


-def select_azure_base_url_or_endpoint(azure_client_params: dict):
-    azure_endpoint = azure_client_params.get("azure_endpoint", None)
-    if azure_endpoint is not None:
-        # see : https://github.com/openai/openai-python/blob/3d61ed42aba652b547029095a7eb269ad4e1e957/src/openai/lib/azure.py#L192
-        if "/openai/deployments" in azure_endpoint:
-            # this is base_url, not an azure_endpoint
-            azure_client_params["base_url"] = azure_endpoint
-            azure_client_params.pop("azure_endpoint")
-
-    return azure_client_params
-
-
 class AzureTextCompletion(BaseAzureLLM):
     def __init__(self) -> None:
         super().__init__()
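The second file removes the module-level select_azure_base_url_or_endpoint helper. Since its whole body appears in the deleted lines, a self-contained sketch with a small usage example is reproduced here for reference; the commit itself does not show where, or whether, this logic was re-homed (presumably inside BaseAzureLLM's client setup).

# Standalone copy of the deleted helper, for illustration only.
def select_azure_base_url_or_endpoint(azure_client_params: dict) -> dict:
    azure_endpoint = azure_client_params.get("azure_endpoint", None)
    if azure_endpoint is not None:
        # A URL that already contains "/openai/deployments" is a full base_url,
        # not a bare Azure endpoint (see openai-python's lib/azure.py).
        if "/openai/deployments" in azure_endpoint:
            azure_client_params["base_url"] = azure_endpoint
            azure_client_params.pop("azure_endpoint")
    return azure_client_params


# Bare resource endpoint: left as azure_endpoint.
print(select_azure_base_url_or_endpoint(
    {"azure_endpoint": "https://my-resource.openai.azure.com"}
))

# Deployment-scoped URL: rewritten to base_url.
print(select_azure_base_url_or_endpoint(
    {"azure_endpoint": "https://my-resource.openai.azure.com/openai/deployments/my-deployment"}
))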