mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
fix - reuse client initialized on proxy config
This commit is contained in:
parent 57852bada9
commit b7bca0af6c

2 changed files with 16 additions and 5 deletions
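For context, the fix follows the pattern sketched below: a client created once (for example, when the proxy loads its config) is looked up and reused, rather than a fresh SDK client, with a fresh HTTP connection pool, being constructed on every request. A minimal sketch assuming the `openai` v1 SDK; `get_or_create_client` and the module-level cache are illustrative stand-ins, not litellm's actual helper.

```python
from typing import Dict

from openai import AsyncAzureOpenAI

# Illustrative cache: litellm's proxy initializes clients once when the
# config is loaded, then reuses them across requests instead of building
# a new client (and a new connection pool) per call.
_client_cache: Dict[str, AsyncAzureOpenAI] = {}


def get_or_create_client(deployment: str, **azure_client_params) -> AsyncAzureOpenAI:
    """Hypothetical helper, not part of litellm: return the cached client
    for this deployment, creating it on first use."""
    if deployment not in _client_cache:
        _client_cache[deployment] = AsyncAzureOpenAI(**azure_client_params)
    return _client_cache[deployment]
```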
First changed file (class AzureChatCompletion):

```diff
@@ -812,7 +812,7 @@ class AzureChatCompletion(BaseLLM):
         azure_client_params: dict,
         api_key: str,
         input: list,
-        client=None,
+        client: Optional[AsyncAzureOpenAI] = None,
         logging_obj=None,
         timeout=None,
     ):
@@ -911,6 +911,7 @@ class AzureChatCompletion(BaseLLM):
                 model_response=model_response,
                 azure_client_params=azure_client_params,
                 timeout=timeout,
+                client=client,
             )
             return response
         if client is None:
```
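These two hunks type the `client` parameter on the async Azure embedding path and, crucially, forward the caller's pre-initialized client into `self.aembedding`, where it was previously dropped. The practical effect, sketched against the plain `openai` SDK (endpoint, key, API version, and deployment name below are placeholders): the same `AsyncAzureOpenAI` instance, and therefore the same connection pool, now serves every embedding request instead of being rebuilt each time.

```python
import asyncio

from openai import AsyncAzureOpenAI

# Built once, e.g. when the proxy config is loaded (placeholder values).
client = AsyncAzureOpenAI(
    api_key="my-azure-key",
    api_version="2023-07-01-preview",
    azure_endpoint="https://my-resource.openai.azure.com",
)


async def embed(texts: list[str]):
    # The reused client keeps its connection pool warm across calls.
    return await client.embeddings.create(model="my-deployment", input=texts)


# asyncio.run(embed(["hello world"]))
```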
Second changed file (class OpenAIChatCompletion):

```diff
@@ -996,11 +996,11 @@ class OpenAIChatCompletion(BaseLLM):
         self,
         input: list,
         data: dict,
-        model_response: ModelResponse,
+        model_response: litellm.utils.EmbeddingResponse,
         timeout: float,
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
-        client=None,
+        client: Optional[AsyncOpenAI] = None,
         max_retries=None,
         logging_obj=None,
     ):
@@ -1039,9 +1039,9 @@ class OpenAIChatCompletion(BaseLLM):
         input: list,
         timeout: float,
         logging_obj,
+        model_response: litellm.utils.EmbeddingResponse,
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
-        model_response: Optional[litellm.utils.EmbeddingResponse] = None,
         optional_params=None,
         client=None,
         aembedding=None,
@@ -1062,7 +1062,17 @@ class OpenAIChatCompletion(BaseLLM):
         )

         if aembedding is True:
-            response = self.aembedding(data=data, input=input, logging_obj=logging_obj, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout, client=client, max_retries=max_retries)  # type: ignore
+            response = self.aembedding(
+                data=data,
+                input=input,
+                logging_obj=logging_obj,
+                model_response=model_response,
+                api_base=api_base,
+                api_key=api_key,
+                timeout=timeout,
+                client=client,
+                max_retries=max_retries,
+            )
             return response

         openai_client = self._get_openai_client(
```
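On the OpenAI side the signatures get the same treatment: `client` is typed as `Optional[AsyncOpenAI]`, `model_response` becomes a required `litellm.utils.EmbeddingResponse` instead of an optional kwarg, and the one-line `self.aembedding(...)` call is expanded to one argument per line. From the caller's side, a hedged usage sketch: assuming the public `litellm.aembedding` wrapper forwards its `client` argument down to this path, as the `client=client` plumbing in this commit suggests, passing the same `AsyncOpenAI` instance on every call is what makes the reuse take effect.

```python
import asyncio

import litellm
from openai import AsyncOpenAI

# One client for the process lifetime, mirroring what the proxy does with
# clients initialized from its config ("sk-..." is a placeholder).
shared_client = AsyncOpenAI(api_key="sk-...")


async def main():
    # Assumption: `client` is forwarded to the async embedding path, as the
    # plumbing added in this commit suggests.
    response = await litellm.aembedding(
        model="text-embedding-ada-002",
        input=["first chunk", "second chunk"],
        client=shared_client,
    )
    print(response)


# asyncio.run(main())
```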