Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00.
Handle `_get_async_http_client` / `_get_sync_http_client` for OpenAI clients (respect `litellm.ssl_verify`).
This commit is contained in:
parent
ef94a1ed0c
commit
3aa1e78ec3
1 changed files with 24 additions and 2 deletions
|
@ -370,7 +370,7 @@ class OpenAIChatCompletion(BaseLLM):
|
||||||
_new_client: Union[OpenAI, AsyncOpenAI] = AsyncOpenAI(
|
_new_client: Union[OpenAI, AsyncOpenAI] = AsyncOpenAI(
|
||||||
api_key=api_key,
|
api_key=api_key,
|
||||||
base_url=api_base,
|
base_url=api_base,
|
||||||
http_client=litellm.aclient_session,
|
http_client=OpenAIChatCompletion._get_async_http_client(),
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
max_retries=max_retries,
|
max_retries=max_retries,
|
||||||
organization=organization,
|
organization=organization,
|
||||||
|
@ -379,7 +379,7 @@ class OpenAIChatCompletion(BaseLLM):
|
||||||
_new_client = OpenAI(
|
_new_client = OpenAI(
|
||||||
api_key=api_key,
|
api_key=api_key,
|
||||||
base_url=api_base,
|
base_url=api_base,
|
||||||
http_client=litellm.client_session,
|
http_client=OpenAIChatCompletion._get_sync_http_client(),
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
max_retries=max_retries,
|
max_retries=max_retries,
|
||||||
organization=organization,
|
organization=organization,
|
||||||
|
@ -401,6 +401,28 @@ class OpenAIChatCompletion(BaseLLM):
|
||||||
)
|
)
|
||||||
return client
|
return client
|
||||||
|
|
||||||
|
@staticmethod
def _get_async_http_client() -> Optional[httpx.AsyncClient]:
    """Select the async httpx client to hand to the AsyncOpenAI SDK.

    Returns:
        A freshly constructed ``httpx.AsyncClient`` that honors
        ``litellm.ssl_verify`` (with generous connection limits) when SSL
        verification is enabled; otherwise the globally configured
        ``litellm.aclient_session`` (which may be ``None`` — presumably
        the SDK then falls back to its own default client; confirm with
        the OpenAI SDK docs).
    """
    # Guard clause: verification disabled -> defer to the user-supplied
    # global session, exactly as before.
    if not litellm.ssl_verify:
        return litellm.aclient_session

    # ssl_verify may be True or a CA-bundle path; pass it through as-is.
    connection_limits = httpx.Limits(
        max_connections=1000, max_keepalive_connections=100
    )
    return httpx.AsyncClient(
        limits=connection_limits,
        verify=litellm.ssl_verify,
    )
|
||||||
|
|
||||||
|
@staticmethod
def _get_sync_http_client() -> Optional[httpx.Client]:
    """Select the sync httpx client to hand to the OpenAI SDK.

    Returns:
        A freshly constructed ``httpx.Client`` that honors
        ``litellm.ssl_verify`` (with generous connection limits) when SSL
        verification is enabled; otherwise the globally configured
        ``litellm.client_session`` (which may be ``None`` — presumably
        the SDK then falls back to its own default client; confirm with
        the OpenAI SDK docs).
    """
    # Guard clause: verification disabled -> defer to the user-supplied
    # global session, exactly as before.
    if not litellm.ssl_verify:
        return litellm.client_session

    # ssl_verify may be True or a CA-bundle path; pass it through as-is.
    connection_limits = httpx.Limits(
        max_connections=1000, max_keepalive_connections=100
    )
    return httpx.Client(
        limits=connection_limits,
        verify=litellm.ssl_verify,
    )
|
||||||
|
|
||||||
@track_llm_api_timing()
|
@track_llm_api_timing()
|
||||||
async def make_openai_chat_completion_request(
|
async def make_openai_chat_completion_request(
|
||||||
self,
|
self,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue