forked from phoenix/litellm-mirror
fix get_async_httpx_client
parent 0a10b1ef1c
commit 398e6d0ac6
2 changed files with 12 additions and 3 deletions
@@ -6,7 +6,11 @@ import httpx
 import litellm
 from litellm.caching.caching import Cache, LiteLLMCacheType
 from litellm.litellm_core_utils.litellm_logging import Logging
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+)
 from litellm.llms.OpenAI.openai import AllMessageValues
 from litellm.types.llms.vertex_ai import (
     CachedContentListAllResponseBody,
@@ -352,7 +356,10 @@ class ContextCachingEndpoints(VertexBase):
                 if isinstance(timeout, float) or isinstance(timeout, int):
                     timeout = httpx.Timeout(timeout)
                 _params["timeout"] = timeout
-            client = AsyncHTTPHandler(**_params)  # type: ignore
+            client = get_async_httpx_client(
+                llm_provider=litellm.LlmProviders.VERTEX_AI,
+                params={"timeout": timeout},
+            )
         else:
             client = client
 
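For reference, a minimal usage sketch of the pattern this hunk introduces. It is not part of the commit and assumes only what the diff shows: get_async_httpx_client accepts the llm_provider and params keyword arguments and returns an async handler; the timeout value below is hypothetical.

    import httpx
    import litellm
    from litellm.llms.custom_httpx.http_handler import get_async_httpx_client

    # Hypothetical timeout for illustration; the real value comes from the caller.
    timeout = httpx.Timeout(600.0)

    # Obtain the provider-scoped async client instead of constructing
    # AsyncHTTPHandler(**_params) directly, mirroring the replacement above.
    client = get_async_httpx_client(
        llm_provider=litellm.LlmProviders.VERTEX_AI,
        params={"timeout": timeout},
    )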
@@ -1026,7 +1026,9 @@ async def make_call(
     logging_obj,
 ):
     if client is None:
-        client = AsyncHTTPHandler()  # Create a new client if none provided
+        client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.VERTEX_AI,
+        )
 
     try:
         response = await client.post(api_base, headers=headers, data=data, stream=True)
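Likewise, a hedged sketch of how make_call uses the client after this change, built only from the calls visible in the hunk; api_base, headers, and data are placeholders here, and the response handling is elided.

    import litellm
    from litellm.llms.custom_httpx.http_handler import get_async_httpx_client

    async def demo(api_base: str, headers: dict, data: str):
        # Default provider-scoped client; no explicit timeout params,
        # matching the replacement above.
        client = get_async_httpx_client(
            llm_provider=litellm.LlmProviders.VERTEX_AI,
        )
        # Same call shape as in make_call: stream the POST response.
        response = await client.post(
            api_base, headers=headers, data=data, stream=True
        )
        return response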