forked from phoenix/litellm-mirror
fix get_async_httpx_client
This commit is contained in:
parent
398e6d0ac6
commit
89d76d1eb7
2 changed files with 14 additions and 3 deletions
|
@@ -45,7 +45,10 @@ class OpenAILikeEmbeddingHandler(OpenAILikeBase):
|
|||
response = None
|
||||
try:
|
||||
if client is None or isinstance(client, AsyncHTTPHandler):
|
||||
self.async_client = AsyncHTTPHandler(timeout=timeout) # type: ignore
|
||||
self.async_client = get_async_httpx_client(
|
||||
llm_provider=litellm.LlmProviders.OPENAI,
|
||||
params={"timeout": timeout},
|
||||
)
|
||||
else:
|
||||
self.async_client = client
|
||||
|
||||
|
|
|
@@ -7,8 +7,13 @@ from typing import Any, List, Literal, Optional, Union
|
|||
|
||||
import httpx
|
||||
|
||||
import litellm
|
||||
from litellm import EmbeddingResponse
|
||||
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
|
||||
from litellm.llms.custom_httpx.http_handler import (
|
||||
AsyncHTTPHandler,
|
||||
HTTPHandler,
|
||||
get_async_httpx_client,
|
||||
)
|
||||
from litellm.types.llms.openai import EmbeddingInput
|
||||
from litellm.types.llms.vertex_ai import (
|
||||
VertexAIBatchEmbeddingsRequestBody,
|
||||
|
@@ -150,7 +155,10 @@ class GoogleBatchEmbeddings(VertexLLM):
|
|||
else:
|
||||
_params["timeout"] = httpx.Timeout(timeout=600.0, connect=5.0)
|
||||
|
||||
async_handler: AsyncHTTPHandler = AsyncHTTPHandler(**_params) # type: ignore
|
||||
async_handler: AsyncHTTPHandler = get_async_httpx_client(
|
||||
llm_provider=litellm.LlmProviders.VERTEX_AI,
|
||||
params={"timeout": timeout},
|
||||
)
|
||||
else:
|
||||
async_handler = client # type: ignore
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue