(fix) add linting check to ban creating AsyncHTTPHandler during LLM calling (#6855)

* fix triton

* fix TEXT_COMPLETION_CODESTRAL

* fix REPLICATE

* fix CLARIFAI

* fix HUGGINGFACE

* add test_no_async_http_handler_usage

* fix PREDIBASE

* fix anthropic use get_async_httpx_client

* fix vertex fine tuning

* fix dbricks get_async_httpx_client

* fix get_async_httpx_client vertex

* fix get_async_httpx_client

* fix get_async_httpx_client

* fix make_async_azure_httpx_request

* fix check_for_async_http_handler

* test: cleanup mistral model

* add check for AsyncClient

* fix check_for_async_http_handler

* fix get_async_httpx_client

* fix tests using in_memory_llm_clients_cache

* fix langfuse import

* fix import

---------

Co-authored-by: Krrish Dholakia <krrishdholakia@gmail.com>
Ishaan Jaff authored on 2024-11-21 19:03:02 -08:00 (committed by GitHub)
commit 920f4c9f82, parent 71ebf47cef
26 changed files with 288 additions and 62 deletions
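
The headline change is a test that fails when provider code constructs an async HTTP client inline instead of going through get_async_httpx_client. The sketch below is an illustrative approximation of what test_no_async_http_handler_usage / check_for_async_http_handler could look like, not the code added in this commit; the scanned directory, banned-name set, and helper names are assumptions.

import ast
import os

# Client classes whose direct construction is banned in provider code
# (covers both `AsyncHTTPHandler(...)` and `httpx.AsyncClient(...)`).
BANNED_CONSTRUCTORS = {"AsyncHTTPHandler", "AsyncClient"}
LLM_DIR = "litellm/llms"  # assumed location of the provider integrations


def check_file_for_async_http_handler(path: str) -> list:
    """Return (file, line) pairs where a banned client class is instantiated."""
    with open(path, "r", encoding="utf-8") as f:
        tree = ast.parse(f.read(), filename=path)
    violations = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            # Handles bare names and attribute access (e.g. httpx.AsyncClient).
            name = getattr(node.func, "id", None) or getattr(node.func, "attr", None)
            if name in BANNED_CONSTRUCTORS:
                violations.append((path, node.lineno))
    return violations


def test_no_async_http_handler_usage():
    violations = []
    for root, _, files in os.walk(LLM_DIR):
        for fname in files:
            if fname.endswith(".py"):
                violations.extend(
                    check_file_for_async_http_handler(os.path.join(root, fname))
                )
    assert violations == [], (
        f"Direct AsyncHTTPHandler/AsyncClient construction found: {violations}. "
        "Use get_async_httpx_client so clients are cached and reused."
    )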


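Most of the per-provider bullets in the commit message apply the same refactor: rather than building a new AsyncHTTPHandler (and a new connection pool) on every LLM call, the provider asks for a shared client that is cached in litellm.in_memory_llm_clients_cache and reused. A generic get-or-create sketch of that idea, written with plain httpx and illustrative names rather than litellm's actual internals:

import httpx

# Illustrative stand-in for litellm.in_memory_llm_clients_cache.
_client_cache: dict[str, httpx.AsyncClient] = {}


def get_cached_async_client(provider: str, timeout: float = 600.0) -> httpx.AsyncClient:
    """Return one shared AsyncClient per provider instead of constructing a
    fresh client on every request."""
    if provider not in _client_cache:
        _client_cache[provider] = httpx.AsyncClient(
            timeout=httpx.Timeout(timeout=timeout, connect=5.0)
        )
    return _client_cache[provider]
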
@@ -8,6 +8,7 @@ import traceback
 from dotenv import load_dotenv
 from openai.types.image import Image
+from litellm.caching import InMemoryCache
 logging.basicConfig(level=logging.DEBUG)
 load_dotenv()
@@ -107,7 +108,7 @@ class TestVertexImageGeneration(BaseImageGenTest):
         # comment this when running locally
         load_vertex_ai_credentials()
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {
             "model": "vertex_ai/imagegeneration@006",
             "vertex_ai_project": "adroit-crow-413218",
@@ -118,13 +119,13 @@ class TestVertexImageGeneration(BaseImageGenTest):
 class TestBedrockSd3(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
 
 
 class TestBedrockSd1(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
@@ -181,7 +182,7 @@ def test_image_generation_azure_dall_e_3():
 @pytest.mark.asyncio
 async def test_aimage_generation_bedrock_with_optional_params():
     try:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         response = await litellm.aimage_generation(
             prompt="A cute baby sea otter",
             model="bedrock/stability.stable-diffusion-xl-v1",
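
The test hunks above also show why the cache reset changes shape: litellm.in_memory_llm_clients_cache is now an InMemoryCache object rather than a plain dict, so tests that want fresh clients swap in a new instance instead of assigning {}. A minimal reset helper following the pattern in the diff, assuming only the InMemoryCache import shown above:

import litellm
from litellm.caching import InMemoryCache


def reset_llm_client_cache() -> None:
    # Replace the cached-client store so the next call builds fresh httpx
    # clients; assigning a bare {} would break callers that expect the
    # InMemoryCache interface rather than a dict.
    litellm.in_memory_llm_clients_cache = InMemoryCache()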