From fc8b54eb7367f9dd5b1f7b4c0bca2b78f56aab9d Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Wed, 15 Nov 2023 18:25:14 -0800
Subject: [PATCH] fix(openai.py): switch back to using requests instead of httpx

---
 litellm/__init__.py                  |  2 +-
 litellm/llms/openai.py               |  5 ++---
 litellm/tests/test_custom_logger.py  |  5 -----
 litellm/tests/test_model_fallback.py | 28 ----------------------------
 4 files changed, 3 insertions(+), 37 deletions(-)
 delete mode 100644 litellm/tests/test_model_fallback.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index d4e72c5d94..4ad362d509 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -19,7 +19,7 @@ telemetry = True
 max_tokens = 256 # OpenAI Defaults
 drop_params = False
 retry = True
-request_timeout: Optional[float] = 600
+request_timeout: Optional[float] = 6000
 api_key: Optional[str] = None
 openai_key: Optional[str] = None
 azure_key: Optional[str] = None
diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 451f4cedab..d528b1f126 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -4,7 +4,7 @@ import httpx
 from .base import BaseLLM
 from litellm.utils import ModelResponse, Choices, Message, CustomStreamWrapper, convert_to_model_response_object, Usage
 from typing import Callable, Optional
-import aiohttp
+import aiohttp, requests
 import litellm
 
 class OpenAIError(Exception):
@@ -356,7 +356,7 @@ class OpenAIChatCompletion(BaseLLM):
                 additional_args={"complete_input_dict": data},
             )
             ## COMPLETION CALL
-            response = httpx.post(
+            response = requests.post(
                 api_base, headers=headers, json=data, timeout=litellm.request_timeout
             )
             ## LOGGING
@@ -488,7 +488,6 @@ class OpenAITextCompletion(BaseLLM):
                 url=f"{api_base}",
                 json=data,
                 headers=headers,
-                timeout=litellm.request_timeout
             )
             if response.status_code != 200:
                 raise OpenAIError(status_code=response.status_code, message=response.text)
diff --git a/litellm/tests/test_custom_logger.py b/litellm/tests/test_custom_logger.py
index dc79eb3ce8..5fda1596a5 100644
--- a/litellm/tests/test_custom_logger.py
+++ b/litellm/tests/test_custom_logger.py
@@ -38,11 +38,6 @@ def test_chat_openai():
         for chunk in response:
             # print(chunk)
             continue
-        response = completion(model="gpt-3.5-turbo",
-                              messages=[{
-                                  "role": "user",
-                                  "content": "Hi 👋 - i'm openai"
-                              }])
 
         # print(response)
     except Exception as e:
diff --git a/litellm/tests/test_model_fallback.py b/litellm/tests/test_model_fallback.py
deleted file mode 100644
index 895588091e..0000000000
--- a/litellm/tests/test_model_fallback.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#### What this tests ####
-# This tests error handling + logging (esp. for sentry breadcrumbs)
-
-import sys, os
-import traceback
-
-sys.path.insert(
-    0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
-import litellm
-from litellm import embedding, completion
-
-litellm.success_callback = ["posthog"]
-litellm.failure_callback = ["sentry", "posthog"]
-
-litellm.set_verbose = True
-
-model_fallback_list = ["claude-instant-1", "gpt-3.5-turbo", "chatgpt-test"]
-
-user_message = "Hello, how are you?"
-messages = [{"content": user_message, "role": "user"}]
-
-for model in model_fallback_list:
-    try:
-        response = embedding(model="text-embedding-ada-002", input=[user_message])
-        response = completion(model=model, messages=messages)
-    except Exception as e:
-        print(f"error occurred: {traceback.format_exc()}")
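
Note on the swap above: requests.post and httpx.post accept the same headers/json/timeout keyword arguments used in this call site, and both interpret timeout as seconds, so the patched call shape is unchanged. Below is a minimal standalone sketch of the post-patch call path; the api_base URL, headers, and payload are illustrative placeholders (the real values are constructed inside OpenAIChatCompletion), while requests.post and litellm.request_timeout come from the diff itself.

import requests
import litellm

# Illustrative values only -- in litellm these are built from the caller's
# api_key, api_base, and message list.
api_base = "https://api.openai.com/v1/chat/completions"
headers = {"Authorization": "Bearer sk-...", "Content-Type": "application/json"}
data = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hi"}]}

# Same call shape as the patched code: requests.post with a seconds-based timeout.
response = requests.post(
    api_base, headers=headers, json=data, timeout=litellm.request_timeout
)
print(response.status_code)

With litellm.request_timeout now defaulting to 6000 seconds, this call waits much longer before raising requests.exceptions.Timeout than the previous 600-second default did.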