fix(openai.py): switch back to using requests instead of httpx

Krrish Dholakia 2023-11-15 18:25:14 -08:00
parent 5990d046c4
commit fc8b54eb73
4 changed files with 3 additions and 37 deletions
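
In short, the synchronous completion call now goes through requests instead of httpx, with the timeout taken from the module-level litellm.request_timeout (raised to 6000 seconds below). A minimal sketch of the call shape after this change — the endpoint, headers, and payload here are placeholders, not the values openai.py actually builds:

    import requests
    import litellm  # exposes the module-level request_timeout used below

    api_base = "https://api.openai.com/v1/chat/completions"  # placeholder endpoint
    headers = {"Authorization": "Bearer sk-..."}             # placeholder auth header
    data = {"model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hi"}]}

    # Previously httpx.post(...); the commit swaps in the equivalent requests call.
    response = requests.post(
        api_base, headers=headers, json=data, timeout=litellm.request_timeout
    )
    if response.status_code != 200:
        raise Exception(response.text)  # openai.py raises OpenAIError here instead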


@@ -19,7 +19,7 @@ telemetry = True
 max_tokens = 256 # OpenAI Defaults
 drop_params = False
 retry = True
-request_timeout: Optional[float] = 600
+request_timeout: Optional[float] = 6000
 api_key: Optional[str] = None
 openai_key: Optional[str] = None
 azure_key: Optional[str] = None


@@ -4,7 +4,7 @@ import httpx
 from .base import BaseLLM
 from litellm.utils import ModelResponse, Choices, Message, CustomStreamWrapper, convert_to_model_response_object, Usage
 from typing import Callable, Optional
-import aiohttp
+import aiohttp, requests
 import litellm
 class OpenAIError(Exception):
@@ -356,7 +356,7 @@ class OpenAIChatCompletion(BaseLLM):
             additional_args={"complete_input_dict": data},
         )
         ## COMPLETION CALL
-        response = httpx.post(
+        response = requests.post(
             api_base, headers=headers, json=data, timeout=litellm.request_timeout
         )
         ## LOGGING
@@ -488,7 +488,6 @@ class OpenAITextCompletion(BaseLLM):
             url=f"{api_base}",
             json=data,
             headers=headers,
-            timeout=litellm.request_timeout
         )
         if response.status_code != 200:
             raise OpenAIError(status_code=response.status_code, message=response.text)


@@ -38,11 +38,6 @@ def test_chat_openai():
         for chunk in response:
             # print(chunk)
             continue
-        response = completion(model="gpt-3.5-turbo",
-                              messages=[{
-                                  "role": "user",
-                                  "content": "Hi 👋 - i'm openai"
-                              }])
         # print(response)
     except Exception as e:


@@ -1,28 +0,0 @@
-#### What this tests ####
-# This tests error handling + logging (esp. for sentry breadcrumbs)
-import sys, os
-import traceback
-sys.path.insert(
-    0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
-import litellm
-from litellm import embedding, completion
-litellm.success_callback = ["posthog"]
-litellm.failure_callback = ["sentry", "posthog"]
-litellm.set_verbose = True
-model_fallback_list = ["claude-instant-1", "gpt-3.5-turbo", "chatgpt-test"]
-user_message = "Hello, how are you?"
-messages = [{"content": user_message, "role": "user"}]
-for model in model_fallback_list:
-    try:
-        response = embedding(model="text-embedding-ada-002", input=[user_message])
-        response = completion(model=model, messages=messages)
-    except Exception as e:
-        print(f"error occurred: {traceback.format_exc()}")