mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00

fix(openai.py): switch back to using requests instead of httpx

This commit is contained in:
parent 5990d046c4
commit fc8b54eb73

4 changed files with 3 additions and 37 deletions
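The commit message is the whole rationale: swap the synchronous httpx.post call back to requests.post. The two clients expose near-identical keyword arguments for this case, so the change is mechanical; a minimal before/after sketch (endpoint, headers, and payload are placeholders, not values from this repo):

import httpx
import requests

url = "https://example.invalid/v1/chat/completions"   # placeholder endpoint
headers = {"Authorization": "Bearer sk-placeholder"}  # placeholder auth header
data = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}

# Before: httpx, a sync+async client; only its sync API was used here.
response = httpx.post(url, headers=headers, json=data, timeout=600)

# After: requests, sync-only, which this commit switches back to.
response = requests.post(url, headers=headers, json=data, timeout=600)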
@@ -19,7 +19,7 @@ telemetry = True
 max_tokens = 256 # OpenAI Defaults
 drop_params = False
 retry = True
-request_timeout: Optional[float] = 600
+request_timeout: Optional[float] = 6000
 api_key: Optional[str] = None
 openai_key: Optional[str] = None
 azure_key: Optional[str] = None
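The only change in this hunk is the default request_timeout jumping from 600 to 6000 seconds. Since it is a plain module-level attribute, downstream code reads it at call time and applications can override it before issuing requests; a minimal sketch (the 120-second value is illustrative, not from the diff):

import litellm

# Module-level default; consumed as timeout=litellm.request_timeout
# in the HTTP call shown in the later openai.py hunk.
litellm.request_timeout = 120.0  # illustrative override, in seconds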
@@ -4,7 +4,7 @@ import httpx
 from .base import BaseLLM
 from litellm.utils import ModelResponse, Choices, Message, CustomStreamWrapper, convert_to_model_response_object, Usage
 from typing import Callable, Optional
-import aiohttp
+import aiohttp, requests
 import litellm

 class OpenAIError(Exception):
@@ -356,7 +356,7 @@ class OpenAIChatCompletion(BaseLLM):
                 additional_args={"complete_input_dict": data},
             )
             ## COMPLETION CALL
-            response = httpx.post(
+            response = requests.post(
                 api_base, headers=headers, json=data, timeout=litellm.request_timeout
             )
             ## LOGGING
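Since requests.post returns a requests.Response synchronously, the surrounding code can keep the manual status-code check visible in the next hunk rather than calling raise_for_status(). A self-contained sketch of that call-and-check pattern, assuming placeholder values (a plain exception stands in for the OpenAIError raised in the real file):

import requests
import litellm

response = requests.post(
    "https://example.invalid/v1/completions",       # placeholder for api_base
    headers={"Content-Type": "application/json"},   # placeholder headers
    json={"prompt": "hi"},                          # placeholder payload
    timeout=litellm.request_timeout,  # the module-level default from the first hunk
)
if response.status_code != 200:
    # Stand-in for the OpenAIError raised in the real code path.
    raise RuntimeError(f"{response.status_code}: {response.text}")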
@@ -488,7 +488,6 @@ class OpenAITextCompletion(BaseLLM):
             url=f"{api_base}",
             json=data,
             headers=headers,
-            timeout=litellm.request_timeout
         )
         if response.status_code != 200:
             raise OpenAIError(status_code=response.status_code, message=response.text)
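One behavioral note on this hunk: it deletes the timeout= argument outright instead of repointing it. The client making this call is not visible in the hunk, but if it is requests, an omitted timeout means the call waits on the server indefinitely, since requests has no global default timeout. A guarded version, assuming requests and an illustrative value:

import requests

response = requests.post(
    "https://example.invalid",  # placeholder url
    json={},                    # placeholder payload
    timeout=600,  # explicit per-call timeout; omitting it disables timeouts in requests
)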
@@ -38,11 +38,6 @@ def test_chat_openai():
         for chunk in response:
             # print(chunk)
             continue
-        response = completion(model="gpt-3.5-turbo",
-                              messages=[{
-                                  "role": "user",
-                                  "content": "Hi 👋 - i'm openai"
-                              }])
         # print(response)

     except Exception as e:
@@ -1,28 +0,0 @@
-#### What this tests ####
-# This tests error handling + logging (esp. for sentry breadcrumbs)
-
-import sys, os
-import traceback
-
-sys.path.insert(
-    0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
-import litellm
-from litellm import embedding, completion
-
-litellm.success_callback = ["posthog"]
-litellm.failure_callback = ["sentry", "posthog"]
-
-litellm.set_verbose = True
-
-model_fallback_list = ["claude-instant-1", "gpt-3.5-turbo", "chatgpt-test"]
-
-user_message = "Hello, how are you?"
-messages = [{"content": user_message, "role": "user"}]
-
-for model in model_fallback_list:
-    try:
-        response = embedding(model="text-embedding-ada-002", input=[user_message])
-        response = completion(model=model, messages=messages)
-    except Exception as e:
-        print(f"error occurred: {traceback.format_exc()}")