Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00
fix(openai.py): switch back to using requests instead of httpx
parent 5990d046c4
commit fc8b54eb73

4 changed files with 3 additions and 37 deletions
openai.py

@@ -4,7 +4,7 @@ import httpx
 from .base import BaseLLM
 from litellm.utils import ModelResponse, Choices, Message, CustomStreamWrapper, convert_to_model_response_object, Usage
 from typing import Callable, Optional
-import aiohttp
+import aiohttp, requests
 import litellm
 
 class OpenAIError(Exception):
@@ -356,7 +356,7 @@ class OpenAIChatCompletion(BaseLLM):
                 additional_args={"complete_input_dict": data},
             )
             ## COMPLETION CALL
-            response = httpx.post(
+            response = requests.post(
                 api_base, headers=headers, json=data, timeout=litellm.request_timeout
             )
             ## LOGGING
@@ -488,7 +488,6 @@ class OpenAITextCompletion(BaseLLM):
                 url=f"{api_base}",
                 json=data,
                 headers=headers,
-                timeout=litellm.request_timeout
             )
             if response.status_code != 200:
                 raise OpenAIError(status_code=response.status_code, message=response.text)
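For reference, the pattern this commit lands on is a plain blocking requests.post call with the library-level timeout and the same non-200 error handling seen in the hunks. The sketch below is illustrative only, assuming a hypothetical post_chat_completion helper and a stand-in REQUEST_TIMEOUT constant in place of litellm.request_timeout; it is not litellm's actual method.

import requests

REQUEST_TIMEOUT = 600  # stand-in for litellm.request_timeout


class OpenAIError(Exception):
    # Mirrors the error type raised in the diff when the API returns a non-200 status.
    def __init__(self, status_code, message):
        self.status_code = status_code
        self.message = message
        super().__init__(message)


def post_chat_completion(api_base: str, headers: dict, data: dict) -> dict:
    # requests.post, like httpx.post, accepts headers=, json=, and timeout=,
    # so only the client library at the call site changes.
    response = requests.post(
        api_base, headers=headers, json=data, timeout=REQUEST_TIMEOUT
    )
    if response.status_code != 200:
        raise OpenAIError(status_code=response.status_code, message=response.text)
    return response.json()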