mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
aiohttp_openai/
fixes - allow using aiohttp_openai/gpt-4o
(#7598)
* fixes for get_complete_url * update aiohttp tests * fix event loop for aiohttp * ci/cd run again * test_aiohttp_openai
This commit is contained in:
parent
744beac754
commit
2ca0977921
5 changed files with 106 additions and 61 deletions
|
@ -9,7 +9,7 @@ New config to ensure we introduce this without causing breaking changes for user
|
|||
|
||||
from typing import TYPE_CHECKING, Any, List, Optional
|
||||
|
||||
import httpx
|
||||
from aiohttp import ClientResponse
|
||||
|
||||
from litellm.llms.openai_like.chat.transformation import OpenAILikeChatConfig
|
||||
from litellm.types.llms.openai import AllMessageValues
|
||||
|
@ -24,6 +24,22 @@ else:
|
|||
|
||||
|
||||
class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
|
||||
def get_complete_url(
    self,
    api_base: str,
    model: str,
    optional_params: dict,
    stream: Optional[bool] = None,
) -> str:
    """
    Ensure the URL ends with `/chat/completions`.

    Args:
        api_base: Base URL for the OpenAI-compatible endpoint
            (e.g. ``https://api.openai.com/v1``).
        model: Model name — unused here, kept for interface compatibility.
        optional_params: Call-time optional params — unused here.
        stream: Whether the call is streaming — unused here.

    Returns:
        ``api_base`` with ``/chat/completions`` appended exactly once.
    """
    # Strip any trailing slash first so we never emit a double
    # slash like ".../v1//chat/completions".
    api_base = api_base.rstrip("/")
    if not api_base.endswith("/chat/completions"):
        api_base += "/chat/completions"
    return api_base
|
||||
|
||||
def validate_environment(
|
||||
self,
|
||||
headers: dict,
|
||||
|
@ -33,12 +49,12 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
|
|||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
return {}
|
||||
return {"Authorization": f"Bearer {api_key}"}
|
||||
|
||||
def transform_response(
|
||||
async def transform_response( # type: ignore
|
||||
self,
|
||||
model: str,
|
||||
raw_response: httpx.Response,
|
||||
raw_response: ClientResponse,
|
||||
model_response: ModelResponse,
|
||||
logging_obj: LiteLLMLoggingObj,
|
||||
request_data: dict,
|
||||
|
@ -49,4 +65,5 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
|
|||
api_key: Optional[str] = None,
|
||||
json_mode: Optional[bool] = None,
|
||||
) -> ModelResponse:
|
||||
return ModelResponse(**raw_response.json())
|
||||
_json_response = await raw_response.json()
|
||||
return ModelResponse(**_json_response)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue