litellm-mirror/litellm/llms/OpenAI/realtime/handler.py
Krish Dholakia f9d0bcc5a1
OpenAI /v1/realtime api support (#6047)
* feat(azure/realtime): initial working commit for proxy azure openai realtime endpoint support

Adds support for passing /v1/realtime calls via litellm proxy

* feat(realtime_api/main.py): abstraction for handling openai realtime api calls

* feat(router.py): add `arealtime()` endpoint in router for realtime api calls

Allows using `model_list` in proxy for realtime as well

* fix: make realtime api a private function

Structure might change based on feedback. Make that clear to users.

* build(requirements.txt): add websockets to the requirements.txt

* feat(openai/realtime): add openai /v1/realtime api support
2024-10-03 17:11:22 -04:00
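
As the commit message describes, the proxy can pass `/v1/realtime` calls through to OpenAI over websockets. A minimal client-side sketch of hitting such a proxy route with the `websockets` library (the proxy URL, port, model, and key below are illustrative assumptions for a local deployment, not values defined in this commit):

import asyncio
import websockets

async def call_proxy_realtime():
    # Assumed local LiteLLM Proxy deployment; adjust URL, model, and key to your setup.
    url = "ws://localhost:4000/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01"
    async with websockets.connect(
        url,
        extra_headers={"Authorization": "Bearer sk-1234"},
    ) as ws:
        await ws.send('{"type": "response.create"}')
        print(await ws.recv())

asyncio.run(call_proxy_realtime())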


"""
This file contains the calling Azure OpenAI's `/openai/realtime` endpoint.
This requires websockets, and is currently only supported on LiteLLM Proxy.
"""
import asyncio
from typing import Any, Optional
from ..openai import OpenAIChatCompletion


async def forward_messages(client_ws: Any, backend_ws: Any):
    """Relay messages from the backend (OpenAI) websocket to the client websocket."""
    import websockets

    try:
        while True:
            message = await backend_ws.recv()
            await client_ws.send_text(message)
    except websockets.exceptions.ConnectionClosed:  # type: ignore
        pass


class OpenAIRealtime(OpenAIChatCompletion):
    def _construct_url(self, api_base: str, model: str) -> str:
        """
        Build the backend websocket URL.

        Example output:
        "wss://localhost:8080/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01"
        """
        api_base = api_base.replace("https://", "wss://")
        api_base = api_base.replace("http://", "ws://")
        return f"{api_base}/v1/realtime?model={model}"

    async def async_realtime(
        self,
        model: str,
        websocket: Any,
        api_base: Optional[str] = None,
        api_key: Optional[str] = None,
        client: Optional[Any] = None,
        timeout: Optional[float] = None,
    ):
        """Bridge a client websocket to OpenAI's `/v1/realtime` endpoint, relaying messages in both directions."""
        import websockets

        if api_base is None:
            raise ValueError("api_base is required for OpenAI realtime calls")
        if api_key is None:
            raise ValueError("api_key is required for OpenAI realtime calls")

        url = self._construct_url(api_base, model)

        try:
            async with websockets.connect(  # type: ignore
                url,
                extra_headers={
                    "Authorization": f"Bearer {api_key}",  # type: ignore
                    "OpenAI-Beta": "realtime=v1",
                },
            ) as backend_ws:
                # Relay backend -> client messages in the background.
                forward_task = asyncio.create_task(
                    forward_messages(websocket, backend_ws)
                )

                try:
                    # Relay client -> backend messages until either side disconnects.
                    while True:
                        message = await websocket.receive_text()
                        await backend_ws.send(message)
                except websockets.exceptions.ConnectionClosed:  # type: ignore
                    forward_task.cancel()
                finally:
                    if not forward_task.done():
                        forward_task.cancel()
                    try:
                        await forward_task
                    except asyncio.CancelledError:
                        pass
        except websockets.exceptions.InvalidStatusCode as e:  # type: ignore
            # Backend rejected the websocket handshake; surface the status to the client.
            await websocket.close(code=e.status_code, reason=str(e))
        except Exception as e:
            await websocket.close(code=1011, reason=f"Internal server error: {str(e)}")
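
A minimal sketch of how this handler might be wired behind a FastAPI websocket route. The route path, `api_base`, and credential handling below are illustrative assumptions; LiteLLM Proxy's actual wiring may differ.

import os

from fastapi import FastAPI, WebSocket

from litellm.llms.OpenAI.realtime.handler import OpenAIRealtime

app = FastAPI()
realtime_handler = OpenAIRealtime()


@app.websocket("/v1/realtime")
async def realtime_proxy(websocket: WebSocket, model: str):
    # Accept the client connection, then bridge it to the OpenAI backend.
    await websocket.accept()
    await realtime_handler.async_realtime(
        model=model,
        websocket=websocket,
        api_base="https://api.openai.com",
        api_key=os.environ["OPENAI_API_KEY"],
    )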