feat(main.py): support openai tts endpoint

Closes https://github.com/BerriAI/litellm/issues/3094
Krrish Dholakia 2024-05-30 14:28:28 -07:00
parent 3167bee25a
commit a67cbf47f6
5 changed files with 322 additions and 3 deletions
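
The diff below shows the handler added in litellm/llms/openai.py; the main.py wrapper itself is not part of this excerpt. As a hypothetical caller-side sketch only, assuming the wrapper forwards to the handler and hands back its streaming context manager:

import litellm

# Hypothetical usage: litellm.speech() and its return shape are assumptions
# here, since the main.py changes are not shown in this excerpt.
with litellm.speech(
    model="tts-1",
    voice="alloy",
    input="the quick brown fox jumped over the lazy dogs",
) as response:
    response.stream_to_file("speech.mp3")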


@@ -26,6 +26,7 @@ import litellm
from .prompt_templates.factory import prompt_factory, custom_prompt
from openai import OpenAI, AsyncOpenAI
from ..types.llms.openai import *
import openai
class OpenAIError(Exception):
@@ -1180,6 +1181,94 @@ class OpenAIChatCompletion(BaseLLM):
            )
            raise e

    def audio_speech(
        self,
        model: str,
        input: str,
        voice: str,
        optional_params: dict,
        api_key: Optional[str],
        api_base: Optional[str],
        organization: Optional[str],
        project: Optional[str],
        max_retries: int,
        timeout: Union[float, httpx.Timeout],
        aspeech: Optional[bool] = None,
        client=None,
    ) -> ResponseContextManager[StreamedBinaryAPIResponse]:
        # Route to the async implementation when the caller requested it.
        if aspeech is not None and aspeech is True:
            return self.async_audio_speech(
                model=model,
                input=input,
                voice=voice,
                optional_params=optional_params,
                api_key=api_key,
                api_base=api_base,
                organization=organization,
                project=project,
                max_retries=max_retries,
                timeout=timeout,
                client=client,
            )  # type: ignore

        # Reuse a caller-supplied client when given; otherwise build one.
        if client is None:
            openai_client = OpenAI(
                api_key=api_key,
                base_url=api_base,
                organization=organization,
                project=project,
                http_client=litellm.client_session,
                timeout=timeout,
                max_retries=max_retries,
            )
        else:
            openai_client = client

        # Stream the synthesized audio instead of buffering it in memory,
        # passing through the caller's model, voice, and input text.
        response = openai_client.audio.speech.with_streaming_response.create(
            model=model,
            voice=voice,
            input=input,
            **optional_params,
        )
        return response

    def async_audio_speech(
        self,
        model: str,
        input: str,
        voice: str,
        optional_params: dict,
        api_key: Optional[str],
        api_base: Optional[str],
        organization: Optional[str],
        project: Optional[str],
        max_retries: int,
        timeout: Union[float, httpx.Timeout],
        client=None,
    ) -> AsyncResponseContextManager[AsyncStreamedBinaryAPIResponse]:
        # Reuse a caller-supplied async client when given; otherwise build one.
        if client is None:
            openai_client = AsyncOpenAI(
                api_key=api_key,
                base_url=api_base,
                organization=organization,
                project=project,
                http_client=litellm.aclient_session,
                timeout=timeout,
                max_retries=max_retries,
            )
        else:
            openai_client = client

        # Stream the synthesized audio instead of buffering it in memory,
        # passing through the caller's model, voice, and input text.
        response = openai_client.audio.speech.with_streaming_response.create(
            model=model,
            voice=voice,
            input=input,
            **optional_params,
        )
        return response

    async def ahealth_check(
        self,
        model: Optional[str],
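
For reference, a minimal sketch of driving the new handler directly, assuming the module path litellm/llms/openai.py shown above (parameter values are illustrative only):

import os

from litellm.llms.openai import OpenAIChatCompletion

handler = OpenAIChatCompletion()

# audio_speech returns the OpenAI SDK's streaming context manager; entering
# it yields a StreamedBinaryAPIResponse that can be written straight to disk.
with handler.audio_speech(
    model="tts-1",
    input="the quick brown fox jumped over the lazy dogs",
    voice="alloy",
    optional_params={},
    api_key=os.environ.get("OPENAI_API_KEY"),
    api_base=None,
    organization=None,
    project=None,
    max_retries=2,
    timeout=600.0,
) as response:
    response.stream_to_file("speech.mp3")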