Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 03:34:10 +00:00
feat(main.py): support openai tts endpoint
Closes https://github.com/BerriAI/litellm/issues/3094
commit a67cbf47f6 (parent 3167bee25a)
5 changed files with 322 additions and 3 deletions
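For reference, here is a minimal sketch of the call this commit enables, mirroring the sync test in the diff below. It assumes OPENAI_API_KEY is set in the environment and that the connection kwargs the test passes explicitly (api_base, api_key, organization, project, client, optional_params) can simply be left at their defaults:

# Minimal sketch of the new TTS call, mirroring the sync litellm.speech test below.
# Assumptions: OPENAI_API_KEY is set in the environment; the optional connection
# kwargs the test passes as None are omitted here.
from pathlib import Path

import litellm

speech_file_path = Path("speech.mp3")
with litellm.speech(
    model="openai/tts-1",
    voice="alloy",
    input="the quick brown fox jumped over the lazy dogs",
) as response:
    response.stream_to_file(speech_file_path)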
litellm/tests/test_audio_speech.py (new file, 91 lines)
@@ -0,0 +1,91 @@
# What is this?
## unit tests for openai tts endpoint

import sys, os, asyncio, time, random, uuid
import traceback
from dotenv import load_dotenv

load_dotenv()
import os

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest
import litellm, openai
from pathlib import Path


@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.asyncio
async def test_audio_speech_openai(sync_mode):

    speech_file_path = Path(__file__).parent / "speech.mp3"
    openai_chat_completions = litellm.OpenAIChatCompletion()
    if sync_mode:
        with openai_chat_completions.audio_speech(
            model="tts-1",
            voice="alloy",
            input="the quick brown fox jumped over the lazy dogs",
            api_base=None,
            api_key=None,
            organization=None,
            project=None,
            max_retries=1,
            timeout=600,
            client=None,
            optional_params={},
        ) as response:
            response.stream_to_file(speech_file_path)
    else:
        async with openai_chat_completions.async_audio_speech(
            model="tts-1",
            voice="alloy",
            input="the quick brown fox jumped over the lazy dogs",
            api_base=None,
            api_key=None,
            organization=None,
            project=None,
            max_retries=1,
            timeout=600,
            client=None,
            optional_params={},
        ) as response:
            speech = await response.parse()


@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.asyncio
async def test_audio_speech_litellm(sync_mode):
    speech_file_path = Path(__file__).parent / "speech.mp3"

    if sync_mode:
        with litellm.speech(
            model="openai/tts-1",
            voice="alloy",
            input="the quick brown fox jumped over the lazy dogs",
            api_base=None,
            api_key=None,
            organization=None,
            project=None,
            max_retries=1,
            timeout=600,
            client=None,
            optional_params={},
        ) as response:
            response.stream_to_file(speech_file_path)
    else:
        async with litellm.aspeech(
            model="openai/tts-1",
            voice="alloy",
            input="the quick brown fox jumped over the lazy dogs",
            api_base=None,
            api_key=None,
            organization=None,
            project=None,
            max_retries=1,
            timeout=600,
            client=None,
            optional_params={},
        ) as response:
            await response.stream_to_file(speech_file_path)
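Outside of pytest, the async path can be driven with asyncio directly. A minimal sketch mirroring the async branch of test_audio_speech_litellm above, under the same assumptions (OPENAI_API_KEY set in the environment, the optional kwargs the test passes as None left at their defaults):

# Sketch of driving the async endpoint without the pytest harness;
# the context-manager usage simply mirrors the test above.
import asyncio
from pathlib import Path

import litellm


async def main():
    async with litellm.aspeech(
        model="openai/tts-1",
        voice="alloy",
        input="the quick brown fox jumped over the lazy dogs",
    ) as response:
        await response.stream_to_file(Path("speech.mp3"))


asyncio.run(main())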