diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 2c7a7a4df1..fae8a448ad 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -1881,8 +1881,15 @@ class OpenAIChatCompletion(BaseLLM):
                 model=model,  # type: ignore
                 prompt=prompt,  # type: ignore
             )
+        elif mode == "audio_speech":
+            # Issue a minimal TTS request to verify the audio speech endpoint is reachable
+            completion = await client.audio.speech.with_raw_response.create(
+                model=model,  # type: ignore
+                input=prompt,  # type: ignore
+                voice="alloy",
+            )
         else:
-            raise Exception("mode not set")
+            raise ValueError("mode not set, passed in mode: " + mode)
         response = {}
 
         if completion is None or not hasattr(completion, "headers"):
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 0e3f0826e2..bd8f5bfd0a 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -8,6 +8,12 @@ model_list:
     litellm_params:
       model: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
       api_key: "os.environ/FIREWORKS"
+  - model_name: tts
+    litellm_params:
+      model: openai/tts-1
+      api_key: "os.environ/OPENAI_API_KEY"
+    model_info:
+      mode: audio_speech
 general_settings:
   master_key: sk-1234
   alerting: ["slack"]
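
For reference, below is a minimal standalone sketch (not part of the diff) of the raw-response TTS call that the new "audio_speech" health-check branch performs, written directly against the official openai Python SDK. The model name, input text, and reliance on OPENAI_API_KEY in the environment are illustrative assumptions, not taken from the PR.

# Sketch only: mirrors the with_raw_response TTS call used by the new
# audio_speech health-check branch; model/input/voice values are assumptions.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

    # Raw-response variant so the HTTP headers (request id, rate limits)
    # are available, which is what the health check inspects.
    completion = await client.audio.speech.with_raw_response.create(
        model="tts-1",
        input="health check",
        voice="alloy",
    )

    # The health check only needs response metadata, not the audio bytes.
    print(completion.headers.get("x-request-id"))


if __name__ == "__main__":
    asyncio.run(main())

With the tts entry added to proxy_config.yaml (model_info.mode: audio_speech), the proxy's /health endpoint should exercise this new branch for that deployment instead of falling through to the "mode not set" error.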