(fix) litellm router.aspeech (#6962)

* doc Migrating Databases

* fix aspeech on router

* test_audio_speech_router

* test_audio_speech_router
This commit is contained in:
Ishaan Jaff 2024-12-05 13:39:50 -08:00 committed by GitHub
parent 2f3fc6d0d1
commit 1d0fb4f435
3 changed files with 74 additions and 3 deletions

View file

@@ -69,3 +69,24 @@ When disabling spend logs (`disable_spend_logs: True`):
When disabling error logs (`disable_error_logs: True`):
- You **will not** be able to view Errors on the LiteLLM UI
- You **will** continue seeing error logs in your application logs and any other logging integrations you are using
## Migrating Databases
If you need to migrate Databases the following Tables should be copied to ensure continuation of services and no downtime
| Table Name | Description |
|------------|-------------|
| LiteLLM_VerificationToken | **Required** to ensure existing virtual keys continue working |
| LiteLLM_UserTable | **Required** to ensure existing virtual keys continue working |
| LiteLLM_TeamTable | **Required** to ensure Teams are migrated |
| LiteLLM_TeamMembership | **Required** to ensure Teams member budgets are migrated |
| LiteLLM_BudgetTable | **Required** to migrate existing budgeting settings |
| LiteLLM_OrganizationTable | **Optional** Only migrate if you use Organizations in DB |
| LiteLLM_OrganizationMembership | **Optional** Only migrate if you use Organizations in DB |
| LiteLLM_ProxyModelTable | **Optional** Only migrate if you store your LLMs in the DB (i.e you set `STORE_MODEL_IN_DB=True`) |
| LiteLLM_SpendLogs | **Optional** Only migrate if you want historical data on LiteLLM UI |
| LiteLLM_ErrorLogs | **Optional** Only migrate if you want historical data on LiteLLM UI |

View file

@@ -1690,11 +1690,15 @@ class Router:
and potential_model_client is not None and potential_model_client is not None
and dynamic_api_key != potential_model_client.api_key and dynamic_api_key != potential_model_client.api_key
): ):
pass model_client = None
else: else:
pass model_client = potential_model_client
response = await litellm.aspeech(**data, **kwargs) response = await litellm.aspeech(
**data,
client=model_client,
**kwargs,
)
return response return response
except Exception as e: except Exception as e:

View file

@@ -20,6 +20,7 @@ sys.path.insert(
) # Adds the parent directory to the system path
import litellm
from litellm import APIConnectionError, Router
from unittest.mock import ANY
async def test_router_init():
@@ -213,3 +214,48 @@ def test_router_init_azure_service_principal_with_secret_with_environment_variab
# asyncio.run(test_router_init())
@pytest.mark.asyncio
async def test_audio_speech_router():
    """
    Router.aspeech must forward the async OpenAI/Azure client that the Router
    built at init time to litellm.aspeech, instead of letting litellm create
    a fresh one per call.
    """
    from litellm import Router

    litellm.set_verbose = True

    # Single Azure TTS deployment; credentials come from the environment.
    deployment = {
        "model_name": "tts",
        "litellm_params": {
            "model": "azure/azure-tts",
            "api_base": os.getenv("AZURE_SWEDEN_API_BASE"),
            "api_key": os.getenv("AZURE_SWEDEN_API_KEY"),
        },
    }
    router = Router(model_list=[deployment])

    # The async client the Router cached for this deployment during init.
    cached_client = router._get_client(
        deployment=router.model_list[0],
        kwargs={},
        client_type="async",
    )

    with patch("litellm.aspeech") as mock_aspeech:
        await router.aspeech(
            model="tts",
            voice="alloy",
            input="the quick brown fox jumped over the lazy dogs",
        )

        print(
            "litellm.aspeech was called with kwargs = ", mock_aspeech.call_args.kwargs
        )

        # The client forwarded to litellm.aspeech must be the cached one.
        forwarded_client = mock_aspeech.call_args.kwargs["client"]
        assert forwarded_client == cached_client