diff --git a/docs/my-website/docs/proxy/db_info.md b/docs/my-website/docs/proxy/db_info.md
index 8429f6360..1b87aa1e5 100644
--- a/docs/my-website/docs/proxy/db_info.md
+++ b/docs/my-website/docs/proxy/db_info.md
@@ -69,3 +69,38 @@ When disabling spend logs (`disable_spend_logs: True`):
 When disabling error logs (`disable_error_logs: True`):
 - You **will not** be able to view Errors on the LiteLLM UI
 - You **will** continue seeing error logs in your application logs and any other logging integrations you are using
+
+
+## Migrating Databases
+
+If you need to migrate databases, copy the following tables to keep existing services running with no downtime:
+
+
+| Table Name | Description |
+|------------|-------------|
+| LiteLLM_VerificationToken | **Required** to ensure existing virtual keys continue working |
+| LiteLLM_UserTable | **Required** to ensure existing virtual keys continue working |
+| LiteLLM_TeamTable | **Required** to ensure Teams are migrated |
+| LiteLLM_TeamMembership | **Required** to ensure Team member budgets are migrated |
+| LiteLLM_BudgetTable | **Required** to migrate existing budgeting settings |
+| LiteLLM_OrganizationTable | **Optional.** Only migrate if you use Organizations in the DB |
+| LiteLLM_OrganizationMembership | **Optional.** Only migrate if you use Organizations in the DB |
+| LiteLLM_ProxyModelTable | **Optional.** Only migrate if you store your LLMs in the DB (i.e. you set `STORE_MODEL_IN_DB=True`) |
+| LiteLLM_SpendLogs | **Optional.** Only migrate if you want historical data on the LiteLLM UI |
+| LiteLLM_ErrorLogs | **Optional.** Only migrate if you want historical data on the LiteLLM UI |
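+
+Below is a minimal sketch for copying the **Required** tables, assuming a PostgreSQL database with `pg_dump`/`psql` available; the connection URLs are placeholders for your own instances. Start the proxy against the new database once beforehand so the schema already exists, then copy the data (append any **Optional** tables you need to the list):
+
+```shell
+OLD_DB="postgresql://user:password@old-host:5432/litellm"   # placeholder URL
+NEW_DB="postgresql://user:password@new-host:5432/litellm"   # placeholder URL
+
+# Prisma-created table names are case-sensitive in Postgres, hence the escaped
+# quotes around each identifier. Adjust the order if foreign keys are enforced.
+for t in LiteLLM_BudgetTable LiteLLM_UserTable LiteLLM_TeamTable \
+         LiteLLM_TeamMembership LiteLLM_VerificationToken; do
+  pg_dump --data-only --table="\"$t\"" "$OLD_DB" | psql "$NEW_DB"
+done
+```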
+
+
diff --git a/litellm/router.py b/litellm/router.py
index 3751b2403..bf0f7dd7b 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -1689,11 +1689,17 @@ class Router:
                 and potential_model_client is not None
                 and dynamic_api_key != potential_model_client.api_key
             ):
-                pass
+                # per-request api_key differs from the cached client's - don't reuse it
+                model_client = None
             else:
-                pass
+                # reuse the client initialized during Router init
+                model_client = potential_model_client
 
-            response = await litellm.aspeech(**data, **kwargs)
+            response = await litellm.aspeech(
+                **data,
+                client=model_client,
+                **kwargs,
+            )
 
             return response
         except Exception as e:
diff --git a/tests/local_testing/test_router_client_init.py b/tests/local_testing/test_router_client_init.py
index 978562409..bdfd71972 100644
--- a/tests/local_testing/test_router_client_init.py
+++ b/tests/local_testing/test_router_client_init.py
@@ -20,6 +20,7 @@ sys.path.insert(
 )  # Adds the parent directory to the system path
 import litellm
 from litellm import APIConnectionError, Router
+from unittest.mock import ANY, patch
 
 
 async def test_router_init():
@@ -213,3 +214,50 @@ def test_router_init_azure_service_principal_with_secret_with_environment_variab
 
 
 # asyncio.run(test_router_init())
+
+
+@pytest.mark.asyncio
+async def test_audio_speech_router():
+    """
+    Test that the router reuses the OpenAI/Azure OpenAI client initialized during init for litellm.aspeech
+    """
+
+    from litellm import Router
+
+    litellm.set_verbose = True
+
+    model_list = [
+        {
+            "model_name": "tts",
+            "litellm_params": {
+                "model": "azure/azure-tts",
+                "api_base": os.getenv("AZURE_SWEDEN_API_BASE"),
+                "api_key": os.getenv("AZURE_SWEDEN_API_KEY"),
+            },
+        },
+    ]
+
+    _router = Router(model_list=model_list)
+
+    expected_openai_client = _router._get_client(
+        deployment=_router.model_list[0],
+        kwargs={},
+        client_type="async",
+    )
+
+    with patch("litellm.aspeech") as mock_aspeech:
+        await _router.aspeech(
+            model="tts",
+            voice="alloy",
+            input="the quick brown fox jumped over the lazy dogs",
+        )
+
+        print(
+            "litellm.aspeech was called with kwargs = ", mock_aspeech.call_args.kwargs
+        )
+
+        # Get the actual client that was passed through to litellm.aspeech
+        client_passed_in_request = mock_aspeech.call_args.kwargs["client"]
+        assert client_passed_in_request == expected_openai_client
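+
+        # Sanity check: litellm.aspeech is an async function, so patch()
+        # substitutes an AsyncMock here, which lets us assert the router
+        # awaited it exactly once.
+        mock_aspeech.assert_awaited_once()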