diff --git a/litellm/main.py b/litellm/main.py
index 6156d9c398..69ec7985de 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -665,6 +665,7 @@ def completion(
         "supports_system_message",
         "region_name",
         "allowed_model_region",
+        "model_config",
     ]
     default_params = openai_params + litellm_params
 
@@ -2860,6 +2861,7 @@ def embedding(
         "no-log",
         "region_name",
         "allowed_model_region",
+        "model_config",
     ]
     default_params = openai_params + litellm_params
     non_default_params = {
@@ -3760,6 +3762,7 @@ def image_generation(
         "cache",
         "region_name",
         "allowed_model_region",
+        "model_config",
     ]
     default_params = openai_params + litellm_params
     non_default_params = {
diff --git a/litellm/tests/test_proxy_custom_logger.py b/litellm/tests/test_proxy_custom_logger.py
index 5496b6c834..e9000ada10 100644
--- a/litellm/tests/test_proxy_custom_logger.py
+++ b/litellm/tests/test_proxy_custom_logger.py
@@ -10,7 +10,7 @@ import os, io, asyncio
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
-import pytest
+import pytest, time
 import litellm
 from litellm import embedding, completion, completion_cost, Timeout
 from litellm import RateLimitError
@@ -159,7 +159,7 @@ def test_chat_completion(client):
     response = client.post("/chat/completions", json=test_data, headers=headers)
     print("made request", response.status_code, response.text)
     print("LiteLLM Callbacks", litellm.callbacks)
-    asyncio.sleep(1)  # sleep while waiting for callback to run
+    time.sleep(1)  # sleep while waiting for callback to run
     print(
         "my_custom_logger in /chat/completions",