From c4aea7432f03114d658f3cdfd3607735844de834 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 27 Nov 2023 22:05:02 -0800 Subject: [PATCH] build: adding debug logs to gitignore --- .gitignore | 1 + litellm/router.py | 4 ++-- litellm/tests/test_profiling_router.py | 8 ++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 76d98409ab..62e898dac2 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ router_config.yaml litellm_server/config.yaml litellm/proxy/_secret_config.yaml .aws-sam/ +litellm/tests/aiologs.log diff --git a/litellm/router.py b/litellm/router.py index 734cffce5e..a7c1418b44 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -176,13 +176,13 @@ class Router: ########## remove -ModelID-XXXX from model ############## original_model_string = data["model"] # Find the index of "ModelID" in the string + self.print_verbose(f"completion model: {original_model_string}") index_of_model_id = original_model_string.find("-ModelID") # Remove everything after "-ModelID" if it exists if index_of_model_id != -1: data["model"] = original_model_string[:index_of_model_id] else: - data["model"] = original_model_string - self.print_verbose(f"completion model: {data['model']}") + data["model"] = original_model_string return litellm.completion(**{**data, "messages": messages, "caching": self.cache_responses, **kwargs}) except Exception as e: raise e diff --git a/litellm/tests/test_profiling_router.py b/litellm/tests/test_profiling_router.py index 01edbd85d9..383d9dd692 100644 --- a/litellm/tests/test_profiling_router.py +++ b/litellm/tests/test_profiling_router.py @@ -30,21 +30,21 @@ # "model_name": "azure-model", # "litellm_params": { # "model": "azure/gpt-turbo", -# "api_key": "6a5ae4c5b2bd4e8088248067799c6899", +# "api_key": "os.environ/AZURE_FRANCE_API_KEY", # "api_base": "https://openai-france-1234.openai.azure.com" # } # }, { # "model_name": "azure-model", # "litellm_params": { # "model": 
"azure/gpt-35-turbo", -# "api_key": "fe5b390e8990407e8d913f40833b19f7", +# "api_key": "os.environ/AZURE_EUROPE_API_KEY", # "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com" # } # }, { # "model_name": "azure-model", # "litellm_params": { # "model": "azure/gpt-35-turbo", -# "api_key": "6a0f46e99d554e8caad9c2b7c0ba7319", +# "api_key": "os.environ/AZURE_CANADA_API_KEY", # "api_base": "https://my-endpoint-canada-berri992.openai.azure.com" # } # }] @@ -64,7 +64,7 @@ # async def loadtest_fn(): # start = time.time() -# n = 10 +# n = 100 # tasks = [router_completion() for _ in range(n)] # chat_completions = await asyncio.gather(*tasks) # successful_completions = [c for c in chat_completions if c is not None]