diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index c4d2a6441f..a81d133e58 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -1,4 +1,4 @@ model_list: - model_name: "test-model" litellm_params: - model: "openai/gpt-3.5-turbo-instruct-0914" \ No newline at end of file + model: "openai/text-embedding-ada-002" \ No newline at end of file diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 106b95453b..f22f25f732 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3334,6 +3334,7 @@ async def embeddings( if ( "input" in data and isinstance(data["input"], list) + and len(data["input"]) > 0 and isinstance(data["input"][0], list) and isinstance(data["input"][0][0], int) ): # check if array of tokens passed in @@ -3464,8 +3465,8 @@ async def embeddings( litellm_debug_info, ) verbose_proxy_logger.error( - "litellm.proxy.proxy_server.embeddings(): Exception occured - {}".format( - str(e) + "litellm.proxy.proxy_server.embeddings(): Exception occurred - {}\n{}".format( - str(e), traceback.format_exc() ) ) verbose_proxy_logger.debug(traceback.format_exc())