diff --git a/litellm/main.py b/litellm/main.py
index ca7b902e2..5c421a351 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1736,6 +1736,7 @@ def embedding(
     api_key: Optional[str] = None,
     api_type: Optional[str] = None,
     caching: bool=False,
+    user: Optional[str]=None,
     custom_llm_provider=None,
     litellm_call_id=None,
     litellm_logging_obj=None,
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index f1824c95b..c9ba4c215 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -201,6 +201,8 @@ class ModelParams(BaseModel):
     model_name: str
     litellm_params: dict
    model_info: Optional[dict]
+    class Config:
+        protected_namespaces = ()
 
 class GenerateKeyRequest(BaseModel):
     duration: str = "1h"
@@ -1086,8 +1088,24 @@ async def model_info(request: Request):
     all_models = config['model_list']
 
     for model in all_models:
+        # get the model cost map info
+        ## make an api call
+        data = copy.deepcopy(model["litellm_params"])
+        data["messages"] = [{"role": "user", "content": "Hey, how's it going?"}]
+        data["max_tokens"] = 10
+        print(f"data going to litellm acompletion: {data}")
+        response = await litellm.acompletion(**data)
+        response_model = response["model"]
+        print(f"response model: {response_model}; response - {response}")
+        litellm_model_info = litellm.get_model_info(response_model)
+        model_info = model.get("model_info", {})
+        for k, v in litellm_model_info.items():
+            if k not in model_info:
+                model_info[k] = v
+        model["model_info"] = model_info
         # don't return the api key
         model["litellm_params"].pop("api_key", None)
+    # all_models = list(set([m["model_name"] for m in llm_model_list]))
 
     print_verbose(f"all_models: {all_models}")
     return dict(
diff --git a/litellm/utils.py b/litellm/utils.py
index bed6d1cda..280a6342f 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2703,6 +2703,13 @@ def get_model_info(model: str):
     except requests.exceptions.RequestException as e:
         return None
     try:
+        azure_llms = {
+            "gpt-35-turbo": "azure/gpt-3.5-turbo",
+            "gpt-35-turbo-16k": "azure/gpt-3.5-turbo-16k",
+            "gpt-35-turbo-instruct": "azure/gpt-3.5-turbo-instruct"
+        }
+        if model in azure_llms:
+            model = azure_llms[model]
         if model in litellm.model_cost:
            return litellm.model_cost[model]
         model, custom_llm_provider, _, _ = get_llm_provider(model=model)
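
A minimal usage sketch of the two library-level changes above, assuming the public litellm API at the time of this patch; the model names, input text, and `user` value are illustrative only:

```python
import litellm

# The new `user` kwarg is accepted by litellm.embedding alongside the
# existing parameters (end-user id shown here is hypothetical).
response = litellm.embedding(
    model="text-embedding-ada-002",
    input=["good morning from litellm"],
    user="user-1234",
)

# Azure deployment-style names such as "gpt-35-turbo" are now remapped to
# their "azure/gpt-3.5-turbo" entry before the model cost map lookup,
# so get_model_info no longer misses them.
info = litellm.get_model_info("gpt-35-turbo")
print(info)  # cost/context-window metadata for azure/gpt-3.5-turbo
```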