Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 03:34:10 +00:00.
LiteLLM Minor Fixes & Improvements (01/10/2025) - p1 (#7670)
* test(test_get_model_info.py): add unit test confirming router deployment updates global 'get_model_info' * fix(get_supported_openai_params.py): fix custom llm provider 'get_supported_openai_params' Fixes https://github.com/BerriAI/litellm/issues/7668 * docs(azure.md): clarify how azure ad token refresh on proxy works Closes https://github.com/BerriAI/litellm/issues/7665
This commit is contained in:
parent
9369129bf0
commit
ebc66c1e1e
6 changed files with 112 additions and 5 deletions
|
@ -285,3 +285,28 @@ def test_get_model_info_custom_provider():
|
|||
get_model_info(
|
||||
model="my-custom-llm/my-fake-model"
|
||||
) # 💥 "Exception: This model isn't mapped yet." in v1.56.10
|
||||
|
||||
|
||||
def test_get_model_info_custom_model_router():
    """Verify get_model_info resolves a model registered through a Router.

    Registering a deployment on a Router should update litellm's global
    model-info map, so a later get_model_info() call on the underlying
    provider/model name succeeds instead of raising
    "This model isn't mapped yet."
    """
    from litellm import Router
    from litellm import get_model_info

    # Enable verbose logging so a failed lookup is easier to diagnose.
    # NOTE(review): relies on a module-level `import litellm` outside this hunk.
    litellm._turn_on_debug()

    router = Router(
        model_list=[
            {
                # Router-facing alias for this deployment.
                "model_name": "ma-summary",
                "litellm_params": {
                    "api_base": "http://ma-mix-llm-serving.cicero.svc.cluster.local/v1",
                    # Arbitrary non-zero per-token costs attached to the deployment.
                    "input_cost_per_token": 1,
                    "output_cost_per_token": 1,
                    # Underlying provider/model this deployment routes to.
                    "model": "openai/meta-llama/Meta-Llama-3-8B-Instruct",
                    "model_id": "c20d603e-1166-4e0f-aa65-ed9c476ad4ca",
                },
            }
        ]
    )
    # Look up by the underlying provider/model name (not the router alias);
    # presumably Router registration populated the global map — the assert
    # below only checks that the lookup no longer raises/returns None.
    info = get_model_info("openai/meta-llama/Meta-Llama-3-8B-Instruct")
    print("info", info)
    assert info is not None
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue