fix(router.py): generate consistent model IDs
Giving a deployment the same ID on every Router instance lets Redis usage caching work across multiple instances.
parent 180cf9bd5c
commit a47a719caa
4 changed files with 78 additions and 9 deletions
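For context on the mechanism: the fix makes a deployment's ID a pure function of its model group and litellm params, rather than a per-instance random value, so every Router that loads the same config derives the same ID. A minimal sketch of one way to achieve this, assuming a hash over a canonically serialized payload (the exact scheme litellm uses may differ):

```python
import hashlib
import json


def generate_model_id(model_group: str, litellm_params: dict) -> str:
    # Serialize deterministically: sort_keys guarantees the same dict
    # always yields the same JSON string, regardless of insertion order.
    serialized = json.dumps(
        {"model_group": model_group, "litellm_params": litellm_params},
        sort_keys=True,
        default=str,
    )
    # Hash the canonical form; any two processes given the same inputs
    # derive the same ID, so cache keys built from it line up.
    return hashlib.md5(serialized.encode("utf-8")).hexdigest()
```

Because the serialization is deterministic, two processes that never communicate still agree on the ID, which is exactly the property `test_consistent_model_id` below asserts.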
```diff
@@ -932,6 +932,35 @@ def test_openai_completion_on_router():
 # test_openai_completion_on_router()
+
+
+def test_consistent_model_id():
+    """
+    - For a given model group + litellm params, assert the model id is always the same
+
+    Test on `_generate_model_id`
+
+    Test on `set_model_list`
+
+    Test on `_add_deployment`
+    """
+    model_group = "gpt-3.5-turbo"
+    litellm_params = {
+        "model": "openai/my-fake-model",
+        "api_key": "my-fake-key",
+        "api_base": "https://openai-function-calling-workers.tasslexyz.workers.dev/",
+        "stream_timeout": 0.001,
+    }
+
+    id1 = Router()._generate_model_id(
+        model_group=model_group, litellm_params=litellm_params
+    )
+
+    id2 = Router()._generate_model_id(
+        model_group=model_group, litellm_params=litellm_params
+    )
+
+    assert id1 == id2
+
+
 def test_reading_keys_os_environ():
     import openai
```
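The test constructs two separate `Router()` instances on purpose: each computes the ID from scratch, mimicking independent processes, so the assertion only passes if the ID is derived deterministically rather than generated per instance. This is what makes the caching mentioned in the commit message work: if usage counters in Redis are keyed by the deployment ID, agreeing on the ID means agreeing on the key. A hypothetical sketch (the key format and counter logic are illustrative assumptions, not litellm's actual schema):

```python
import redis  # assumes a reachable Redis server and the redis-py client

r = redis.Redis(host="localhost", port=6379)


def record_usage(model_id: str, tokens: int) -> None:
    # With a deterministic model_id, every Router instance increments
    # the same counter; with random per-instance IDs, each process
    # would track usage under its own private key and the totals diverge.
    r.incrby(f"usage:{model_id}:tokens", tokens)
```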