forked from phoenix/litellm-mirror
fix - use comma-separated model string (CSV) for batch completions
This commit is contained in:
parent
62276fc221
commit
d4288b134b
2 changed files with 4 additions and 6 deletions
|
@@ -3673,8 +3673,9 @@ async def chat_completion(
|
||||||
# skip router if user passed their key
|
# skip router if user passed their key
|
||||||
if "api_key" in data:
|
if "api_key" in data:
|
||||||
tasks.append(litellm.acompletion(**data))
|
tasks.append(litellm.acompletion(**data))
|
||||||
elif isinstance(data["model"], list) and llm_router is not None:
|
elif "," in data["model"] and llm_router is not None:
|
||||||
_models = data.pop("model")
|
_models_csv_string = data.pop("model")
|
||||||
|
_models = _models_csv_string.split(",")
|
||||||
tasks.append(llm_router.abatch_completion(models=_models, **data))
|
tasks.append(llm_router.abatch_completion(models=_models, **data))
|
||||||
elif "user_config" in data:
|
elif "user_config" in data:
|
||||||
# initialize a new router instance. make request using this Router
|
# initialize a new router instance. make request using this Router
|
||||||
|
|
|
@@ -424,10 +424,7 @@ async def test_batch_chat_completions():
|
||||||
response = await chat_completion(
|
response = await chat_completion(
|
||||||
session=session,
|
session=session,
|
||||||
key="sk-1234",
|
key="sk-1234",
|
||||||
model=[
|
model="gpt-3.5-turbo,fake-openai-endpoint",
|
||||||
"gpt-3.5-turbo",
|
|
||||||
"fake-openai-endpoint",
|
|
||||||
],
|
|
||||||
)
|
)
|
||||||
|
|
||||||
print(f"response: {response}")
|
print(f"response: {response}")
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue