From fd12287c0d97e2a08e8f3ae88766456cb1723fff Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 30 Aug 2023 20:17:24 -0700 Subject: [PATCH] Update readme.md --- cookbook/llm-ab-test-server/readme.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/cookbook/llm-ab-test-server/readme.md b/cookbook/llm-ab-test-server/readme.md index 2c4200e5d..2512e106b 100644 --- a/cookbook/llm-ab-test-server/readme.md +++ b/cookbook/llm-ab-test-server/readme.md @@ -45,16 +45,12 @@ llm_dict = { } ``` -All models defined can be called with the same Input/Output format using litellm `completion` +Easily call the selected model during `completion` ```python -from litellm import completion -# SET API KEYS in .env -# openai call -response = completion(model="gpt-3.5-turbo", messages=messages) -# cohere call -response = completion(model="command-nightly", messages=messages) -# anthropic -response = completion(model="claude-2", messages=messages) +selected_llm = random.choices(list(llm_dict.keys()), weights=list(llm_dict.values()))[0] + +response = completion(model=selected_llm, messages=[{ "content": "Hello, how are you?","role": "user"}]) + ``` This server allows you to view responses, costs and latency on your LiteLLM dashboard