From 99f08b45fc9fa608794915c1ea0e50ad58cc176f Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Fri, 8 Sep 2023 14:10:05 -0700
Subject: [PATCH] docs

---
 docs/my-website/docs/providers/vllm.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/my-website/docs/providers/vllm.md b/docs/my-website/docs/providers/vllm.md
index aace02d1d..1b1e18add 100644
--- a/docs/my-website/docs/providers/vllm.md
+++ b/docs/my-website/docs/providers/vllm.md
@@ -12,7 +12,7 @@ pip install litellm vllm
 import litellm
 
 response = completion(
-            model="vllm/facebook/opt-125m",
+            model="vllm/facebook/opt-125m", # add a vllm prefix so litellm knows the custom_llm_provider==vllm
             messages=messages,
             temperature=0.2,
             max_tokens=80)
@@ -30,7 +30,7 @@ In order to use litellm to call a hosted vllm server add the following to your c
 import litellm
 
 response = completion(
-            model="vllm/facebook/opt-125m",
+            model="facebook/opt-125m", # pass the vllm model name
             messages=messages,
             api_base="https://hosted-vllm-api.co/v1",
             custom_llm_provider="openai",
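
For reference, a minimal self-contained sketch combining the two call patterns this patch documents; the messages list is a placeholder prompt, and https://hosted-vllm-api.co/v1 is the example endpoint from the docs, not a live server:

    from litellm import completion

    messages = [{"role": "user", "content": "Hello, how are you?"}]  # placeholder prompt

    # Local inference: the "vllm/" prefix tells litellm that
    # custom_llm_provider == "vllm" (requires `pip install litellm vllm`)
    response = completion(
        model="vllm/facebook/opt-125m",
        messages=messages,
        temperature=0.2,
        max_tokens=80,
    )

    # Hosted vllm server: pass the bare model name plus api_base; the server
    # exposes an OpenAI-compatible API, so custom_llm_provider is "openai"
    response = completion(
        model="facebook/opt-125m",
        messages=messages,
        api_base="https://hosted-vllm-api.co/v1",  # example endpoint from the docs
        custom_llm_provider="openai",
    )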