From c2e309baf36ebe6abcd4b747cade6d637edf7fe6 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 20:05:28 -0700
Subject: [PATCH 1/3] docs using litellm proxy

---
 docs/my-website/docs/proxy/quick_start.md | 28 +++++++++++++++++++++++
 docs/my-website/docs/proxy/user_keys.md   | 28 +++++++++++++++++++++++
 2 files changed, 56 insertions(+)

diff --git a/docs/my-website/docs/proxy/quick_start.md b/docs/my-website/docs/proxy/quick_start.md
index 21698bd49..9da860b0d 100644
--- a/docs/my-website/docs/proxy/quick_start.md
+++ b/docs/my-website/docs/proxy/quick_start.md
@@ -388,6 +388,34 @@ print(response)
 ```
 
 </TabItem>
+
+<TabItem value="anthropic-sdk" label="Anthropic Python SDK">
+
+```python
+import os
+
+from anthropic import Anthropic
+
+client = Anthropic(
+    base_url="http://localhost:4000", # proxy endpoint
+    api_key="sk-s4xN1IiLTCytwtZFJaYQrA", # litellm proxy virtual key
+)
+
+message = client.messages.create(
+    max_tokens=1024,
+    messages=[
+        {
+            "role": "user",
+            "content": "Hello, Claude",
+        }
+    ],
+    model="claude-3-opus-20240229",
+)
+print(message.content)
+```
+
+</TabItem>
+
 </Tabs>
 
 [**More Info**](./configs.md)
diff --git a/docs/my-website/docs/proxy/user_keys.md b/docs/my-website/docs/proxy/user_keys.md
index 44e1c8842..7417ef6bd 100644
--- a/docs/my-website/docs/proxy/user_keys.md
+++ b/docs/my-website/docs/proxy/user_keys.md
@@ -234,6 +234,34 @@ main();
 ```
 
 </TabItem>
+
+<TabItem value="anthropic-sdk" label="Anthropic Python SDK">
+
+```python
+import os
+
+from anthropic import Anthropic
+
+client = Anthropic(
+    base_url="http://localhost:4000", # proxy endpoint
+    api_key="sk-s4xN1IiLTCytwtZFJaYQrA", # litellm proxy virtual key
+)
+
+message = client.messages.create(
+    max_tokens=1024,
+    messages=[
+        {
+            "role": "user",
+            "content": "Hello, Claude",
+        }
+    ],
+    model="claude-3-opus-20240229",
+)
+print(message.content)
+```
+
+</TabItem>
+
 </Tabs>
 
 ```python

From 9247fc3c64335b920a55fabef1f72bbe5b4d7c2b Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 20:09:49 -0700
Subject: [PATCH 2/3] deploy link to using litellm

---
 docs/my-website/docs/proxy/deploy.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/docs/my-website/docs/proxy/deploy.md b/docs/my-website/docs/proxy/deploy.md
index ff575f0d4..e8bc432b8 100644
--- a/docs/my-website/docs/proxy/deploy.md
+++ b/docs/my-website/docs/proxy/deploy.md
@@ -254,6 +254,15 @@
 Your OpenAI proxy server is now running on `http://127.0.0.1:4000`.
 
 **That's it ! That's the quick start to deploy litellm**
+## Use with Langchain, OpenAI SDK, LlamaIndex, Instructor, Curl
+
+:::info
+💡 Go here 👉 [to make your first LLM API Request](user_keys)
+
+LiteLLM is compatible with several SDKs - including the OpenAI SDK, Anthropic SDK, Mistral SDK, LlamaIndex, and Langchain (JS, Python).
+
+:::
+
 ## Options to deploy LiteLLM
 
 | Docs | When to Use |

From 50bf488b58f790d191adfd53963b603ebc216bf4 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 25 Jul 2024 20:10:02 -0700
Subject: [PATCH 3/3] read me link to using litellm

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index 3ac5f0285..f36f189f3 100644
--- a/README.md
+++ b/README.md
@@ -166,6 +166,10 @@ $ litellm --model huggingface/bigcode/starcoder
 
 ### Step 2: Make ChatCompletions Request to Proxy
 
+
+> [!IMPORTANT]
+> [Use with Langchain (Python, JS), OpenAI SDK (Python, JS), Anthropic SDK, Mistral SDK, LlamaIndex, Instructor, Curl](https://docs.litellm.ai/docs/migration)
+
 ```python
 import openai # openai v1.0.0+
 client = openai.OpenAI(api_key="anything",base_url="http://0.0.0.0:4000") # set proxy to base_url
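
---

The README hunk in PATCH 3/3 ends on the first lines of the OpenAI example it sits above. For an end-to-end check of the proxy these docs describe, here is a minimal sketch using the OpenAI SDK, reusing the `base_url` and placeholder `api_key` from the patch's context lines; the model name and prompt are illustrative assumptions, not taken from the patches:

```python
import openai  # openai v1.0.0+

# Point the OpenAI client at the LiteLLM proxy instead of api.openai.com.
client = openai.OpenAI(
    api_key="anything",              # any string works unless virtual keys are enforced
    base_url="http://0.0.0.0:4000",  # set proxy to base_url
)

# The proxy resolves the model name against its own config
# (e.g. whatever was passed to `litellm --model`) - the name below is an assumption.
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "this is a test request, write a short poem"}],
)
print(response.choices[0].message.content)
```

Because the proxy exposes an OpenAI-compatible `/chat/completions` endpoint, any OpenAI-SDK-based tool (Langchain, LlamaIndex, Instructor) can be pointed at it the same way - which is what the note added to deploy.md in PATCH 2/3 is getting at.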
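The Anthropic tab added in PATCH 1/3 makes a blocking `messages.create` call. A streaming variant via the same SDK is sketched below - assuming the proxy relays Anthropic's streaming responses, which these patches do not themselves assert - reusing the endpoint and virtual key from the docs example:

```python
from anthropic import Anthropic

client = Anthropic(
    base_url="http://localhost:4000",      # proxy endpoint, as in the docs example
    api_key="sk-s4xN1IiLTCytwtZFJaYQrA",   # litellm proxy virtual key, as in the docs example
)

# messages.stream() opens a streaming request; text_stream yields text deltas
# as the proxy relays them from the upstream model.
with client.messages.stream(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
    model="claude-3-opus-20240229",
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)
```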