diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md
index 5c97a6601..2379cf4ad 100644
--- a/docs/my-website/docs/proxy/configs.md
+++ b/docs/my-website/docs/proxy/configs.md
@@ -22,18 +22,22 @@ Set a model alias for your deployments.
In the `config.yaml` the model_name parameter is the user-facing name to use for your deployment.
-In the config below requests with:
+In the config below:
+- `model_name`: the model name the external client passes to litellm (the user-facing alias)
+- `litellm_params.model`: the model string passed to the `litellm.completion()` function
+
+E.g. requests with:
- `model=vllm-models` will route to `openai/facebook/opt-125m`.
- `model=gpt-3.5-turbo` will load balance between `azure/gpt-turbo-small-eu` and `azure/gpt-turbo-small-ca`
```yaml
model_list:
- - model_name: gpt-3.5-turbo # user-facing model alias
+ - model_name: gpt-3.5-turbo ### RECEIVED MODEL NAME ###
litellm_params: # all params accepted by litellm.completion() - https://docs.litellm.ai/docs/completion/input
- model: azure/gpt-turbo-small-eu
+ model: azure/gpt-turbo-small-eu ### MODEL NAME sent to `litellm.completion()` ###
api_base: https://my-endpoint-europe-berri-992.openai.azure.com/
api_key: "os.environ/AZURE_API_KEY_EU" # does os.getenv("AZURE_API_KEY_EU")
- rpm: 6 # Rate limit for this deployment: in requests per minute (rpm)
+ rpm: 6 # [OPTIONAL] Rate limit for this deployment: in requests per minute (rpm)
- model_name: bedrock-claude-v1
litellm_params:
model: bedrock/anthropic.claude-instant-v1
@@ -43,6 +47,11 @@ model_list:
api_base: https://my-endpoint-canada-berri992.openai.azure.com/
api_key: "os.environ/AZURE_API_KEY_CA"
rpm: 6
+  - model_name: anthropic-claude
+    litellm_params:
+      model: bedrock/anthropic.claude-instant-v1
+      ### [OPTIONAL] SET AWS REGION ###
+      aws_region_name: us-east-1
- model_name: vllm-models
litellm_params:
model: openai/facebook/opt-125m # the `openai/` prefix tells litellm it's openai compatible
@@ -58,6 +67,11 @@ litellm_settings: # module level litellm settings - https://github.com/BerriAI/l
general_settings:
master_key: sk-1234 # [OPTIONAL] Only use this if you want to require all calls to contain this key (Authorization: Bearer sk-1234)
```
+:::info
+
+For more provider-specific info, [go here](../providers/)
+
+:::
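+
+For example, with the config above a client could exercise the `gpt-3.5-turbo` alias and the `master_key` as follows - a minimal sketch, assuming the proxy is already running on `http://0.0.0.0:8000` (see Step 2 below):
+
+```python
+import openai
+
+# api_key must match general_settings.master_key when one is set
+client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:8000")
+
+# "gpt-3.5-turbo" is the model_name alias from config.yaml - the proxy
+# load balances it across the two azure deployments defined above
+response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "hello"}],
+)
+print(response)
+```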
#### Step 2: Start Proxy with config
diff --git a/docs/my-website/docs/proxy/quick_start.md b/docs/my-website/docs/proxy/quick_start.md
index fec578c96..82acfc79b 100644
--- a/docs/my-website/docs/proxy/quick_start.md
+++ b/docs/my-website/docs/proxy/quick_start.md
@@ -40,115 +40,6 @@ litellm --test
This will now automatically route any requests for gpt-3.5-turbo to bigcode starcoder, hosted on huggingface inference endpoints.
-### Using LiteLLM Proxy - Curl Request, OpenAI Package, Langchain
-
-
-
-
-```shell
-curl --location 'http://0.0.0.0:8000/chat/completions' \
---header 'Content-Type: application/json' \
---data ' {
- "model": "gpt-3.5-turbo",
- "messages": [
- {
- "role": "user",
- "content": "what llm are you"
- }
- ]
- }
-'
-```
-
-
-
-```python
-import openai
-client = openai.OpenAI(
- api_key="anything",
- base_url="http://0.0.0.0:8000"
-)
-
-# request sent to model set on litellm proxy, `litellm --model`
-response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
- {
- "role": "user",
- "content": "this is a test request, write a short poem"
- }
-])
-
-print(response)
-
-```
-
-
-
-```python
-from langchain.chat_models import ChatOpenAI
-from langchain.prompts.chat import (
- ChatPromptTemplate,
- HumanMessagePromptTemplate,
- SystemMessagePromptTemplate,
-)
-from langchain.schema import HumanMessage, SystemMessage
-
-chat = ChatOpenAI(
- openai_api_base="http://0.0.0.0:8000", # set openai_api_base to the LiteLLM Proxy
- model = "gpt-3.5-turbo",
- temperature=0.1
-)
-
-messages = [
- SystemMessage(
- content="You are a helpful assistant that im using to make a test request to."
- ),
- HumanMessage(
- content="test from litellm. tell me why it's amazing in 1 sentence"
- ),
-]
-response = chat(messages)
-
-print(response)
-```
-
-
-
-
-```python
-from langchain.embeddings import OpenAIEmbeddings
-
-embeddings = OpenAIEmbeddings(model="sagemaker-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key")
-
-
-text = "This is a test document."
-
-query_result = embeddings.embed_query(text)
-
-print(f"SAGEMAKER EMBEDDINGS")
-print(query_result[:5])
-
-embeddings = OpenAIEmbeddings(model="bedrock-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key")
-
-text = "This is a test document."
-
-query_result = embeddings.embed_query(text)
-
-print(f"BEDROCK EMBEDDINGS")
-print(query_result[:5])
-
-embeddings = OpenAIEmbeddings(model="bedrock-titan-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key")
-
-text = "This is a test document."
-
-query_result = embeddings.embed_query(text)
-
-print(f"TITAN EMBEDDINGS")
-print(query_result[:5])
-```
-
-
-
-
### Supported LLMs
All LiteLLM supported LLMs are supported on the Proxy. See all [supported llms](https://docs.litellm.ai/docs/providers)
@@ -331,6 +222,113 @@ $ litellm --model command-nightly
+### Using LiteLLM Proxy - Curl Request, OpenAI Package, Langchain
+
+#### Curl Request
+
+```shell
+curl --location 'http://0.0.0.0:8000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data ' {
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": "what llm are you"
+ }
+ ]
+ }
+'
+```
+
+#### OpenAI Package
+
+```python
+import openai
+client = openai.OpenAI(
+ api_key="anything",
+ base_url="http://0.0.0.0:8000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="gpt-3.5-turbo", messages=[
+ {
+ "role": "user",
+ "content": "this is a test request, write a short poem"
+ }
+])
+
+print(response)
+
+```
+
+#### Langchain
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import HumanMessage, SystemMessage
+
+chat = ChatOpenAI(
+ openai_api_base="http://0.0.0.0:8000", # set openai_api_base to the LiteLLM Proxy
+    model="gpt-3.5-turbo",
+ temperature=0.1
+)
+
+messages = [
+ SystemMessage(
+ content="You are a helpful assistant that im using to make a test request to."
+ ),
+ HumanMessage(
+ content="test from litellm. tell me why it's amazing in 1 sentence"
+ ),
+]
+response = chat(messages)
+
+print(response)
+```
+
+#### Langchain Embeddings
+
+```python
+from langchain.embeddings import OpenAIEmbeddings
+
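+# NOTE: the model names below ("sagemaker-embeddings", "bedrock-embeddings",
+# "bedrock-titan-embeddings") are assumed to be model_name aliases defined in
+# the proxy's config.yaml model_list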
+embeddings = OpenAIEmbeddings(model="sagemaker-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key")
+
+
+text = "This is a test document."
+
+query_result = embeddings.embed_query(text)
+
+print(f"SAGEMAKER EMBEDDINGS")
+print(query_result[:5])
+
+embeddings = OpenAIEmbeddings(model="bedrock-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key")
+
+text = "This is a test document."
+
+query_result = embeddings.embed_query(text)
+
+print(f"BEDROCK EMBEDDINGS")
+print(query_result[:5])
+
+embeddings = OpenAIEmbeddings(model="bedrock-titan-embeddings", openai_api_base="http://0.0.0.0:8000", openai_api_key="temp-key")
+
+text = "This is a test document."
+
+query_result = embeddings.embed_query(text)
+
+print(f"TITAN EMBEDDINGS")
+print(query_result[:5])
+```
+
+
## Quick Start - LiteLLM Proxy + Config.yaml