diff --git a/docs/my-website/docs/getting_started.md b/docs/my-website/docs/getting_started.md
index f0cef0313..ef9a893e8 100644
--- a/docs/my-website/docs/getting_started.md
+++ b/docs/my-website/docs/getting_started.md
@@ -1,15 +1,19 @@
# Getting Started
+import QuickStart from '../src/components/QuickStart.js'
+
LiteLLM simplifies LLM API calls by mapping them all to the [OpenAI ChatCompletion format](https://platform.openai.com/docs/api-reference/chat).
## basic usage
+By default we provide a free $100 key to try all providers supported on LiteLLM.
+
```python
from litellm import completion
## set ENV variables
-os.environ["OPENAI_API_KEY"] = "openai key"
-os.environ["COHERE_API_KEY"] = "cohere key"
+os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
messages = [{ "content": "Hello, how are you?","role": "user"}]
@@ -19,10 +23,11 @@ response = completion(model="gpt-3.5-turbo", messages=messages)
# cohere call
response = completion("command-nightly", messages)
```
+Next Steps 👉 [All supported models](./proxy_api.md)
More details 👉
* [Completion() function details](./completion/)
-* [Supported models / providers](./providers/)
+* [All supported models / providers on LiteLLM](./providers/)
## streaming
@@ -41,6 +46,8 @@ response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
# cohere call
response = completion("command-nightly", messages, stream=True)
+
+print(response)
```
More details 👉
diff --git a/docs/my-website/docs/proxy_api.md b/docs/my-website/docs/proxy_api.md
index 8de2213f2..1653d8056 100644
--- a/docs/my-website/docs/proxy_api.md
+++ b/docs/my-website/docs/proxy_api.md
@@ -1,47 +1,24 @@
-import TokenGen from '../src/components/TokenGen.js'
+# Supported Models via API
-# 🚨 LiteLLM API (Access Claude-2,Llama2-70b,etc.)
-
-Use this if you're trying to add support for new LLMs and need access for testing:
-
-# usage
-
-## Step 1: Save your LiteLLM API Key
-
-This is your unique LiteLLM API Key. It has a max budget of $100 which is reset monthly, and works across all models in [Supported Models](#supported-models). Save this for later use.
-
+      {`
+from litellm import completion
+import os
+
+## set ENV variables
+os.environ["OPENAI_API_KEY"] = "${token}"
+os.environ["COHERE_API_KEY"] = "${token}"
+
+messages = [{ "content": "Hello, how are you?","role": "user"}]
+
+# openai call
+response = completion(model="gpt-3.5-turbo", messages=messages)
+
+# cohere call
+response = completion("command-nightly", messages)
+      `}
+  );
+};
+
+const QuickStart = () => {
+  const [token, setToken] = useState(null);
+
+  useEffect(() => {
+    const generateToken = async () => {
+      try {
+        const response = await fetch('https://proxy.litellm.ai/key/new', {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': 'Bearer sk-liteplayground',
+          },
+          body: JSON.stringify({'total_budget': 100})
+        });
+
+        if (!response.ok) {
+          throw new Error('Network response was not ok');
+        }
+
+        const data = await response.json();
+
+        setToken(`${data.api_key}`);
+      } catch (error) {
+        console.error('Failed to fetch new token: ', error);
+      }
+    };
+
+    generateToken();
+  }, []);
+
+  return (