diff --git a/docs/my-website/docs/proxy_api.md b/docs/my-website/docs/proxy_api.md
index 39234203e..29d054f27 100644
--- a/docs/my-website/docs/proxy_api.md
+++ b/docs/my-website/docs/proxy_api.md
@@ -1,33 +1,47 @@
import TokenGen from '../src/components/TokenGen.js'
-# 🚨 LITELLM API (Access Claude-2/Llama2-70b/etc.)
+# 🚨 LiteLLM API (Access Claude-2, Llama2-70b, etc.)
Use this if you're trying to add support for new LLMs and need access for testing:
-Here's how to call it:
+## Usage
## Step 1: Save your LiteLLM API Key
-This is your unique LiteLLM API Key. Save this for later use.
+This is your unique LiteLLM API Key. It has a max budget of $100, which resets monthly, and works across all models in [Supported Models](#supported-models). Save this for later use.
## Step 2: Test a new LLM
-Now let's test if claude-2 is working in our code
+Now let's call **claude-2** (Anthropic) and **llama2-7b-32k** (TogetherAI).
+
```
from litellm import completion
import os
# set env var
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key
+os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
+os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
messages = [{"role": "user", "content": "Hey, how's it going?"}]
+# call claude
response = completion(model="claude-2", messages=messages)
+# call llama2-7b (32k context) on TogetherAI
+response = completion(model="togethercomputer/LLaMA-2-7B-32K", messages=messages)
+
print(response)
```
+```
+```
+### Testing Llama2 on TogetherAI
+The snippet above already includes the TogetherAI call — the same LiteLLM key works for both providers.
+
+
+You can use this as a key for any of the [providers we support](./providers/)
+
## Supported Models
* OpenAI models
diff --git a/docs/my-website/src/components/TokenGen.js b/docs/my-website/src/components/TokenGen.js
index 1e89facc7..5ffa7d48a 100644
--- a/docs/my-website/src/components/TokenGen.js
+++ b/docs/my-website/src/components/TokenGen.js
@@ -1,5 +1,4 @@
import React, { useState, useEffect } from 'react';
-import {v4 as uuidv4} from 'uuid';
const CodeBlock = ({ token }) => {
const codeWithToken = `${token}`;
@@ -15,20 +14,37 @@ const TokenGen = () => {
const [token, setToken] = useState(null);
useEffect(() => {
- const generateToken = () => {
- // Generate a special uuid/token "sk-litellm-"
- const newToken = `sk-litellm-${uuidv4()}`;
- setToken(newToken);
- };
+ const generateToken = async () => {
+ try {
+ const response = await fetch('https://proxy.litellm.ai/key/new', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Bearer sk-liteplayground',
+ },
+ body: JSON.stringify({'total_budget': 100})
+ });
+
+ if (!response.ok) {
+ throw new Error('Network response was not ok');
+ }
+
+ const data = await response.json();
- generateToken();
- }, []);
+ setToken(`${data.api_key}`);
+ } catch (error) {
+ console.error('Failed to fetch new token: ', error);
+ }
+ };
- return (
-
-
-
- );
+ generateToken();
+}, []);
+
+return (
+
+
+
+);
};
-export default TokenGen;
\ No newline at end of file
+export default TokenGen;