update proxy api docs

Krrish Dholakia 2023-09-18 16:28:04 -07:00
parent 5acd1c9d47
commit 6fbd01e715
2 changed files with 49 additions and 19 deletions


@@ -1,33 +1,47 @@
 import TokenGen from '../src/components/TokenGen.js'
-# 🚨 LITELLM API (Access Claude-2/Llama2-70b/etc.)
+# 🚨 LiteLLM API (Access Claude-2,Llama2-70b,etc.)
 Use this if you're trying to add support for new LLMs and need access for testing:
-Here's how to call it:
+# usage
 ## Step 1: Save your LiteLLM API Key
-This is your unique LiteLLM API Key. Save this for later use.
+This is your unique LiteLLM API Key. It has a max budget of $100 which is reset monthly, and works across all models in [Supported Models](#supported-models). Save this for later use.
 <TokenGen/>
 ## Step 2: Test a new LLM
-Now let's test if claude-2 is working in our code
+Now let's call **claude-2** (Anthropic) and **llama2-70b-32k** (TogetherAI).
 ```
 from litellm import completion
 import os
 # set env var
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key
+os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
+os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
 messages = [{"role": "user", "content": "Hey, how's it going?"}]
+# call claude
 response = completion(model="claude-2", messages=messages)
+# call llama2-70b
+response = completion(model="togethercomputer/LLaMA-2-7B-32K", messages=messages)
 print(response)
 ```
-```
-### Testing Llama2-70b on TogetherAI
-Let's call
-```
+You can use this as a key for any of the [providers we support](./providers/)
 ## Supported Models
 * OpenAI models
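
The new copy in this diff says the key works across every model in Supported Models, not just the two it demonstrates. A minimal sketch of that cross-provider usage, assuming the sk-litellm key can stand in for an OpenAI key the same way the diff uses it for Anthropic and TogetherAI (an extrapolation, not something this commit shows):

```
from litellm import completion
import os

# Assumption: the sk-litellm key follows the same env-var pattern the diff
# shows for ANTHROPIC_API_KEY / TOGETHERAI_API_KEY, so it should also work
# in place of an OpenAI key.
os.environ["OPENAI_API_KEY"] = "sk-litellm-1234"  # 👈 your key from step 1

messages = [{"role": "user", "content": "Hey, how's it going?"}]
response = completion(model="gpt-3.5-turbo", messages=messages)
print(response)
```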


@@ -1,5 +1,4 @@
 import React, { useState, useEffect } from 'react';
-import {v4 as uuidv4} from 'uuid';
 const CodeBlock = ({ token }) => {
   const codeWithToken = `${token}`;
@@ -15,20 +14,37 @@ const TokenGen = () => {
   const [token, setToken] = useState(null);
   useEffect(() => {
-    const generateToken = () => {
-      // Generate a special uuid/token "sk-litellm-<uuid>"
-      const newToken = `sk-litellm-${uuidv4()}`;
-      setToken(newToken);
+    const generateToken = async () => {
+      try {
+        const response = await fetch('https://proxy.litellm.ai/key/new', {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': 'Bearer sk-liteplayground',
+          },
+          body: JSON.stringify({'total_budget': 100})
+        });
+        if (!response.ok) {
+          throw new Error('Network response was not ok');
+        }
+        const data = await response.json();
+        setToken(`${data.api_key}`);
+      } catch (error) {
+        console.error('Failed to fetch new token: ', error);
+      }
     };
     generateToken();
   }, []);
   return (
     <div>
       <CodeBlock token={token} />
     </div>
   );
 };
 export default TokenGen;
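
The rewrite above swaps a client-side UUID for a real provisioning call to https://proxy.litellm.ai/key/new, so budgets can be enforced server-side instead of trusting whatever the browser mints. For reproducing that call outside the React component, here is a minimal Python sketch: the endpoint, headers, body, and the api_key response field are all taken from the diff; anything else about the response shape is assumed.

```
import requests

# Same request the TokenGen component now makes; 'sk-liteplayground' is the
# playground bearer token visible in the diff above.
response = requests.post(
    "https://proxy.litellm.ai/key/new",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-liteplayground",
    },
    json={"total_budget": 100},  # the $100 monthly budget the docs mention
)
response.raise_for_status()
print(response.json()["api_key"])  # an sk-litellm-... key, per the docs
```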