forked from phoenix/litellm-mirror
update proxy api docs
This commit is contained in:
parent
5acd1c9d47
commit
6fbd01e715
2 changed files with 49 additions and 19 deletions
|
@ -1,33 +1,47 @@
|
|||
import TokenGen from '../src/components/TokenGen.js'
|
||||
|
||||
# 🚨 LITELLM API (Access Claude-2/Llama2-70b/etc.)
|
||||
# 🚨 LiteLLM API (Access Claude-2, Llama2-70b, etc.)
|
||||
|
||||
Use this if you're trying to add support for new LLMs and need access for testing:
|
||||
|
||||
Here's how to call it:
|
||||
# usage
|
||||
|
||||
## Step 1: Save your LiteLLM API Key
|
||||
|
||||
This is your unique LiteLLM API Key. Save this for later use.
|
||||
This is your unique LiteLLM API Key. It has a max budget of $100 which is reset monthly, and works across all models in [Supported Models](#supported-models). Save this for later use.
|
||||
<TokenGen/>
|
||||
|
||||
## Step 2: Test a new LLM
|
||||
|
||||
Now let's test if claude-2 is working in our code
|
||||
Now let's call **claude-2** (Anthropic) and **llama-2-7b-32k** (TogetherAI).
|
||||
|
||||
```
|
||||
# Example: call two hosted models through the LiteLLM proxy.
from litellm import completion
import os

# set env vars
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234"   # 👈 replace with your unique key from step 1
os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-1234"  # 👈 replace with your unique key from step 1

messages = [{"role": "user", "content": "Hey, how's it going?"}]

# call claude (Anthropic) — print immediately: the original example
# overwrote `response` with the second call, so this result was never shown
claude_response = completion(model="claude-2", messages=messages)
print(claude_response)

# call llama-2-7b-32k (TogetherAI)
# NOTE(review): surrounding docs say "llama2-70b" but this model id is the
# 7B 32K-context variant — confirm which model was intended.
llama_response = completion(model="togethercomputer/LLaMA-2-7B-32K", messages=messages)
print(llama_response)
|
||||
```
|
||||
|
||||
```
|
||||
### Testing Llama2-70b on TogetherAI
|
||||
Let's call
|
||||
|
||||
```
|
||||
|
||||
You can use this as a key for any of the [providers we support](./providers/).
|
||||
|
||||
## Supported Models
|
||||
|
||||
* OpenAI models
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
import React, { useState, useEffect } from 'react';
|
||||
import {v4 as uuidv4} from 'uuid';
|
||||
|
||||
const CodeBlock = ({ token }) => {
|
||||
const codeWithToken = `${token}`;
|
||||
|
@ -15,10 +14,27 @@ const TokenGen = () => {
|
|||
const [token, setToken] = useState(null);
|
||||
|
||||
useEffect(() => {
|
||||
const generateToken = () => {
|
||||
// Generate a special uuid/token "sk-litellm-<uuid>"
|
||||
const newToken = `sk-litellm-${uuidv4()}`;
|
||||
setToken(newToken);
|
||||
// Request a fresh API key from the LiteLLM proxy and store it in component
// state via setToken. Best-effort: on any failure we log and leave the
// token unset so the docs page still renders.
const generateToken = async () => {
  try {
    const response = await fetch('https://proxy.litellm.ai/key/new', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        // NOTE(review): this bearer token ships to every visitor's browser —
        // acceptable only if it is a public playground credential; confirm.
        'Authorization': 'Bearer sk-liteplayground',
      },
      // $100 budget, matching the "max budget of $100" promise in the docs.
      body: JSON.stringify({ total_budget: 100 }),
    });

    if (!response.ok) {
      // Include the status so failures are diagnosable from the console.
      throw new Error(`Network response was not ok (status ${response.status})`);
    }

    const data = await response.json();

    // Fix: original wrapped this in a template literal, which would turn a
    // missing api_key into the string "undefined".
    setToken(data.api_key);
  } catch (error) {
    // Deliberately swallowed: token generation is a convenience, not a
    // hard requirement for rendering the page.
    console.error('Failed to fetch new token: ', error);
  }
};
|
||||
|
||||
generateToken();
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue