forked from phoenix/litellm-mirror

commit 281491f6cf (parent 56a912ac88)
adding proxy api keys to docs

3 changed files with 85 additions and 38 deletions
@@ -1,15 +1,19 @@
 # Getting Started
 
+import QuickStart from '../src/components/QuickStart.js'
+
 LiteLLM simplifies LLM API calls by mapping them all to the [OpenAI ChatCompletion format](https://platform.openai.com/docs/api-reference/chat).
 
 ## basic usage
 
+By default we provide a free $10 key to try all providers supported on LiteLLM.
+
 ```python
 from litellm import completion
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "openai key"
-os.environ["COHERE_API_KEY"] = "cohere key"
+os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -19,10 +23,11 @@ response = completion(model="gpt-3.5-turbo", messages=messages)
 # cohere call
 response = completion("command-nightly", messages)
 ```
+Next Steps 👉 [All supported models](./proxy_api.md)
 
 More details 👉
 * [Completion() function details](./completion/)
-* [Supported models / providers](./providers/)
+* [All supported models / providers on LiteLLM](./providers/)
 
 ## streaming
 
@@ -41,6 +46,8 @@ response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
 
 # cohere call
 response = completion("command-nightly", messages, stream=True)
 
+print(response)
 ```
+
 More details 👉
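Note: the streaming hunk above enables `stream=True` but the doc snippet only prints the raw response object. A minimal sketch of actually consuming the stream, assuming litellm yields OpenAI-style chunks with the new text under `choices[0]["delta"]` (the exact chunk shape may vary by litellm version):

```python
from litellm import completion
import os

os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"  # or your own key

messages = [{"content": "Hello, how are you?", "role": "user"}]

# stream=True turns the response into an iterator of incremental chunks
response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)

for chunk in response:
    # each chunk mirrors the OpenAI streaming format; new text arrives in the delta
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)
```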

@@ -1,47 +1,24 @@
-import TokenGen from '../src/components/TokenGen.js'
+# Supported Models via API
 
-# 🚨 LiteLLM API (Access Claude-2,Llama2-70b,etc.)
+Use this if you're trying to add support for new LLMs and need access for testing. We provide a free $10 key for testing all providers on LiteLLM:
 
-Use this if you're trying to add support for new LLMs and need access for testing:
-
-# usage
-
-## Step 1: Save your LiteLLM API Key
-
-This is your unique LiteLLM API Key. It has a max budget of $100 which is reset monthly, and works across all models in [Supported Models](#supported-models). Save this for later use.
-<TokenGen/>
-
-## Step 2: Test a new LLM
-
-Now let's call **claude-2** (Anthropic) and **llama2-70b-32k** (TogetherAI).
-
+## usage
 ```python
 from litellm import completion
-import os
-
-# set env var
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
+## set ENV variables
+os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
 
-messages = [{"role": "user", "content": "Hey, how's it going?"}]
+messages = [{ "content": "Hello, how are you?","role": "user"}]
 
-# call claude
-response = completion(model="claude-2", messages=messages)
+# openai call
+response = completion(model="gpt-3.5-turbo", messages=messages)
 
-# call llama2-70b
-response = completion(model="togethercomputer/LLaMA-2-7B-32K", messages=messages)
+# cohere call
+response = completion("command-nightly", messages)
 
-print(response)
 ```
 
-```
-### Testing Llama2-70b on TogetherAI
-Let's call
-
-```
-
-You can use this as a key for any of the [providers we support](./providers/)
-
 ## Supported Models
 
 * OpenAI models
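Note: both docs now set the same `sk-litellm-...` value for every provider variable. A short sketch of that pattern, assuming (as the old and new lines of this diff suggest) the one proxy key substitutes for each provider's own key:

```python
from litellm import completion
import os

# one LiteLLM proxy key stands in for each provider-specific key
LITELLM_KEY = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"  # replace with your own key
for var in ("OPENAI_API_KEY", "COHERE_API_KEY", "ANTHROPIC_API_KEY"):
    os.environ[var] = LITELLM_KEY

messages = [{"content": "Hello, how are you?", "role": "user"}]

# the same key now works for models routed to different providers
for model in ("gpt-3.5-turbo", "command-nightly", "claude-2"):
    response = completion(model=model, messages=messages)
    print(model, response)
```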

docs/my-website/src/components/QuickStart.js (new file, 63 lines)
@@ -0,0 +1,63 @@
+import React, { useState, useEffect } from 'react';
+
+const QuickStartCodeBlock = ({ token }) => {
+  return (
+    <pre>
+      {`
+from litellm import completion
+import os
+
+## set ENV variables
+os.environ["OPENAI_API_KEY"] = "${token}"
+os.environ["COHERE_API_KEY"] = "${token}"
+
+messages = [{ "content": "Hello, how are you?","role": "user"}]
+
+# openai call
+response = completion(model="gpt-3.5-turbo", messages=messages)
+
+# cohere call
+response = completion("command-nightly", messages)
+      `}
+    </pre>
+  );
+};
+
+const QuickStart = () => {
+  const [token, setToken] = useState(null);
+
+  useEffect(() => {
+    const generateToken = async () => {
+      try {
+        const response = await fetch('https://proxy.litellm.ai/key/new', {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': 'Bearer sk-liteplayground',
+          },
+          body: JSON.stringify({'total_budget': 100})
+        });
+
+        if (!response.ok) {
+          throw new Error('Network response was not ok');
+        }
+
+        const data = await response.json();
+
+        setToken(`${data.api_key}`);
+      } catch (error) {
+        console.error('Failed to fetch new token: ', error);
+      }
+    };
+
+    generateToken();
+  }, []);
+
+  return (
+    <div>
+      <QuickStartCodeBlock token={token} />
+    </div>
+  );
+}
+
+export default QuickStart;
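For reference, the request the new component makes can be reproduced outside the browser. A sketch using Python's `requests`, mirroring the `fetch` call above (endpoint, headers, and body are taken directly from the component; `api_key` is the response field the component stores as its token):

```python
import requests

# mirrors QuickStart.js: POST /key/new with the playground bearer token
response = requests.post(
    "https://proxy.litellm.ai/key/new",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-liteplayground",
    },
    json={"total_budget": 100},
)
response.raise_for_status()
print(response.json()["api_key"])  # a fresh sk-litellm-... key
```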