adding proxy api keys to docs

This commit is contained in:
Krrish Dholakia 2023-09-19 17:16:15 -07:00
parent 56a912ac88
commit 281491f6cf
3 changed files with 85 additions and 38 deletions

View file

@ -1,15 +1,19 @@
# Getting Started
import QuickStart from '../src/components/QuickStart.js'
LiteLLM simplifies LLM API calls by mapping them all to the [OpenAI ChatCompletion format](https://platform.openai.com/docs/api-reference/chat).
## basic usage
By default we provide a free $10 key to try all providers supported on LiteLLM.
```python
from litellm import completion
## set ENV variables
os.environ["OPENAI_API_KEY"] = "openai key"
os.environ["COHERE_API_KEY"] = "cohere key"
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
messages = [{ "content": "Hello, how are you?","role": "user"}]
@ -19,10 +23,11 @@ response = completion(model="gpt-3.5-turbo", messages=messages)
# cohere call
response = completion("command-nightly", messages)
```
Next Steps 👉 [All supported models](./proxy_api.md)
More details 👉
* [Completion() function details](./completion/)
* [Supported models / providers](./providers/)
* [All supported models / providers on LiteLLM](./providers/)
## streaming
@ -41,6 +46,8 @@ response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
# cohere call
response = completion("command-nightly", messages, stream=True)
print(response)
```
More details 👉

View file

@ -1,47 +1,24 @@
import TokenGen from '../src/components/TokenGen.js'
# Supported Models via API
# 🚨 LiteLLM API (Access Claude-2, Llama2-70b, etc.)
Use this if you're trying to add support for new LLMs and need access for testing:
# usage
## Step 1: Save your LiteLLM API Key
This is your unique LiteLLM API Key. It has a max budget of $100 which is reset monthly, and works across all models in [Supported Models](#supported-models). Save this for later use.
<TokenGen/>
## Step 2: Test a new LLM
Now let's call **claude-2** (Anthropic) and **llama2-7b-32k** (TogetherAI).
Use this if you're trying to add support for new LLMs and need access for testing. We provide a free $10 key for testing all providers on LiteLLM:
## usage
```python
from litellm import completion
import os
from litellm import completion
# set env var
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-1234" # 👈 replace with your unique key from step 1
## set ENV variables
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
messages = [{"role": "user", "content": "Hey, how's it going?"}]
messages = [{ "content": "Hello, how are you?","role": "user"}]
# call claude
response = completion(model="claude-2", messages=messages)
# openai call
response = completion(model="gpt-3.5-turbo", messages=messages)
# call llama2-70b
response = completion(model="togethercomputer/LLaMA-2-7B-32K", messages=messages)
print(response)
# cohere call
response = completion("command-nightly", messages)
```
```
### Testing Llama2-70b on TogetherAI
Let's call
```
You can use this as a key for any of the [providers we support](./providers/)
## Supported Models
* OpenAI models

View file

@ -0,0 +1,63 @@
import React, { useState, useEffect } from 'react';
const QuickStartCodeBlock = ({ token }) => {
return (
<pre>
{`
from litellm import completion
import os
## set ENV variables
os.environ["OPENAI_API_KEY"] = "${token}"
os.environ["COHERE_API_KEY"] = "${token}"
messages = [{ "content": "Hello, how are you?","role": "user"}]
# openai call
response = completion(model="gpt-3.5-turbo", messages=messages)
# cohere call
response = completion("command-nightly", messages)
`}
</pre>
);
};
const QuickStart = () => {
const [token, setToken] = useState(null);
useEffect(() => {
const generateToken = async () => {
try {
const response = await fetch('https://proxy.litellm.ai/key/new', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer sk-liteplayground',
},
body: JSON.stringify({'total_budget': 100})
});
if (!response.ok) {
throw new Error('Network response was not ok');
}
const data = await response.json();
setToken(`${data.api_key}`);
} catch (error) {
console.error('Failed to fetch new token: ', error);
}
};
generateToken();
}, []);
return (
<div>
<QuickStartCodeBlock token={token} />
</div>
);
}
export default QuickStart;