forked from phoenix/litellm-mirror
docs(assistants.md): add assistants api to docs
This commit is contained in:
parent
d83b4a00d3
commit
a0fb301b18
5 changed files with 305 additions and 3 deletions
183
docs/my-website/docs/assistants.md
Normal file
183
docs/my-website/docs/assistants.md
Normal file
|
@ -0,0 +1,183 @@
|
||||||
|
import Tabs from '@theme/Tabs';
|
||||||
|
import TabItem from '@theme/TabItem';
|
||||||
|
|
||||||
|
# Assistants API
|
||||||
|
|
||||||
|
Covers Threads, Messages, Assistants.
|
||||||
|
|
||||||
|
LiteLLM currently covers:
|
||||||
|
- Get Assistants
|
||||||
|
- Create Thread
|
||||||
|
- Get Thread
|
||||||
|
- Add Messages
|
||||||
|
- Get Messages
|
||||||
|
- Run Thread
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
Call an existing Assistant.
|
||||||
|
|
||||||
|
- Get the Assistant
|
||||||
|
|
||||||
|
- Create a Thread when a user starts a conversation.
|
||||||
|
|
||||||
|
- Add Messages to the Thread as the user asks questions.
|
||||||
|
|
||||||
|
- Run the Assistant on the Thread to generate a response by calling the model and the tools.
|
||||||
|
|
||||||
|
<Tabs>
|
||||||
|
<TabItem value="sdk" label="SDK">
|
||||||
|
|
||||||
|
**Get the Assistant**
|
||||||
|
|
||||||
|
```python
|
||||||
|
from litellm import get_assistants, aget_assistants
|
||||||
|
import os
|
||||||
|
|
||||||
|
os.environ["OPENAI_API_KEY"] = "sk-.."
|
||||||
|
|
||||||
|
assistants = get_assistants(custom_llm_provider="openai")
|
||||||
|
|
||||||
|
### ASYNC USAGE ###
|
||||||
|
# assistants = await aget_assistants(custom_llm_provider="openai")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Create a Thread**
|
||||||
|
|
||||||
|
```python
|
||||||
|
from litellm import create_thread, acreate_thread
|
||||||
|
import os
|
||||||
|
|
||||||
|
os.environ["OPENAI_API_KEY"] = "sk-.."
|
||||||
|
|
||||||
|
new_thread = create_thread(
|
||||||
|
custom_llm_provider="openai",
|
||||||
|
messages=[{"role": "user", "content": "Hey, how's it going?"}], # type: ignore
|
||||||
|
)
|
||||||
|
|
||||||
|
### ASYNC USAGE ###
|
||||||
|
# new_thread = await acreate_thread(custom_llm_provider="openai",messages=[{"role": "user", "content": "Hey, how's it going?"}])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Add Messages to the Thread**
|
||||||
|
|
||||||
|
```python
|
||||||
|
from litellm import create_thread, get_thread, aget_thread, add_message, a_add_message
|
||||||
|
import os
|
||||||
|
|
||||||
|
os.environ["OPENAI_API_KEY"] = "sk-.."
|
||||||
|
|
||||||
|
## CREATE A THREAD
|
||||||
|
_new_thread = create_thread(
|
||||||
|
custom_llm_provider="openai",
|
||||||
|
messages=[{"role": "user", "content": "Hey, how's it going?"}], # type: ignore
|
||||||
|
)
|
||||||
|
|
||||||
|
## OR retrieve existing thread
|
||||||
|
received_thread = get_thread(
|
||||||
|
custom_llm_provider="openai",
|
||||||
|
thread_id=_new_thread.id,
|
||||||
|
)
|
||||||
|
|
||||||
|
### ASYNC USAGE ###
|
||||||
|
# received_thread = await aget_thread(custom_llm_provider="openai", thread_id=_new_thread.id,)
|
||||||
|
|
||||||
|
## ADD MESSAGE TO THREAD
|
||||||
|
message = {"role": "user", "content": "Hey, how's it going?"}
|
||||||
|
added_message = add_message(
|
||||||
|
thread_id=_new_thread.id, custom_llm_provider="openai", **message
|
||||||
|
)
|
||||||
|
|
||||||
|
### ASYNC USAGE ###
|
||||||
|
# added_message = await a_add_message(thread_id=_new_thread.id, custom_llm_provider="openai", **message)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Run the Assistant on the Thread**
|
||||||
|
|
||||||
|
```python
|
||||||
|
from litellm import get_assistants, create_thread, add_message, run_thread, arun_thread
|
||||||
|
import os
|
||||||
|
|
||||||
|
os.environ["OPENAI_API_KEY"] = "sk-.."
|
||||||
|
assistants = get_assistants(custom_llm_provider="openai")
|
||||||
|
|
||||||
|
## get the first assistant ##
|
||||||
|
assistant_id = assistants.data[0].id
|
||||||
|
|
||||||
|
## GET A THREAD
|
||||||
|
_new_thread = create_thread(
|
||||||
|
custom_llm_provider="openai",
|
||||||
|
messages=[{"role": "user", "content": "Hey, how's it going?"}], # type: ignore
|
||||||
|
)
|
||||||
|
|
||||||
|
## ADD MESSAGE
|
||||||
|
message = {"role": "user", "content": "Hey, how's it going?"}
|
||||||
|
added_message = add_message(
|
||||||
|
thread_id=_new_thread.id, custom_llm_provider="openai", **message
|
||||||
|
)
|
||||||
|
|
||||||
|
## 🚨 RUN THREAD
|
||||||
|
response = run_thread(
|
||||||
|
custom_llm_provider="openai", thread_id=_new_thread.id, assistant_id=assistant_id
|
||||||
|
)
|
||||||
|
|
||||||
|
### ASYNC USAGE ###
|
||||||
|
# response = await arun_thread(custom_llm_provider="openai", thread_id=_new_thread.id, assistant_id=assistant_id)
|
||||||
|
|
||||||
|
print(f"run_thread: {response}")
|
||||||
|
```
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="proxy" label="PROXY">
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ export OPENAI_API_KEY="sk-..."
|
||||||
|
|
||||||
|
$ litellm
|
||||||
|
|
||||||
|
# RUNNING on http://0.0.0.0:4000
|
||||||
|
```
|
||||||
|
|
||||||
|
**Get the Assistant**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl "http://0.0.0.0:4000/v1/assistants?order=desc&limit=20" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "Authorization: Bearer sk-1234"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Create a Thread**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl http://0.0.0.0:4000/v1/threads \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "Authorization: Bearer sk-1234" \
|
||||||
|
-d ''
|
||||||
|
```
|
||||||
|
|
||||||
|
**Add Messages to the Thread**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl http://0.0.0.0:4000/v1/threads/{thread_id}/messages \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "Authorization: Bearer sk-1234" \
|
||||||
|
-d '{
|
||||||
|
"role": "user",
|
||||||
|
"content": "How does AI work? Explain it in simple terms."
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
**Run the Assistant on the Thread**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl http://0.0.0.0:4000/v1/threads/thread_abc123/runs \
|
||||||
|
-H "Authorization: Bearer sk-1234" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"assistant_id": "asst_abc123"
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
## [👉 Proxy API Reference](https://litellm-api.up.railway.app/#/assistants)
|
87
docs/my-website/docs/text_to_speech.md
Normal file
87
docs/my-website/docs/text_to_speech.md
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
# Text to Speech
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```python
|
||||||
|
from pathlib import Path
|
||||||
|
from litellm import speech
|
||||||
|
import os
|
||||||
|
|
||||||
|
os.environ["OPENAI_API_KEY"] = "sk-.."
|
||||||
|
|
||||||
|
speech_file_path = Path(__file__).parent / "speech.mp3"
|
||||||
|
response = speech(
|
||||||
|
model="openai/tts-1",
|
||||||
|
voice="alloy",
|
||||||
|
input="the quick brown fox jumped over the lazy dogs",
|
||||||
|
api_base=None,
|
||||||
|
api_key=None,
|
||||||
|
organization=None,
|
||||||
|
project=None,
|
||||||
|
max_retries=1,
|
||||||
|
timeout=600,
|
||||||
|
client=None,
|
||||||
|
optional_params={},
|
||||||
|
)
|
||||||
|
response.stream_to_file(speech_file_path)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Async Usage
|
||||||
|
|
||||||
|
```python
|
||||||
|
from litellm import aspeech
|
||||||
|
from pathlib import Path
|
||||||
|
import os, asyncio
|
||||||
|
|
||||||
|
os.environ["OPENAI_API_KEY"] = "sk-.."
|
||||||
|
|
||||||
|
async def test_async_speech():
|
||||||
|
speech_file_path = Path(__file__).parent / "speech.mp3"
|
||||||
|
response = await aspeech(
|
||||||
|
model="openai/tts-1",
|
||||||
|
voice="alloy",
|
||||||
|
input="the quick brown fox jumped over the lazy dogs",
|
||||||
|
api_base=None,
|
||||||
|
api_key=None,
|
||||||
|
organization=None,
|
||||||
|
project=None,
|
||||||
|
max_retries=1,
|
||||||
|
timeout=600,
|
||||||
|
client=None,
|
||||||
|
optional_params={},
|
||||||
|
)
|
||||||
|
response.stream_to_file(speech_file_path)
|
||||||
|
|
||||||
|
asyncio.run(test_async_speech())
|
||||||
|
```
|
||||||
|
|
||||||
|
## Proxy Usage
|
||||||
|
|
||||||
|
LiteLLM provides an openai-compatible `/audio/speech` endpoint for Text-to-speech calls.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl http://0.0.0.0:4000/v1/audio/speech \
|
||||||
|
-H "Authorization: Bearer sk-1234" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"model": "tts-1",
|
||||||
|
"input": "The quick brown fox jumped over the lazy dog.",
|
||||||
|
"voice": "alloy"
|
||||||
|
}' \
|
||||||
|
--output speech.mp3
|
||||||
|
```
|
||||||
|
|
||||||
|
**Setup**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
- model_name: tts
|
||||||
|
litellm_params:
|
||||||
|
model: openai/tts-1
|
||||||
|
api_key: os.environ/OPENAI_API_KEY
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
litellm --config /path/to/config.yaml
|
||||||
|
|
||||||
|
# RUNNING on http://0.0.0.0:4000
|
||||||
|
```
|
|
@ -100,13 +100,15 @@ const sidebars = {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
type: "category",
|
type: "category",
|
||||||
label: "Embedding(), Moderation(), Image Generation(), Audio Transcriptions()",
|
label: "Embedding(), Image Generation(), Assistants(), Moderation(), Audio Transcriptions(), TTS()",
|
||||||
items: [
|
items: [
|
||||||
"embedding/supported_embedding",
|
"embedding/supported_embedding",
|
||||||
"embedding/async_embedding",
|
"embedding/async_embedding",
|
||||||
"embedding/moderation",
|
"embedding/moderation",
|
||||||
"image_generation",
|
"image_generation",
|
||||||
"audio_transcription"
|
"audio_transcription",
|
||||||
|
"text_to_speech",
|
||||||
|
"assistants"
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
|
@ -4120,7 +4120,7 @@ def transcription(
|
||||||
or litellm.api_key
|
or litellm.api_key
|
||||||
or litellm.azure_key
|
or litellm.azure_key
|
||||||
or get_secret("AZURE_API_KEY")
|
or get_secret("AZURE_API_KEY")
|
||||||
)
|
) # type: ignore
|
||||||
|
|
||||||
response = azure_chat_completions.audio_transcriptions(
|
response = azure_chat_completions.audio_transcriptions(
|
||||||
model=model,
|
model=model,
|
||||||
|
|
|
@ -5337,6 +5337,11 @@ async def get_assistants(
|
||||||
fastapi_response: Response,
|
fastapi_response: Response,
|
||||||
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Returns a list of assistants.
|
||||||
|
|
||||||
|
API Reference docs - https://platform.openai.com/docs/api-reference/assistants/listAssistants
|
||||||
|
"""
|
||||||
global proxy_logging_obj
|
global proxy_logging_obj
|
||||||
data: Dict = {}
|
data: Dict = {}
|
||||||
try:
|
try:
|
||||||
|
@ -5463,6 +5468,11 @@ async def create_threads(
|
||||||
fastapi_response: Response,
|
fastapi_response: Response,
|
||||||
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Create a thread.
|
||||||
|
|
||||||
|
API Reference - https://platform.openai.com/docs/api-reference/threads/createThread
|
||||||
|
"""
|
||||||
global proxy_logging_obj
|
global proxy_logging_obj
|
||||||
data: Dict = {}
|
data: Dict = {}
|
||||||
try:
|
try:
|
||||||
|
@ -5590,6 +5600,11 @@ async def get_thread(
|
||||||
fastapi_response: Response,
|
fastapi_response: Response,
|
||||||
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Retrieves a thread.
|
||||||
|
|
||||||
|
API Reference - https://platform.openai.com/docs/api-reference/threads/getThread
|
||||||
|
"""
|
||||||
global proxy_logging_obj
|
global proxy_logging_obj
|
||||||
data: Dict = {}
|
data: Dict = {}
|
||||||
try:
|
try:
|
||||||
|
@ -5714,6 +5729,11 @@ async def add_messages(
|
||||||
fastapi_response: Response,
|
fastapi_response: Response,
|
||||||
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Create a message.
|
||||||
|
|
||||||
|
API Reference - https://platform.openai.com/docs/api-reference/messages/createMessage
|
||||||
|
"""
|
||||||
global proxy_logging_obj
|
global proxy_logging_obj
|
||||||
data: Dict = {}
|
data: Dict = {}
|
||||||
try:
|
try:
|
||||||
|
@ -5841,6 +5861,11 @@ async def get_messages(
|
||||||
fastapi_response: Response,
|
fastapi_response: Response,
|
||||||
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Returns a list of messages for a given thread.
|
||||||
|
|
||||||
|
API Reference - https://platform.openai.com/docs/api-reference/messages/listMessages
|
||||||
|
"""
|
||||||
global proxy_logging_obj
|
global proxy_logging_obj
|
||||||
data: Dict = {}
|
data: Dict = {}
|
||||||
try:
|
try:
|
||||||
|
@ -5964,6 +5989,11 @@ async def run_thread(
|
||||||
fastapi_response: Response,
|
fastapi_response: Response,
|
||||||
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Create a run.
|
||||||
|
|
||||||
|
API Reference: https://platform.openai.com/docs/api-reference/runs/createRun
|
||||||
|
"""
|
||||||
global proxy_logging_obj
|
global proxy_logging_obj
|
||||||
data: Dict = {}
|
data: Dict = {}
|
||||||
try:
|
try:
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue