Commit 8b8e5890e3: "(docs) routing"
Parent: 4cfaf7ff95
Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-24 18:24:20 +00:00
1 changed file with 63 additions and 15 deletions
@@ -250,37 +250,85 @@ Queue your LLM API requests to ensure you're under your rate limits
- Step 1: Create a `/queue/request` request
- Step 2: Poll your request to check if it's completed

## Step 1: Add a config to the proxy, generate a temp key

```python
import requests
import time

# proxy config for this key; left empty here, fill in your own settings
config = {

}

response = requests.post(
    url="http://0.0.0.0:8000/key/generate",
    json={
        "config": config,
        "duration": "30d"  # defaults to 30d, set it to 30m if you want a temp key
    },
    headers={
        "Authorization": "Bearer sk-hosted-litellm"
    }
)

print("\nresponse from generating key", response.json())

generated_key = response.json()["key"]
print("\ngenerated key for proxy", generated_key)
```
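
The `config` above is deliberately left empty. As a rough sketch of what you might put there, assuming `/key/generate` accepts the same `model_list` shape used in litellm proxy config files (the model names and environment variable below are illustrative, not required values):

```python
import os

# Hypothetical config -- assumes /key/generate takes the model_list shape
# used by litellm proxy config files; adjust to your actual deployment.
config = {
    "model_list": [
        {
            "model_name": "gpt-3.5-turbo",  # alias clients will request
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": os.environ["OPENAI_API_KEY"],  # illustrative key source
            },
        }
    ]
}
```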

## Step 2: Queue a request to the proxy, using your generated_key

```python
job_response = requests.post(
    url="http://0.0.0.0:8000/queue/request",
    json={
        'model': 'gpt-3.5-turbo',
        'messages': [
            {'role': 'system', 'content': 'You are a helpful assistant. What is your name?'},
        ],
    },
    headers={
        "Authorization": f"Bearer {generated_key}"
    }
)

job_response = job_response.json()
job_id = job_response["id"]
polling_url = job_response["url"]
polling_url = f"http://0.0.0.0:8000{polling_url}"
print("\nCreated Job, Polling Url", polling_url)
```
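
The fields read out of `job_response` above imply a payload of roughly this shape; the values below are made up for illustration and the exact schema depends on your proxy version:

```python
# Hypothetical response body -- field names inferred from the snippet above,
# values are placeholders.
job_response = {
    "id": "example-job-id",                  # job identifier
    "url": "/queue/response/example-job-id"  # relative polling path
}
```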

## Step 3: Poll the request

```python
while True:
    try:
        print("\nPolling URL", polling_url)
        polling_response = requests.get(
            url=polling_url,
            headers={
                "Authorization": f"Bearer {generated_key}"
            }
        )
        polling_response = polling_response.json()
        print("\nResponse from polling url", polling_response)
        status = polling_response["status"]
        if status == "finished":
            llm_response = polling_response["result"]
            print("LLM Response")
            print(llm_response)
            break
        time.sleep(0.5)
    except Exception as e:
        print("got exception in polling", e)
        break
```
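
If you call this from application code, the same loop can be wrapped as a small helper with a timeout. This is a minimal sketch, not part of the proxy API: the function name, timeout, and interval are my own choices, and it only assumes the `status`/`result` fields shown above.

```python
import time
import requests

def poll_job(polling_url: str, api_key: str, timeout_s: float = 60.0, interval_s: float = 0.5):
    """Poll a queued job until it reports status 'finished' or timeout_s elapses."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        resp = requests.get(
            polling_url,
            headers={"Authorization": f"Bearer {api_key}"},
        )
        body = resp.json()
        if body.get("status") == "finished":
            return body["result"]
        time.sleep(interval_s)
    raise TimeoutError(f"job at {polling_url} did not finish within {timeout_s}s")

# usage, assuming polling_url and generated_key from the steps above:
# llm_response = poll_job(polling_url, generated_key)
```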
<!-- ## litellm.completion()

If you're calling litellm.completion(), here are the different reliability options you can enable.