(test) test q

This commit is contained in:
ishaan-jaff 2023-11-21 19:45:46 -08:00
parent 363d74cf7d
commit 580e6dc166

View file

@@ -1,14 +1,25 @@
 import requests
 import time
+import os
+from dotenv import load_dotenv
+load_dotenv()
 # Set the base URL as needed
-# base_url = "https://litellm-api.onrender.com"
+base_url = "https://api.litellm.ai"
 # Uncomment the line below if you want to switch to the local server
-base_url = "http://0.0.0.0:8000"
+# base_url = "http://0.0.0.0:8000"
 # Step 1 Add a config to the proxy, generate a temp key
 config = {
     "model_list": [
+        {
+            "model_name": "gpt-3.5-turbo",
+            "litellm_params": {
+                "model": "gpt-3.5-turbo",
+                "api_key": os.environ['OPENAI_API_KEY'],
+            }
+        }
     ]
 }
@@ -19,7 +30,7 @@ response = requests.post(
         "duration": "30d" # default to 30d, set it to 30m if you want a temp key
     },
     headers={
-        "Authorization": "Bearer sk-1234"
+        "Authorization": "Bearer sk-hosted-litellm"
     }
 )