forked from phoenix/litellm-mirror
feat - migration script to hosted proxy
parent bc50b0a4a1
commit a36c2f8e92
2 changed files with 138 additions and 0 deletions
73
cookbook/misc/config.yaml
Normal file
@@ -0,0 +1,73 @@
model_list:
  - model_name: gpt-3.5-turbo
    litellm_params:
      model: azure/chatgpt-v-2
      api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
      api_version: "2023-05-15"
      api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault
  - model_name: gpt-3.5-turbo-large
    litellm_params:
      model: "gpt-3.5-turbo-1106"
      api_key: os.environ/OPENAI_API_KEY
      rpm: 480
      timeout: 300
      stream_timeout: 60
  - model_name: gpt-4
    litellm_params:
      model: azure/chatgpt-v-2
      api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
      api_version: "2023-05-15"
      api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault
      rpm: 480
      timeout: 300
      stream_timeout: 60
  - model_name: sagemaker-completion-model
    litellm_params:
      model: sagemaker/berri-benchmarking-Llama-2-70b-chat-hf-4
      input_cost_per_second: 0.000420
  - model_name: text-embedding-ada-002
    litellm_params:
      model: azure/azure-embedding-model
      api_key: os.environ/AZURE_API_KEY
      api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
      api_version: "2023-05-15"
    model_info:
      mode: embedding
      base_model: text-embedding-ada-002
  - model_name: dall-e-2
    litellm_params:
      model: azure/
      api_version: 2023-06-01-preview
      api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
      api_key: os.environ/AZURE_API_KEY
  - model_name: openai-dall-e-3
    litellm_params:
      model: dall-e-3
  - model_name: fake-openai-endpoint
    litellm_params:
      model: openai/fake
      api_key: fake-key
      api_base: https://exampleopenaiendpoint-production.up.railway.app/

litellm_settings:
  drop_params: True
  # max_budget: 100
  # budget_duration: 30d
  num_retries: 5
  request_timeout: 600
  telemetry: False
  context_window_fallbacks: [{"gpt-3.5-turbo": ["gpt-3.5-turbo-large"]}]

general_settings:
  master_key: sk-1234 # [OPTIONAL] Use to enforce auth on proxy. See - https://docs.litellm.ai/docs/proxy/virtual_keys
  store_model_in_db: True
  proxy_budget_rescheduler_min_time: 60
  proxy_budget_rescheduler_max_time: 64
  proxy_batch_write_at: 1
  # database_url: "postgresql://<user>:<password>@<host>:<port>/<dbname>" # [OPTIONAL] use for token-based auth to proxy

# environment_variables:
  # settings for using redis caching
  # REDIS_HOST: redis-16337.c322.us-east-1-2.ec2.cloud.redislabs.com
  # REDIS_PORT: "16337"
  # REDIS_PASSWORD:
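A note on the `os.environ/` convention used in the config above: values with that prefix are placeholders that the proxy resolves from environment variables at load time, so secrets never sit in the file itself. Below is a minimal standalone sketch of that resolution step, for illustration only (it is not LiteLLM's internal loader; it assumes PyYAML is installed and a config.yaml in the working directory):

import os
import yaml

def load_config(path="config.yaml"):
    """Load a proxy config and resolve os.environ/<VAR> placeholders."""
    with open(path) as f:
        config = yaml.safe_load(f)
    for entry in config.get("model_list", []):
        params = entry.get("litellm_params", {})
        for key, value in params.items():
            if isinstance(value, str) and value.startswith("os.environ/"):
                env_var = value.split("/", 1)[1]
                # Keep the placeholder if the variable is not set in the environment
                params[key] = os.environ.get(env_var, value)
    return config

if __name__ == "__main__":
    config = load_config()
    print([m["model_name"] for m in config["model_list"]])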
65
cookbook/misc/migrate_proxy_config.py
Normal file
@@ -0,0 +1,65 @@
"""
LiteLLM Migration Script!

Takes a config.yaml and calls /model/new

Inputs:
    - File path to config.yaml
    - Proxy base url to your hosted proxy

Step 1: Reads your config.yaml
Step 2: reads `model_list` and loops through all models
Step 3: calls `<proxy-base-url>/model/new` for each model
"""

import yaml
import requests


def migrate_models(config_file, proxy_base_url):
    # Step 1: Read the config.yaml file
    with open(config_file, "r") as f:
        config = yaml.safe_load(f)

    # Step 2: Read the model_list and loop through all models
    model_list = config.get("model_list", [])
    print("model_list: ", model_list)
    for model in model_list:
        model_name = model.get("model_name")
        print("\nAdding model: ", model_name)
        litellm_params = model.get("litellm_params", {})

        # Prompt for any secrets referenced via the os.environ/ prefix
        for param, value in litellm_params.items():
            if isinstance(value, str) and value.startswith("os.environ/"):
                new_value = input(f"Enter value for {value}: ")
                litellm_params[param] = new_value

        print("\nlitellm_params: ", litellm_params)

        # Step 3: Call <proxy-base-url>/model/new for each model
        # master_key is the module-level variable defined below; it is used as the Bearer token
        url = f"{proxy_base_url}/model/new"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {master_key}",
        }
        data = {"model_name": model_name, "litellm_params": litellm_params}

        response = requests.post(url, headers=headers, json=data)
        if response.status_code != 200:
            print(f"Error: {response.status_code} - {response.text}")
            raise Exception(f"Error: {response.status_code} - {response.text}")

        # Print the response for each model
        print(
            f"Response for model '{model_name}': Status Code:{response.status_code} - {response.text}"
        )


# Usage
config_file = "config.yaml"
proxy_base_url = "http://0.0.0.0:4000"
master_key = "sk-1234"
print(f"config_file: {config_file}")
print(f"proxy_base_url: {proxy_base_url}")
migrate_models(config_file, proxy_base_url)
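After the migration script finishes, one way to sanity-check the result is to list the models now registered on the hosted proxy. A small sketch of that check, assuming the proxy exposes the OpenAI-compatible `/v1/models` route and that `proxy_base_url` and `master_key` match the values used above:

import requests

proxy_base_url = "http://0.0.0.0:4000"  # hosted proxy targeted by the migration
master_key = "sk-1234"                  # same key used as the Bearer token above

response = requests.get(
    f"{proxy_base_url}/v1/models",
    headers={"Authorization": f"Bearer {master_key}"},
)
response.raise_for_status()
# Print the ids of the models the proxy now serves
print([m["id"] for m in response.json()["data"]])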