(feat) - allow using os.environ/ vars for any value on config.yaml (#6276)

* add check for os.environ vars when reading config.yaml

* use base class for reading from config.yaml

* fix import

* fix linting

* add unit tests for base config class

* fix order of reading elements from config.yaml

* unit tests for reading configs from files

* fix user_config_file_path

* use simpler implementation

* use helper to get_config

* working unit tests for reading configs
Ishaan Jaff 2024-10-19 09:00:27 +05:30 committed by GitHub
parent a0d45ba516
commit 19eff1a4b4
4 changed files with 289 additions and 19 deletions
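
The core idea of the feature: any string value in config.yaml of the form `os.environ/<VAR_NAME>` is replaced with the value of that environment variable when the config is loaded, so secrets never have to be written into the file. Below is a minimal sketch of that resolution step, not the proxy's actual implementation; the helper name `resolve_env_refs` is hypothetical and used only for illustration.

import os
from typing import Any


def resolve_env_refs(value: Any) -> Any:
    """Recursively replace "os.environ/<VAR>" strings with the value of that
    environment variable. Hypothetical helper, for illustration only."""
    if isinstance(value, dict):
        return {k: resolve_env_refs(v) for k, v in value.items()}
    if isinstance(value, list):
        return [resolve_env_refs(v) for v in value]
    if isinstance(value, str) and value.startswith("os.environ/"):
        env_var = value[len("os.environ/"):]
        return os.environ.get(env_var)  # None if the variable is not set
    return value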


@@ -0,0 +1,48 @@
model_list:
  ################################################################################
  # Azure
  - model_name: gpt-4o-mini
    litellm_params:
      model: azure/gpt-4o-mini
      api_base: https://amazin-prod.openai.azure.com
      api_key: "os.environ/AZURE_GPT_4O"
      deployment_id: gpt-4o-mini
  - model_name: gpt-4o
    litellm_params:
      model: azure/gpt-4o
      api_base: https://very-cool-prod.openai.azure.com
      api_key: "os.environ/AZURE_GPT_4O"
      deployment_id: gpt-4o
  ################################################################################
  # Fireworks
  - model_name: fireworks-llama-v3p1-405b-instruct
    litellm_params:
      model: fireworks_ai/accounts/fireworks/models/llama-v3p1-405b-instruct
      api_key: "os.environ/FIREWORKS"
  - model_name: fireworks-llama-v3p1-70b-instruct
    litellm_params:
      model: fireworks_ai/accounts/fireworks/models/llama-v3p1-70b-instruct
      api_key: "os.environ/FIREWORKS"

general_settings:
  alerting_threshold: 300  # sends alerts if requests hang for 5min+ and responses take 5min+

litellm_settings:  # module level litellm settings - https://github.com/BerriAI/litellm/blob/main/litellm/__init__.py
  success_callback: ["prometheus"]
  service_callback: ["prometheus_system"]
  drop_params: False  # Raise an exception if the openai param being passed in isn't supported.
  cache: false
  default_internal_user_params:
    user_role: os.environ/DEFAULT_USER_ROLE
  success_callback: ["s3"]
  s3_callback_params:
    s3_bucket_name: logs-bucket-litellm  # AWS bucket name for S3
    s3_region_name: us-west-2  # AWS region name for S3
    s3_aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID  # use os.environ/<variable name> to pass environment variables. This is the AWS Access Key ID for S3
    s3_aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY  # AWS Secret Access Key for S3
    s3_path: my-test-path  # [OPTIONAL] set the path in the bucket you want to write logs to
    s3_endpoint_url: https://s3.amazonaws.com  # [OPTIONAL] S3 endpoint URL, if you want to use Backblaze/Cloudflare S3 buckets

router_settings:
  routing_strategy: simple-shuffle  # "simple-shuffle" shown to result in highest throughput. https://docs.litellm.ai/docs/proxy/configs#load-balancing
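
To see the substitution end to end, here is a short sketch that loads a config like the one above with PyYAML and applies the hypothetical `resolve_env_refs` helper from earlier. The filename `config_with_env_vars.yaml` is an assumption based on the test below; the real proxy resolves these values inside `ProxyConfig.get_config` rather than with this helper.

import os
import yaml  # PyYAML

# value that "os.environ/AZURE_GPT_4O" should resolve to
os.environ["AZURE_GPT_4O"] = "my-azure-key"

# path is an assumption for this sketch
with open("config_with_env_vars.yaml") as f:
    raw_config = yaml.safe_load(f)

config = resolve_env_refs(raw_config)  # hypothetical helper from the sketch above
print(config["model_list"][0]["litellm_params"]["api_key"])  # -> "my-azure-key"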


@@ -0,0 +1,117 @@
import os
import sys
import traceback
from unittest import mock

import pytest
from dotenv import load_dotenv

import litellm.proxy
import litellm.proxy.proxy_server

load_dotenv()
import io
import os

# this file is to test litellm/proxy
sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import asyncio
import logging

from litellm.proxy.proxy_server import ProxyConfig


@pytest.mark.asyncio
async def test_basic_reading_configs_from_files():
    """
    Test that the config is read correctly from the files in the example_config_yaml folder
    """
    proxy_config_instance = ProxyConfig()
    current_path = os.path.dirname(os.path.abspath(__file__))
    example_config_yaml_path = os.path.join(current_path, "example_config_yaml")

    # get all the files from example_config_yaml
    files = os.listdir(example_config_yaml_path)
    print(files)

    for file in files:
        config_path = os.path.join(example_config_yaml_path, file)
        config = await proxy_config_instance.get_config(config_file_path=config_path)
        print(config)


@pytest.mark.asyncio
async def test_read_config_from_bad_file_path():
    """
    Raise an exception if the file path is not valid
    """
    proxy_config_instance = ProxyConfig()
    config_path = "non-existent-file.yaml"
    with pytest.raises(Exception):
        config = await proxy_config_instance.get_config(config_file_path=config_path)


@pytest.mark.asyncio
async def test_read_config_file_with_os_environ_vars():
    """
    Ensures os.environ variables are read correctly from config.yaml
    Following vars are set as os.environ variables in the config.yaml file
    - DEFAULT_USER_ROLE
    - AWS_ACCESS_KEY_ID
    - AWS_SECRET_ACCESS_KEY
    - AZURE_GPT_4O
    - FIREWORKS
    """
    _env_vars_for_testing = {
        "DEFAULT_USER_ROLE": "admin",
        "AWS_ACCESS_KEY_ID": "1234567890",
        "AWS_SECRET_ACCESS_KEY": "1234567890",
        "AZURE_GPT_4O": "1234567890",
        "FIREWORKS": "1234567890",
    }

    _old_env_vars = {}
    for key, value in _env_vars_for_testing.items():
        if key in os.environ:
            _old_env_vars[key] = os.environ.get(key)
        os.environ[key] = value

    # Read config
    proxy_config_instance = ProxyConfig()
    current_path = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(
        current_path, "example_config_yaml", "config_with_env_vars.yaml"
    )
    config = await proxy_config_instance.get_config(config_file_path=config_path)
    print(config)

    # Add assertions
    assert (
        config["litellm_settings"]["default_internal_user_params"]["user_role"]
        == "admin"
    )
    assert (
        config["litellm_settings"]["s3_callback_params"]["s3_aws_access_key_id"]
        == "1234567890"
    )
    assert (
        config["litellm_settings"]["s3_callback_params"]["s3_aws_secret_access_key"]
        == "1234567890"
    )

    for model in config["model_list"]:
        if "azure" in model["litellm_params"]["model"]:
            assert model["litellm_params"]["api_key"] == "1234567890"
        elif "fireworks" in model["litellm_params"]["model"]:
            assert model["litellm_params"]["api_key"] == "1234567890"

    # cleanup
    for key, value in _env_vars_for_testing.items():
        if key in _old_env_vars:
            os.environ[key] = _old_env_vars[key]
        else:
            del os.environ[key]
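
As a side note, the manual save-and-restore of environment variables in the last test could also be expressed with pytest's built-in `monkeypatch` fixture, which undoes `setenv` calls automatically after the test finishes. A sketch under that assumption (the test name is made up, and the same module-level imports as above are assumed):

@pytest.mark.asyncio
async def test_read_config_file_with_os_environ_vars_monkeypatch(monkeypatch):
    # monkeypatch.setenv restores the original values when the test ends
    for key in [
        "DEFAULT_USER_ROLE",
        "AWS_ACCESS_KEY_ID",
        "AWS_SECRET_ACCESS_KEY",
        "AZURE_GPT_4O",
        "FIREWORKS",
    ]:
        monkeypatch.setenv(key, "1234567890")

    proxy_config_instance = ProxyConfig()
    current_path = os.path.dirname(os.path.abspath(__file__))
    config_path = os.path.join(
        current_path, "example_config_yaml", "config_with_env_vars.yaml"
    )
    config = await proxy_config_instance.get_config(config_file_path=config_path)
    assert (
        config["litellm_settings"]["s3_callback_params"]["s3_aws_access_key_id"]
        == "1234567890"
    )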