forked from phoenix/litellm-mirror
Revert "Revert "(feat) Allow using include to include external YAML files in a config.yaml (#6922)""
This reverts commit 5d13302e6b
.
This commit is contained in:
parent
a8b8deb793
commit
eba700a491
13 changed files with 225 additions and 23 deletions
docs/my-website/docs/proxy/config_management.md (new file, 59 lines)

@@ -0,0 +1,59 @@
# File Management

## `include` external YAML files in a config.yaml

You can use `include` to include external YAML files in a config.yaml.

**Quick Start Usage:**

To include a config file, use `include` with either a single file or a list of files.

Contents of `parent_config.yaml`:

```yaml
include:
  - model_config.yaml # 👈 Key change, will include the contents of model_config.yaml

litellm_settings:
  callbacks: ["prometheus"]
```

Contents of `model_config.yaml`:

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_base: https://exampleopenaiendpoint-production.up.railway.app/
  - model_name: fake-anthropic-endpoint
    litellm_params:
      model: anthropic/fake
      api_base: https://exampleanthropicendpoint-production.up.railway.app/
```

Start proxy server

This will start the proxy server with config `parent_config.yaml`. Since the `include` directive is used, the server will also include the contents of `model_config.yaml`.

```
litellm --config parent_config.yaml --detailed_debug
```
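
Once the `include` is resolved, the config the proxy actually runs with should be equivalent to the two files merged into one (a sketch of the effective result; list-valued keys from included files are appended, all other keys are copied over):

```yaml
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_base: https://exampleopenaiendpoint-production.up.railway.app/
  - model_name: fake-anthropic-endpoint
    litellm_params:
      model: anthropic/fake
      api_base: https://exampleanthropicendpoint-production.up.railway.app/

litellm_settings:
  callbacks: ["prometheus"]
```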

## Examples using `include`

Include a single file:

```yaml
include:
  - model_config.yaml
```

Include multiple files:

```yaml
include:
  - model_config.yaml
  - another_config.yaml
```
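
When several included files define the same list key (e.g. `model_list`), the entries are appended in include order rather than replaced; scalar or mapping keys are overwritten by the last file that sets them. A sketch, assuming hypothetical contents for the two files above:

```yaml
# model_config.yaml (hypothetical contents)
model_list:
  - model_name: model-a
    litellm_params:
      model: gpt-4

# another_config.yaml (hypothetical contents)
model_list:
  - model_name: model-b
    litellm_params:
      model: gpt-3.5-turbo

# merged result: model_list contains model-a, then model-b
```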
@@ -2,7 +2,7 @@ import Image from '@theme/IdealImage';
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# Proxy Config.yaml
+# Overview
 Set model list, `api_base`, `api_key`, `temperature` & proxy server settings (`master-key`) on the config.yaml.
 
 | Param Name | Description |
@@ -32,7 +32,7 @@ const sidebars = {
      {
        "type": "category",
        "label": "Config.yaml",
-       "items": ["proxy/configs", "proxy/config_settings"]
+       "items": ["proxy/configs", "proxy/config_management", "proxy/config_settings"]
      },
      {
        type: "category",
litellm/proxy/model_config.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
model_list:
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_base: https://exampleopenaiendpoint-production.up.railway.app/
  - model_name: fake-anthropic-endpoint
    litellm_params:
      model: anthropic/fake
      api_base: https://exampleanthropicendpoint-production.up.railway.app/
@@ -1,24 +1,5 @@
-model_list:
-  - model_name: gpt-4o
-    litellm_params:
-      model: openai/gpt-4o
-      api_base: https://exampleopenaiendpoint-production.up.railway.app/
-  - model_name: fake-anthropic-endpoint
-    litellm_params:
-      model: anthropic/fake
-      api_base: https://exampleanthropicendpoint-production.up.railway.app/
-
-router_settings:
-  provider_budget_config:
-    openai:
-      budget_limit: 0.3 # float of $ value budget for time period
-      time_period: 1d # can be 1d, 2d, 30d
-    anthropic:
-      budget_limit: 5
-      time_period: 1d
-  redis_host: os.environ/REDIS_HOST
-  redis_port: os.environ/REDIS_PORT
-  redis_password: os.environ/REDIS_PASSWORD
+include:
+  - model_config.yaml
 
 litellm_settings:
   callbacks: ["datadog"]
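
Because `_get_config_from_file` (below) logs the merged result via `verbose_proxy_logger.debug(f"loaded config=...")`, you can verify that the include resolved correctly by starting the proxy with `--detailed_debug` and looking for the `loaded config=` line (the invocation mirrors the quick start above; the config path is a placeholder):

```
litellm --config path/to/parent_config.yaml --detailed_debug
```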
@@ -1380,6 +1380,16 @@ class ProxyConfig:
         _, file_extension = os.path.splitext(config_file_path)
         return file_extension.lower() == ".yaml" or file_extension.lower() == ".yml"
 
+    def _load_yaml_file(self, file_path: str) -> dict:
+        """
+        Load and parse a YAML file
+        """
+        try:
+            with open(file_path, "r") as file:
+                return yaml.safe_load(file) or {}
+        except Exception as e:
+            raise Exception(f"Error loading yaml file {file_path}: {str(e)}")
+
     async def _get_config_from_file(
         self, config_file_path: Optional[str] = None
     ) -> dict:

@@ -1410,6 +1420,51 @@ class ProxyConfig:
                 "litellm_settings": {},
             }
 
+        # Process includes
+        config = self._process_includes(
+            config=config, base_dir=os.path.dirname(os.path.abspath(file_path or ""))
+        )
+
+        verbose_proxy_logger.debug(f"loaded config={json.dumps(config, indent=4)}")
+        return config
+
+    def _process_includes(self, config: dict, base_dir: str) -> dict:
+        """
+        Process includes by appending their contents to the main config
+
+        Handles nested config.yamls with `include` section
+
+        Example config: This will get the contents from files in `include` and append it
+        ```yaml
+        include:
+            - model_config.yaml
+
+        litellm_settings:
+            callbacks: ["prometheus"]
+        ```
+        """
+        if "include" not in config:
+            return config
+
+        if not isinstance(config["include"], list):
+            raise ValueError("'include' must be a list of file paths")
+
+        # Load and append all included files
+        for include_file in config["include"]:
+            file_path = os.path.join(base_dir, include_file)
+            if not os.path.exists(file_path):
+                raise FileNotFoundError(f"Included file not found: {file_path}")
+
+            included_config = self._load_yaml_file(file_path)
+            # Simply update/extend the main config with included config
+            for key, value in included_config.items():
+                if isinstance(value, list) and key in config:
+                    config[key].extend(value)
+                else:
+                    config[key] = value
+
+        # Remove the include directive
+        del config["include"]
         return config
 
     async def save_config(self, new_config: dict):
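
As a standalone illustration of the merge rules `_process_includes` implements (list-valued keys that already exist in the parent config are extended; everything else is overwritten), here is a minimal self-contained sketch, not the proxy's actual code path; the `process_includes` helper and the demo file are hypothetical:

```python
import os
import tempfile

import yaml  # PyYAML, the same parser the proxy code above uses


def process_includes(config: dict, base_dir: str) -> dict:
    """Mirror the merge rules above: list keys extend, all other keys override."""
    for include_file in config.pop("include", []):
        path = os.path.join(base_dir, include_file)
        with open(path) as f:
            included = yaml.safe_load(f) or {}
        for key, value in included.items():
            if isinstance(value, list) and key in config:
                config[key].extend(value)  # append to an existing list key
            else:
                config[key] = value  # add or override scalar/mapping keys
    return config


# Demo: write a small included file into a temp dir, then merge it.
with tempfile.TemporaryDirectory() as d:
    with open(os.path.join(d, "model_config.yaml"), "w") as f:
        yaml.safe_dump({"model_list": [{"model_name": "gpt-4o"}]}, f)

    parent = {
        "include": ["model_config.yaml"],
        "litellm_settings": {"callbacks": ["prometheus"]},
    }
    merged = process_includes(parent, base_dir=d)
    print(merged["model_list"])        # [{'model_name': 'gpt-4o'}]
    print(merged["litellm_settings"])  # {'callbacks': ['prometheus']}
```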
example_config_yaml/config_with_include.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
include:
  - included_models.yaml

litellm_settings:
  callbacks: ["prometheus"]
example_config_yaml/config_with_missing_include.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
include:
  - non-existent-file.yaml

litellm_settings:
  callbacks: ["prometheus"]
example_config_yaml/config_with_multiple_includes.yaml (new file, 6 lines)

@@ -0,0 +1,6 @@
include:
  - models_file_1.yaml
  - models_file_2.yaml

litellm_settings:
  callbacks: ["prometheus"]
example_config_yaml/included_models.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
model_list:
  - model_name: included-model
    litellm_params:
      model: gpt-4
example_config_yaml/models_file_1.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
model_list:
  - model_name: included-model-1
    litellm_params:
      model: gpt-4
example_config_yaml/models_file_2.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
model_list:
  - model_name: included-model-2
    litellm_params:
      model: gpt-3.5-turbo
@@ -23,6 +23,8 @@ import logging
 
 from litellm.proxy.proxy_server import ProxyConfig
 
+INVALID_FILES = ["config_with_missing_include.yaml"]
+
 
 @pytest.mark.asyncio
 async def test_basic_reading_configs_from_files():

@@ -38,6 +40,9 @@ async def test_basic_reading_configs_from_files():
     print(files)
 
     for file in files:
+        if file in INVALID_FILES:  # these are intentionally invalid files
+            continue
+        print("reading file=", file)
         config_path = os.path.join(example_config_yaml_path, file)
         config = await proxy_config_instance.get_config(config_file_path=config_path)
         print(config)

@@ -115,3 +120,67 @@ async def test_read_config_file_with_os_environ_vars():
             os.environ[key] = _old_env_vars[key]
         else:
             del os.environ[key]
+
+
+@pytest.mark.asyncio
+async def test_basic_include_directive():
+    """
+    Test that the include directive correctly loads and merges configs
+    """
+    proxy_config_instance = ProxyConfig()
+    current_path = os.path.dirname(os.path.abspath(__file__))
+    config_path = os.path.join(
+        current_path, "example_config_yaml", "config_with_include.yaml"
+    )
+
+    config = await proxy_config_instance.get_config(config_file_path=config_path)
+
+    # Verify the included model list was merged
+    assert len(config["model_list"]) > 0
+    assert any(
+        model["model_name"] == "included-model" for model in config["model_list"]
+    )
+
+    # Verify original config settings remain
+    assert config["litellm_settings"]["callbacks"] == ["prometheus"]
+
+
+@pytest.mark.asyncio
+async def test_missing_include_file():
+    """
+    Test that a missing included file raises FileNotFoundError
+    """
+    proxy_config_instance = ProxyConfig()
+    current_path = os.path.dirname(os.path.abspath(__file__))
+    config_path = os.path.join(
+        current_path, "example_config_yaml", "config_with_missing_include.yaml"
+    )
+
+    with pytest.raises(FileNotFoundError):
+        await proxy_config_instance.get_config(config_file_path=config_path)
+
+
+@pytest.mark.asyncio
+async def test_multiple_includes():
+    """
+    Test that multiple files in the include list are all processed correctly
+    """
+    proxy_config_instance = ProxyConfig()
+    current_path = os.path.dirname(os.path.abspath(__file__))
+    config_path = os.path.join(
+        current_path, "example_config_yaml", "config_with_multiple_includes.yaml"
+    )
+
+    config = await proxy_config_instance.get_config(config_file_path=config_path)
+
+    # Verify models from both included files are present
+    assert len(config["model_list"]) == 2
+    assert any(
+        model["model_name"] == "included-model-1" for model in config["model_list"]
+    )
+    assert any(
+        model["model_name"] == "included-model-2" for model in config["model_list"]
+    )
+
+    # Verify original config settings remain
+    assert config["litellm_settings"]["callbacks"] == ["prometheus"]