Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 10:46:41 +00:00)
# What does this PR do?

This commit significantly improves the environment variable substitution functionality in Llama Stack configuration files:

* The `version` field in configuration files has been changed from string to integer type for better type consistency across build and run configurations.
* The environment variable substitution system for `${env.FOO:}` was fixed and now properly returns an error.
* The environment variable substitution system for `${env.FOO+}` returns `None` instead of an empty string, which better matches the type annotations in config fields.
* The system includes automatic type conversion for boolean, integer, and float values.
* Error messages have been enhanced to provide clearer guidance when environment variables are missing, including suggestions for using default values or conditional syntax.
* Comprehensive documentation has been added to the configuration guide explaining all supported syntax patterns, best practices, and runtime override capabilities.
* Multiple provider configurations have been updated to use the new conditional syntax for optional API keys, making the system more flexible for different deployment scenarios. The telemetry configuration has been improved to properly handle optional endpoints with appropriate validation, ensuring that required endpoints are specified when their corresponding sinks are enabled.
* There were many instances of `${env.NVIDIA_API_KEY:}` that should have caused the code to fail. However, due to a bug, the distro server was still being started and early validation wasn't triggered. As a result, failures were likely being handled downstream by the providers. I've maintained similar behavior by using `${env.NVIDIA_API_KEY:+}`, though I believe this is incorrect for many configurations. I'll leave it to each provider to correct it as needed.
* Environment variable substitution now uses the same syntax as Bash parameter expansion (see the sketch after this list).

Signed-off-by: Sébastien Han <seb@redhat.com>
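To make the supported patterns concrete, here is a minimal sketch of how a Bash-style resolver for these expressions might behave. The helper names (`substitute_env_var`, `_convert`), the regex, and the exact conversion rules are illustrative assumptions, not the actual Llama Stack implementation:

```python
import os
import re

# Bash-style forms handled by this sketch:
#   ${env.FOO}          -> value of FOO, error if unset
#   ${env.FOO:=default} -> value of FOO, or "default" if unset/empty
#   ${env.FOO:+value}   -> "value" if FOO is set and non-empty, else None
_ENV_PATTERN = re.compile(
    r"\$\{env\.(?P<name>[A-Za-z_][A-Za-z0-9_]*)"
    r"(?:(?P<op>:=|:\+)(?P<value>[^}]*))?\}"
)


def _convert(value: str):
    """Best-effort conversion of a string to bool, int, or float."""
    if value.lower() in ("true", "false"):
        return value.lower() == "true"
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            continue
    return value


def substitute_env_var(expr: str):
    """Resolve a single ${env.VAR...} expression with Bash-like semantics."""
    match = _ENV_PATTERN.fullmatch(expr)
    if match is None:
        if expr.startswith("${env."):
            raise ValueError(f"Malformed environment variable expression: {expr}")
        return expr

    name, op, word = match.group("name"), match.group("op"), match.group("value")
    env_value = os.getenv(name)

    if op == ":=":
        # Default form: fall back to the literal default when unset or empty.
        return _convert(env_value if env_value else word)
    if op == ":+":
        # Conditional form: use the alternative only when the variable is set
        # and non-empty; otherwise return None so optional fields stay unset.
        return _convert(word) if env_value else None
    if env_value is None:
        raise ValueError(
            f"Environment variable '{name}' is not set. "
            f"Use ${{env.{name}:=default}} or ${{env.{name}:+value}} to make it optional."
        )
    return _convert(env_value)
```

For example, with `NVIDIA_BASE_URL` unset, `substitute_env_var("${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}")` would return the default URL, while `substitute_env_var("${env.NVIDIA_API_KEY:+}")` would return `None` rather than an empty string.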
61 lines · 2.3 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import os
from typing import Any

from pydantic import BaseModel, Field, SecretStr

from llama_stack.schema_utils import json_schema_type


@json_schema_type
class NVIDIAConfig(BaseModel):
    """
    Configuration for the NVIDIA NIM inference endpoint.

    Attributes:
        url (str): A base url for accessing the NVIDIA NIM, e.g. http://localhost:8000
        api_key (str): The access key for the hosted NIM endpoints

    There are two ways to access NVIDIA NIMs -
     0. Hosted: Preview APIs hosted at https://integrate.api.nvidia.com
     1. Self-hosted: You can run NVIDIA NIMs on your own infrastructure

    By default the configuration is set to use the hosted APIs. This requires
    an API key which can be obtained from https://ngc.nvidia.com/.

    By default the configuration will attempt to read the NVIDIA_API_KEY environment
    variable to set the api_key. Please do not put your API key in code.

    If you are using a self-hosted NVIDIA NIM, you can set the url to the
    URL of your running NVIDIA NIM and do not need to set the api_key.
    """

    url: str = Field(
        default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"),
        description="A base url for accessing the NVIDIA NIM",
    )
    api_key: SecretStr | None = Field(
        default_factory=lambda: os.getenv("NVIDIA_API_KEY"),
        description="The NVIDIA API key, only needed if using the hosted service",
    )
    timeout: int = Field(
        default=60,
        description="Timeout for the HTTP requests",
    )
    # Treat any value other than "false" (case-insensitive) as true.
    append_api_version: bool = Field(
        default_factory=lambda: os.getenv("NVIDIA_APPEND_API_VERSION", "True").lower() != "false",
        description="When set to false, the API version will not be appended to the base_url. By default, it is true.",
    )

    @classmethod
    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
        return {
            "url": "${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}",
            "api_key": "${env.NVIDIA_API_KEY:+}",
            "append_api_version": "${env.NVIDIA_APPEND_API_VERSION:=True}",
        }
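As a quick usage sketch of how this config resolves its defaults for a self-hosted NIM: the environment values below are illustrative, and the module path in the import is an assumption that may differ from the repository layout.

```python
import os

from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig

# Point the config at a self-hosted NIM; no API key is needed in that case.
os.environ["NVIDIA_BASE_URL"] = "http://localhost:8000"
os.environ.pop("NVIDIA_API_KEY", None)

config = NVIDIAConfig()
print(config.url)                 # http://localhost:8000
print(config.api_key)             # None
print(config.append_api_version)  # True

# sample_run_config() emits the ${env.*} placeholders that the stack
# substitutes at run time.
print(NVIDIAConfig.sample_run_config())
```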