mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 01:48:05 +00:00
fix: rename some more usages
change some more run.yaml into config.yaml references, alter some parameter names, etc. Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
parent
0cd98c957e
commit
f05d5138e8
10 changed files with 26 additions and 26 deletions
6
.github/workflows/test-external.yml
vendored
6
.github/workflows/test-external.yml
vendored
|
|
@@ -44,14 +44,14 @@ jobs:
|
|||
|
||||
- name: Print distro dependencies
|
||||
run: |
|
||||
uv run --no-sync llama stack list-deps tests/external/run-byoa.yaml
|
||||
uv run --no-sync llama stack list-deps tests/external/config.yaml
|
||||
|
||||
- name: Build distro from config file
|
||||
run: |
|
||||
uv venv ci-test
|
||||
source ci-test/bin/activate
|
||||
uv pip install -e .
|
||||
LLAMA_STACK_LOGGING=all=CRITICAL llama stack list-deps tests/external/run-byoa.yaml | xargs -L1 uv pip install
|
||||
LLAMA_STACK_LOGGING=all=CRITICAL llama stack list-deps tests/external/config.yaml | xargs -L1 uv pip install
|
||||
|
||||
- name: Start Llama Stack server in background
|
||||
if: ${{ matrix.image-type }} == 'venv'
|
||||
|
|
@@ -62,7 +62,7 @@ jobs:
|
|||
# Use the virtual environment created by the build step (name comes from build config)
|
||||
source ci-test/bin/activate
|
||||
uv pip list
|
||||
nohup llama stack run tests/external/run-byoa.yaml > server.log 2>&1 &
|
||||
nohup llama stack run tests/external/config.yaml > server.log 2>&1 &
|
||||
|
||||
- name: Wait for Llama Stack server to be ready
|
||||
run: |
|
||||
|
|
|
|||
|
|
@@ -337,7 +337,7 @@ uv pip install -e .
|
|||
7. Configure Llama Stack to use the provider:
|
||||
|
||||
```yaml
|
||||
# ~/.llama/run-byoa.yaml
|
||||
# ~/.llama/config.yaml
|
||||
version: "2"
|
||||
image_name: "llama-stack-api-weather"
|
||||
apis:
|
||||
|
|
@@ -356,7 +356,7 @@ server:
|
|||
8. Run the server:
|
||||
|
||||
```bash
|
||||
llama stack run ~/.llama/run-byoa.yaml
|
||||
llama stack run ~/.llama/config.yaml
|
||||
```
|
||||
|
||||
9. Test the API:
|
||||
|
|
|
|||
|
|
@@ -78,7 +78,7 @@ def run_stack_list_deps_command(args: argparse.Namespace) -> None:
|
|||
with open(config_file) as f:
|
||||
try:
|
||||
contents = yaml.safe_load(f)
|
||||
run_config = StackConfig(**contents)
|
||||
config = StackConfig(**contents)
|
||||
except Exception as e:
|
||||
cprint(
|
||||
f"Could not parse config file {config_file}: {e}",
|
||||
|
|
@@ -119,16 +119,16 @@ def run_stack_list_deps_command(args: argparse.Namespace) -> None:
|
|||
file=sys.stderr,
|
||||
)
|
||||
sys.exit(1)
|
||||
run_config = StackConfig(providers=provider_list, image_name="providers-run")
|
||||
config = StackConfig(providers=provider_list, image_name="providers-run")
|
||||
|
||||
normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(run_config)
|
||||
normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(config)
|
||||
normal_deps += SERVER_DEPENDENCIES
|
||||
|
||||
# Add external API dependencies
|
||||
if run_config.external_apis_dir:
|
||||
if config.external_apis_dir:
|
||||
from llama_stack.core.external import load_external_apis
|
||||
|
||||
external_apis = load_external_apis(run_config)
|
||||
external_apis = load_external_apis(config)
|
||||
if external_apis:
|
||||
for _, api_spec in external_apis.items():
|
||||
normal_deps.extend(api_spec.pip_packages)
|
||||
|
|
|
|||
|
|
@@ -36,7 +36,7 @@ class ConversationServiceConfig(BaseModel):
|
|||
:param policy: Access control rules
|
||||
"""
|
||||
|
||||
run_config: StackConfig
|
||||
config: StackConfig
|
||||
policy: list[AccessRule] = []
|
||||
|
||||
|
||||
|
|
@@ -56,7 +56,7 @@ class ConversationServiceImpl(Conversations):
|
|||
self.policy = config.policy
|
||||
|
||||
# Use conversations store reference from run config
|
||||
conversations_ref = config.run_config.storage.stores.conversations
|
||||
conversations_ref = config.config.storage.stores.conversations
|
||||
if not conversations_ref:
|
||||
raise ValueError("storage.stores.conversations must be configured in run config")
|
||||
|
||||
|
|
|
|||
|
|
@@ -22,7 +22,7 @@ from llama_stack_api import (
|
|||
|
||||
|
||||
class DistributionInspectConfig(BaseModel):
|
||||
run_config: StackConfig
|
||||
config: StackConfig
|
||||
|
||||
|
||||
async def get_provider_impl(config, deps):
|
||||
|
|
@@ -40,7 +40,7 @@ class DistributionInspectImpl(Inspect):
|
|||
pass
|
||||
|
||||
async def list_routes(self, api_filter: str | None = None) -> ListRoutesResponse:
|
||||
run_config: StackConfig = self.config.run_config
|
||||
config: StackConfig = self.config.config
|
||||
|
||||
# Helper function to determine if a route should be included based on api_filter
|
||||
def should_include_route(webmethod) -> bool:
|
||||
|
|
@@ -55,7 +55,7 @@ class DistributionInspectImpl(Inspect):
|
|||
return not webmethod.deprecated and webmethod.level == api_filter
|
||||
|
||||
ret = []
|
||||
external_apis = load_external_apis(run_config)
|
||||
external_apis = load_external_apis(config)
|
||||
all_endpoints = get_all_api_routes(external_apis)
|
||||
for api, endpoints in all_endpoints.items():
|
||||
# Always include provider and inspect APIs, filter others based on run config
|
||||
|
|
@@ -72,7 +72,7 @@ class DistributionInspectImpl(Inspect):
|
|||
]
|
||||
)
|
||||
else:
|
||||
providers = run_config.providers.get(api.value, [])
|
||||
providers = config.providers.get(api.value, [])
|
||||
if providers: # Only process if there are providers for this API
|
||||
ret.extend(
|
||||
[
|
||||
|
|
|
|||
|
|
@@ -20,7 +20,7 @@ class PromptServiceConfig(BaseModel):
|
|||
:param run_config: Stack run configuration containing distribution info
|
||||
"""
|
||||
|
||||
run_config: StackConfig
|
||||
config: StackConfig
|
||||
|
||||
|
||||
async def get_provider_impl(config: PromptServiceConfig, deps: dict[Any, Any]):
|
||||
|
|
@@ -40,7 +40,7 @@ class PromptServiceImpl(Prompts):
|
|||
|
||||
async def initialize(self) -> None:
|
||||
# Use prompts store reference from run config
|
||||
prompts_ref = self.config.run_config.storage.stores.prompts
|
||||
prompts_ref = self.config.config.storage.stores.prompts
|
||||
if not prompts_ref:
|
||||
raise ValueError("storage.stores.prompts must be configured in run config")
|
||||
self.kvstore = await kvstore_impl(prompts_ref)
|
||||
|
|
|
|||
|
|
@@ -19,7 +19,7 @@ logger = get_logger(name=__name__, category="core")
|
|||
|
||||
|
||||
class ProviderImplConfig(BaseModel):
|
||||
run_config: StackConfig
|
||||
config: StackConfig
|
||||
|
||||
|
||||
async def get_provider_impl(config, deps):
|
||||
|
|
@@ -41,7 +41,7 @@ class ProviderImpl(Providers):
|
|||
pass
|
||||
|
||||
async def list_providers(self) -> ListProvidersResponse:
|
||||
run_config = self.config.run_config
|
||||
run_config = self.config
|
||||
safe_config = StackConfig(**redact_sensitive_fields(run_config.model_dump()))
|
||||
providers_health = await self.get_providers_health()
|
||||
ret = []
|
||||
|
|
|
|||
|
|
@@ -341,7 +341,7 @@ def cast_image_name_to_string(config_dict: dict[str, Any]) -> dict[str, Any]:
|
|||
return config_dict
|
||||
|
||||
|
||||
def add_internal_implementations(impls: dict[Api, Any], run_config: StackConfig) -> None:
|
||||
def add_internal_implementations(impls: dict[Api, Any], config: StackConfig) -> None:
|
||||
"""Add internal implementations (inspect and providers) to the implementations dictionary.
|
||||
|
||||
Args:
|
||||
|
|
@@ -349,25 +349,25 @@ def add_internal_implementations(impls: dict[Api, Any], run_config: StackConfig)
|
|||
run_config: Stack run configuration
|
||||
"""
|
||||
inspect_impl = DistributionInspectImpl(
|
||||
DistributionInspectConfig(run_config=run_config),
|
||||
DistributionInspectConfig(config=config),
|
||||
deps=impls,
|
||||
)
|
||||
impls[Api.inspect] = inspect_impl
|
||||
|
||||
providers_impl = ProviderImpl(
|
||||
ProviderImplConfig(run_config=run_config),
|
||||
ProviderImplConfig(config=config),
|
||||
deps=impls,
|
||||
)
|
||||
impls[Api.providers] = providers_impl
|
||||
|
||||
prompts_impl = PromptServiceImpl(
|
||||
PromptServiceConfig(run_config=run_config),
|
||||
PromptServiceConfig(config=config),
|
||||
deps=impls,
|
||||
)
|
||||
impls[Api.prompts] = prompts_impl
|
||||
|
||||
conversations_impl = ConversationServiceImpl(
|
||||
ConversationServiceConfig(run_config=run_config),
|
||||
ConversationServiceConfig(config=config),
|
||||
deps=impls,
|
||||
)
|
||||
impls[Api.conversations] = conversations_impl
|
||||
|
|
|
|||
|
|
@@ -47,7 +47,7 @@ async def temp_prompt_store(tmp_path_factory):
|
|||
providers={},
|
||||
storage=storage,
|
||||
)
|
||||
config = PromptServiceConfig(run_config=mock_run_config)
|
||||
config = PromptServiceConfig(config=mock_run_config)
|
||||
store = PromptServiceImpl(config, deps={})
|
||||
|
||||
register_kvstore_backends({"kv_test": storage.backends["kv_test"]})
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue