chore(yaml)!: move registered resources to a sub-key (#3861)

**NOTE: this is a backwards incompatible change to the run-configs.**

A small QOL update on its own, but it will prove useful when I rename
"vector_dbs" to "vector_stores" next.

Moves all the `models`, `shields`, ... keys in the run-config under a
`registered_resources` sub-key.
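
For hand-maintained run-configs, the migration is purely mechanical: the seven resource keys move one level down, unchanged. A minimal sketch in Python, assuming the config has already been parsed into a dict (e.g. with `yaml.safe_load`); the helper name is illustrative:

```python
import yaml

# The seven resource keys that move under `registered_resources`.
RESOURCE_KEYS = (
    "models", "shields", "vector_dbs", "datasets",
    "scoring_fns", "benchmarks", "tool_groups",
)

def migrate_run_config(old: dict) -> dict:
    """Lift top-level resource keys into a `registered_resources` sub-key."""
    new = {k: v for k, v in old.items() if k not in RESOURCE_KEYS}
    new["registered_resources"] = {k: old[k] for k in RESOURCE_KEYS if k in old}
    return new

with open("run.yaml") as f:
    print(yaml.safe_dump(migrate_run_config(yaml.safe_load(f))))
```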
Ashwin Bharambe, 2025-10-20 14:52:48 -07:00 (committed via GitHub)
commit 94faec7bc5, parent 483d53cc37
15 changed files with 342 additions and 320 deletions


@@ -422,6 +422,18 @@ def process_cors_config(cors_config: bool | CORSConfig | None) -> CORSConfig | None:
         raise ValueError(f"Expected bool or CORSConfig, got {type(cors_config).__name__}")
 
 
+class RegisteredResources(BaseModel):
+    """Registry of resources available in the distribution."""
+
+    models: list[ModelInput] = Field(default_factory=list)
+    shields: list[ShieldInput] = Field(default_factory=list)
+    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
+    datasets: list[DatasetInput] = Field(default_factory=list)
+    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
+    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
+    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+
+
 class ServerConfig(BaseModel):
     port: int = Field(
         default=8321,
@@ -491,14 +503,10 @@ can be instantiated multiple times (with different configs) if necessary.
         description="Catalog of named storage backends and references available to the stack",
     )
 
-    # registry of "resources" in the distribution
-    models: list[ModelInput] = Field(default_factory=list)
-    shields: list[ShieldInput] = Field(default_factory=list)
-    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
-    datasets: list[DatasetInput] = Field(default_factory=list)
-    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
-    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
-    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+    registered_resources: RegisteredResources = Field(
+        default_factory=RegisteredResources,
+        description="Registry of resources available in the distribution",
+    )
 
     logging: LoggingConfig | None = Field(default=None, description="Configuration for Llama Stack Logging")
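
Call sites now take one extra hop through the sub-model; the field names and `default_factory` defaults are unchanged. A rough usage sketch, assuming the import path from the current source layout and that `ModelInput` exposes `model_id` (as the YAML below suggests):

```python
from llama_stack.core.datatypes import StackRunConfig  # path may differ by version

def list_model_ids(config: StackRunConfig) -> list[str]:
    # Pre-change this read config.models; the lists now live one hop
    # down on the sub-model, with the same field names and defaults,
    # so omitted keys still parse as empty lists.
    return [m.model_id for m in config.registered_resources.models]
```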


@@ -110,7 +110,7 @@ TEST_RECORDING_CONTEXT = None
 async def register_resources(run_config: StackRunConfig, impls: dict[Api, Any]):
     for rsrc, api, register_method, list_method in RESOURCES:
-        objects = getattr(run_config, rsrc)
+        objects = getattr(run_config.registered_resources, rsrc)
         if api not in impls:
             continue
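
The registration loop works by name, which is why this is a one-line change: the field names on `RegisteredResources` deliberately match the old top-level attributes, so the string keys in `RESOURCES` stay valid. An abridged sketch of the table's shape (the real entries name `Api` enum members and live alongside `register_resources`; plain strings here keep the sketch self-contained):

```python
# Abridged sketch of the RESOURCES table driving register_resources().
RESOURCES = [
    # (field on RegisteredResources, owning API, register method, list method)
    ("models", "models", "register_model", "list_models"),
    ("shields", "shields", "register_shield", "list_shields"),
    # ... one entry per resource type
]

# Only the getattr() target changed:
#   objects = getattr(run_config.registered_resources, rsrc)
```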


@@ -247,6 +247,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models: []
   shields:
   - shield_id: llama-guard
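
A quick way to check a migrated file is to round-trip it through the new model (a sketch; the import path is from the current source layout and may differ across versions):

```python
import yaml
from llama_stack.core.datatypes import StackRunConfig  # path may differ

with open("run.yaml") as f:
    cfg = StackRunConfig(**yaml.safe_load(f))

# If this prints the expected shields, the nested layout parsed correctly;
# a file still using top-level `models:` etc. will surface here as empty
# resource lists (or a validation error, if extra keys are forbidden).
print(cfg.registered_resources.shields)
```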


@@ -109,6 +109,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: ${env.INFERENCE_MODEL}


@@ -105,6 +105,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: ${env.INFERENCE_MODEL}


@@ -122,6 +122,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: ${env.INFERENCE_MODEL}


@@ -112,6 +112,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: ${env.INFERENCE_MODEL}


@@ -111,6 +111,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: ${env.INFERENCE_MODEL}


@@ -100,6 +100,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models: []
   shields: []
   vector_dbs: []


@@ -142,6 +142,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: gpt-4o


@@ -87,6 +87,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models:
   - metadata: {}
     model_id: ${env.INFERENCE_MODEL}


@@ -250,6 +250,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models: []
   shields:
   - shield_id: llama-guard


@@ -247,6 +247,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models: []
   shields:
   - shield_id: llama-guard


@@ -272,6 +272,7 @@ class RunConfigSettings(BaseModel):
             "apis": apis,
             "providers": provider_configs,
             "storage": storage_config,
+            "registered_resources": {
                 "models": [m.model_dump(exclude_none=True) for m in (self.default_models or [])],
                 "shields": [s.model_dump(exclude_none=True) for s in (self.default_shields or [])],
                 "vector_dbs": [],
@@ -279,6 +280,7 @@ class RunConfigSettings(BaseModel):
                 "scoring_fns": [],
                 "benchmarks": [b.model_dump(exclude_none=True) for b in (self.default_benchmarks or [])],
                 "tool_groups": [t.model_dump(exclude_none=True) for t in (self.default_tool_groups or [])],
+            },
             "server": {
                 "port": 8321,
             },
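
For template authors, the generated dict simply gains one level of nesting. A sketch of the resulting fragment (values illustrative, other top-level keys elided):

```python
run_config_fragment = {
    "registered_resources": {
        "models": [{"metadata": {}, "model_id": "gpt-4o"}],
        "shields": [],
        "vector_dbs": [],
        "datasets": [],
        "scoring_fns": [],
        "benchmarks": [],
        "tool_groups": [],
    },
    "server": {"port": 8321},
}
```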


@@ -115,6 +115,7 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+registered_resources:
   models: []
   shields: []
   vector_dbs: []