Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-06 18:40:57 +00:00)
chore(yaml)!: move registered resources to a sub-key (#3861)
**NOTE: this is a backwards-incompatible change to the run-configs.** A small quality-of-life update on its own, but it will prove useful when I rename `vector_dbs` to `vector_stores` next. This moves all the resource keys in the run-config (`models`, `shields`, ...) under a new `registered_resources` sub-key.
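For illustration, a minimal before/after sketch of the migration (field values taken from the diffs below; surrounding keys elided):

Before:

    models:
    - metadata: {}
      model_id: ${env.INFERENCE_MODEL}
    shields: []

After:

    registered_resources:
      models:
      - metadata: {}
        model_id: ${env.INFERENCE_MODEL}
      shields: []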
parent 483d53cc37
commit 94faec7bc5

15 changed files with 342 additions and 320 deletions
@@ -422,6 +422,18 @@ def process_cors_config(cors_config: bool | CORSConfig | None) -> CORSConfig | None:
    raise ValueError(f"Expected bool or CORSConfig, got {type(cors_config).__name__}")


+class RegisteredResources(BaseModel):
+    """Registry of resources available in the distribution."""
+
+    models: list[ModelInput] = Field(default_factory=list)
+    shields: list[ShieldInput] = Field(default_factory=list)
+    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
+    datasets: list[DatasetInput] = Field(default_factory=list)
+    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
+    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
+    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+
+
class ServerConfig(BaseModel):
    port: int = Field(
        default=8321,
@@ -491,14 +503,10 @@ can be instantiated multiple times (with different configs) if necessary.
        description="Catalog of named storage backends and references available to the stack",
    )

-    # registry of "resources" in the distribution
-    models: list[ModelInput] = Field(default_factory=list)
-    shields: list[ShieldInput] = Field(default_factory=list)
-    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
-    datasets: list[DatasetInput] = Field(default_factory=list)
-    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
-    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
-    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+    registered_resources: RegisteredResources = Field(
+        default_factory=RegisteredResources,
+        description="Registry of resources available in the distribution",
+    )

    logging: LoggingConfig | None = Field(default=None, description="Configuration for Llama Stack Logging")
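A minimal sketch (with stand-in field types, not the actual ModelInput/ShieldInput models) of how the new sub-key validates, including the default_factory behavior when the key is omitted:

    # Stand-in sketch: real fields are list[ModelInput], list[ShieldInput], etc.
    from pydantic import BaseModel, Field

    class RegisteredResources(BaseModel):
        models: list[dict] = Field(default_factory=list)
        shields: list[dict] = Field(default_factory=list)

    class StackRunConfig(BaseModel):
        registered_resources: RegisteredResources = Field(default_factory=RegisteredResources)

    # Omitting the key still validates; the defaults give empty lists:
    cfg = StackRunConfig.model_validate({})
    assert cfg.registered_resources.models == []

    # New-style configs nest resource lists under the sub-key:
    cfg = StackRunConfig.model_validate(
        {"registered_resources": {"models": [{"model_id": "gpt-4o"}]}}
    )
    assert cfg.registered_resources.models[0]["model_id"] == "gpt-4o"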
@@ -110,7 +110,7 @@ TEST_RECORDING_CONTEXT = None

async def register_resources(run_config: StackRunConfig, impls: dict[Api, Any]):
    for rsrc, api, register_method, list_method in RESOURCES:
-        objects = getattr(run_config, rsrc)
+        objects = getattr(run_config.registered_resources, rsrc)
        if api not in impls:
            continue
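Continuing the sketch above, the registration loop changes in exactly one place; a hedged sketch of the pattern (the real RESOURCES entries also carry an Api plus register/list method names):

    # Read each resource list off the nested sub-key instead of the top level.
    for rsrc in ["models", "shields"]:  # abbreviated stand-in for RESOURCES
        objects = getattr(cfg.registered_resources, rsrc)  # was: getattr(cfg, rsrc)
        print(rsrc, objects)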
@@ -247,6 +247,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models: []
  shields:
  - shield_id: llama-guard
@@ -109,6 +109,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: ${env.INFERENCE_MODEL}
@@ -105,6 +105,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: ${env.INFERENCE_MODEL}
@@ -122,6 +122,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: ${env.INFERENCE_MODEL}
@@ -112,6 +112,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: ${env.INFERENCE_MODEL}
@@ -111,6 +111,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: ${env.INFERENCE_MODEL}
@@ -100,6 +100,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models: []
  shields: []
  vector_dbs: []
@@ -142,6 +142,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: gpt-4o
@@ -87,6 +87,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models:
  - metadata: {}
    model_id: ${env.INFERENCE_MODEL}
@@ -250,6 +250,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models: []
  shields:
  - shield_id: llama-guard
@@ -247,6 +247,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models: []
  shields:
  - shield_id: llama-guard
@@ -272,6 +272,7 @@ class RunConfigSettings(BaseModel):
            "apis": apis,
            "providers": provider_configs,
            "storage": storage_config,
+            "registered_resources": {
                "models": [m.model_dump(exclude_none=True) for m in (self.default_models or [])],
                "shields": [s.model_dump(exclude_none=True) for s in (self.default_shields or [])],
                "vector_dbs": [],
@@ -279,6 +280,7 @@ class RunConfigSettings(BaseModel):
                "scoring_fns": [],
                "benchmarks": [b.model_dump(exclude_none=True) for b in (self.default_benchmarks or [])],
                "tool_groups": [t.model_dump(exclude_none=True) for t in (self.default_tool_groups or [])],
+            },
            "server": {
                "port": 8321,
            },
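Assuming the surrounding dict literal is otherwise unchanged, the distribution template now emits the nested shape; an illustrative (not exhaustive) example of the resulting run-config dict:

    # Illustrative output shape; values are placeholders, not template defaults.
    run_config = {
        "registered_resources": {
            "models": [{"metadata": {}, "model_id": "gpt-4o"}],
            "shields": [],
            "vector_dbs": [],
            "datasets": [],
            "scoring_fns": [],
            "benchmarks": [],
            "tool_groups": [],
        },
        "server": {"port": 8321},
    }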
@@ -115,6 +115,7 @@ storage:
  conversations:
    table_name: openai_conversations
    backend: sql_default
+registered_resources:
  models: []
  shields: []
  vector_dbs: []