diff --git a/llama_stack/core/datatypes.py b/llama_stack/core/datatypes.py
index 6d06adb84..e8cb36a02 100644
--- a/llama_stack/core/datatypes.py
+++ b/llama_stack/core/datatypes.py
@@ -422,6 +422,18 @@ def process_cors_config(cors_config: bool | CORSConfig | None) -> CORSConfig | N
     raise ValueError(f"Expected bool or CORSConfig, got {type(cors_config).__name__}")
 
 
+class RegisteredResources(BaseModel):
+    """Registry of resources available in the distribution."""
+
+    models: list[ModelInput] = Field(default_factory=list)
+    shields: list[ShieldInput] = Field(default_factory=list)
+    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
+    datasets: list[DatasetInput] = Field(default_factory=list)
+    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
+    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
+    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+
+
 class ServerConfig(BaseModel):
     port: int = Field(
         default=8321,
@@ -491,14 +503,10 @@ can be instantiated multiple times (with different configs) if necessary.
         description="Catalog of named storage backends and references available to the stack",
     )
 
-    # registry of "resources" in the distribution
-    models: list[ModelInput] = Field(default_factory=list)
-    shields: list[ShieldInput] = Field(default_factory=list)
-    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
-    datasets: list[DatasetInput] = Field(default_factory=list)
-    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
-    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
-    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+    registered_resources: RegisteredResources = Field(
+        default_factory=RegisteredResources,
+        description="Registry of resources available in the distribution",
+    )
 
     logging: LoggingConfig | None = Field(default=None, description="Configuration for Llama Stack Logging")
 
diff --git a/llama_stack/core/stack.py b/llama_stack/core/stack.py
index a2f7babd2..4cf1d072d 100644
--- a/llama_stack/core/stack.py
+++ b/llama_stack/core/stack.py
@@ -110,7 +110,7 @@ TEST_RECORDING_CONTEXT = None
 
 
 async def register_resources(run_config: StackRunConfig, impls: dict[Api, Any]):
     for rsrc, api, register_method, list_method in RESOURCES:
-        objects = getattr(run_config, rsrc)
+        objects = getattr(run_config.registered_resources, rsrc)
         if api not in impls:
             continue
diff --git a/llama_stack/distributions/ci-tests/run.yaml b/llama_stack/distributions/ci-tests/run.yaml
index 1653dc9bd..ecf9eed3b 100644
--- a/llama_stack/distributions/ci-tests/run.yaml
+++ b/llama_stack/distributions/ci-tests/run.yaml
@@ -247,23 +247,24 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models: []
-shields:
-- shield_id: llama-guard
-  provider_id: ${env.SAFETY_MODEL:+llama-guard}
-  provider_shield_id: ${env.SAFETY_MODEL:=}
-- shield_id: code-scanner
-  provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
-  provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models: []
+  shields:
+  - shield_id: llama-guard
+    provider_id: ${env.SAFETY_MODEL:+llama-guard}
+    provider_shield_id: ${env.SAFETY_MODEL:=}
+  - shield_id: code-scanner
+    provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
+    provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/dell/run-with-safety.yaml b/llama_stack/distributions/dell/run-with-safety.yaml
index 3130285b9..2563f2f4b 100644
--- a/llama_stack/distributions/dell/run-with-safety.yaml
+++ b/llama_stack/distributions/dell/run-with-safety.yaml
@@ -109,31 +109,32 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: ${env.INFERENCE_MODEL}
-  provider_id: tgi0
-  model_type: llm
-- metadata: {}
-  model_id: ${env.SAFETY_MODEL}
-  provider_id: tgi1
-  model_type: llm
-- metadata:
-    embedding_dimension: 768
-  model_id: nomic-embed-text-v1.5
-  provider_id: sentence-transformers
-  model_type: embedding
-shields:
-- shield_id: ${env.SAFETY_MODEL}
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: brave-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: ${env.INFERENCE_MODEL}
+    provider_id: tgi0
+    model_type: llm
+  - metadata: {}
+    model_id: ${env.SAFETY_MODEL}
+    provider_id: tgi1
+    model_type: llm
+  - metadata:
+      embedding_dimension: 768
+    model_id: nomic-embed-text-v1.5
+    provider_id: sentence-transformers
+    model_type: embedding
+  shields:
+  - shield_id: ${env.SAFETY_MODEL}
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: brave-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/dell/run.yaml b/llama_stack/distributions/dell/run.yaml
index af1a96a21..7bada394f 100644
--- a/llama_stack/distributions/dell/run.yaml
+++ b/llama_stack/distributions/dell/run.yaml
@@ -105,26 +105,27 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: ${env.INFERENCE_MODEL}
-  provider_id: tgi0
-  model_type: llm
-- metadata:
-    embedding_dimension: 768
-  model_id: nomic-embed-text-v1.5
-  provider_id: sentence-transformers
-  model_type: embedding
-shields: []
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: brave-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: ${env.INFERENCE_MODEL}
+    provider_id: tgi0
+    model_type: llm
+  - metadata:
+      embedding_dimension: 768
+    model_id: nomic-embed-text-v1.5
+    provider_id: sentence-transformers
+    model_type: embedding
+  shields: []
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: brave-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
index b43d1ff19..01b5db4f9 100644
--- a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
+++ b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
@@ -122,31 +122,32 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: ${env.INFERENCE_MODEL}
-  provider_id: meta-reference-inference
-  model_type: llm
-- metadata: {}
-  model_id: ${env.SAFETY_MODEL}
-  provider_id: meta-reference-safety
-  model_type: llm
-- metadata:
-    embedding_dimension: 768
-  model_id: nomic-embed-text-v1.5
-  provider_id: sentence-transformers
-  model_type: embedding
-shields:
-- shield_id: ${env.SAFETY_MODEL}
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: ${env.INFERENCE_MODEL}
+    provider_id: meta-reference-inference
+    model_type: llm
+  - metadata: {}
+    model_id: ${env.SAFETY_MODEL}
+    provider_id: meta-reference-safety
+    model_type: llm
+  - metadata:
+      embedding_dimension: 768
+    model_id: nomic-embed-text-v1.5
+    provider_id: sentence-transformers
+    model_type: embedding
+  shields:
+  - shield_id: ${env.SAFETY_MODEL}
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/meta-reference-gpu/run.yaml b/llama_stack/distributions/meta-reference-gpu/run.yaml
index 59e2d8129..87c33dde0 100644
--- a/llama_stack/distributions/meta-reference-gpu/run.yaml
+++ b/llama_stack/distributions/meta-reference-gpu/run.yaml
@@ -112,26 +112,27 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: ${env.INFERENCE_MODEL}
-  provider_id: meta-reference-inference
-  model_type: llm
-- metadata:
-    embedding_dimension: 768
-  model_id: nomic-embed-text-v1.5
-  provider_id: sentence-transformers
-  model_type: embedding
-shields: []
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: ${env.INFERENCE_MODEL}
+    provider_id: meta-reference-inference
+    model_type: llm
+  - metadata:
+      embedding_dimension: 768
+    model_id: nomic-embed-text-v1.5
+    provider_id: sentence-transformers
+    model_type: embedding
+  shields: []
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/nvidia/run-with-safety.yaml b/llama_stack/distributions/nvidia/run-with-safety.yaml
index e06787d0b..c23d0f9cb 100644
--- a/llama_stack/distributions/nvidia/run-with-safety.yaml
+++ b/llama_stack/distributions/nvidia/run-with-safety.yaml
@@ -111,25 +111,26 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: ${env.INFERENCE_MODEL}
-  provider_id: nvidia
-  model_type: llm
-- metadata: {}
-  model_id: ${env.SAFETY_MODEL}
-  provider_id: nvidia
-  model_type: llm
-shields:
-- shield_id: ${env.SAFETY_MODEL}
-  provider_id: nvidia
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: ${env.INFERENCE_MODEL}
+    provider_id: nvidia
+    model_type: llm
+  - metadata: {}
+    model_id: ${env.SAFETY_MODEL}
+    provider_id: nvidia
+    model_type: llm
+  shields:
+  - shield_id: ${env.SAFETY_MODEL}
+    provider_id: nvidia
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/nvidia/run.yaml b/llama_stack/distributions/nvidia/run.yaml
index 85e0743e4..81e744d53 100644
--- a/llama_stack/distributions/nvidia/run.yaml
+++ b/llama_stack/distributions/nvidia/run.yaml
@@ -100,15 +100,16 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models: []
-shields: []
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models: []
+  shields: []
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/open-benchmark/run.yaml b/llama_stack/distributions/open-benchmark/run.yaml
index 2c6936bfc..4fd0e199b 100644
--- a/llama_stack/distributions/open-benchmark/run.yaml
+++ b/llama_stack/distributions/open-benchmark/run.yaml
@@ -142,109 +142,110 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: gpt-4o
-  provider_id: openai
-  provider_model_id: gpt-4o
-  model_type: llm
-- metadata: {}
-  model_id: claude-3-5-sonnet-latest
-  provider_id: anthropic
-  provider_model_id: claude-3-5-sonnet-latest
-  model_type: llm
-- metadata: {}
-  model_id: gemini/gemini-1.5-flash
-  provider_id: gemini
-  provider_model_id: gemini/gemini-1.5-flash
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.3-70B-Instruct
-  provider_id: groq
-  provider_model_id: groq/llama-3.3-70b-versatile
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.1-405B-Instruct
-  provider_id: together
-  provider_model_id: meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo
-  model_type: llm
-shields:
-- shield_id: meta-llama/Llama-Guard-3-8B
-vector_dbs: []
-datasets:
-- purpose: eval/messages-answer
-  source:
-    type: uri
-    uri: huggingface://datasets/llamastack/simpleqa?split=train
-  metadata: {}
-  dataset_id: simpleqa
-- purpose: eval/messages-answer
-  source:
-    type: uri
-    uri: huggingface://datasets/llamastack/mmlu_cot?split=test&name=all
-  metadata: {}
-  dataset_id: mmlu_cot
-- purpose: eval/messages-answer
-  source:
-    type: uri
-    uri: huggingface://datasets/llamastack/gpqa_0shot_cot?split=test&name=gpqa_main
-  metadata: {}
-  dataset_id: gpqa_cot
-- purpose: eval/messages-answer
-  source:
-    type: uri
-    uri: huggingface://datasets/llamastack/math_500?split=test
-  metadata: {}
-  dataset_id: math_500
-- purpose: eval/messages-answer
-  source:
-    type: uri
-    uri: huggingface://datasets/llamastack/IfEval?split=train
-  metadata: {}
-  dataset_id: ifeval
-- purpose: eval/messages-answer
-  source:
-    type: uri
-    uri: huggingface://datasets/llamastack/docvqa?split=val
-  metadata: {}
-  dataset_id: docvqa
-scoring_fns: []
-benchmarks:
-- dataset_id: simpleqa
-  scoring_functions:
-  - llm-as-judge::405b-simpleqa
-  metadata: {}
-  benchmark_id: meta-reference-simpleqa
-- dataset_id: mmlu_cot
-  scoring_functions:
-  - basic::regex_parser_multiple_choice_answer
-  metadata: {}
-  benchmark_id: meta-reference-mmlu-cot
-- dataset_id: gpqa_cot
-  scoring_functions:
-  - basic::regex_parser_multiple_choice_answer
-  metadata: {}
-  benchmark_id: meta-reference-gpqa-cot
-- dataset_id: math_500
-  scoring_functions:
-  - basic::regex_parser_math_response
-  metadata: {}
-  benchmark_id: meta-reference-math-500
-- dataset_id: ifeval
-  scoring_functions:
-  - basic::ifeval
-  metadata: {}
-  benchmark_id: meta-reference-ifeval
-- dataset_id: docvqa
-  scoring_functions:
-  - basic::docvqa
-  metadata: {}
-  benchmark_id: meta-reference-docvqa
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: gpt-4o
+    provider_id: openai
+    provider_model_id: gpt-4o
+    model_type: llm
+  - metadata: {}
+    model_id: claude-3-5-sonnet-latest
+    provider_id: anthropic
+    provider_model_id: claude-3-5-sonnet-latest
+    model_type: llm
+  - metadata: {}
+    model_id: gemini/gemini-1.5-flash
+    provider_id: gemini
+    provider_model_id: gemini/gemini-1.5-flash
+    model_type: llm
+  - metadata: {}
+    model_id: meta-llama/Llama-3.3-70B-Instruct
+    provider_id: groq
+    provider_model_id: groq/llama-3.3-70b-versatile
+    model_type: llm
+  - metadata: {}
+    model_id: meta-llama/Llama-3.1-405B-Instruct
+    provider_id: together
+    provider_model_id: meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo
+    model_type: llm
+  shields:
+  - shield_id: meta-llama/Llama-Guard-3-8B
+  vector_dbs: []
+  datasets:
+  - purpose: eval/messages-answer
+    source:
+      type: uri
+      uri: huggingface://datasets/llamastack/simpleqa?split=train
+    metadata: {}
+    dataset_id: simpleqa
+  - purpose: eval/messages-answer
+    source:
+      type: uri
+      uri: huggingface://datasets/llamastack/mmlu_cot?split=test&name=all
+    metadata: {}
+    dataset_id: mmlu_cot
+  - purpose: eval/messages-answer
+    source:
+      type: uri
+      uri: huggingface://datasets/llamastack/gpqa_0shot_cot?split=test&name=gpqa_main
+    metadata: {}
+    dataset_id: gpqa_cot
+  - purpose: eval/messages-answer
+    source:
+      type: uri
+      uri: huggingface://datasets/llamastack/math_500?split=test
+    metadata: {}
+    dataset_id: math_500
+  - purpose: eval/messages-answer
+    source:
+      type: uri
+      uri: huggingface://datasets/llamastack/IfEval?split=train
+    metadata: {}
+    dataset_id: ifeval
+  - purpose: eval/messages-answer
+    source:
+      type: uri
+      uri: huggingface://datasets/llamastack/docvqa?split=val
+    metadata: {}
+    dataset_id: docvqa
+  scoring_fns: []
+  benchmarks:
+  - dataset_id: simpleqa
+    scoring_functions:
+    - llm-as-judge::405b-simpleqa
+    metadata: {}
+    benchmark_id: meta-reference-simpleqa
+  - dataset_id: mmlu_cot
+    scoring_functions:
+    - basic::regex_parser_multiple_choice_answer
+    metadata: {}
+    benchmark_id: meta-reference-mmlu-cot
+  - dataset_id: gpqa_cot
+    scoring_functions:
+    - basic::regex_parser_multiple_choice_answer
+    metadata: {}
+    benchmark_id: meta-reference-gpqa-cot
+  - dataset_id: math_500
+    scoring_functions:
+    - basic::regex_parser_math_response
+    metadata: {}
+    benchmark_id: meta-reference-math-500
+  - dataset_id: ifeval
+    scoring_functions:
+    - basic::ifeval
+    metadata: {}
+    benchmark_id: meta-reference-ifeval
+  - dataset_id: docvqa
+    scoring_functions:
+    - basic::docvqa
+    metadata: {}
+    benchmark_id: meta-reference-docvqa
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/postgres-demo/run.yaml b/llama_stack/distributions/postgres-demo/run.yaml
index 9556b1287..0d7ecff48 100644
--- a/llama_stack/distributions/postgres-demo/run.yaml
+++ b/llama_stack/distributions/postgres-demo/run.yaml
@@ -87,27 +87,28 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models:
-- metadata: {}
-  model_id: ${env.INFERENCE_MODEL}
-  provider_id: vllm-inference
-  model_type: llm
-- metadata:
-    embedding_dimension: 768
-  model_id: nomic-embed-text-v1.5
-  provider_id: sentence-transformers
-  model_type: embedding
-shields:
-- shield_id: meta-llama/Llama-Guard-3-8B
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models:
+  - metadata: {}
+    model_id: ${env.INFERENCE_MODEL}
+    provider_id: vllm-inference
+    model_type: llm
+  - metadata:
+      embedding_dimension: 768
+    model_id: nomic-embed-text-v1.5
+    provider_id: sentence-transformers
+    model_type: embedding
+  shields:
+  - shield_id: meta-llama/Llama-Guard-3-8B
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/starter-gpu/run.yaml b/llama_stack/distributions/starter-gpu/run.yaml
index 81f564779..92483c78e 100644
--- a/llama_stack/distributions/starter-gpu/run.yaml
+++ b/llama_stack/distributions/starter-gpu/run.yaml
@@ -250,23 +250,24 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models: []
-shields:
-- shield_id: llama-guard
-  provider_id: ${env.SAFETY_MODEL:+llama-guard}
-  provider_shield_id: ${env.SAFETY_MODEL:=}
-- shield_id: code-scanner
-  provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
-  provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models: []
+  shields:
+  - shield_id: llama-guard
+    provider_id: ${env.SAFETY_MODEL:+llama-guard}
+    provider_shield_id: ${env.SAFETY_MODEL:=}
+  - shield_id: code-scanner
+    provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
+    provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/starter/run.yaml b/llama_stack/distributions/starter/run.yaml
index dc611a446..3b9d8f890 100644
--- a/llama_stack/distributions/starter/run.yaml
+++ b/llama_stack/distributions/starter/run.yaml
@@ -247,23 +247,24 @@ storage:
     conversations:
       table_name: openai_conversations
      backend: sql_default
-models: []
-shields:
-- shield_id: llama-guard
-  provider_id: ${env.SAFETY_MODEL:+llama-guard}
-  provider_shield_id: ${env.SAFETY_MODEL:=}
-- shield_id: code-scanner
-  provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
-  provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models: []
+  shields:
+  - shield_id: llama-guard
+    provider_id: ${env.SAFETY_MODEL:+llama-guard}
+    provider_shield_id: ${env.SAFETY_MODEL:=}
+  - shield_id: code-scanner
+    provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
+    provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
diff --git a/llama_stack/distributions/template.py b/llama_stack/distributions/template.py
index daa609388..64f21e626 100644
--- a/llama_stack/distributions/template.py
+++ b/llama_stack/distributions/template.py
@@ -272,13 +272,15 @@ class RunConfigSettings(BaseModel):
             "apis": apis,
             "providers": provider_configs,
             "storage": storage_config,
-            "models": [m.model_dump(exclude_none=True) for m in (self.default_models or [])],
-            "shields": [s.model_dump(exclude_none=True) for s in (self.default_shields or [])],
-            "vector_dbs": [],
-            "datasets": [d.model_dump(exclude_none=True) for d in (self.default_datasets or [])],
-            "scoring_fns": [],
-            "benchmarks": [b.model_dump(exclude_none=True) for b in (self.default_benchmarks or [])],
-            "tool_groups": [t.model_dump(exclude_none=True) for t in (self.default_tool_groups or [])],
+            "registered_resources": {
+                "models": [m.model_dump(exclude_none=True) for m in (self.default_models or [])],
+                "shields": [s.model_dump(exclude_none=True) for s in (self.default_shields or [])],
+                "vector_dbs": [],
+                "datasets": [d.model_dump(exclude_none=True) for d in (self.default_datasets or [])],
+                "scoring_fns": [],
+                "benchmarks": [b.model_dump(exclude_none=True) for b in (self.default_benchmarks or [])],
+                "tool_groups": [t.model_dump(exclude_none=True) for t in (self.default_tool_groups or [])],
+            },
             "server": {
                 "port": 8321,
             },
diff --git a/llama_stack/distributions/watsonx/run.yaml b/llama_stack/distributions/watsonx/run.yaml
index 37866cb32..ca3c8402d 100644
--- a/llama_stack/distributions/watsonx/run.yaml
+++ b/llama_stack/distributions/watsonx/run.yaml
@@ -115,17 +115,18 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
-models: []
-shields: []
-vector_dbs: []
-datasets: []
-scoring_fns: []
-benchmarks: []
-tool_groups:
-- toolgroup_id: builtin::websearch
-  provider_id: tavily-search
-- toolgroup_id: builtin::rag
-  provider_id: rag-runtime
+registered_resources:
+  models: []
+  shields: []
+  vector_dbs: []
+  datasets: []
+  scoring_fns: []
+  benchmarks: []
+  tool_groups:
+  - toolgroup_id: builtin::websearch
+    provider_id: tavily-search
+  - toolgroup_id: builtin::rag
+    provider_id: rag-runtime
 server:
   port: 8321
 telemetry:
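
Note for downstream consumers: code that previously read the resource lists straight off the StackRunConfig root must follow the stack.py hunk above and reach through the new nested field. A minimal sketch of the access-pattern change, assuming an already-parsed StackRunConfig bound to an illustrative variable named run_config:

    # Before this change, the lists hung off the config root:
    #     models = run_config.models
    # After it, the same data lives on the nested RegisteredResources model:
    models = run_config.registered_resources.models             # list[ModelInput]
    tool_groups = run_config.registered_resources.tool_groups   # list[ToolGroupInput]

    # Both levels declare default_factory, so a run.yaml that omits the
    # registered_resources key still parses and yields empty lists.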