diff --git a/docs/source/distributions/remote_hosted_distro/nvidia.md b/docs/source/distributions/remote_hosted_distro/nvidia.md index f352f737e..70e334041 100644 --- a/docs/source/distributions/remote_hosted_distro/nvidia.md +++ b/docs/source/distributions/remote_hosted_distro/nvidia.md @@ -9,7 +9,7 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::nvidia` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/distributions/self_hosted_distro/bedrock.md b/docs/source/distributions/self_hosted_distro/bedrock.md index 64c9f8c19..3f6dc3a70 100644 --- a/docs/source/distributions/self_hosted_distro/bedrock.md +++ b/docs/source/distributions/self_hosted_distro/bedrock.md @@ -16,7 +16,7 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::bedrock` | -| safety | `remote::bedrock` | +| safety | `remote::bedrock`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/distributions/self_hosted_distro/cerebras.md b/docs/source/distributions/self_hosted_distro/cerebras.md index a0c9eb263..c1aa608c3 100644 --- a/docs/source/distributions/self_hosted_distro/cerebras.md +++ b/docs/source/distributions/self_hosted_distro/cerebras.md @@ -9,7 +9,7 @@ The `llamastack/distribution-cerebras` distribution consists of the following pr | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::cerebras` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` | diff --git a/docs/source/distributions/self_hosted_distro/dell.md b/docs/source/distributions/self_hosted_distro/dell.md index aef3ecf58..74ad5afcc 100644 --- a/docs/source/distributions/self_hosted_distro/dell.md +++ b/docs/source/distributions/self_hosted_distro/dell.md @@ -20,7 +20,7 @@ The `llamastack/distribution-dell` distribution consists of the following provid | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::tgi` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` | diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md index b183757db..d449c4a5c 100644 --- a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md +++ b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md @@ -19,7 +19,7 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `inline::meta-reference` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md index 9aeb7a88b..6a37ad061 100644 --- a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md +++ b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md @@ -19,7 +19,7 @@ The `llamastack/distribution-meta-reference-quantized-gpu` distribution consists | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `inline::meta-reference-quantized` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md index a3a45f9a8..ff1cf9379 100644 --- a/docs/source/distributions/self_hosted_distro/ollama.md +++ b/docs/source/distributions/self_hosted_distro/ollama.md @@ -19,7 +19,7 @@ The `llamastack/distribution-ollama` distribution consists of the following prov | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::ollama` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` | diff --git a/docs/source/distributions/self_hosted_distro/remote-vllm.md b/docs/source/distributions/self_hosted_distro/remote-vllm.md index 6c3bbd1d0..f109bd03e 100644 --- a/docs/source/distributions/self_hosted_distro/remote-vllm.md +++ b/docs/source/distributions/self_hosted_distro/remote-vllm.md @@ -18,7 +18,7 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::vllm` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/distributions/self_hosted_distro/sambanova.md b/docs/source/distributions/self_hosted_distro/sambanova.md index e6ac616be..5bc5c9c98 100644 --- a/docs/source/distributions/self_hosted_distro/sambanova.md +++ b/docs/source/distributions/self_hosted_distro/sambanova.md @@ -17,7 +17,7 @@ The `llamastack/distribution-sambanova` distribution consists of the following p |-----|-------------| | agents | `inline::meta-reference` | | inference | `remote::sambanova` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` | | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | diff --git a/docs/source/distributions/self_hosted_distro/tgi.md b/docs/source/distributions/self_hosted_distro/tgi.md index f4eecf2cd..f77aa1352 100644 --- a/docs/source/distributions/self_hosted_distro/tgi.md +++ b/docs/source/distributions/self_hosted_distro/tgi.md @@ -20,7 +20,7 @@ The `llamastack/distribution-tgi` distribution consists of the following provide | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::tgi` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/distributions/self_hosted_distro/together.md b/docs/source/distributions/self_hosted_distro/together.md index 8e36c1eb0..da331f5a6 100644 --- a/docs/source/distributions/self_hosted_distro/together.md +++ b/docs/source/distributions/self_hosted_distro/together.md @@ -19,7 +19,7 @@ The `llamastack/distribution-together` distribution consists of the following pr | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::together` | -| safety | `inline::llama-guard` | +| safety | `inline::llama-guard`, `remote::fiddlecube` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | diff --git a/docs/source/index.md b/docs/source/index.md index 2834f5641..c431e1441 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -78,6 +78,7 @@ A number of "adapters" are available for some popular Inference and Vector Store | Prompt Guard | Single Node | | Code Scanner | Single Node | | AWS Bedrock | Hosted | +| FiddleCube | Hosted | ```{toctree} diff --git a/llama_stack/templates/bedrock/bedrock.py b/llama_stack/templates/bedrock/bedrock.py index 0c8259285..62003e383 100644 --- a/llama_stack/templates/bedrock/bedrock.py +++ b/llama_stack/templates/bedrock/bedrock.py @@ -19,7 +19,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::bedrock"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["remote::bedrock"], + "safety": ["remote::bedrock", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/bedrock/build.yaml b/llama_stack/templates/bedrock/build.yaml index 6c07b0478..f121a25e9 100644 --- a/llama_stack/templates/bedrock/build.yaml +++ b/llama_stack/templates/bedrock/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - remote::bedrock + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/bedrock/run.yaml b/llama_stack/templates/bedrock/run.yaml index be6c9a928..916238463 100644 --- a/llama_stack/templates/bedrock/run.yaml +++ b/llama_stack/templates/bedrock/run.yaml @@ -27,6 +27,9 @@ providers: - provider_id: bedrock provider_type: remote::bedrock config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/cerebras/build.yaml b/llama_stack/templates/cerebras/build.yaml index 9d5ab1a52..27eb97761 100644 --- a/llama_stack/templates/cerebras/build.yaml +++ b/llama_stack/templates/cerebras/build.yaml @@ -6,6 +6,7 @@ distribution_spec: - remote::cerebras safety: - inline::llama-guard + - remote::fiddlecube vector_io: - inline::faiss - remote::chromadb diff --git a/llama_stack/templates/cerebras/cerebras.py b/llama_stack/templates/cerebras/cerebras.py index 2dfae04f8..0e491f300 100644 --- a/llama_stack/templates/cerebras/cerebras.py +++ b/llama_stack/templates/cerebras/cerebras.py @@ -22,7 +22,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::cerebras"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], "agents": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/cerebras/run.yaml b/llama_stack/templates/cerebras/run.yaml index 05d3f4525..fda928406 100644 --- a/llama_stack/templates/cerebras/run.yaml +++ b/llama_stack/templates/cerebras/run.yaml @@ -24,6 +24,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} vector_io: - provider_id: faiss provider_type: inline::faiss diff --git a/llama_stack/templates/dell/build.yaml b/llama_stack/templates/dell/build.yaml index e2edb9386..be1fe9a6b 100644 --- a/llama_stack/templates/dell/build.yaml +++ b/llama_stack/templates/dell/build.yaml @@ -11,6 +11,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/dell/dell.py b/llama_stack/templates/dell/dell.py index 5781da7f4..2c7cdbb9a 100644 --- a/llama_stack/templates/dell/dell.py +++ b/llama_stack/templates/dell/dell.py @@ -23,7 +23,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::tgi"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/dell/run-with-safety.yaml b/llama_stack/templates/dell/run-with-safety.yaml index 04c5957d4..21bf0cc77 100644 --- a/llama_stack/templates/dell/run-with-safety.yaml +++ b/llama_stack/templates/dell/run-with-safety.yaml @@ -32,6 +32,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/dell/run.yaml b/llama_stack/templates/dell/run.yaml index 706444eb1..585baba34 100644 --- a/llama_stack/templates/dell/run.yaml +++ b/llama_stack/templates/dell/run.yaml @@ -28,6 +28,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/fireworks/build.yaml b/llama_stack/templates/fireworks/build.yaml index cdd60ec2a..adbfc4da0 100644 --- a/llama_stack/templates/fireworks/build.yaml +++ b/llama_stack/templates/fireworks/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py index ec350010b..785d3e2de 100644 --- a/llama_stack/templates/fireworks/fireworks.py +++ b/llama_stack/templates/fireworks/fireworks.py @@ -28,7 +28,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::fireworks"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml index ccf67dcbb..5f13e07e7 100644 --- a/llama_stack/templates/fireworks/run.yaml +++ b/llama_stack/templates/fireworks/run.yaml @@ -29,8 +29,8 @@ providers: namespace: null db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/faiss_store.db safety: - - provider_id: llama-guard - provider_type: inline::llama-guard + - provider_id: fiddlecube + provider_type: remote::fiddlecube config: {} agents: - provider_id: meta-reference diff --git a/llama_stack/templates/hf-endpoint/build.yaml b/llama_stack/templates/hf-endpoint/build.yaml index c2eaaa05b..bca47e9a4 100644 --- a/llama_stack/templates/hf-endpoint/build.yaml +++ b/llama_stack/templates/hf-endpoint/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/hf-endpoint/hf_endpoint.py b/llama_stack/templates/hf-endpoint/hf_endpoint.py index 4533fd95b..ea2d43ad5 100644 --- a/llama_stack/templates/hf-endpoint/hf_endpoint.py +++ b/llama_stack/templates/hf-endpoint/hf_endpoint.py @@ -23,7 +23,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::hf::endpoint"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/hf-endpoint/run-with-safety.yaml b/llama_stack/templates/hf-endpoint/run-with-safety.yaml index f520a2fda..5555f9ffb 100644 --- a/llama_stack/templates/hf-endpoint/run-with-safety.yaml +++ b/llama_stack/templates/hf-endpoint/run-with-safety.yaml @@ -37,6 +37,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/hf-endpoint/run.yaml b/llama_stack/templates/hf-endpoint/run.yaml index 708cb1bcc..d3962fcbd 100644 --- a/llama_stack/templates/hf-endpoint/run.yaml +++ b/llama_stack/templates/hf-endpoint/run.yaml @@ -32,6 +32,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/hf-serverless/build.yaml b/llama_stack/templates/hf-serverless/build.yaml index f9303cfab..c037ca018 100644 --- a/llama_stack/templates/hf-serverless/build.yaml +++ b/llama_stack/templates/hf-serverless/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/hf-serverless/hf_serverless.py b/llama_stack/templates/hf-serverless/hf_serverless.py index 8438de7a5..14f59d56e 100644 --- a/llama_stack/templates/hf-serverless/hf_serverless.py +++ b/llama_stack/templates/hf-serverless/hf_serverless.py @@ -23,7 +23,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::hf::serverless"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/hf-serverless/run-with-safety.yaml b/llama_stack/templates/hf-serverless/run-with-safety.yaml index 7f0abf5be..66d209a1a 100644 --- a/llama_stack/templates/hf-serverless/run-with-safety.yaml +++ b/llama_stack/templates/hf-serverless/run-with-safety.yaml @@ -37,6 +37,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/hf-serverless/run.yaml b/llama_stack/templates/hf-serverless/run.yaml index c0b7a4c60..497b08da3 100644 --- a/llama_stack/templates/hf-serverless/run.yaml +++ b/llama_stack/templates/hf-serverless/run.yaml @@ -32,6 +32,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/meta-reference-gpu/build.yaml b/llama_stack/templates/meta-reference-gpu/build.yaml index b9130fc7d..15dcc8200 100644 --- a/llama_stack/templates/meta-reference-gpu/build.yaml +++ b/llama_stack/templates/meta-reference-gpu/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/meta-reference-gpu/meta_reference.py b/llama_stack/templates/meta-reference-gpu/meta_reference.py index a3f82b0c8..b1f240287 100644 --- a/llama_stack/templates/meta-reference-gpu/meta_reference.py +++ b/llama_stack/templates/meta-reference-gpu/meta_reference.py @@ -27,7 +27,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["inline::meta-reference"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml index c5286fc6b..6c2b97b47 100644 --- a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml +++ b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml @@ -39,6 +39,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/meta-reference-gpu/run.yaml b/llama_stack/templates/meta-reference-gpu/run.yaml index 310585f23..63912f098 100644 --- a/llama_stack/templates/meta-reference-gpu/run.yaml +++ b/llama_stack/templates/meta-reference-gpu/run.yaml @@ -33,6 +33,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/meta-reference-quantized-gpu/build.yaml b/llama_stack/templates/meta-reference-quantized-gpu/build.yaml index 7bbcfe5f2..e939e6ca6 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/build.yaml +++ b/llama_stack/templates/meta-reference-quantized-gpu/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py b/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py index 8c2a6ec9f..71fcc46f1 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py +++ b/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py @@ -22,7 +22,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["inline::meta-reference-quantized"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/meta-reference-quantized-gpu/run.yaml b/llama_stack/templates/meta-reference-quantized-gpu/run.yaml index d43cf3917..93fbbe721 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/run.yaml +++ b/llama_stack/templates/meta-reference-quantized-gpu/run.yaml @@ -35,6 +35,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/nvidia/build.yaml b/llama_stack/templates/nvidia/build.yaml index e9748721a..f36f7ee35 100644 --- a/llama_stack/templates/nvidia/build.yaml +++ b/llama_stack/templates/nvidia/build.yaml @@ -8,6 +8,7 @@ distribution_spec: - inline::faiss safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/nvidia/nvidia.py b/llama_stack/templates/nvidia/nvidia.py index d24c9ed48..2d82fcfe7 100644 --- a/llama_stack/templates/nvidia/nvidia.py +++ b/llama_stack/templates/nvidia/nvidia.py @@ -18,7 +18,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::nvidia"], "vector_io": ["inline::faiss"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/nvidia/run.yaml b/llama_stack/templates/nvidia/run.yaml index c8ae362f5..cce4b7868 100644 --- a/llama_stack/templates/nvidia/run.yaml +++ b/llama_stack/templates/nvidia/run.yaml @@ -29,6 +29,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml index 0fee6808c..3305ddd97 100644 --- a/llama_stack/templates/ollama/build.yaml +++ b/llama_stack/templates/ollama/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py index d14cb3aad..c30da5549 100644 --- a/llama_stack/templates/ollama/ollama.py +++ b/llama_stack/templates/ollama/ollama.py @@ -25,7 +25,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::ollama"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml index 485223675..0c887b903 100644 --- a/llama_stack/templates/ollama/run.yaml +++ b/llama_stack/templates/ollama/run.yaml @@ -31,6 +31,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/remote-vllm/build.yaml b/llama_stack/templates/remote-vllm/build.yaml index 74d9f32d9..6e04368a6 100644 --- a/llama_stack/templates/remote-vllm/build.yaml +++ b/llama_stack/templates/remote-vllm/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference eval: diff --git a/llama_stack/templates/remote-vllm/run-with-safety.yaml b/llama_stack/templates/remote-vllm/run-with-safety.yaml index 1fe998a1f..19d956585 100644 --- a/llama_stack/templates/remote-vllm/run-with-safety.yaml +++ b/llama_stack/templates/remote-vllm/run-with-safety.yaml @@ -39,6 +39,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/remote-vllm/run.yaml b/llama_stack/templates/remote-vllm/run.yaml index 9d3db8a31..bc05cfa45 100644 --- a/llama_stack/templates/remote-vllm/run.yaml +++ b/llama_stack/templates/remote-vllm/run.yaml @@ -33,6 +33,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/remote-vllm/vllm.py b/llama_stack/templates/remote-vllm/vllm.py index 6c835ef86..66b602a84 100644 --- a/llama_stack/templates/remote-vllm/vllm.py +++ b/llama_stack/templates/remote-vllm/vllm.py @@ -25,7 +25,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::vllm"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "eval": ["inline::meta-reference"], "datasetio": ["remote::huggingface", "inline::localfs"], diff --git a/llama_stack/templates/sambanova/build.yaml b/llama_stack/templates/sambanova/build.yaml index ca5ffe618..ed2bdb4ea 100644 --- a/llama_stack/templates/sambanova/build.yaml +++ b/llama_stack/templates/sambanova/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/sambanova/run.yaml b/llama_stack/templates/sambanova/run.yaml index 39b0f3c4e..7c02e4c9c 100644 --- a/llama_stack/templates/sambanova/run.yaml +++ b/llama_stack/templates/sambanova/run.yaml @@ -32,6 +32,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/sambanova/sambanova.py b/llama_stack/templates/sambanova/sambanova.py index 70b54b010..66e3ea0ae 100644 --- a/llama_stack/templates/sambanova/sambanova.py +++ b/llama_stack/templates/sambanova/sambanova.py @@ -24,7 +24,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::sambanova"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "tool_runtime": [ diff --git a/llama_stack/templates/tgi/build.yaml b/llama_stack/templates/tgi/build.yaml index 8bc628158..a61627eb6 100644 --- a/llama_stack/templates/tgi/build.yaml +++ b/llama_stack/templates/tgi/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/tgi/run-with-safety.yaml b/llama_stack/templates/tgi/run-with-safety.yaml index ed6c9ef6f..e4015d1f6 100644 --- a/llama_stack/templates/tgi/run-with-safety.yaml +++ b/llama_stack/templates/tgi/run-with-safety.yaml @@ -32,6 +32,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/tgi/run.yaml b/llama_stack/templates/tgi/run.yaml index 8bf76f37b..cece8d83e 100644 --- a/llama_stack/templates/tgi/run.yaml +++ b/llama_stack/templates/tgi/run.yaml @@ -31,6 +31,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py index e49c98d72..cbfa36ab7 100644 --- a/llama_stack/templates/tgi/tgi.py +++ b/llama_stack/templates/tgi/tgi.py @@ -25,7 +25,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::tgi"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/together/build.yaml b/llama_stack/templates/together/build.yaml index 90ee5bcee..ce5bec920 100644 --- a/llama_stack/templates/together/build.yaml +++ b/llama_stack/templates/together/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/together/run.yaml b/llama_stack/templates/together/run.yaml index 920003759..8ce2eae8b 100644 --- a/llama_stack/templates/together/run.yaml +++ b/llama_stack/templates/together/run.yaml @@ -32,6 +32,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py index b7ac130ed..1f724fae3 100644 --- a/llama_stack/templates/together/together.py +++ b/llama_stack/templates/together/together.py @@ -28,7 +28,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["remote::together"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"], diff --git a/llama_stack/templates/vllm-gpu/build.yaml b/llama_stack/templates/vllm-gpu/build.yaml index d24046613..fb38fed74 100644 --- a/llama_stack/templates/vllm-gpu/build.yaml +++ b/llama_stack/templates/vllm-gpu/build.yaml @@ -10,6 +10,7 @@ distribution_spec: - remote::pgvector safety: - inline::llama-guard + - remote::fiddlecube agents: - inline::meta-reference telemetry: diff --git a/llama_stack/templates/vllm-gpu/run.yaml b/llama_stack/templates/vllm-gpu/run.yaml index 41a545e1a..bf56c3e49 100644 --- a/llama_stack/templates/vllm-gpu/run.yaml +++ b/llama_stack/templates/vllm-gpu/run.yaml @@ -35,6 +35,9 @@ providers: - provider_id: llama-guard provider_type: inline::llama-guard config: {} + - provider_id: fiddlecube + provider_type: remote::fiddlecube + config: {} agents: - provider_id: meta-reference provider_type: inline::meta-reference diff --git a/llama_stack/templates/vllm-gpu/vllm.py b/llama_stack/templates/vllm-gpu/vllm.py index 54ebd2d41..fff73924b 100644 --- a/llama_stack/templates/vllm-gpu/vllm.py +++ b/llama_stack/templates/vllm-gpu/vllm.py @@ -22,7 +22,7 @@ def get_distribution_template() -> DistributionTemplate: providers = { "inference": ["inline::vllm"], "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"], - "safety": ["inline::llama-guard"], + "safety": ["inline::llama-guard", "remote::fiddlecube"], "agents": ["inline::meta-reference"], "telemetry": ["inline::meta-reference"], "eval": ["inline::meta-reference"],