# What does this PR do?

**Why**
- When `AgentConfig` has no `input_shields` / `output_shields` defined, we still output a `shield_call` step with `violation=None`. This makes it impossible to distinguish between (1) shields ran and found no violation and (2) no shields were called at all.

**What**
- Do not emit a `shield_call` step when no `input_shields` / `output_shields` are defined (a minimal sketch of the intended behavior follows this description).
- Also removes a never-reached try/except block in the agent loop: `run_multiple_shields` is never called inside the try block (verified by a stack-trace print).

**Side Note**
- pre-commit fix

## Test Plan

Tested with DirectClient via https://gist.github.com/yanxi0830/b48f2a53b6f5391b9ff1e39992bc05b3 (a rough sketch of the client-side check appears below, before the file listing).

**No Shields**

<img width="858" alt="image" src="https://github.com/user-attachments/assets/67319370-329f-4954-bd16-d21ce54c6ebf" />

**With Input + Output Shields**

<img width="854" alt="image" src="https://github.com/user-attachments/assets/75ab1bee-3ba9-4549-ab51-23210be83da7" />

**Input Shields Only**

<img width="858" alt="image" src="https://github.com/user-attachments/assets/1897206b-13dd-4ea5-92c2-b39bf68e9286" />

E2E pytest:

```
LLAMA_STACK_BASE_URL=http://localhost:5000 pytest -v ./tests/client-sdk/agents/test_agents.py
```

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
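To make the intended behavior concrete, here is a minimal sketch. It is not the actual meta-reference agent-loop code; the helper name `maybe_run_shields` and the dict-shaped step are illustrative assumptions, kept only to show when a `shield_call` step should exist at all.

```python
# Illustrative sketch only, not the real agent-loop code. The helper name and
# the dict-shaped "step" are assumptions used to show the intended behavior.
from typing import Any, Awaitable, Callable, Dict, List, Optional


async def maybe_run_shields(
    shields: List[str],
    messages: List[Dict[str, Any]],
    run_multiple_shields: Callable[..., Awaitable[Any]],
) -> Optional[Dict[str, Any]]:
    """Return a shield_call step only when shields are actually configured."""
    if not shields:
        # No input_shields / output_shields configured: skip the step entirely
        # rather than emitting a shield_call step with violation=None.
        return None

    # Shields configured: run them. violation may still be None here, which now
    # unambiguously means "shields ran and found nothing".
    violation = await run_multiple_shields(messages, shields)
    return {"step_type": "shield_call", "violation": violation}
```

With this shape, a consumer that sees no `shield_call` step knows shields were never run, while `violation=None` on an emitted step means shields ran and raised no violation.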
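Below is a rough sketch of the kind of client-side check the Test Plan describes: create an agent with no shields and assert that the completed turn contains no `shield_call` step. The client API names used here (`LlamaStackClient`, `Agent`, `AgentConfig`, `create_turn`, the streamed `turn_complete` payload, `step.step_type`) are assumptions modeled on the client-sdk tests; the gist linked above is the authoritative reproduction.

```python
# Rough sketch, not a verified test: the imports, streaming event shape, and
# field names below are assumptions and may differ from the real client SDK.
from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.types.agent_create_params import AgentConfig

client = LlamaStackClient(base_url="http://localhost:5000")

# No input_shields / output_shields: after this change, the completed turn
# should contain no shield_call step at all (not one with violation=None).
agent = Agent(
    client,
    AgentConfig(
        model="meta-llama/Llama-3.1-8B-Instruct",  # placeholder model id
        instructions="You are a helpful assistant.",
        input_shields=[],
        output_shields=[],
        enable_session_persistence=False,
    ),
)
session_id = agent.create_session("no-shields-session")

turn = None
for chunk in agent.create_turn(
    messages=[{"role": "user", "content": "Hello"}],
    session_id=session_id,
):
    payload = chunk.event.payload
    if payload.event_type == "turn_complete":
        turn = payload.turn

assert turn is not None
assert all(step.step_type != "shield_call" for step in turn.steps)
```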
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_models.sku_list import all_registered_models

from llama_stack.apis.models import ModelInput
from llama_stack.distribution.datatypes import Provider
from llama_stack.providers.inline.memory.faiss.config import FaissImplConfig
from llama_stack.providers.remote.inference.bedrock.bedrock import MODEL_ALIASES
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings


def get_distribution_template() -> DistributionTemplate:
    # Providers backing each API in the Bedrock distribution.
    providers = {
        "inference": ["remote::bedrock"],
        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
        "safety": ["remote::bedrock"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
    }

    name = "bedrock"
    # Default memory provider: an inline Faiss store scoped to this distribution.
    memory_provider = Provider(
        provider_id="faiss",
        provider_type="inline::faiss",
        config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
    )

    # Map each registered core model descriptor to its Hugging Face repo name.
    core_model_to_hf_repo = {
        m.descriptor(): m.huggingface_repo for m in all_registered_models()
    }

    # Register every Bedrock model alias under the Bedrock inference provider.
    default_models = [
        ModelInput(
            model_id=core_model_to_hf_repo[m.llama_model],
            provider_model_id=m.provider_model_id,
            provider_id="bedrock",
        )
        for m in MODEL_ALIASES
    ]

    return DistributionTemplate(
        name=name,
        distro_type="self_hosted",
        description="Use AWS Bedrock for running LLM inference and safety",
        docker_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        default_models=default_models,
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "memory": [memory_provider],
                },
                default_models=default_models,
            ),
        },
        run_config_env_vars={
            "LLAMASTACK_PORT": (
                "5001",
                "Port for the Llama Stack distribution server",
            ),
        },
    )
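For orientation, a small usage sketch of the template above follows. The module path and attribute reads are assumptions based only on the file shown here (they presume `DistributionTemplate` keeps its constructor arguments as readable attributes); the real distribution tooling consumes these templates through its own generation scripts.

```python
# Hedged usage sketch: the import path and attribute access below are
# assumptions inferred from the file above, not a documented public API.
from llama_stack.templates.bedrock.bedrock import get_distribution_template

template = get_distribution_template()

print(template.name)                    # "bedrock"
print(template.providers["inference"])  # ["remote::bedrock"]
print(len(template.default_models))     # one ModelInput per entry in MODEL_ALIASES
```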