# Mirror of https://github.com/meta-llama/llama-stack.git
# Synced 2025-06-27 18:50:41 +00:00
#
# Auto-generated distribution config. The generating PR produces
# build.yaml / run.yaml / run-with-safety.yaml and parts of the
# distribution markdown docs automatically. Per the PR notes, this has
# had some testing (notably with ollama and vllm) but needs much more.
---
# llama-stack run configuration (schema version 2).
version: '2'
image_name: local
docker_image: null
conda_env: local

# APIs this distribution serves.
apis:
  - shields
  - agents
  - models
  - memory
  - memory_banks
  - inference
  - safety

# Provider implementations, keyed by API. Each entry binds a
# provider_id to an inline provider type plus its config.
providers:
  inference:
    # int4-quantized 3B instruct model served by the quantized
    # meta-reference inference provider.
    - provider_id: meta0
      provider_type: inline::meta-reference-quantized
      config:
        model: Llama3.2-3B-Instruct:int4-qlora-eo8
        quantization:
          type: int4
        torch_seed: null
        max_seq_len: 2048
        max_batch_size: 1
    # Second inference provider hosting the safety model.
    - provider_id: meta1
      provider_type: inline::meta-reference-quantized
      config:
        # not a quantized model !
        model: Llama-Guard-3-1B
        quantization: null
        torch_seed: null
        max_seq_len: 2048
        max_batch_size: 1
  safety:
    # Llama Guard content-safety shield; excluded_categories empty
    # means all shield categories are enforced.
    - provider_id: meta0
      provider_type: inline::llama-guard
      config:
        model: Llama-Guard-3-1B
        excluded_categories: []
    # Prompt Guard jailbreak/injection classifier.
    - provider_id: meta1
      provider_type: inline::prompt-guard
      config:
        model: Prompt-Guard-86M
  memory:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config: {}
  agents:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config:
        # Agent session state persisted to a local SQLite KV store.
        persistence_store:
          namespace: null
          type: sqlite
          db_path: ~/.llama/runtime/kvstore.db
  telemetry:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config: {}