# llama-stack-mirror/tests/examples/simple-local-run.yaml
---
# Llama Stack "simple-local" run configuration: serves all listed APIs through
# the meta-reference providers, running in a local conda environment.
# NOTE(review): nesting below was reconstructed — the pasted source had every
# line flattened to column 0, which is invalid YAML (duplicate top-level
# provider_id/config keys). Verify against the llama-stack run-config schema.
built_at: '2024-09-19T22:50:36.239761'  # quoted so it stays a string, not a YAML timestamp
image_name: simple-local
docker_image: null  # not containerized; presumably runs via the conda env below
conda_env: simple-local
# APIs this stack exposes. Note: models/telemetry appear here while only some
# have entries in provider_map — TODO confirm which APIs require an explicit
# provider entry.
apis_to_serve:
- inference
- safety
- agents
- memory
- models
- telemetry
# Per-API provider selection and provider-specific configuration.
provider_map:
  inference:
    provider_id: meta-reference
    config:
      model: Meta-Llama3.1-8B-Instruct
      quantization: null  # presumably unquantized/full-precision — confirm provider semantics
      torch_seed: null    # no fixed seed configured
      max_seq_len: 4096
      max_batch_size: 1
  safety:
    provider_id: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-8B
        excluded_categories: []      # no categories excluded
        disable_input_check: false   # input checking enabled
        disable_output_check: false  # output checking enabled
      prompt_guard_shield:
        model: Prompt-Guard-86M
  # The remaining providers use the meta-reference implementation with an
  # empty (default) configuration.
  agents:
    provider_id: meta-reference
    config: {}
  memory:
    provider_id: meta-reference
    config: {}
  telemetry:
    provider_id: meta-reference
    config: {}
provider_routing_table: {}  # empty — no routing overrides configured