Mirror of https://github.com/meta-llama/llama-stack.git
config templates restructure, docs (#262)

* wip
* config templates
* readmes

Commit d787d1e84f (parent a07dfffbbf)
16 changed files with 57 additions and 78 deletions
The first changed file is a distribution build template: it is renamed from `local` to `local-gpu` and switched from a conda environment to a docker image.

```diff
@@ -1,4 +1,4 @@
-name: local
+name: local-gpu
 distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:
@@ -7,4 +7,4 @@ distribution_spec:
     safety: meta-reference
     agents: meta-reference
     telemetry: meta-reference
-image_type: conda
+image_type: docker
```
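Taken together, the two hunks touch only the `name:` and `image_type:` lines, so the rest of the template is unchanged. A sketch of the resulting file, assuming the `inference` and `memory` provider entries (which sit between the two hunks and are not visible in this diff) keep the usual `meta-reference` values:

```yaml
# Sketch of the resulting local-gpu build template.
# The inference/memory lines are assumptions; they fall between the
# two hunks above and are not shown in the diff.
name: local-gpu
distribution_spec:
  description: Use code from `llama_stack` itself to serve all llama stack APIs
  providers:
    inference: meta-reference  # assumed, not shown in the diff
    memory: meta-reference     # assumed, not shown in the diff
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
image_type: docker
```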
A second build template goes from a local meta-reference stack to a remote one, pointing inference at TGI and memory at ChromaDB:

```diff
@@ -1,11 +1,11 @@
-name: local-gpu
+name: local-tgi-chroma
 distribution_spec:
-  description: local meta reference
+  description: remote tgi inference + chromadb memory
   docker_image: null
   providers:
-    inference: meta-reference
+    inference: remote::tgi
     safety: meta-reference
     agents: meta-reference
-    memory: meta-reference
+    memory: remote::chromadb
     telemetry: meta-reference
 image_type: docker
```
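Because this hunk covers the whole file, the resulting template can be read off directly from the added and unchanged lines. Note that only the two providers moving off-box change; safety, agents, and telemetry stay on the meta-reference implementations:

```yaml
# Resulting local-tgi-chroma build template, assembled from the
# + and context lines of the hunk above.
name: local-tgi-chroma
distribution_spec:
  description: remote tgi inference + chromadb memory
  docker_image: null
  providers:
    inference: remote::tgi
    safety: meta-reference
    agents: meta-reference
    memory: remote::chromadb
    telemetry: meta-reference
image_type: docker
```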
The run config templates move the same way. Here the docker-flavored `local-gpu` run config becomes the conda-based `local` one, the served APIs list is expanded, and the previously null safety shields are filled in:

```diff
@@ -1,16 +1,16 @@
 version: '2'
-built_at: '2024-10-08T17:42:33.690666'
-image_name: local-gpu
-docker_image: local-gpu
-conda_env: null
+built_at: '2024-10-08T17:40:45.325529'
+image_name: local
+docker_image: null
+conda_env: local
 apis:
-- memory
-- inference
-- agents
 - shields
-- safety
+- agents
+- models
+- memory
+- memory_banks
+- inference
+- safety
 providers:
   inference:
   - provider_id: meta-reference
@@ -25,8 +25,13 @@ providers:
   - provider_id: meta-reference
     provider_type: meta-reference
     config:
-      llama_guard_shield: null
-      prompt_guard_shield: null
+      llama_guard_shield:
+        model: Llama-Guard-3-1B
+        excluded_categories: []
+        disable_input_check: false
+        disable_output_check: false
+      prompt_guard_shield:
+        model: Prompt-Guard-86M
   memory:
   - provider_id: meta-reference
     provider_type: meta-reference
```
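The second hunk replaces the two `null` shield entries with concrete settings. The resulting safety section, read off the added and context lines:

```yaml
# Resulting safety provider config, assembled from the + and context
# lines of the second hunk above.
safety:
- provider_id: meta-reference
  provider_type: meta-reference
  config:
    llama_guard_shield:
      model: Llama-Guard-3-1B      # small Llama Guard variant
      excluded_categories: []      # no guard categories excluded
      disable_input_check: false   # keep checking user input
      disable_output_check: false  # keep checking model output
    prompt_guard_shield:
      model: Prompt-Guard-86M
```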
Likewise, the `local-cpu` run config becomes the conda-based `local` config. Inference swaps a host/port `remote::ollama` entry for a URL-based `remote::tgi` one, and the safety shields get the same concrete settings:

```diff
@@ -1,29 +1,33 @@
 version: '2'
-built_at: '2024-10-08T17:42:07.505267'
-image_name: local-cpu
-docker_image: local-cpu
-conda_env: null
+built_at: '2024-10-08T17:40:45.325529'
+image_name: local
+docker_image: null
+conda_env: local
 apis:
-- shields
 - agents
-- inference
 - models
 - memory
-- safety
+- shields
+- memory_banks
+- inference
+- safety
 providers:
   inference:
-  - provider_id: remote::ollama
-    provider_type: remote::ollama
+  - provider_id: tgi0
+    provider_type: remote::tgi
     config:
-      host: localhost
-      port: 6000
+      url: http://127.0.0.1:5009
   safety:
   - provider_id: meta-reference
     provider_type: meta-reference
     config:
-      llama_guard_shield: null
-      prompt_guard_shield: null
+      llama_guard_shield:
+        model: Llama-Guard-3-1B
+        excluded_categories: []
+        disable_input_check: false
+        disable_output_check: false
+      prompt_guard_shield:
+        model: Prompt-Guard-86M
   memory:
   - provider_id: meta-reference
     provider_type: meta-reference
```
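For reference, here is the full run config that results from this last diff, assembled from its added and unchanged lines. The exact order of the `apis` list is a best-effort reading of the diff rendering above; everything else is taken directly from the hunk:

```yaml
# Resulting local run config (sketch; apis order reconstructed).
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- agents
- models
- memory
- shields
- memory_banks
- inference
- safety
providers:
  inference:
  - provider_id: tgi0
    provider_type: remote::tgi
    config:
      url: http://127.0.0.1:5009   # TGI server endpoint
  safety:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-1B
        excluded_categories: []
        disable_input_check: false
        disable_output_check: false
      prompt_guard_shield:
        model: Prompt-Guard-86M
  memory:
  - provider_id: meta-reference
    provider_type: meta-reference
```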