mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-17 12:32:36 +00:00)
Adding docker-compose.yaml, starting to simplify
parent e4509cb568
commit f38e76ee98
14 changed files with 516 additions and 386 deletions
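The commit pairs a new docker-compose.yaml with a simplification of what appears to be the remote-vllm distribution's run.yaml, whose old defaults pointed at a vLLM server on http://host.docker.internal:5100/v1 (plus a second one on :5101 for safety). The compose file itself is not reproduced on this page, so what follows is only a minimal sketch of such a setup; the service names, images, model, and ports are illustrative assumptions, not contents of the commit:

# Hypothetical sketch only -- not the commit's actual docker-compose.yaml.
# Image names, the model, and ports are assumptions.
services:
  vllm:
    image: vllm/vllm-openai:latest              # assumed image; serves an OpenAI-compatible API
    command: ["--model", "meta-llama/Llama-3.1-8B-Instruct", "--port", "5100"]
    ports:
      - "5100:5100"                             # matches the old run.yaml default (host.docker.internal:5100)
  llamastack:
    image: llamastack/distribution-remote-vllm  # assumed distribution image name
    depends_on:
      - vllm
    environment:
      VLLM_URL: http://vllm:5100/v1             # on the compose network the service name resolves directly
      INFERENCE_MODEL: meta-llama/Llama-3.1-8B-Instruct

Inside a shared compose network the bare service name works as a hostname; host.docker.internal is only needed when a container must reach a server running on the host, which is what the old defaults assumed.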
@@ -6,39 +6,25 @@ conda_env: null
 apis:
 - inference
 - memory
 - safety
 - agents
 - telemetry
 providers:
   inference:
-    # serves main inference model
-    - provider_id: vllm-0
+    - provider_id: vllm-inference
       provider_type: remote::vllm
       config:
-        # NOTE: replace with "localhost" if you are running in "host" network mode
-        url: ${env.VLLM_URL:http://host.docker.internal:5100/v1}
-        max_tokens: ${env.MAX_TOKENS:4096}
-        api_token: fake
-    # serves safety llama_guard model
-    - provider_id: vllm-1
-      provider_type: remote::vllm
-      config:
-        # NOTE: replace with "localhost" if you are running in "host" network mode
-        url: ${env.SAFETY_VLLM_URL:http://host.docker.internal:5101/v1}
+        url: ${env.VLLM_URL}
         max_tokens: ${env.MAX_TOKENS:4096}
         api_token: fake
   memory:
-    - provider_id: faiss-0
+    - provider_id: faiss
       provider_type: inline::faiss
       config:
         kvstore:
           namespace: null
           type: sqlite
           db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/faiss_store.db"
   safety:
     - provider_id: llama-guard
       provider_type: inline::llama-guard
       config: {}
-  memory:
-    - provider_id: meta0
-      provider_type: inline::faiss
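A note on the substitution syntax above: ${env.VAR:default} resolves an environment variable with an inline fallback, while the bare ${env.VAR} form the new file switches to has no fallback, so VLLM_URL must now be provided at launch (via the compose environment: block, or with CLI flags along the lines of llama stack run ... --env VLLM_URL=..., to the best of my reading of the CLI of this era). The two forms side by side, with the values from this diff:

# With a fallback: uses VLLM_URL when set, else the literal default
url: ${env.VLLM_URL:http://host.docker.internal:5100/v1}
# Without one: the stack has nothing to fall back on if VLLM_URL is unset
url: ${env.VLLM_URL}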
@@ -60,9 +46,5 @@ metadata_store:
   type: sqlite
   db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db"
 models:
-  - model_id: ${env.INFERENCE_MODEL:Llama3.1-8B-Instruct}
-    provider_id: vllm-0
-  - model_id: ${env.SAFETY_MODEL:Llama-Guard-3-1B}
-    provider_id: vllm-1
-shields:
-  - shield_id: ${env.SAFETY_MODEL:Llama-Guard-3-1B}
+  - model_id: ${env.INFERENCE_MODEL}
+    provider_id: vllm-inference
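The same tightening applies to model registration: the old file registered a default inference model, a Llama Guard safety model, and a matching shield, while the new one registers only ${env.INFERENCE_MODEL}, with no default and no shield at all. A sketch of supplying the now-mandatory variables through the (assumed) compose service from the earlier sketch:

services:
  llamastack:
    environment:
      INFERENCE_MODEL: meta-llama/Llama-3.1-8B-Instruct  # illustrative; must be a model the vLLM server actually serves
      VLLM_URL: http://vllm:5100/v1

With the shields: section gone, Llama Guard is no longer registered by this file; safety would have to be wired back in explicitly if wanted.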