# distributions/remote-vllm/run-with-safety.yaml
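#
# Runs Llama Stack against two remote vLLM endpoints: one serving the main
# inference model and one serving a Llama Guard safety model. A minimal
# launch sketch (the exported values below are hypothetical examples, not
# defaults shipped with this file):
#
#   export VLLM_URL=http://127.0.0.1:8000/v1
#   export SAFETY_VLLM_URL=http://127.0.0.1:8001/v1
#   export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
#   export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
#   llama stack run run-with-safety.yaml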
version: '2'
built_at: '2024-11-11T20:09:45.988375'
image_name: remote-vllm
docker_image: remote-vllm
conda_env: null
apis:
- inference
- memory
- safety
- agents
- telemetry
providers:
  inference:
  # serves the main inference model
  - provider_id: vllm-inference
    provider_type: remote::vllm
    config:
      # NOTE: replace with "localhost" if you are running in "host" network mode
      url: ${env.VLLM_URL}
      max_tokens: ${env.MAX_TOKENS:4096}
      api_token: fake
  # serves the Llama Guard safety model
  - provider_id: vllm-safety
    provider_type: remote::vllm
    config:
      # NOTE: replace with "localhost" if you are running in "host" network mode
      url: ${env.SAFETY_VLLM_URL}
      max_tokens: ${env.MAX_TOKENS:4096}
      api_token: fake
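      # NOTE: "fake" is a placeholder; a vLLM server started without an API
      # key accepts any token. Set a real value here if your endpoint
      # enforces authentication.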
  memory:
  - provider_id: faiss-0
    provider_type: inline::faiss
    config:
      kvstore:
        namespace: null
        type: sqlite
        db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/faiss_store.db"
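  # llama-guard runs safety checks with the model served by the vllm-safety
  # endpoint above; ${env.SAFETY_MODEL} is registered to that provider and
  # exposed as a shield at the bottom of this file.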
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config: {}
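  # the persistence_store below keeps agent session state in sqlite so
  # conversations survive server restarts.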
  agents:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db"
  telemetry:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config: {}
metadata_store:
  namespace: null
  type: sqlite
  db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db"
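# register each model with the vLLM provider that serves it; the shield entry
# makes the safety model available as a Llama Guard shield for agents.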
models:
- model_id: ${env.INFERENCE_MODEL}
  provider_id: vllm-inference
- model_id: ${env.SAFETY_MODEL}
  provider_id: vllm-safety
shields:
- shield_id: ${env.SAFETY_MODEL}