llama-stack-mirror/distributions/remote-vllm/run.yaml
version: '2'
built_at: '2024-11-11T20:09:45.988375'
image_name: remote-vllm
docker_image: remote-vllm
conda_env: null
apis:
- inference
- memory
- agents
- telemetry
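# Values written as ${env.VAR} or ${env.VAR:default} are substituted from the
# environment when the stack starts. VLLM_URL has no default here, so it is
# presumably required; MAX_TOKENS and SQLITE_STORE_DIR fall back to the
# defaults shown below.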
providers:
  inference:
  # serves main inference model
  - provider_id: vllm-inference
    provider_type: remote::vllm
    config:
      url: ${env.VLLM_URL}
      max_tokens: ${env.MAX_TOKENS:4096}
      api_token: fake
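  # Vector memory backed by FAISS; its key-value store is persisted to a local
  # sqlite file under SQLITE_STORE_DIR.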
  memory:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        namespace: null
        type: sqlite
        db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/faiss_store.db"
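  # Agent persistence: the meta-reference agents provider keeps its state in
  # its own sqlite store alongside the FAISS store.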
  agents:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db"
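  # Telemetry uses the inline meta-reference provider with its default (empty)
  # configuration.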
  telemetry:
  - provider_id: meta0
    provider_type: inline::meta-reference
    config: {}
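# Registry store for objects registered with this stack instance (e.g. the
# models below); kept in sqlite next to the other stores.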
metadata_store:
  namespace: null
  type: sqlite
  db_path: "${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db"
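# Register the served model with the vLLM inference provider; INFERENCE_MODEL
# should match the model actually being served at VLLM_URL.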
models:
- model_id: ${env.INFERENCE_MODEL}
  provider_id: vllm-inference
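# A typical launch (assumed invocation, adjust to your install): export
# VLLM_URL and INFERENCE_MODEL, then start the server with this file, e.g.
# `llama stack run <path-to-this-run.yaml>`.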