llama-stack-mirror/llama_stack/distributions/nvidia/build.yaml

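# Build spec for the NVIDIA distribution: NVIDIA NIM-backed remote providers
# handle inference, safety, eval, post-training, and one datasetio backend,
# while inline providers cover vector I/O (FAISS), agents, telemetry, local
# dataset I/O, scoring, and the RAG tool runtime.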
version: 2
distribution_spec:
  description: Use NVIDIA NIM for running LLM inference, evaluation and safety
  providers:
    inference:
    - provider_type: remote::nvidia
    vector_io:
    - provider_type: inline::faiss
    safety:
    - provider_type: remote::nvidia
    agents:
    - provider_type: inline::meta-reference
    telemetry:
    - provider_type: inline::meta-reference
    eval:
    - provider_type: remote::nvidia
    post_training:
    - provider_type: remote::nvidia
    datasetio:
    - provider_type: inline::localfs
    - provider_type: remote::nvidia
    scoring:
    - provider_type: inline::basic
    tool_runtime:
    - provider_type: inline::rag-runtime
image_type: venv
additional_pip_packages:
- aiosqlite
- sqlalchemy[asyncio]