---
# Llama Stack distribution build spec: NVIDIA NIM-backed stack.
# Maps each stack API to the provider implementation(s) that serve it;
# `remote::` providers call an external service, `inline::` run in-process.
version: '2'  # build-spec schema version (string, not int)
distribution_spec:
  description: Use NVIDIA NIM for running LLM inference and safety
  providers:
    inference:
      - remote::nvidia
    vector_io:
      - inline::faiss
    safety:
      - remote::nvidia
    agents:
      - inline::meta-reference
    telemetry:
      - inline::meta-reference
    eval:
      - inline::meta-reference
    datasetio:
      - inline::localfs
      - remote::nvidia
    scoring:
      - inline::basic
      - inline::llm-as-judge
      - inline::braintrust
    post_training:
      - remote::nvidia
    tool_runtime:
      - inline::rag-runtime
# Packaging target for the built distribution image.
image_type: conda