# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.distribution.datatypes import ModelInput, Provider
from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings


def get_distribution_template() -> DistributionTemplate:
    """Build the NVIDIA NIM distribution template and its default run configuration."""
    # Providers enabled for each API in this distribution; inference is served
    # remotely by NVIDIA NIM, everything else runs inline or against other remotes.
    providers = {
        "inference": ["remote::nvidia"],
        "memory": ["inline::faiss"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
        "tool_runtime": [
            "remote::brave-search",
            "remote::tavily-search",
            "inline::code-interpreter",
            "inline::memory-runtime",
        ],
    }

    inference_provider = Provider(
        provider_id="nvidia",
        provider_type="remote::nvidia",
        config=NVIDIAConfig.sample_run_config(),
    )

    # The served model is resolved from the INFERENCE_MODEL environment variable at runtime.
    inference_model = ModelInput(
        model_id="${env.INFERENCE_MODEL}",
        provider_id="nvidia",
    )

    return DistributionTemplate(
        name="nvidia",
        distro_type="remote_hosted",
        description="Use NVIDIA NIM for running LLM inference",
        docker_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        default_models=[inference_model],
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                },
                default_models=[inference_model],
            ),
        },
        run_config_env_vars={
            "LLAMASTACK_PORT": (
                "5001",
                "Port for the Llama Stack distribution server",
            ),
            "NVIDIA_API_KEY": (
                "",
                "NVIDIA API Key",
            ),
        },
    )
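

# A minimal usage sketch, illustrative only and not part of the original module: it builds
# the template and prints a few of the fields set above. Attribute access here assumes
# DistributionTemplate exposes its constructor arguments as fields (it is used as a plain
# data model above); running this requires a llama-stack checkout where the imports resolve.
if __name__ == "__main__":
    template = get_distribution_template()
    print(f"template: {template.name} ({template.distro_type})")
    print(f"description: {template.description}")
    print(f"run configs: {list(template.run_configs.keys())}")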