# What does this PR do?

Adds an `nvidia` template for creating a distribution that uses the inference adapter for NVIDIA NIMs.

## Test Plan

Built the Llama Stack distribution for nvidia from the template, with both docker and conda, then exercised it with the client CLI:

```bash
(.venv) local-cdgamarose@a4u8g-0006:~/llama-stack$ llama-stack-client configure --endpoint http://localhost:5000
Done! You can now use the Llama Stack Client CLI with endpoint http://localhost:5000
(.venv) local-cdgamarose@a4u8g-0006:~/llama-stack$ llama-stack-client models list
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓
┃ identifier                       ┃ provider_id ┃ provider_resource_id       ┃ metadata ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩
│ Llama3.1-8B-Instruct             │ nvidia      │ meta/llama-3.1-8b-instruct │ {}       │
│ meta-llama/Llama-3.2-3B-Instruct │ nvidia      │ meta/llama-3.2-3b-instruct │ {}       │
└──────────────────────────────────┴─────────────┴────────────────────────────┴──────────┘
(.venv) local-cdgamarose@a4u8g-0006:~/llama-stack$ llama-stack-client inference chat-completion --message "hello, write me a 2 sentence poem"
ChatCompletionResponse(
    completion_message=CompletionMessage(
        content='Here is a 2 sentence poem:\n\nThe sun sets slow and paints the sky, \nA gentle hue of pink that makes me sigh.',
        role='assistant',
        stop_reason='end_of_turn',
        tool_calls=[]
    ),
    logprobs=None
)
```

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [x] Ran pre-commit to handle lint / formatting issues.
- [x] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [x] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.

---------

Co-authored-by: Matthew Farrellee <matt@cs.wisc.edu>
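As an aside on the test plan: the same checks can be scripted. Below is a minimal sketch assuming the `llama-stack-client` Python SDK (the Python counterpart of the CLI used above); parameter names such as `model_id` follow recent SDK releases and may differ in older versions.

```python
# Minimal sketch of the test plan above using the llama-stack-client Python SDK
# (an assumption: `pip install llama-stack-client`, recent versions).
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

# Equivalent of `llama-stack-client models list`
for model in client.models.list():
    print(model.identifier, model.provider_id, model.provider_resource_id)

# Equivalent of `llama-stack-client inference chat-completion --message ...`
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.2-3B-Instruct",  # one of the models listed above
    messages=[{"role": "user", "content": "hello, write me a 2 sentence poem"}],
)
print(response.completion_message.content)
```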
The new template module (70 lines, 2.2 KiB, Python):
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.distribution.datatypes import ModelInput, Provider
from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings


def get_distribution_template() -> DistributionTemplate:
    providers = {
        "inference": ["remote::nvidia"],
        "memory": ["inline::faiss"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
        "tool_runtime": [
            "remote::brave-search",
            "remote::tavily-search",
            "inline::code-interpreter",
            "inline::memory-runtime",
        ],
    }

    inference_provider = Provider(
        provider_id="nvidia",
        provider_type="remote::nvidia",
        config=NVIDIAConfig.sample_run_config(),
    )

    inference_model = ModelInput(
        model_id="${env.INFERENCE_MODEL}",
        provider_id="nvidia",
    )

    return DistributionTemplate(
        name="nvidia",
        distro_type="remote_hosted",
        description="Use NVIDIA NIM for running LLM inference",
        docker_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        default_models=[inference_model],
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                },
                default_models=[inference_model],
            ),
        },
        run_config_env_vars={
            "LLAMASTACK_PORT": (
                "5001",
                "Port for the Llama Stack distribution server",
            ),
            "NVIDIA_API_KEY": (
                "",
                "NVIDIA API Key",
            ),
        },
    )
```
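For illustration, here is a short sketch of how the template object can be loaded and inspected; the import path below is an assumption based on where the other bundled templates live (`llama_stack/templates/<name>/<name>.py`):

```python
# Illustrative sketch only; the import path is an assumption based on where
# the other distribution templates live in the repository.
from llama_stack.templates.nvidia.nvidia import get_distribution_template

template = get_distribution_template()

print(template.name)                         # "nvidia"
print(template.providers["inference"])       # ["remote::nvidia"]
print(sorted(template.run_config_env_vars))  # ["LLAMASTACK_PORT", "NVIDIA_API_KEY"]
```

Note that `model_id="${env.INFERENCE_MODEL}"` is a placeholder rather than a concrete model: Llama Stack substitutes `${env.VAR}` references from the environment when the generated `run.yaml` is loaded, so the served model can be changed without editing the template.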