mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-21 03:59:42 +00:00
add nvidia distribution (#565)
# What does this PR do? adds nvidia template for creating a distribution using inference adapter for NVIDIA NIMs. ## Test Plan Please describe: Build llama stack distribution for nvidia using the template, docker and conda. ```bash (.venv) local-cdgamarose@a4u8g-0006:~/llama-stack$ llama-stack-client configure --endpoint http://localhost:5000 Done! You can now use the Llama Stack Client CLI with endpoint http://localhost:5000 (.venv) local-cdgamarose@a4u8g-0006:~/llama-stack$ llama-stack-client models list ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓ ┃ identifier ┃ provider_id ┃ provider_resource_id ┃ metadata ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩ │ Llama3.1-8B-Instruct │ nvidia │ meta/llama-3.1-8b-instruct │ {} │ │ meta-llama/Llama-3.2-3B-Instruct │ nvidia │ meta/llama-3.2-3b-instruct │ {} │ └──────────────────────────────────┴─────────────┴────────────────────────────┴──────────┘ (.venv) local-cdgamarose@a4u8g-0006:~/llama-stack$ llama-stack-client inference chat-completion --message "hello, write me a 2 sentence poem" ChatCompletionResponse( completion_message=CompletionMessage( content='Here is a 2 sentence poem:\n\nThe sun sets slow and paints the sky, \nA gentle hue of pink that makes me sigh.', role='assistant', stop_reason='end_of_turn', tool_calls=[] ), logprobs=None ) ``` ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [x] Ran pre-commit to handle lint / formatting issues. - [x] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [x] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests. --------- Co-authored-by: Matthew Farrellee <matt@cs.wisc.edu>
This commit is contained in:
parent
7fb2c1c48d
commit
b3202bcf77
15 changed files with 582 additions and 1 deletions
1
distributions/remote-nvidia/build.yaml
Symbolic link
1
distributions/remote-nvidia/build.yaml
Symbolic link
|
@@ -0,0 +1 @@
|
|||
../../llama_stack/templates/nvidia/build.yaml
|
19
distributions/remote-nvidia/compose.yaml
Normal file
19
distributions/remote-nvidia/compose.yaml
Normal file
|
@@ -0,0 +1,19 @@
|
|||
# Docker Compose definition for running the NVIDIA NIM-backed llama-stack
# distribution (distribution-nvidia image) against a locally mounted run.yaml.
services:
  llamastack:
    image: distribution-nvidia:dev
    # Host networking so the server is reachable directly on the host's port 5000.
    network_mode: "host"
    volumes:
      # Persist model/cache state under the invoking user's ~/.llama.
      - ~/.llama:/root/.llama
      # Mount the distribution run config into the container.
      - ./run.yaml:/root/llamastack-run-nvidia.yaml
    ports:
      - "5000:5000"
    environment:
      # Defaults to Llama3.1-8B-Instruct when INFERENCE_MODEL is unset.
      - INFERENCE_MODEL=${INFERENCE_MODEL:-Llama3.1-8B-Instruct}
      # NVIDIA_API_KEY may be empty; the NIM adapter reads it at startup.
      - NVIDIA_API_KEY=${NVIDIA_API_KEY:-}
    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml-config /root/llamastack-run-nvidia.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
|
1
distributions/remote-nvidia/run.yaml
Symbolic link
1
distributions/remote-nvidia/run.yaml
Symbolic link
|
@@ -0,0 +1 @@
|
|||
../../llama_stack/templates/nvidia/run.yaml
|
Loading…
Add table
Add a link
Reference in a new issue