# What does this PR do?

This PR adds a basic inference adapter for NVIDIA NIMs.

What it does:
- chat completion API
  - tool calls
  - streaming
  - structured output
  - logprobs
- support for hosted NIM on integrate.api.nvidia.com
- support for downloaded NIM containers

(A hedged configuration sketch for both deployment modes follows the checklist below.)

What it does not do:
- completion API
- embedding API
- vision models
- builtin tools
- have certainty that sampling strategies are correct

## Feature/Issue validation/testing/test plan

`pytest -s -v --providers inference=nvidia llama_stack/providers/tests/inference/ --env NVIDIA_API_KEY=...`

All tests should pass. There are Pydantic v1 warnings.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [x] Did you read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Was this discussed/approved via a Github issue? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes?
- [x] Did you write any new necessary tests?

Thanks for contributing 🎉!
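As a quick orientation, here is a minimal configuration sketch for the two deployment modes named above. This is illustrative only: the `NVIDIAConfig` field names (`url`, `api_key`), the package import path, and the local port are assumptions, not confirmed by this PR.

```python
# Illustrative sketch only: the import path, the `url`/`api_key` field names,
# and the local port are assumptions, not confirmed by this PR.
import os

from llama_stack.providers.adapters.inference.nvidia import NVIDIAConfig

# Hosted NIM on integrate.api.nvidia.com (needs NVIDIA_API_KEY, as in the test plan).
hosted = NVIDIAConfig(
    url="https://integrate.api.nvidia.com",
    api_key=os.environ["NVIDIA_API_KEY"],
)

# A downloaded NIM container served locally (port 8000 is an assumption).
local = NVIDIAConfig(
    url="http://localhost:8000",
)
```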
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.inference import Inference

from .config import NVIDIAConfig


async def get_adapter_impl(config: NVIDIAConfig, _deps) -> Inference:
    # import dynamically so `llama stack build` does not fail due to missing dependencies
    from .nvidia import NVIDIAInferenceAdapter

    if not isinstance(config, NVIDIAConfig):
        raise RuntimeError(f"Unexpected config type: {type(config)}")

    adapter = NVIDIAInferenceAdapter(config)
    return adapter


__all__ = ["get_adapter_impl", "NVIDIAConfig"]
```
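To show how the factory above is meant to be called, here is a hedged wiring sketch. The package import path and passing an empty dict for `_deps` are assumptions; only `get_adapter_impl`'s signature and the `NVIDIAConfig` type check come from the code itself.

```python
# Hedged wiring sketch; the import path and the empty-dict `_deps` are assumptions.
import asyncio

from llama_stack.providers.adapters.inference.nvidia import (
    NVIDIAConfig,
    get_adapter_impl,
)


async def main() -> None:
    # Field name `url` is assumed; see the config sketch earlier in this PR.
    config = NVIDIAConfig(url="https://integrate.api.nvidia.com")
    # `_deps` is unused by this factory, so an empty dict is passed here.
    adapter = await get_adapter_impl(config, {})
    # `adapter` implements the llama_stack Inference API; chat completion,
    # streaming, tool calls, structured output, and logprobs route through it.


asyncio.run(main())
```

The dynamic import inside `get_adapter_impl` keeps `llama stack build` working even when the NVIDIA client dependencies are not installed, per the comment in the module.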