Mirror of
https://github.com/meta-llama/llama-stack.git,
synced 2025-08-02 16:54:42 +00:00
dynamically import NVIDIAInferenceAdapter
This commit is contained in:
parent
3ed2e816fa
commit
988741c276
1 changed file with 6 additions and 2 deletions
|
@@ -4,11 +4,15 @@
|
||||||
# This source code is licensed under the terms described in the LICENSE file in
|
# This source code is licensed under the terms described in the LICENSE file in
|
||||||
# the root directory of this source tree.
|
# the root directory of this source tree.
|
||||||
|
|
||||||
|
from llama_stack.apis.inference import Inference
|
||||||
|
|
||||||
from ._config import NVIDIAConfig
|
from ._config import NVIDIAConfig
|
||||||
from ._nvidia import NVIDIAInferenceAdapter
|
|
||||||
|
|
||||||
|
|
||||||
async def get_adapter_impl(config: NVIDIAConfig, _deps) -> NVIDIAInferenceAdapter:
|
async def get_adapter_impl(config: NVIDIAConfig, _deps) -> Inference:
|
||||||
|
# import dynamically so `llama stack build` does not fail due to missing dependencies
|
||||||
|
from ._nvidia import NVIDIAInferenceAdapter
|
||||||
|
|
||||||
if not isinstance(config, NVIDIAConfig):
|
if not isinstance(config, NVIDIAConfig):
|
||||||
raise RuntimeError(f"Unexpected config type: {type(config)}")
|
raise RuntimeError(f"Unexpected config type: {type(config)}")
|
||||||
adapter = NVIDIAInferenceAdapter(config)
|
adapter = NVIDIAInferenceAdapter(config)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue