# What does this PR do?

Adds IBM watsonx.ai as an inference provider ([#1741](https://github.com/meta-llama/llama-stack/issues/1741)).

Co-authored-by: Sajikumar JS <sajikumar.js@ibm.com>
22 lines · 729 B · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.inference import Inference

from .config import WatsonXConfig


async def get_adapter_impl(config: WatsonXConfig, _deps) -> Inference:
    # Import dynamically so `llama stack build` does not fail due to missing dependencies.
    from .watsonx import WatsonXInferenceAdapter

    if not isinstance(config, WatsonXConfig):
        raise RuntimeError(f"Unexpected config type: {type(config)}")
    adapter = WatsonXInferenceAdapter(config)
    return adapter


__all__ = ["get_adapter_impl", "WatsonXConfig"]
```
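For orientation, here is a minimal sketch of how this entry point might be driven directly. The absolute module path and the `WatsonXConfig` field names (`url`, `api_key`, `project_id`) are assumptions for illustration only and are not defined in this file; consult `.config` in this package for the actual schema.

```python
# Hypothetical usage sketch. The import path and the WatsonXConfig fields
# below are assumptions, not taken from this file.
import asyncio

from llama_stack.providers.remote.inference.watsonx import get_adapter_impl  # assumed path
from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig  # assumed path


async def main() -> None:
    config = WatsonXConfig(
        url="https://us-south.ml.cloud.ibm.com",  # assumed field: watsonx.ai endpoint
        api_key="YOUR_IBM_CLOUD_API_KEY",         # assumed field: IBM Cloud API key
        project_id="YOUR_WATSONX_PROJECT_ID",     # assumed field: watsonx project id
    )
    # get_adapter_impl validates the config type, then lazily imports and
    # constructs the adapter; the second argument is the (unused) deps dict.
    adapter = await get_adapter_impl(config, {})
    print(type(adapter).__name__)  # -> WatsonXInferenceAdapter


if __name__ == "__main__":
    asyncio.run(main())
```

A note on the design: because `WatsonXInferenceAdapter` is imported inside `get_adapter_impl` rather than at module top level, the provider's optional SDK dependencies only need to be installed when the adapter is actually instantiated, so `llama stack build` can enumerate providers without them.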