Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-02 08:44:44 +00:00)
rename all _file.py to file.py
This commit is contained in:
  parent 8944491c3c
  commit 1e18791fff

5 changed files with 6 additions and 6 deletions
@@ -6,12 +6,12 @@
 
 from llama_stack.apis.inference import Inference
 
-from ._config import NVIDIAConfig
+from .config import NVIDIAConfig
 
 
 async def get_adapter_impl(config: NVIDIAConfig, _deps) -> Inference:
     # import dynamically so `llama stack build` does not fail due to missing dependencies
-    from ._nvidia import NVIDIAInferenceAdapter
+from .nvidia import NVIDIAInferenceAdapter
 
     if not isinstance(config, NVIDIAConfig):
         raise RuntimeError(f"Unexpected config type: {type(config)}")
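Note: the function-level import kept in this hunk is a deliberate lazy-loading pattern. Because the adapter module is imported inside `get_adapter_impl` rather than at module top level, `llama stack build` can import the package even when the NVIDIA-specific dependencies are not installed; the ImportError is deferred until the adapter is actually constructed. A minimal sketch of the pattern, with hypothetical module and class names:

    def get_adapter_impl(config):
        # Deferred import: if the optional SDK backing `.adapter` is missing,
        # the ImportError surfaces here, at construction time, rather than
        # when the package itself is imported. (`.adapter` and `Adapter` are
        # illustrative names, not the repository's.)
        from .adapter import Adapter

        return Adapter(config)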
@@ -34,13 +34,13 @@ from llama_stack.providers.utils.inference.model_registry import (
     ModelRegistryHelper,
 )
 
-from ._config import NVIDIAConfig
-from ._openai_utils import (
+from . import NVIDIAConfig
+from .openai_utils import (
     convert_chat_completion_request,
     convert_openai_chat_completion_choice,
     convert_openai_chat_completion_stream,
 )
-from ._utils import _is_nvidia_hosted, check_health
+from .utils import _is_nvidia_hosted, check_health
 
 _MODEL_ALIASES = [
     build_model_alias_with_just_provider_model_id(
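Note: the new `from . import NVIDIAConfig` form resolves because the package's `__init__.py` (first hunk) now does `from .config import NVIDIAConfig`, which binds `NVIDIAConfig` as an attribute of the package itself. Sibling modules can then import it from the package; that is safe here because those modules are only loaded lazily, after `__init__.py` has finished executing. A minimal sketch of the re-export pattern, with illustrative file and class names:

    # pkg/__init__.py
    from .config import Config  # re-export: `Config` becomes an attribute of `pkg`

    # pkg/utils.py -- imported only after pkg/__init__.py has run
    from . import Config  # resolves to the attribute bound above

    def describe(config: Config) -> str:
        return f"config: {config!r}"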
@@ -8,7 +8,7 @@ from typing import Tuple
 
 import httpx
 
-from ._config import NVIDIAConfig
+from . import NVIDIAConfig
 
 
 def _is_nvidia_hosted(config: NVIDIAConfig) -> bool: