chore: slight renaming of model alias stuff (#1181)

Quick test by running:
```
LLAMA_STACK_CONFIG=fireworks pytest -s -v tests/client-sdk
```
This commit is contained in:
Ashwin Bharambe 2025-02-20 11:48:46 -08:00 committed by GitHub
parent 2eda050aef
commit eddef0b2ae
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 87 additions and 87 deletions

View file

@@ -38,7 +38,7 @@ from llama_stack.models.llama.sku_list import all_registered_models
from llama_stack.providers.datatypes import ModelsProtocolPrivate
from llama_stack.providers.utils.inference.model_registry import (
ModelRegistryHelper,
-    build_model_alias,
+    build_hf_repo_model_alias,
)
from llama_stack.providers.utils.inference.openai_compat import (
OpenAICompatCompletionResponse,
@@ -62,9 +62,9 @@ from .config import VLLMInferenceAdapterConfig
log = logging.getLogger(__name__)
-def build_model_aliases():
+def build_hf_repo_model_aliases():
return [
-        build_model_alias(
+        build_hf_repo_model_alias(
model.huggingface_repo,
model.descriptor(),
)
@@ -204,7 +204,7 @@ async def _process_vllm_chat_completion_stream_response(
class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
def __init__(self, config: VLLMInferenceAdapterConfig) -> None:
-        self.register_helper = ModelRegistryHelper(build_model_aliases())
+        self.register_helper = ModelRegistryHelper(build_hf_repo_model_aliases())
self.config = config
self.client = None