diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index a5ea8e0db..32825e153 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -73,10 +73,7 @@ model_aliases = [
 
 class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
     def __init__(self, url: str) -> None:
-        self.model_register_helper = ModelRegistryHelper(
-            self,
-            model_aliases=model_aliases,
-        )
+        self.model_register_helper = ModelRegistryHelper(model_aliases)
         self.url = url
         self.formatter = ChatFormat(Tokenizer.get_instance())
 
diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index 7fad85e8c..657b5b576 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -47,10 +47,7 @@ def build_model_aliases():
 
 class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     def __init__(self, config: VLLMInferenceAdapterConfig) -> None:
-        self.model_register_helper = ModelRegistryHelper(
-            self,
-            model_aliases=build_model_aliases(),
-        )
+        self.model_register_helper = ModelRegistryHelper(build_model_aliases())
         self.config = config
         self.formatter = ChatFormat(Tokenizer.get_instance())
         self.client = None
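
Both hunks make the same change: the adapters now pass the alias list positionally and drop the `self` back-reference they used to hand to `ModelRegistryHelper`. For context, here is a minimal sketch of the constructor shape this diff implies; the `ModelAlias` fields, the internal map name, and the `get_provider_model_id` helper are assumptions for illustration, not taken from this diff.

```python
from dataclasses import dataclass, field


@dataclass
class ModelAlias:
    # Hypothetical shape, inferred from how the adapters build alias lists.
    provider_model_id: str
    aliases: list[str] = field(default_factory=list)


class ModelRegistryHelper:
    # Assumed post-change signature: the helper takes only the alias list,
    # so it no longer needs the adapter instance the old call sites passed.
    def __init__(self, model_aliases: list[ModelAlias]) -> None:
        self.alias_to_provider_id_map: dict[str, str] = {}
        for alias in model_aliases:
            for name in alias.aliases:
                self.alias_to_provider_id_map[name] = alias.provider_model_id
            # Map the provider id to itself so lookups by either name succeed.
            self.alias_to_provider_id_map[alias.provider_model_id] = alias.provider_model_id

    def get_provider_model_id(self, identifier: str) -> str | None:
        return self.alias_to_provider_id_map.get(identifier)
```

Under that assumed signature, `ModelRegistryHelper(model_aliases)` and `ModelRegistryHelper(build_model_aliases())` in the hunks above behave like the old keyword form, minus the adapter argument the helper never needed.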