chore: give OpenAIMixin subclasses a chance to list models without leaking _model_cache details

Matthew Farrellee 2025-10-04 08:34:23 -04:00
parent f176196fba
commit c465472e42
3 changed files with 286 additions and 36 deletions


@@ -13,7 +13,6 @@ from llama_stack.apis.inference import (
     Model,
     OpenAICompletion,
 )
-from llama_stack.apis.models import ModelType
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -72,31 +71,13 @@ class DatabricksInferenceAdapter(
     ) -> OpenAICompletion:
         raise NotImplementedError()
 
-    async def list_models(self) -> list[Model] | None:
-        self._model_cache = {}  # from OpenAIMixin
-        ws_client = WorkspaceClient(host=self.config.url, token=self.get_api_key())  # TODO: this is not async
-        endpoints = ws_client.serving_endpoints.list()
-        for endpoint in endpoints:
-            model = Model(
-                provider_id=self.__provider_id__,
-                provider_resource_id=endpoint.name,
-                identifier=endpoint.name,
-            )
-            if endpoint.task == "llm/v1/chat":
-                model.model_type = ModelType.llm  # this is redundant, but informative
-            elif endpoint.task == "llm/v1/embeddings":
-                if endpoint.name not in self.embedding_model_metadata:
-                    logger.warning(f"No metadata information available for embedding model {endpoint.name}, skipping.")
-                    continue
-                model.model_type = ModelType.embedding
-                model.metadata = self.embedding_model_metadata[endpoint.name]
-            else:
-                logger.warning(f"Unknown model type, skipping: {endpoint}")
-                continue
-            self._model_cache[endpoint.name] = model
-        return list(self._model_cache.values())
+    async def get_models(self) -> list[Model] | None:
+        return [
+            endpoint.name
+            for endpoint in WorkspaceClient(
+                host=self.config.url, token=self.get_api_key()
+            ).serving_endpoints.list()  # TODO: this is not async
+        ]
 
     async def should_refresh_models(self) -> bool:
         return False
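
The replacement get_models keeps the "TODO: this is not async" note because WorkspaceClient is a blocking client. A minimal sketch of one possible workaround is shown below: run the listing in a worker thread via asyncio.to_thread. The helper name and the databricks.sdk import path are assumptions for illustration, not part of this commit.

# Hypothetical sketch (not part of this commit): keep the event loop free by
# running the synchronous Databricks SDK listing in a worker thread.
import asyncio

from databricks.sdk import WorkspaceClient


async def list_serving_endpoint_names(host: str, token: str) -> list[str]:
    def _list_names() -> list[str]:
        # WorkspaceClient and serving_endpoints.list() are blocking calls.
        client = WorkspaceClient(host=host, token=token)
        return [endpoint.name for endpoint in client.serving_endpoints.list()]

    return await asyncio.to_thread(_list_names)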


@@ -7,7 +7,7 @@
 import base64
 import uuid
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Iterable
 from typing import Any
 
 from openai import NOT_GIVEN, AsyncOpenAI
@@ -111,6 +111,18 @@ class OpenAIMixin(ModelsProtocolPrivate, NeedsRequestProviderData, ABC):
         """
         return {}
 
+    async def get_models(self) -> Iterable[str] | None:
+        """
+        List available models from the provider.
+
+        Child classes can override this method to provide a custom implementation
+        for listing models. The default implementation uses the AsyncOpenAI client
+        to list models from the OpenAI-compatible endpoint.
+
+        :return: An iterable of model IDs or None if not implemented
+        """
+        return None
+
     @property
     def client(self) -> AsyncOpenAI:
         """
@@ -387,16 +399,30 @@ class OpenAIMixin(ModelsProtocolPrivate, NeedsRequestProviderData, ABC):
         """
         self._model_cache = {}
 
-        async for m in self.client.models.list():
-            if self.allowed_models and m.id not in self.allowed_models:
-                logger.info(f"Skipping model {m.id} as it is not in the allowed models list")
+        # give subclasses a chance to provide custom model listing
+        if (iterable := await self.get_models()) is not None:
+            if not hasattr(iterable, "__iter__"):
+                raise TypeError(
+                    f"Failed to list models: {self.__class__.__name__}.get_models() must return an iterable of "
+                    f"strings or None, but returned {type(iterable).__name__}"
+                )
+            models_ids = list(iterable)
+            logger.info(
+                f"Using {self.__class__.__name__}.get_models() implementation, received {len(models_ids)} models"
+            )
+        else:
+            models_ids = [m.id async for m in self.client.models.list()]
+
+        for m_id in models_ids:
+            if self.allowed_models and m_id not in self.allowed_models:
+                logger.info(f"Skipping model {m_id} as it is not in the allowed models list")
                 continue
-            if metadata := self.embedding_model_metadata.get(m.id):
+            if metadata := self.embedding_model_metadata.get(m_id):
                 # This is an embedding model - augment with metadata
                 model = Model(
                     provider_id=self.__provider_id__,  # type: ignore[attr-defined]
-                    provider_resource_id=m.id,
-                    identifier=m.id,
+                    provider_resource_id=m_id,
+                    identifier=m_id,
                     model_type=ModelType.embedding,
                     metadata=metadata,
                 )
@@ -404,11 +430,11 @@ class OpenAIMixin(ModelsProtocolPrivate, NeedsRequestProviderData, ABC):
                 # This is an LLM
                 model = Model(
                     provider_id=self.__provider_id__,  # type: ignore[attr-defined]
-                    provider_resource_id=m.id,
-                    identifier=m.id,
+                    provider_resource_id=m_id,
+                    identifier=m_id,
                     model_type=ModelType.llm,
                 )
-            self._model_cache[m.id] = model
+            self._model_cache[m_id] = model
 
         return list(self._model_cache.values())
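
The rewritten list_models builds its cache from whatever get_models returns (falling back to client.models.list() when it returns None), skips IDs not in allowed_models, and registers anything found in embedding_model_metadata as an embedding model carrying that metadata. A sketch of a hypothetical adapter wiring those pieces together, with illustrative names and metadata values that are not taken from this commit:

# Hypothetical adapter (not from this commit) showing the inputs list_models consumes.
from collections.abc import Iterable

from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin


class CatalogAdapter(OpenAIMixin):
    # IDs returned by get_models that appear here are registered as
    # ModelType.embedding entries with this metadata; the rest become ModelType.llm.
    embedding_model_metadata = {
        "example-embedding": {"embedding_dimension": 768, "context_length": 512},
    }

    async def get_models(self) -> Iterable[str] | None:
        return ["example-llm", "example-embedding"]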