Merge branch 'refactor-list-models' into make-openaimix-pydantic

Matthew Farrellee 2025-10-06 06:58:59 -04:00
commit 6fa23d816f
3 changed files with 101 additions and 248 deletions

llama_stack/providers/remote/inference/databricks/databricks.py

@@ -4,13 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+from collections.abc import Iterable
 from typing import Any
 
+from databricks.sdk import WorkspaceClient
+
-from llama_stack.apis.inference import (
-    OpenAICompletion,
-)
+from llama_stack.apis.inference import OpenAICompletion
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -69,3 +68,11 @@ class DatabricksInferenceAdapter(OpenAIMixin):
         suffix: str | None = None,
     ) -> OpenAICompletion:
         raise NotImplementedError()
+
+    async def list_provider_model_ids(self) -> Iterable[str]:
+        return [
+            endpoint.name
+            for endpoint in WorkspaceClient(
+                host=self.config.url, token=self.get_api_key()
+            ).serving_endpoints.list()  # TODO: this is not async
+        ]
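
A note on the TODO above: WorkspaceClient's serving_endpoints.list() is a synchronous SDK call, so as written it blocks the event loop. A minimal sketch of one way to address it, assuming the same adapter attributes and using asyncio.to_thread (Python 3.9+):

    import asyncio

    async def list_provider_model_ids(self) -> Iterable[str]:
        def _list_sync() -> list[str]:
            # Same blocking SDK call as above, just run off the event loop
            client = WorkspaceClient(host=self.config.url, token=self.get_api_key())
            return [endpoint.name for endpoint in client.serving_endpoints.list()]

        # Run the synchronous listing in a worker thread without blocking the loop
        return await asyncio.to_thread(_list_sync)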

llama_stack/providers/utils/inference/openai_mixin.py

@@ -48,7 +48,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
     - download_images: If True, downloads images and converts to base64 for providers that require it
     - embedding_model_metadata: A dictionary mapping model IDs to their embedding metadata
     - provider_data_api_key_field: Optional field name in provider data to look for API key
-    - get_models: Method to list available models from the provider
+    - list_provider_model_ids: Method to list available models from the provider
     - get_extra_client_params: Method to provide extra parameters to the AsyncOpenAI client
 
     Expected Dependencies:
@@ -122,7 +122,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         """
         return {}
 
-    async def get_models(self) -> Iterable[str] | None:
+    async def list_provider_model_ids(self) -> Iterable[str]:
         """
         List available models from the provider.
 
@@ -132,7 +132,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         :return: An iterable of model IDs or None if not implemented
         """
-        return None
+        return [m.id async for m in self.client.models.list()]
 
     async def initialize(self) -> None:
         """
@@ -430,46 +430,42 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         """
         self._model_cache = {}
 
-        # give subclasses a chance to provide custom model listing
-        models_ids = []
         try:
-            if (iterable := await self.get_models()) is not None:  # TODO: handle exceptions from get_models
-                models_ids = list(iterable)
-                logger.info(
-                    f"Using {self.__class__.__name__}.get_models() implementation, received {len(models_ids)} models"
-                )
-                for id_ in models_ids:
-                    if not isinstance(id_, str):
-                        raise ValueError(f"Model ID {id_} from get_models() is not a string")
+            iterable = await self.list_provider_model_ids()
         except Exception as e:
-            logger.error(f"{self.__class__.__name__}.get_models() failed with: {e}")
+            logger.error(f"{self.__class__.__name__}.list_provider_model_ids() failed with: {e}")
             raise
+        if not hasattr(iterable, "__iter__"):
+            raise TypeError(
+                f"Failed to list models: {self.__class__.__name__}.list_provider_model_ids() must return an iterable of "
+                f"strings, but returned {type(iterable).__name__}"
+            )
 
-        if not models_ids:
-            models_ids = [m.id async for m in self.client.models.list()]
+        provider_models_ids = list(iterable)
+        logger.info(f"{self.__class__.__name__}.list_provider_model_ids() returned {len(provider_models_ids)} models")
 
-        for m_id in models_ids:
-            if self.allowed_models and m_id not in self.allowed_models:
-                logger.info(f"Skipping model {m_id} as it is not in the allowed models list")
+        for provider_model_id in provider_models_ids:
+            if not isinstance(provider_model_id, str):
+                raise ValueError(f"Model ID {provider_model_id} from list_provider_model_ids() is not a string")
+            if self.allowed_models and provider_model_id not in self.allowed_models:
+                logger.info(f"Skipping model {provider_model_id} as it is not in the allowed models list")
                 continue
-            if metadata := self.embedding_model_metadata.get(m_id):
-                # This is an embedding model - augment with metadata
+            if metadata := self.embedding_model_metadata.get(provider_model_id):
                 model = Model(
                     provider_id=self.__provider_id__,  # type: ignore[attr-defined]
-                    provider_resource_id=m_id,
-                    identifier=m_id,
+                    provider_resource_id=provider_model_id,
+                    identifier=provider_model_id,
                     model_type=ModelType.embedding,
                     metadata=metadata,
                 )
             else:
                 # This is an LLM
                 model = Model(
                     provider_id=self.__provider_id__,  # type: ignore[attr-defined]
-                    provider_resource_id=m_id,
-                    identifier=m_id,
+                    provider_resource_id=provider_model_id,
+                    identifier=provider_model_id,
                     model_type=ModelType.llm,
                 )
-            self._model_cache[m_id] = model
+            self._model_cache[provider_model_id] = model
 
         return list(self._model_cache.values())
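
The behavioral shift relative to get_models(): there is no longer a None return with a silent fallback here; the client.models.list() default moved into list_provider_model_ids() itself, and list_models() validates the result eagerly, raising TypeError for a non-iterable return and ValueError for a non-string ID. A rough illustration with hypothetical misbehaving adapters (not part of this change):

    class BadReturnAdapter(OpenAIMixin):
        async def list_provider_model_ids(self) -> Iterable[str]:
            return 42  # type: ignore[return-value]  # non-iterable: list_models() raises TypeError

    class BadIdAdapter(OpenAIMixin):
        async def list_provider_model_ids(self) -> Iterable[str]:
            return [123]  # type: ignore[list-item]  # non-string ID: list_models() raises ValueError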