Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-14 06:22:37 +00:00)
chore: give OpenAIMixin subclasses a chance to list models without leaking _model_cache details
This commit is contained in:
parent f176196fba / commit c465472e42
3 changed files with 286 additions and 36 deletions
@@ -7,7 +7,7 @@
 import base64
 import uuid
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Iterable
 from typing import Any
 
 from openai import NOT_GIVEN, AsyncOpenAI
@@ -111,6 +111,18 @@ class OpenAIMixin(ModelsProtocolPrivate, NeedsRequestProviderData, ABC):
         """
         return {}
 
+    async def get_models(self) -> Iterable[str] | None:
+        """
+        List available models from the provider.
+
+        Child classes can override this method to provide a custom implementation
+        for listing models. The default implementation uses the AsyncOpenAI client
+        to list models from the OpenAI-compatible endpoint.
+
+        :return: An iterable of model IDs or None if not implemented
+        """
+        return None
+
     @property
     def client(self) -> AsyncOpenAI:
         """
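The new hook lets a subclass supply model IDs without reaching into _model_cache (the detail the commit title calls out). A minimal sketch of an override, assuming a hypothetical provider whose listing endpoint is not OpenAI-compatible; the httpx call, URL, adapter name, and import path are illustrative assumptions, not part of this commit:

    import httpx
    from collections.abc import Iterable

    # Module path assumed from the llama-stack source tree.
    from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin


    class ExampleAdapter(OpenAIMixin):
        """Hypothetical adapter; only the get_models() hook comes from this commit."""

        async def get_models(self) -> Iterable[str] | None:
            # Query a provider-specific route instead of the OpenAI-compatible
            # /v1/models endpoint and return bare model IDs; the mixin handles
            # validation, allowed_models filtering, and _model_cache internally.
            async with httpx.AsyncClient(base_url="https://api.example.invalid") as http:
                resp = await http.get("/models")  # hypothetical endpoint
                resp.raise_for_status()
                return [entry["id"] for entry in resp.json()["data"]]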
@@ -387,16 +399,30 @@ class OpenAIMixin(ModelsProtocolPrivate, NeedsRequestProviderData, ABC):
         """
         self._model_cache = {}
 
-        async for m in self.client.models.list():
-            if self.allowed_models and m.id not in self.allowed_models:
-                logger.info(f"Skipping model {m.id} as it is not in the allowed models list")
+        # give subclasses a chance to provide custom model listing
+        if (iterable := await self.get_models()) is not None:
+            if not hasattr(iterable, "__iter__"):
+                raise TypeError(
+                    f"Failed to list models: {self.__class__.__name__}.get_models() must return an iterable of "
+                    f"strings or None, but returned {type(iterable).__name__}"
+                )
+            models_ids = list(iterable)
+            logger.info(
+                f"Using {self.__class__.__name__}.get_models() implementation, received {len(models_ids)} models"
+            )
+        else:
+            models_ids = [m.id async for m in self.client.models.list()]
+
+        for m_id in models_ids:
+            if self.allowed_models and m_id not in self.allowed_models:
+                logger.info(f"Skipping model {m_id} as it is not in the allowed models list")
                 continue
-            if metadata := self.embedding_model_metadata.get(m.id):
+            if metadata := self.embedding_model_metadata.get(m_id):
                 # This is an embedding model - augment with metadata
                 model = Model(
                     provider_id=self.__provider_id__,  # type: ignore[attr-defined]
-                    provider_resource_id=m.id,
-                    identifier=m.id,
+                    provider_resource_id=m_id,
+                    identifier=m_id,
                     model_type=ModelType.embedding,
                     metadata=metadata,
                 )
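Two properties of the new branch are worth noting. First, the hasattr(iterable, "__iter__") guard fails fast on a plainly wrong return value, but a bare string would still pass it (strings are iterable) and be split into single characters, so overrides should return a list, tuple, or similar. Second, None is a deliberate sentinel: it routes to the pre-existing AsyncOpenAI path rather than meaning "no models". A sketch of both outcomes, with hypothetical class names and list_models() assumed as the enclosing method (its name is truncated out of the hunk header above):

    class TupleAdapter(OpenAIMixin):
        async def get_models(self):
            return ("model-a", "model-b")  # any real iterable of IDs is accepted


    class BrokenAdapter(OpenAIMixin):
        async def get_models(self):
            return 42  # neither an iterable nor None


    # await TupleAdapter(...).list_models()  -> Models for model-a and model-b
    # await BrokenAdapter(...).list_models() -> TypeError: Failed to list models:
    #   BrokenAdapter.get_models() must return an iterable of strings or None,
    #   but returned int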
@ -404,11 +430,11 @@ class OpenAIMixin(ModelsProtocolPrivate, NeedsRequestProviderData, ABC):
|
|||
# This is an LLM
|
||||
model = Model(
|
||||
provider_id=self.__provider_id__, # type: ignore[attr-defined]
|
||||
provider_resource_id=m.id,
|
||||
identifier=m.id,
|
||||
provider_resource_id=m_id,
|
||||
identifier=m_id,
|
||||
model_type=ModelType.llm,
|
||||
)
|
||||
self._model_cache[m.id] = model
|
||||
self._model_cache[m_id] = model
|
||||
|
||||
return list(self._model_cache.values())
|
||||
|
||||
|
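For subclasses that leave get_models() alone, behavior is unchanged: the else branch gathers IDs through the AsyncOpenAI client exactly as the removed async for loop did, and the method still finishes by returning list(self._model_cache.values()). A hedged usage sketch, where the concrete adapter and its constructor are placeholders and the method name is assumed from the mixin's models protocol:

    adapter = SomeOpenAICompatAdapter(config)  # hypothetical concrete subclass

    models = await adapter.list_models()  # rebuilds _model_cache, returns its values
    for model in models:
        # Each entry is a Model whose identifier and provider_resource_id are
        # the provider's model ID, typed as either an LLM or an embedding model.
        print(model.identifier, model.model_type)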