feat: add static embedding metadata to dynamic model listings for providers using OpenAIMixin

- remove auto-download of ollama embedding models
- add embedding model metadata to dynamic listing w/ unit test
- add support and tests for allowed_models
- remove inference provider models.py files where dynamic listing is enabled
- store embedding metadata in embedding_model_metadata field on inference providers (see the sketch after this list)
- make model_entries optional on ModelRegistryHelper and LiteLLMOpenAIMixin
- make OpenAIMixin a ModelRegistryHelper
- skip base64 embedding test for remote::ollama, which always returns floats
- only use OpenAI client for ollama model listing
- remove unused build_model_entry function
- remove unused get_huggingface_repo function
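
For context, the new pattern looks roughly like this. A minimal sketch, not code from this commit: the ExampleAdapter class and annotate helper are hypothetical, while embedding_model_metadata, Model, and ModelType are the names the diff below actually uses.

from llama_stack.apis.models import Model, ModelType


class ExampleAdapter:  # hypothetical provider built on OpenAIMixin
    # static table keyed by provider model id; the commit stores this in an
    # embedding_model_metadata field on each inference provider
    embedding_model_metadata: dict[str, dict[str, int]] = {
        "example-embed-v1": {"embedding_dimension": 768, "context_length": 8192},
    }

    def annotate(self, model: Model) -> Model:
        # what dynamic listing now does: a dynamically discovered model whose
        # id appears in the static table is typed as an embedding model and
        # enriched with its metadata; everything else stays an LLM
        if model.provider_resource_id in self.embedding_model_metadata:
            model.model_type = ModelType.embedding
            model.metadata = self.embedding_model_metadata[model.provider_resource_id]
        return model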
Matthew Farrellee 2025-09-25 04:56:54 -04:00
parent a50b63906c
commit 466ef6f490
43 changed files with 370 additions and 1016 deletions


@@ -31,42 +31,28 @@ from llama_stack.apis.inference import (
     ToolConfig,
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.models import Model, ModelType
-from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import (
-    ProviderModelEntry,
+    Model,
+    ModelType,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 
 from .config import DatabricksImplConfig
 
+from llama_stack.log import get_logger
+
 logger = get_logger(name=__name__, category="inference::databricks")
 
-# source: https://docs.databricks.com/aws/en/machine-learning/foundation-model-apis/supported-models
-EMBEDDING_MODEL_ENTRIES = {
-    "databricks-gte-large-en": ProviderModelEntry(
-        provider_model_id="databricks-gte-large-en",
-        metadata={
-            "embedding_dimension": 1024,
-            "context_length": 8192,
-        },
-    ),
-    "databricks-bge-large-en": ProviderModelEntry(
-        provider_model_id="databricks-bge-large-en",
-        metadata={
-            "embedding_dimension": 1024,
-            "context_length": 512,
-        },
-    ),
-}
 
 
 class DatabricksInferenceAdapter(
     OpenAIMixin,
     Inference,
 ):
+    # source: https://docs.databricks.com/aws/en/machine-learning/foundation-model-apis/supported-models
+    embedding_model_metadata = {
+        "databricks-gte-large-en": {"embedding_dimension": 1024, "context_length": 8192},
+        "databricks-bge-large-en": {"embedding_dimension": 1024, "context_length": 512},
+    }
+
     def __init__(self, config: DatabricksImplConfig) -> None:
         self.config = config
@@ -156,11 +142,11 @@ class DatabricksInferenceAdapter(
             if endpoint.task == "llm/v1/chat":
                 model.model_type = ModelType.llm  # this is redundant, but informative
             elif endpoint.task == "llm/v1/embeddings":
-                if endpoint.name not in EMBEDDING_MODEL_ENTRIES:
+                if endpoint.name not in self.embedding_model_metadata:
                     logger.warning(f"No metadata information available for embedding model {endpoint.name}, skipping.")
                     continue
                 model.model_type = ModelType.embedding
-                model.metadata = EMBEDDING_MODEL_ENTRIES[endpoint.name].metadata
+                model.metadata = self.embedding_model_metadata[endpoint.name]
             else:
                 logger.warning(f"Unknown model type, skipping: {endpoint}")
                 continue
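
The hunk above shows only the classification branch; the enclosing listing loop is elided by the diff. As a self-contained illustration of the same skip-or-annotate logic, with stub types standing in for the Databricks SDK and llama_stack models rather than the adapter's real code:

from dataclasses import dataclass, field


@dataclass
class Endpoint:  # stand-in for a Databricks serving endpoint
    name: str
    task: str


@dataclass
class ListedModel:  # stand-in for llama_stack's Model
    provider_resource_id: str
    model_type: str = "llm"
    metadata: dict = field(default_factory=dict)


EMBEDDING_MODEL_METADATA = {
    "databricks-bge-large-en": {"embedding_dimension": 1024, "context_length": 512},
}


def classify(endpoint: Endpoint) -> ListedModel | None:
    # mirror of the branch in the hunk: chat endpoints are LLMs, embedding
    # endpoints need an entry in the static table, anything else is skipped
    model = ListedModel(provider_resource_id=endpoint.name)
    if endpoint.task == "llm/v1/chat":
        model.model_type = "llm"
    elif endpoint.task == "llm/v1/embeddings":
        meta = EMBEDDING_MODEL_METADATA.get(endpoint.name)
        if meta is None:
            return None  # no metadata known; skip, as the adapter does
        model.model_type = "embedding"
        model.metadata = meta
    else:
        return None  # unknown task; skip
    return model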
@@ -169,13 +155,5 @@ class DatabricksInferenceAdapter(
 
         return list(self._model_cache.values())
 
-    async def register_model(self, model: Model) -> Model:
-        if not await self.check_model_availability(model.provider_resource_id):
-            raise ValueError(f"Model {model.provider_resource_id} is not available in Databricks workspace.")
-        return model
-
-    async def unregister_model(self, model_id: str) -> None:
-        pass
-
     async def should_refresh_models(self) -> bool:
         return False
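
The removals in this last hunk line up with "make OpenAIMixin a ModelRegistryHelper" from the commit message: the availability check each adapter used to duplicate presumably now lives once in the shared helper. A minimal sketch of that pattern, with all names below assumed rather than taken from this diff:

class RegistryBackedAdapter:  # hypothetical; mirrors the inherited behavior
    def __init__(self) -> None:
        self._model_cache: dict[str, object] = {}

    async def check_model_availability(self, provider_resource_id: str) -> bool:
        # the helper can consult the same cache that dynamic listing fills
        return provider_resource_id in self._model_cache

    async def register_model(self, model):
        # roughly what the deleted method did, defined once for all adapters
        if not await self.check_model_availability(model.provider_resource_id):
            raise ValueError(f"Model {model.provider_resource_id} is not available.")
        return model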