mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-11 19:56:03 +00:00
feat: add custom_metadata to OpenAIModel to unify /v1/models with /v1/openai/v1/models
The goal is to consolidate the model listing endpoints. This is step 1: adding a custom_metadata field to OpenAIModel that includes model_type, provider_id, provider_resource_id, and all model metadata from the native /v1/models response. Next steps: update the stainless client to use /v1/openai/v1/models, migrate tests to read from custom_metadata, then remove the /v1/openai/v1/ prefix entirely.
This commit is contained in:
parent
415fd9e36b
commit
3af73b754a
3 changed files with 16 additions and 0 deletions
|
|
@ -90,12 +90,14 @@ class OpenAIModel(BaseModel):
|
||||||
:object: The object type, which will be "model"
|
:object: The object type, which will be "model"
|
||||||
:created: The Unix timestamp in seconds when the model was created
|
:created: The Unix timestamp in seconds when the model was created
|
||||||
:owned_by: The owner of the model
|
:owned_by: The owner of the model
|
||||||
|
:custom_metadata: Llama Stack-specific metadata including model_type, provider info, and additional metadata
|
||||||
"""
|
"""
|
||||||
|
|
||||||
id: str
|
id: str
|
||||||
object: Literal["model"] = "model"
|
object: Literal["model"] = "model"
|
||||||
created: int
|
created: int
|
||||||
owned_by: str
|
owned_by: str
|
||||||
|
custom_metadata: dict[str, Any] | None = None
|
||||||
|
|
||||||
|
|
||||||
class OpenAIListModelsResponse(BaseModel):
|
class OpenAIListModelsResponse(BaseModel):
|
||||||
|
|
|
||||||
|
|
@ -134,6 +134,12 @@ class ModelsRoutingTable(CommonRoutingTableImpl, Models):
|
||||||
object="model",
|
object="model",
|
||||||
created=int(time.time()),
|
created=int(time.time()),
|
||||||
owned_by="llama_stack",
|
owned_by="llama_stack",
|
||||||
|
custom_metadata={
|
||||||
|
"model_type": model.model_type,
|
||||||
|
"provider_id": model.provider_id,
|
||||||
|
"provider_resource_id": model.provider_resource_id,
|
||||||
|
**model.metadata,
|
||||||
|
},
|
||||||
)
|
)
|
||||||
for model in all_models
|
for model in all_models
|
||||||
]
|
]
|
||||||
|
|
|
||||||
|
|
@ -166,6 +166,14 @@ async def test_models_routing_table(cached_disk_dist_registry):
|
||||||
assert "test_provider/test-model" in openai_model_ids
|
assert "test_provider/test-model" in openai_model_ids
|
||||||
assert "test_provider/test-model-2" in openai_model_ids
|
assert "test_provider/test-model-2" in openai_model_ids
|
||||||
|
|
||||||
|
# Verify custom_metadata is populated with Llama Stack-specific data
|
||||||
|
for openai_model in openai_models.data:
|
||||||
|
assert openai_model.custom_metadata is not None
|
||||||
|
assert "model_type" in openai_model.custom_metadata
|
||||||
|
assert "provider_id" in openai_model.custom_metadata
|
||||||
|
assert "provider_resource_id" in openai_model.custom_metadata
|
||||||
|
assert openai_model.custom_metadata["provider_id"] == "test_provider"
|
||||||
|
|
||||||
# Test get_object_by_identifier
|
# Test get_object_by_identifier
|
||||||
model = await table.get_object_by_identifier("model", "test_provider/test-model")
|
model = await table.get_object_by_identifier("model", "test_provider/test-model")
|
||||||
assert model is not None
|
assert model is not None
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue