forked from phoenix-oss/llama-stack-mirror
Enable sane naming of registered objects with defaults (#429)
# What does this PR do?

This is a follow-up to #425. That PR allows models to be specified in the registry, but each entry has to look like:

```yaml
- identifier: ...
  provider_id: ...
  provider_resource_id: ...
```

This is headache-inducing. The current PR improves the situation by adopting the shape of our APIs: the user only needs to specify `model_id`; the rest is optional and figured out by the Stack, and you can always override it. Here's what an example `ollama` "full stack" registry looks like (we still need to kill or simplify the shield_type crap):

```yaml
models:
- model_id: Llama3.2-3B-Instruct
- model_id: Llama-Guard-3-1B
shields:
- shield_id: llama_guard
  shield_type: llama_guard
```

## Test Plan

See the test plan for #425. Re-ran it.
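To illustrate the point about adopting the shape of our APIs, here is a small sketch (not Stack code; the simplified `ModelInput` dataclass and the use of PyYAML are assumptions for illustration) of how a registry like the one above maps onto per-model inputs where only `model_id` is required:

```python
from dataclasses import dataclass
from typing import Optional

import yaml  # PyYAML, assumed available for this sketch


@dataclass
class ModelInput:
    # Simplified stand-in for llama_stack.apis.models.ModelInput;
    # only model_id is required, everything else can stay unset.
    model_id: str
    provider_id: Optional[str] = None
    provider_resource_id: Optional[str] = None


REGISTRY = """
models:
- model_id: Llama3.2-3B-Instruct
- model_id: Llama-Guard-3-1B
"""

# Each registry entry maps directly onto the API shape: the user writes
# only model_id and the Stack is free to fill in the rest later.
models = [ModelInput(**entry) for entry in yaml.safe_load(REGISTRY)["models"]]
print(models)
```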
This commit is contained in:
parent: d9d271a684
commit: 09269e2a44
17 changed files with 295 additions and 207 deletions
```diff
@@ -9,7 +9,7 @@ import os
 import pytest
 import pytest_asyncio
 
-from llama_stack.apis.models import Model
+from llama_stack.apis.models import ModelInput
 
 from llama_stack.distribution.datatypes import Api, Provider
 from llama_stack.providers.inline.inference.meta_reference import (
@@ -162,10 +162,8 @@ async def inference_stack(request, inference_model):
         {"inference": inference_fixture.providers},
         inference_fixture.provider_data,
         models=[
-            Model(
-                identifier=inference_model,
-                provider_resource_id=inference_model,
-                provider_id=inference_fixture.providers[0].provider_id,
+            ModelInput(
+                model_id=inference_model,
             )
         ],
     )
```
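The diff above replaces a fully-specified `Model` with a `ModelInput` that carries only `model_id`. A minimal sketch of the kind of defaulting the Stack can then apply is shown below; the `resolve_model` helper and the local dataclasses are illustrative assumptions, not the Stack's actual resolution code.

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class ModelInput:
    # What the user (or a test fixture) provides: only model_id is required.
    model_id: str
    provider_id: Optional[str] = None
    provider_resource_id: Optional[str] = None


@dataclass
class Model:
    # The fully-resolved object that gets registered.
    identifier: str
    provider_id: str
    provider_resource_id: str


def resolve_model(entry: ModelInput, configured_provider_ids: List[str]) -> Model:
    # Hypothetical defaulting: fall back to the first configured provider
    # and reuse model_id as the provider-side resource id, unless the
    # entry overrides either value explicitly.
    return Model(
        identifier=entry.model_id,
        provider_id=entry.provider_id or configured_provider_ids[0],
        provider_resource_id=entry.provider_resource_id or entry.model_id,
    )


# With only model_id given, both defaults kick in.
print(resolve_model(ModelInput(model_id="Llama3.2-3B-Instruct"), ["ollama"]))

# An explicit provider_id still wins, matching "you can always override it".
print(resolve_model(ModelInput(model_id="Llama3.2-3B-Instruct", provider_id="vllm"), ["ollama"]))
```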