Enable sane naming of registered objects with defaults (#429)
# What does this PR do?

This is a follow-up to #425. That PR allows for specifying models in the registry, but each entry needs to look like:

```yaml
- identifier: ...
  provider_id: ...
  provider_resource_id: ...
```

This is headache-inducing. The current PR improves the situation by adopting the shape of our APIs: the user only needs to specify `model_id`; the rest is optional and figured out by the Stack, and you can always override it. Here's what an example `ollama` "full stack" registry looks like (we still need to kill or simplify the shield_type crap):

```yaml
models:
  - model_id: Llama3.2-3B-Instruct
  - model_id: Llama-Guard-3-1B
shields:
  - shield_id: llama_guard
    shield_type: llama_guard
```

## Test Plan

See the test plan for #425. Re-ran it.
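To make the defaulting concrete: a minimal sketch of the resolution the Stack performs for a bare `model_id`. The helper and class names here are hypothetical stand-ins, not the actual registry code; the only assumption is that a lone configured provider is used when `provider_id` is omitted.

```python
# Hypothetical sketch of the defaulting described above -- not the Stack's code.
from dataclasses import dataclass
from typing import Optional


@dataclass
class ModelEntry:  # stand-in for the real ModelInput
    model_id: str
    provider_id: Optional[str] = None
    provider_resource_id: Optional[str] = None


def resolve_defaults(entry: ModelEntry, provider_ids: list[str]) -> dict:
    provider_id = entry.provider_id
    if provider_id is None:
        # with exactly one provider configured, it is the obvious default
        if len(provider_ids) != 1:
            raise ValueError("provider_id is required when multiple providers are configured")
        provider_id = provider_ids[0]
    return {
        "identifier": entry.model_id,
        "provider_id": provider_id,
        # the provider-side resource name defaults to the user-facing id
        "provider_resource_id": entry.provider_resource_id or entry.model_id,
    }


print(resolve_defaults(ModelEntry(model_id="Llama3.2-3B-Instruct"), ["ollama"]))
# -> {'identifier': 'Llama3.2-3B-Instruct', 'provider_id': 'ollama',
#     'provider_resource_id': 'Llama3.2-3B-Instruct'}
```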
parent d9d271a684 · commit 09269e2a44
17 changed files with 295 additions and 207 deletions
```diff
@@ -128,7 +128,6 @@ class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
         pass
 
     async def register_shield(self, shield: Shield) -> None:
-        print(f"Registering shield {shield}")
         if shield.shield_type != ShieldType.llama_guard:
             raise ValueError(f"Unsupported shield type: {shield.shield_type}")
 
```
```diff
@@ -9,7 +9,7 @@ import tempfile
 import pytest
 import pytest_asyncio
 
-from llama_stack.apis.models import Model
+from llama_stack.apis.models import ModelInput
 from llama_stack.distribution.datatypes import Api, Provider
 
 from llama_stack.providers.inline.agents.meta_reference import (
```
```diff
@@ -71,13 +71,9 @@ async def agents_stack(request, inference_model, safety_model):
     if fixture.provider_data:
         provider_data.update(fixture.provider_data)
 
-    inf_provider_id = providers["inference"][0].provider_id
-    safety_provider_id = providers["safety"][0].provider_id
-
-    shield = get_shield_to_register(
-        providers["safety"][0].provider_type, safety_provider_id, safety_model
+    shield_input = get_shield_to_register(
+        providers["safety"][0].provider_type, safety_model
     )
 
     inference_models = (
         inference_model if isinstance(inference_model, list) else [inference_model]
     )
```
```diff
@@ -86,13 +82,11 @@ async def agents_stack(request, inference_model, safety_model):
         providers,
         provider_data,
         models=[
-            Model(
-                identifier=model,
-                provider_id=inf_provider_id,
-                provider_resource_id=model,
+            ModelInput(
+                model_id=model,
             )
             for model in inference_models
         ],
-        shields=[shield],
+        shields=[shield_input],
     )
     return impls[Api.agents], impls[Api.memory]
```
```diff
@@ -9,7 +9,7 @@ import os
 import pytest
 import pytest_asyncio
 
-from llama_stack.apis.models import Model
+from llama_stack.apis.models import ModelInput
 
 from llama_stack.distribution.datatypes import Api, Provider
 from llama_stack.providers.inline.inference.meta_reference import (
```
```diff
@@ -162,10 +162,8 @@ async def inference_stack(request, inference_model):
         {"inference": inference_fixture.providers},
         inference_fixture.provider_data,
         models=[
-            Model(
-                identifier=inference_model,
-                provider_resource_id=inference_model,
-                provider_id=inference_fixture.providers[0].provider_id,
+            ModelInput(
+                model_id=inference_model,
             )
         ],
     )
```
```diff
@@ -7,9 +7,9 @@
 import pytest
 import pytest_asyncio
 
-from llama_stack.apis.models import Model
+from llama_stack.apis.models import ModelInput
 
-from llama_stack.apis.shields import Shield, ShieldType
+from llama_stack.apis.shields import ShieldInput, ShieldType
 
 from llama_stack.distribution.datatypes import Api, Provider
 from llama_stack.providers.inline.safety.llama_guard import LlamaGuardConfig
```
```diff
@@ -99,28 +99,21 @@ async def safety_stack(inference_model, safety_model, request):
         provider_data.update(safety_fixture.provider_data)
 
     shield_provider_type = safety_fixture.providers[0].provider_type
-    shield = get_shield_to_register(
-        shield_provider_type, safety_fixture.providers[0].provider_id, safety_model
-    )
+    shield_input = get_shield_to_register(shield_provider_type, safety_model)
 
     impls = await resolve_impls_for_test_v2(
         [Api.safety, Api.shields, Api.inference],
         providers,
         provider_data,
-        models=[
-            Model(
-                identifier=inference_model,
-                provider_id=inference_fixture.providers[0].provider_id,
-                provider_resource_id=inference_model,
-            )
-        ],
-        shields=[shield],
+        models=[ModelInput(model_id=inference_model)],
+        shields=[shield_input],
     )
 
+    shield = await impls[Api.shields].get_shield(shield_input.shield_id)
     return impls[Api.safety], impls[Api.shields], shield
 
 
-def get_shield_to_register(provider_type: str, provider_id: str, safety_model: str):
+def get_shield_to_register(provider_type: str, safety_model: str) -> ShieldInput:
     shield_config = {}
     shield_type = ShieldType.llama_guard
     identifier = "llama_guard"
```
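One detail worth flagging in this hunk: fixtures now register a lightweight `ShieldInput` and then fetch back the fully resolved `Shield` via `get_shield`. A hedged sketch of what the resolved object should satisfy (field names are taken from the old `Shield` construction in this commit; the defaulting behavior is the PR's claim, asserted here purely as an illustration):

```python
# Illustrative only: assumes the fixture context from the hunk above.
from llama_stack.distribution.datatypes import Api


async def check_resolved_shield(impls, shield_input) -> None:
    shield = await impls[Api.shields].get_shield(shield_input.shield_id)
    # the user-facing identifier is the shield_id that was registered...
    assert shield.identifier == shield_input.shield_id
    # ...and the provider-side id defaults to that same identifier, as the old
    # explicit `provider_resource_id=identifier` argument used to spell out.
    assert shield.provider_resource_id == shield_input.shield_id
```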
```diff
@@ -133,10 +126,8 @@ def get_shield_to_register(provider_type: str, provider_id: str, safety_model: str):
         shield_config["guardrailVersion"] = get_env_or_fail("BEDROCK_GUARDRAIL_VERSION")
         shield_type = ShieldType.generic_content_shield
 
-    return Shield(
-        identifier=identifier,
+    return ShieldInput(
+        shield_id=identifier,
         shield_type=shield_type,
         params=shield_config,
-        provider_id=provider_id,
-        provider_resource_id=identifier,
     )
```
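The fields `ShieldInput` carries can be read off this hunk; for reference, a hedged sketch of the shape. The real definition lives in `llama_stack.apis.shields` and may differ in detail; the pydantic base class is an assumption based on the codebase's conventions.

```python
# Sketch of ShieldInput as inferred from usage in this commit -- not canonical.
from enum import Enum
from typing import Any, Dict, Optional

from pydantic import BaseModel


class ShieldType(Enum):
    # values observed in this commit; the real enum may have more members
    llama_guard = "llama_guard"
    generic_content_shield = "generic_content_shield"


class ShieldInput(BaseModel):
    shield_id: str
    shield_type: Optional[ShieldType] = None
    params: Optional[Dict[str, Any]] = None
    # provider_id / provider_resource_id are intentionally optional now;
    # the Stack fills them in, mirroring the model-registry defaults
    provider_id: Optional[str] = None
    provider_resource_id: Optional[str] = None
```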
```diff
@@ -7,6 +7,8 @@
 import pytest
 import pytest_asyncio
 
+from llama_stack.apis.models import ModelInput
+
 from llama_stack.distribution.datatypes import Api, Provider
 
 from llama_stack.providers.tests.resolver import resolve_impls_for_test_v2
```
```diff
@@ -76,20 +78,14 @@ async def scoring_stack(request, inference_model):
         [Api.scoring, Api.datasetio, Api.inference],
         providers,
         provider_data,
-    )
-
-    provider_id = providers["inference"][0].provider_id
-    await impls[Api.models].register_model(
-        model_id=inference_model,
-        provider_id=provider_id,
-    )
-    await impls[Api.models].register_model(
-        model_id="Llama3.1-405B-Instruct",
-        provider_id=provider_id,
-    )
-    await impls[Api.models].register_model(
-        model_id="Llama3.1-8B-Instruct",
-        provider_id=provider_id,
+        models=[
+            ModelInput(model_id=model)
+            for model in [
+                inference_model,
+                "Llama3.1-405B-Instruct",
+                "Llama3.1-8B-Instruct",
+            ]
+        ],
     )
 
     return impls
```