Enable sane naming of registered objects with defaults

This commit is contained in:
Ashwin Bharambe 2024-11-12 10:17:34 -08:00
parent 9e925f43e5
commit 48a6e27de9
13 changed files with 222 additions and 131 deletions

View file

@@ -9,7 +9,7 @@ import tempfile
import pytest
import pytest_asyncio
from llama_stack.apis.models import Model
from llama_stack.apis.models import ModelInput
from llama_stack.distribution.datatypes import Api, Provider
from llama_stack.providers.inline.agents.meta_reference import (
@@ -71,13 +71,9 @@ async def agents_stack(request, inference_model, safety_model):
if fixture.provider_data:
provider_data.update(fixture.provider_data)
inf_provider_id = providers["inference"][0].provider_id
safety_provider_id = providers["safety"][0].provider_id
shield = get_shield_to_register(
providers["safety"][0].provider_type, safety_provider_id, safety_model
shield_input = get_shield_to_register(
providers["safety"][0].provider_type, safety_model
)
inference_models = (
inference_model if isinstance(inference_model, list) else [inference_model]
)
@@ -86,13 +82,11 @@ async def agents_stack(request, inference_model, safety_model):
providers,
provider_data,
models=[
Model(
identifier=model,
provider_id=inf_provider_id,
provider_resource_id=model,
ModelInput(
model_id=model,
)
for model in inference_models
],
shields=[shield],
shields=[shield_input],
)
return impls[Api.agents], impls[Api.memory]

View file

@@ -9,7 +9,7 @@ import os
import pytest
import pytest_asyncio
from llama_stack.apis.models import Model
from llama_stack.apis.models import ModelInput
from llama_stack.distribution.datatypes import Api, Provider
from llama_stack.providers.inline.inference.meta_reference import (
@@ -162,10 +162,8 @@ async def inference_stack(request, inference_model):
{"inference": inference_fixture.providers},
inference_fixture.provider_data,
models=[
Model(
identifier=inference_model,
provider_resource_id=inference_model,
provider_id=inference_fixture.providers[0].provider_id,
ModelInput(
model_id=inference_model,
)
],
)

View file

@@ -7,9 +7,9 @@
import pytest
import pytest_asyncio
from llama_stack.apis.models import Model
from llama_stack.apis.models import ModelInput
from llama_stack.apis.shields import Shield, ShieldType
from llama_stack.apis.shields import ShieldInput, ShieldType
from llama_stack.distribution.datatypes import Api, Provider
from llama_stack.providers.inline.safety.llama_guard import LlamaGuardConfig
@@ -99,28 +99,21 @@ async def safety_stack(inference_model, safety_model, request):
provider_data.update(safety_fixture.provider_data)
shield_provider_type = safety_fixture.providers[0].provider_type
shield = get_shield_to_register(
shield_provider_type, safety_fixture.providers[0].provider_id, safety_model
)
shield_input = get_shield_to_register(shield_provider_type, safety_model)
impls = await resolve_impls_for_test_v2(
[Api.safety, Api.shields, Api.inference],
providers,
provider_data,
models=[
Model(
identifier=inference_model,
provider_id=inference_fixture.providers[0].provider_id,
provider_resource_id=inference_model,
)
],
shields=[shield],
models=[ModelInput(model_id=inference_model)],
shields=[shield_input],
)
shield = await impls[Api.shields].get_shield(shield_input.shield_id)
return impls[Api.safety], impls[Api.shields], shield
def get_shield_to_register(provider_type: str, provider_id: str, safety_model: str):
def get_shield_to_register(provider_type: str, safety_model: str) -> ShieldInput:
shield_config = {}
shield_type = ShieldType.llama_guard
identifier = "llama_guard"
@@ -133,10 +126,8 @@ def get_shield_to_register(provider_type: str, safety_model: s
shield_config["guardrailVersion"] = get_env_or_fail("BEDROCK_GUARDRAIL_VERSION")
shield_type = ShieldType.generic_content_shield
return Shield(
identifier=identifier,
return ShieldInput(
shield_id=identifier,
shield_type=shield_type,
params=shield_config,
provider_id=provider_id,
provider_resource_id=identifier,
)