Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-17 13:52:38 +00:00)
use provider resource id to validate for models

commit 95b7f57d92 (parent e4f14eafe2)
7 changed files with 75 additions and 46 deletions

In short: the test fixtures now register models under a short provider resource id derived from the full model name, and every inference test looks up and calls models by that id (model_id) instead of the full name.
@@ -142,6 +142,31 @@ def inference_bedrock() -> ProviderFixture:
     )
 
 
+def get_model_short_name(model_name: str) -> str:
+    """Convert model name to a short test identifier.
+
+    Args:
+        model_name: Full model name like "Llama3.1-8B-Instruct"
+
+    Returns:
+        Short name like "llama_8b" suitable for test markers
+    """
+    model_name = model_name.lower()
+    if "vision" in model_name:
+        return "llama_vision"
+    elif "3b" in model_name:
+        return "llama_3b"
+    elif "8b" in model_name:
+        return "llama_8b"
+    else:
+        return model_name.replace(".", "_").replace("-", "_")
+
+
+@pytest.fixture(scope="session")
+def model_id(inference_model) -> str:
+    return get_model_short_name(inference_model)
+
+
 INFERENCE_FIXTURES = [
     "meta_reference",
     "ollama",
@@ -154,7 +179,7 @@ INFERENCE_FIXTURES = [
 
 
 @pytest_asyncio.fixture(scope="session")
-async def inference_stack(request, inference_model):
+async def inference_stack(request, inference_model, model_id):
     fixture_name = request.param
     inference_fixture = request.getfixturevalue(f"inference_{fixture_name}")
     impls = await resolve_impls_for_test_v2(
@@ -163,7 +188,7 @@ async def inference_stack(request, inference_model):
         inference_fixture.provider_data,
         models=[
             ModelInput(
-                model_id=inference_model,
+                model_id=model_id,
             )
         ],
     )
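The get_model_short_name helper introduced above collapses full model names into short, marker-friendly ids. A minimal sanity check of that mapping; the function body is copied from the hunk, and every input other than the docstring's own "Llama3.1-8B-Instruct" example is an illustrative guess:

def get_model_short_name(model_name: str) -> str:
    # Copied from the hunk above: map a full model name to a short test id.
    model_name = model_name.lower()
    if "vision" in model_name:
        return "llama_vision"
    elif "3b" in model_name:
        return "llama_3b"
    elif "8b" in model_name:
        return "llama_8b"
    else:
        return model_name.replace(".", "_").replace("-", "_")

assert get_model_short_name("Llama3.1-8B-Instruct") == "llama_8b"               # docstring example
assert get_model_short_name("Llama3.2-3B-Instruct") == "llama_3b"               # illustrative input
assert get_model_short_name("Llama3.2-11B-Vision-Instruct") == "llama_vision"   # illustrative input
assert get_model_short_name("Some-Other.Model") == "some_other_model"           # fallback branch

Note the branch order: "vision" wins before the size checks, so a hypothetical 8B vision model would still map to "llama_vision".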
@@ -64,7 +64,7 @@ def sample_tool_definition():
 
 class TestInference:
     @pytest.mark.asyncio
-    async def test_model_list(self, inference_model, inference_stack):
+    async def test_model_list(self, inference_model, inference_stack, model_id):
         _, models_impl = inference_stack
         response = await models_impl.list_models()
         assert isinstance(response, list)
@@ -73,17 +73,16 @@ class TestInference:
 
         model_def = None
         for model in response:
-            if model.identifier == inference_model:
+            if model.identifier == model_id:
                 model_def = model
                 break
 
         assert model_def is not None
 
     @pytest.mark.asyncio
-    async def test_completion(self, inference_model, inference_stack):
+    async def test_completion(self, inference_model, inference_stack, model_id):
         inference_impl, _ = inference_stack
-
-        provider = inference_impl.routing_table.get_provider_impl(inference_model)
+        provider = inference_impl.routing_table.get_provider_impl(model_id)
         if provider.__provider_spec__.provider_type not in (
             "meta-reference",
             "remote::ollama",
@@ -96,7 +95,7 @@ class TestInference:
         response = await inference_impl.completion(
             content="Micheael Jordan is born in ",
             stream=False,
-            model=inference_model,
+            model_id=model_id,
             sampling_params=SamplingParams(
                 max_tokens=50,
             ),
@@ -110,7 +109,7 @@ class TestInference:
             async for r in await inference_impl.completion(
                 content="Roses are red,",
                 stream=True,
-                model=inference_model,
+                model_id=model_id,
                 sampling_params=SamplingParams(
                     max_tokens=50,
                 ),
@@ -125,11 +124,11 @@ class TestInference:
     @pytest.mark.asyncio
     @pytest.mark.skip("This test is not quite robust")
     async def test_completions_structured_output(
-        self, inference_model, inference_stack
+        self, inference_model, inference_stack, model_id
     ):
         inference_impl, _ = inference_stack
 
-        provider = inference_impl.routing_table.get_provider_impl(inference_model)
+        provider = inference_impl.routing_table.get_provider_impl(model_id)
         if provider.__provider_spec__.provider_type not in (
             "meta-reference",
             "remote::tgi",
@@ -149,7 +148,7 @@ class TestInference:
         response = await inference_impl.completion(
             content=user_input,
             stream=False,
-            model=inference_model,
+            model_id=model_id,
             sampling_params=SamplingParams(
                 max_tokens=50,
             ),
@@ -167,11 +166,11 @@ class TestInference:
 
     @pytest.mark.asyncio
     async def test_chat_completion_non_streaming(
-        self, inference_model, inference_stack, common_params, sample_messages
+        self, inference_model, inference_stack, common_params, sample_messages, model_id
     ):
         inference_impl, _ = inference_stack
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=model_id,
             messages=sample_messages,
             stream=False,
             **common_params,
@@ -184,11 +183,11 @@ class TestInference:
 
     @pytest.mark.asyncio
     async def test_structured_output(
-        self, inference_model, inference_stack, common_params
+        self, inference_model, inference_stack, common_params, model_id
    ):
         inference_impl, _ = inference_stack
 
-        provider = inference_impl.routing_table.get_provider_impl(inference_model)
+        provider = inference_impl.routing_table.get_provider_impl(model_id)
         if provider.__provider_spec__.provider_type not in (
             "meta-reference",
             "remote::fireworks",
@@ -204,7 +203,7 @@ class TestInference:
             num_seasons_in_nba: int
 
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=model_id,
             messages=[
                 SystemMessage(content="You are a helpful assistant."),
                 UserMessage(content="Please give me information about Michael Jordan."),
@@ -227,7 +226,7 @@ class TestInference:
         assert answer.num_seasons_in_nba == 15
 
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=model_id,
             messages=[
                 SystemMessage(content="You are a helpful assistant."),
                 UserMessage(content="Please give me information about Michael Jordan."),
@@ -244,13 +243,13 @@ class TestInference:
 
     @pytest.mark.asyncio
     async def test_chat_completion_streaming(
-        self, inference_model, inference_stack, common_params, sample_messages
+        self, inference_model, inference_stack, common_params, sample_messages, model_id
     ):
         inference_impl, _ = inference_stack
         response = [
             r
             async for r in await inference_impl.chat_completion(
-                model=inference_model,
+                model_id=model_id,
                 messages=sample_messages,
                 stream=True,
                 **common_params,
@@ -277,6 +276,7 @@ class TestInference:
         common_params,
         sample_messages,
         sample_tool_definition,
+        model_id,
     ):
         inference_impl, _ = inference_stack
         messages = sample_messages + [
@@ -286,7 +286,7 @@ class TestInference:
         ]
 
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=model_id,
             messages=messages,
             tools=[sample_tool_definition],
             stream=False,
@@ -316,6 +316,7 @@ class TestInference:
         common_params,
         sample_messages,
         sample_tool_definition,
+        model_id,
     ):
         inference_impl, _ = inference_stack
         messages = sample_messages + [
@@ -327,7 +328,7 @@ class TestInference:
         response = [
             r
             async for r in await inference_impl.chat_completion(
-                model=inference_model,
+                model_id=model_id,
                 messages=messages,
                 tools=[sample_tool_definition],
                 stream=True,
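Every test hunk above makes the same two substitutions: the full model name gives way to the short registered id, and the keyword changes from model= to model_id=. Distilled to the bare call shape, using only names that appear in the hunks (messages and extra params elided):

# Before this commit: the provider was addressed by full model name.
response = await inference_impl.chat_completion(
    model=inference_model,  # e.g. "Llama3.1-8B-Instruct"
    messages=sample_messages,
    stream=False,
)

# After: the short id registered via ModelInput is used, under the
# renamed model_id keyword.
response = await inference_impl.chat_completion(
    model_id=model_id,  # e.g. "llama_8b"
    messages=sample_messages,
    stream=False,
)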