Mirror of https://github.com/meta-llama/llama-stack.git
Commit 0763167fde (parent 82d942b501): model selection fix

3 changed files with 33 additions and 40 deletions.

The commit centralizes model selection for the test suite: the per-file model-discovery fixtures are removed, and two pytest command-line options (--inference-model and --vision-inference-model) are wired to the text_model_id and vision_model_id fixtures through a pytest_generate_tests hook.
The first changed file (an agents test module, judging from the TestClientTool context) drops its local text_model_id fixture; the id now arrives via the parametrization hook added in the conftest changes below:

@@ -79,18 +79,6 @@ class TestClientTool(ClientTool):
         return -1


-@pytest.fixture(scope="session")
-def text_model_id(llama_stack_client):
-    available_models = [
-        model.identifier
-        for model in llama_stack_client.models.list()
-        if model.identifier.startswith("meta-llama") and "405" not in model.identifier
-    ]
-    model_id = available_models[0]
-    print(f"Using model: {model_id}")
-    return model_id
-
-
 @pytest.fixture(scope="session")
 def agent_config(llama_stack_client, text_model_id):
     available_shields = [
The second file is the shared conftest (it registers the Report plugin and defines the pytest hooks). First, the model constants move to module scope, ahead of pytest_addoption:

@@ -20,6 +20,10 @@ def pytest_configure(config):
         config.pluginmanager.register(Report())


+TEXT_MODEL = "meta-llama/Llama-3.1-8B-Instruct"
+VISION_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+
+
 def pytest_addoption(parser):
     parser.addoption(
         "--report",
Then the options themselves. Note what this hunk removes: the old module-level constants, where INFERENCE_MODEL was bound to the vision model, apparently the mix-up motivating the commit. In their place, two explicit flags:

@@ -27,10 +31,18 @@ def pytest_addoption(parser):
         action="store_true",
         help="Knob to determine if we should generate report, e.g. --output=True",
     )
-
-
-TEXT_MODEL = "meta-llama/Llama-3.1-8B-Instruct"
-INFERENCE_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+    parser.addoption(
+        "--inference-model",
+        action="store",
+        default=TEXT_MODEL,
+        help="Specify the inference model to use for testing",
+    )
+    parser.addoption(
+        "--vision-inference-model",
+        action="store",
+        default=VISION_MODEL,
+        help="Specify the vision inference model to use for testing",
+    )


 @pytest.fixture(scope="session")
The conftest changes end with a new pytest_generate_tests hook, which feeds the flag values to any test that requests the matching fixture name:

@@ -61,3 +73,18 @@ def llama_stack_client(provider_data):
     else:
         raise ValueError("LLAMA_STACK_CONFIG or LLAMA_STACK_BASE_URL must be set")
     return client
+
+
+def pytest_generate_tests(metafunc):
+    if "text_model_id" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "text_model_id",
+            [metafunc.config.getoption("--inference-model")],
+            scope="session",
+        )
+    if "vision_model_id" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "vision_model_id",
+            [metafunc.config.getoption("--vision-inference-model")],
+            scope="session",
+        )
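Taken together, the two hooks replace the removed fixtures. Below is a condensed, self-contained sketch of the pattern; the constant values and option names are the commit's, while the file layout and comments are illustrative, not a verbatim copy of the changed conftest:

# conftest.py -- sketch of the option-driven model selection pattern
TEXT_MODEL = "meta-llama/Llama-3.1-8B-Instruct"
VISION_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"


def pytest_addoption(parser):
    # One flag per model role; the defaults keep existing runs working.
    parser.addoption("--inference-model", action="store", default=TEXT_MODEL)
    parser.addoption("--vision-inference-model", action="store", default=VISION_MODEL)


def pytest_generate_tests(metafunc):
    # Any test declaring `text_model_id` or `vision_model_id` as an argument
    # is parametrized with exactly one value: the flag, or its default.
    if "text_model_id" in metafunc.fixturenames:
        metafunc.parametrize(
            "text_model_id",
            [metafunc.config.getoption("--inference-model")],
            scope="session",
        )
    if "vision_model_id" in metafunc.fixturenames:
        metafunc.parametrize(
            "vision_model_id",
            [metafunc.config.getoption("--vision-inference-model")],
            scope="session",
        )

One practical difference from the removed fixtures: the parametrized value becomes part of each test's node ID (standard pytest behavior, e.g. test_foo[meta-llama/Llama-3.1-8B-Instruct]), so reports show which model a run used.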
The third file (the inference tests, judging from the inference_provider_type context) likewise drops its own copies of the model-selection fixtures:

@@ -34,30 +34,6 @@ def inference_provider_type(llama_stack_client):
     return inference_providers[0].provider_type


-@pytest.fixture(scope="session")
-def text_model_id(llama_stack_client):
-    available_models = [
-        model.identifier
-        for model in llama_stack_client.models.list()
-        if model.identifier.startswith("meta-llama") and "405" not in model.identifier
-    ]
-    assert len(available_models) > 0
-    return available_models[0]
-
-
-@pytest.fixture(scope="session")
-def vision_model_id(llama_stack_client):
-    available_models = [
-        model.identifier
-        for model in llama_stack_client.models.list()
-        if "vision" in model.identifier.lower()
-    ]
-    if len(available_models) == 0:
-        pytest.skip("No vision models available")
-
-    return available_models[0]
-
-
 @pytest.fixture
 def get_weather_tool_definition():
     return {
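These fixtures were also the only place that inspected the running stack for candidate models. To pick a value for the new flags by hand, the same listing call can be used directly; a minimal sketch, mirroring the removed discovery logic (the base_url is an assumption, point it at your running stack):

# list_models.py -- sketch for choosing a --inference-model value
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")
for model in client.models.list():
    print(model.identifier)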
Finally, two log-probs tests in the same file gain skip marks:

@@ -107,6 +83,7 @@ def test_text_completion_streaming(llama_stack_client, text_model_id):
     assert "blue" in "".join(streamed_content).lower().strip()


+@pytest.mark.skip("Most inference providers don't support log probs yet")
 def test_completion_log_probs_non_streaming(llama_stack_client, text_model_id):
     response = llama_stack_client.inference.completion(
         content="Complete the sentence: Micheael Jordan is born in ",
@@ -124,6 +101,7 @@ def test_completion_log_probs_non_streaming(llama_stack_client, text_model_id):
     assert all(len(logprob.logprobs_by_token) == 3 for logprob in response.logprobs)


+@pytest.mark.skip("Most inference providers don't support log probs yet")
 def test_completion_log_probs_streaming(llama_stack_client, text_model_id):
     response = llama_stack_client.inference.completion(
         content="Complete the sentence: Micheael Jordan is born in ",
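With the hooks in place, test modules no longer define model fixtures at all; declaring the parameter is enough. A hypothetical module illustrating the injection (not part of the commit):

# test_model_selection.py -- hypothetical; relies on the conftest hooks above
def test_model_id_is_injected(text_model_id):
    # Exactly one value arrives: --inference-model, or TEXT_MODEL by default.
    assert isinstance(text_model_id, str) and text_model_id

Run as, for example, pytest --inference-model meta-llama/Llama-3.1-8B-Instruct to override the default.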