[client sdk test] add options for inference_model, safety_shield, embedding_model (#843)
# What does this PR do?

- Default `inference_model` for testing: `meta-llama/Llama-3.1-8B-Instruct`
- Default vision `inference_model` for testing: `meta-llama/Llama-3.2-11B-Vision-Instruct`

## Test Plan

```
/opt/miniconda3/envs/stack/bin/pytest -s -v --inference-model=meta-llama/Llama-3.2-3B-Instruct tests/client-sdk/agents
/opt/miniconda3/envs/stack/bin/pytest -s -v --embedding-model=all-MiniLM-L6-v2 tests/client-sdk/vector_io
/opt/miniconda3/envs/stack/bin/pytest -s -v --safety-shield=meta-llama/Llama-Guard-3-1B tests/client-sdk/safety
```

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
Commit f4f47970e5 (parent 4dd4f09fc5): 7 changed files with 84 additions and 83 deletions.
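Before the diff, for context: a minimal sketch of how such flags are typically wired up via pytest's `pytest_addoption` hook. The flag names mirror the test-plan commands above and the two model defaults come from the PR description; everything else here (the vision flag's name, the help strings, the missing defaults) is an assumption for illustration, not the PR's actual code.

```python
# Hypothetical conftest.py sketch: illustrates the pytest_addoption
# pattern; names and defaults not stated above are assumptions.


def pytest_addoption(parser):
    parser.addoption(
        "--inference-model",
        action="store",
        default="meta-llama/Llama-3.1-8B-Instruct",  # default from the PR description
        help="Model ID used by the text inference tests",
    )
    parser.addoption(
        "--vision-inference-model",  # assumed flag name; not shown in the test plan
        action="store",
        default="meta-llama/Llama-3.2-11B-Vision-Instruct",  # default from the PR description
        help="Model ID used by the vision inference tests",
    )
    parser.addoption(
        "--safety-shield",
        action="store",
        default=None,  # no default stated; the test plan passes it explicitly
        help="Shield ID used by the safety tests",
    )
    parser.addoption(
        "--embedding-model",
        action="store",
        default=None,  # no default stated; the test plan passes it explicitly
        help="Model ID used by the vector_io tests",
    )
```

With wiring like this, the test-plan commands above simply override the defaults per suite.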
Diff excerpt:

```diff
@@ -34,30 +34,6 @@ def inference_provider_type(llama_stack_client):
     return inference_providers[0].provider_type
 
 
-@pytest.fixture(scope="session")
-def text_model_id(llama_stack_client):
-    available_models = [
-        model.identifier
-        for model in llama_stack_client.models.list()
-        if model.identifier.startswith("meta-llama") and "405" not in model.identifier
-    ]
-    assert len(available_models) > 0
-    return available_models[0]
-
-
-@pytest.fixture(scope="session")
-def vision_model_id(llama_stack_client):
-    available_models = [
-        model.identifier
-        for model in llama_stack_client.models.list()
-        if "vision" in model.identifier.lower()
-    ]
-    if len(available_models) == 0:
-        pytest.skip("No vision models available")
-
-    return available_models[0]
-
-
 @pytest.fixture
 def get_weather_tool_definition():
     return {
@@ -107,6 +83,7 @@ def test_text_completion_streaming(llama_stack_client, text_model_id):
     assert "blue" in "".join(streamed_content).lower().strip()
 
 
+@pytest.mark.skip("Most inference providers don't support log probs yet")
 def test_completion_log_probs_non_streaming(llama_stack_client, text_model_id):
     response = llama_stack_client.inference.completion(
         content="Complete the sentence: Micheael Jordan is born in ",
@@ -124,6 +101,7 @@ def test_completion_log_probs_non_streaming(llama_stack_client, text_model_id):
     assert all(len(logprob.logprobs_by_token) == 3 for logprob in response.logprobs)
 
 
+@pytest.mark.skip("Most inference providers don't support log probs yet")
 def test_completion_log_probs_streaming(llama_stack_client, text_model_id):
     response = llama_stack_client.inference.completion(
         content="Complete the sentence: Micheael Jordan is born in ",
```
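The first hunk removes the fixtures that auto-discovered a model from the running stack (the first `meta-llama` model, or the first model with `vision` in its identifier). With option-driven configuration, the natural replacement reads the flag instead; a minimal sketch, assuming a same-named fixture backed by the new `--inference-model` option (the body is illustrative, not the PR's exact code):

```python
import pytest


@pytest.fixture(scope="session")
def text_model_id(request):
    # Read the model ID from the new CLI flag instead of picking the
    # first matching entry from llama_stack_client.models.list().
    return request.config.getoption("--inference-model")
```

The `@pytest.mark.skip` decorators added in the other two hunks disable the log-probs tests, since most inference providers don't support log probs yet.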