Summary:
N/A

Test Plan:
# Test Setup

**Type check and verify that the build compiles**

**Unit Tests**

**E2E Tests**

**Screenshots and videos**
| Before | After |
|--|--|
| … | … |

# Monitoring Plan
Sixian Yi 2025-01-05 23:46:32 -08:00
parent 8c5b328566
commit b719743be9

```diff
@@ -28,7 +28,7 @@ CATEGORY_FUNCTIONALITY_TESTS = {
 }
-def generate_pytest_args(category, provider, env_key):
+def generate_pytest_args(category, provider, test_keywords, env_key):
     test_path = (
         "./llama_stack/providers/tests/inference/test_{model_type}_inference.py".format(
             model_type=category
@@ -37,11 +37,14 @@ def generate_pytest_args(category, provider, env_key):
     pytest_args = [
         test_path,
         "-v",
-        "-s",
+        # "-s",
         "-k",
-        provider,
+        "{provider} and ({test_keywords})".format(
+            provider=provider, test_keywords=" or ".join(test_keywords)
+        ),
         "--inference-model={model_name}".format(model_name=TEST_MODELS[category]),
     ]
     if env_key is not None:
         pytest_args.extend(
             [
```
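The `-k` expression now ANDs the provider name with an OR of the category's test keywords, so a single pytest invocation runs only the selected functionality tests for that provider. A minimal sketch of the resulting expression, using hypothetical provider and keyword values:

```python
# Hypothetical inputs, shown only to illustrate the "-k" expression built by
# the updated generate_pytest_args(); the real values come from
# INFERENCE_PROVIDER_ENV_KEY and CATEGORY_FUNCTIONALITY_TESTS.
provider = "fireworks"
test_keywords = ["streaming", "structured_output", "tool_calling"]

k_expr = "{provider} and ({test_keywords})".format(
    provider=provider, test_keywords=" or ".join(test_keywords)
)
print(k_expr)  # fireworks and (streaming or structured_output or tool_calling)
```

The corresponding change in `main()` threads `test_keywords` through to `generate_pytest_args()` and makes the ollama model lifecycle explicit: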
```diff
@@ -57,27 +60,26 @@ def generate_pytest_args(category, provider, env_key):
 def main():
     test_result = []
-    for model_category, functionality_tests in CATEGORY_FUNCTIONALITY_TESTS.items():
+    for model_category, test_keywords in CATEGORY_FUNCTIONALITY_TESTS.items():
         for provider, env_key in INFERENCE_PROVIDER_ENV_KEY.items():
             if provider == "ollama":
-                proc = subprocess.Popen(
-                    [
-                        "ollama",
-                        "run",
-                        (
-                            "llama3.1:8b-instruct-fp16"
-                            if model_category == "text"
-                            else "llama3.2-vision"
-                        ),
-                    ]
+                ollama_model_alias = (
+                    "llama3.1:8b-instruct-fp16"
+                    if model_category == "text"
+                    else "llama3.2-vision:11b-instruct-fp16"
                 )
+                proc = subprocess.Popen(["ollama", "run", ollama_model_alias])
                 retcode = pytest.main(
-                    generate_pytest_args(model_category, provider, env_key)
+                    generate_pytest_args(
+                        model_category, provider, test_keywords, env_key
+                    )
                 )
                 proc.terminate()
+                proc = subprocess.Popen(["ollama", "stop", ollama_model_alias])
             else:
                 retcode = pytest.main(
-                    generate_pytest_args(model_category, provider, env_key)
+                    generate_pytest_args(
+                        model_category, provider, test_keywords, env_key
+                    )
                 )
```
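For clarity, here is a minimal standalone sketch of that per-category ollama lifecycle; the pytest arguments shown are hypothetical placeholders for what `generate_pytest_args(...)` returns:

```python
# Minimal sketch of the ollama start/run/stop pattern in the updated main().
# The "-k" value below is a hypothetical placeholder; main() builds the real
# arguments via generate_pytest_args(model_category, provider, test_keywords, env_key).
import subprocess

import pytest


def run_ollama_category(model_category):
    # Pick the model alias for the category (text vs. vision).
    ollama_model_alias = (
        "llama3.1:8b-instruct-fp16"
        if model_category == "text"
        else "llama3.2-vision:11b-instruct-fp16"
    )
    # Serve the model in a background process while pytest runs.
    proc = subprocess.Popen(["ollama", "run", ollama_model_alias])
    retcode = pytest.main(["-v", "-k", "ollama and (streaming)"])
    # Terminate the client process and explicitly unload the model before the
    # next category starts.
    proc.terminate()
    subprocess.Popen(["ollama", "stop", ollama_model_alias])
    return retcode
```

The added `ollama stop` call is the new part of the lifecycle; previously only the `ollama run` client process was terminated.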