From bf610adf9b7a8cd795e3b284f8b0fbcbea3fe691 Mon Sep 17 00:00:00 2001
From: Sixian Yi
Date: Mon, 6 Jan 2025 20:13:10 -0800
Subject: [PATCH] Temporary Commit at 1/6/2025, 8:13:09 PM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Summary:

### THIS DIFF

Adds a first pass at a config-driven CI harness for provider integration
tests: a runner (`ci_test.py`) that builds one pytest invocation per
inference provider and model category, manifest-based test selection in
`conftest.py`, and YAML files describing the test matrix
(`ci_test_config.yml`, `report_config.yml`).

### PLAN

### CONTEXT

### DESIGN

Test Plan:

# Test Setup

**Type checker and check that the build compiles**

**Unit Tests**

**E2E Tests**

// Screenshots and videos

| Before | After |
|--------|-------|
| …      | …     |

# Monitoring Plan
---
 llama_stack/providers/tests/ci_test.py         | 112 ++++++++++++++++++
 .../providers/tests/ci_test_config.yml         |  22 ++++
 llama_stack/providers/tests/conftest.py        |  63 +++++++++-
 .../tests/inference/test_text_inference.py     |   5 +-
 llama_stack/providers/tests/manifest.json      |  21 ++++
 llama_stack/providers/tests/report_config.yml  |  49 ++++++++
 llama_stack/providers/tests/test.py            |  36 ++++++
 7 files changed, 305 insertions(+), 3 deletions(-)
 create mode 100644 llama_stack/providers/tests/ci_test.py
 create mode 100644 llama_stack/providers/tests/ci_test_config.yml
 create mode 100644 llama_stack/providers/tests/manifest.json
 create mode 100644 llama_stack/providers/tests/report_config.yml
 create mode 100644 llama_stack/providers/tests/test.py
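
The runner is meant to be invoked directly; a usage sketch (assuming the
provider API keys are exported in the environment and a local ollama
server is installed):

    FIREWORKS_API_KEY=... TOGETHER_API_KEY=... python llama_stack/providers/tests/ci_test.py
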
diff --git a/llama_stack/providers/tests/ci_test.py b/llama_stack/providers/tests/ci_test.py
new file mode 100644
index 000000000..dc872cbbc
--- /dev/null
+++ b/llama_stack/providers/tests/ci_test.py
@@ -0,0 +1,112 @@
+import os
+import subprocess
+from pathlib import Path
+from typing import List
+
+import pytest
+import yaml
+
+
+# Inference provider -> environment variable its integration tests require
+# (None means no key is needed, e.g. a local ollama server).
+INFERENCE_PROVIDER_ENV_KEY = {
+    "ollama": None,
+    "fireworks": "FIREWORKS_API_KEY",
+    "together": "TOGETHER_API_KEY",
+}
+
+TEST_MODELS = {
+    "text": "meta-llama/Llama-3.1-8B-Instruct",
+    "vision": "meta-llama/Llama-3.2-11B-Vision-Instruct",
+}
+
+# Model category -> keywords selecting the corresponding functionality tests
+CATEGORY_FUNCTIONALITY_TESTS = {
+    "text": ["streaming", "tool_calling", "structured_output"],
+    "vision": ["streaming"],
+}
+
+
+class TestConfig:
+    """Expected shape of ci_test_config.yml (not consumed yet; see main)."""
+
+    class ModelTest:
+        model_type: str
+        model_name: str
+        test_path: str
+
+    providers: List[str]
+    apis: List[str]
+    capabilities: List[str]
+    model_tests: List[ModelTest]
+
+
+def generate_pytest_args(category, provider, test_keywords, env_key):
+    test_path = (
+        "./llama_stack/providers/tests/inference/test_{model_type}_inference.py".format(
+            model_type=category
+        )
+    )
+    pytest_args = [
+        test_path,
+        "-v",
+        "-k",
+        "{provider} and ({test_keywords})".format(
+            provider=provider, test_keywords=" or ".join(test_keywords)
+        ),
+        "--inference-model={model_name}".format(model_name=TEST_MODELS[category]),
+    ]
+
+    if env_key is not None:
+        pytest_args.extend(
+            [
+                "--env",
+                "{key_name}={key_value}".format(
+                    key_name=env_key, key_value=os.getenv(env_key)
+                ),
+            ]
+        )
+    return pytest_args
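+
+
+# Example: generate_pytest_args("text", "fireworks",
+#     ["streaming", "tool_calling", "structured_output"], "FIREWORKS_API_KEY")
+# builds roughly (values illustrative):
+#   ["./llama_stack/providers/tests/inference/test_text_inference.py",
+#    "-v",
+#    "-k", "fireworks and (streaming or tool_calling or structured_output)",
+#    "--inference-model=meta-llama/Llama-3.1-8B-Instruct",
+#    "--env", "FIREWORKS_API_KEY=<value of $FIREWORKS_API_KEY>"]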
+
+
+def main():
+    test_results = []
+    path = Path(__file__).parent / "ci_test_config.yml"
+    with open(path, "r") as f:
+        data = yaml.load(f, Loader=yaml.SafeLoader)
+    # TODO(WIP): drive the matrix from ci_test_config.yml; for now the loop
+    # below uses the hardcoded tables above and `data` is read but unused.
+
+    for model_category, test_keywords in CATEGORY_FUNCTIONALITY_TESTS.items():
+        for provider, env_key in INFERENCE_PROVIDER_ENV_KEY.items():
+            if provider == "ollama":
+                # ollama serves the model locally: start it before the tests
+                # and stop it afterwards.
+                ollama_model_alias = (
+                    "llama3.1:8b-instruct-fp16"
+                    if model_category == "text"
+                    else "llama3.2-vision:11b-instruct-fp16"
+                )
+                subprocess.Popen(["ollama", "run", ollama_model_alias])
+                test_results.append(
+                    pytest.main(
+                        generate_pytest_args(
+                            model_category, provider, test_keywords, env_key
+                        )
+                    )
+                )
+                subprocess.Popen(["ollama", "stop", ollama_model_alias])
+            else:
+                test_results.append(
+                    pytest.main(
+                        generate_pytest_args(
+                            model_category, provider, test_keywords, env_key
+                        )
+                    )
+                )
+    return test_results
+
+
+if __name__ == "__main__":
+    main()
diff --git a/llama_stack/providers/tests/ci_test_config.yml b/llama_stack/providers/tests/ci_test_config.yml
new file mode 100644
index 000000000..89e3bf16e
--- /dev/null
+++ b/llama_stack/providers/tests/ci_test_config.yml
@@ -0,0 +1,22 @@
+inference_providers:
+  - ollama
+  - fireworks
+  - together
+  - tgi
+  - vllm
+
+test_models:
+  text: meta-llama/Llama-3.1-8B-Instruct
+  vision: meta-llama/Llama-3.2-11B-Vision-Instruct
+
+# Test functions to run, keyed by test file
+tests:
+  inference/test_vision_inference.py:
+    - test_vision_chat_completion_streaming
+    - test_vision_chat_completion_non_streaming
+  inference/test_text_inference.py:
+    - test_structured_output
+    - test_chat_completion_streaming
+    - test_chat_completion_non_streaming
+    - test_chat_completion_with_tool_calling
+    - test_chat_completion_with_tool_calling_streaming
diff --git a/llama_stack/providers/tests/conftest.py b/llama_stack/providers/tests/conftest.py
index 4d7831ae3..4cd818f20 100644
--- a/llama_stack/providers/tests/conftest.py
+++ b/llama_stack/providers/tests/conftest.py
@@ -7,7 +7,8 @@
 import os
 from pathlib import Path
 from typing import Any, Dict, List, Optional
-
+import json
+import yaml
 import pytest
 from dotenv import load_dotenv
 from pydantic import BaseModel
@@ -73,6 +74,10 @@ def pytest_addoption(parser):
     parser.addoption(
         "--env", action="append", help="Set environment variables, e.g. --env KEY=value"
     )
+    parser.addoption(
+        "--config",
+        action="append",
+        help="Path to a test config file, e.g. --config ci_test_config.yml",
+    )
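+    # NOTE: --config is not consumed anywhere in this diff yet; presumably it
+    # will point at ci_test_config.yml so that collection can be driven from
+    # the config rather than the manifest.json mapping below.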
+
+
 def make_provider_id(providers: Dict[str, str]) -> str:
@@ -148,6 +153,62 @@ def pytest_itemcollected(item):
         item.name = f"{item.name}[{marks}]"
 
 
+def pytest_collection_modifyitems(config, items):
+    """Keep only the tests listed (by file) in manifest.json."""
+    manifest_path = Path(__file__).parent / "manifest.json"
+    manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
+    # Test file basename -> functions listed in the manifest. Only the
+    # file-level filter is enforced so far; per-function filtering is TODO.
+    mapping = {f["basepath"]: f["functions"] for f in manifest["files"]}
+
+    selected = []
+    deselected = []
+    for item in items:
+        if item.fspath.basename in mapping:
+            selected.append(item)
+        else:
+            deselected.append(item)
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+    items[:] = selected
+
+
+# Alternative collection strategies tried earlier (kept for reference):
+# ignoring paths at collection time, and collecting only the files named in
+# manifest.json via a custom Directory collector.
+#
+# def pytest_ignore_collect(path, config):
+#     manifest_path = Path(__file__).parent / "manifest.json"
+#     files = json.loads(manifest_path.read_text(encoding="utf-8"))["files"]
+#     file_paths = set(Path(__file__).parent / f["basepath"] for f in files)
+#     return Path(path) not in file_paths
+#
+# class ManifestDirectory(pytest.Directory):
+#     def collect(self):
+#         manifest_path = self.path / "manifest.json"
+#         manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
+#         ihook = self.ihook
+#         for file in manifest["files"]:
+#             yield from ihook.pytest_collect_file(
+#                 file_path=self.path / file["basepath"], parent=self
+#             )
+#
+# def pytest_collect_directory(path, parent):
+#     if path.joinpath("manifest.json").is_file():
+#         return ManifestDirectory.from_parent(parent=parent, path=path)
+#     return None
+
+
 pytest_plugins = [
     "llama_stack.providers.tests.inference.fixtures",
     "llama_stack.providers.tests.safety.fixtures",
diff --git a/llama_stack/providers/tests/inference/test_text_inference.py b/llama_stack/providers/tests/inference/test_text_inference.py
index fd93857a3..682d1d38c 100644
--- a/llama_stack/providers/tests/inference/test_text_inference.py
+++ b/llama_stack/providers/tests/inference/test_text_inference.py
@@ -16,8 +16,6 @@ from llama_models.llama3.api.datatypes import (
     ToolPromptFormat,
 )
 
-from pydantic import BaseModel, ValidationError
-
 from llama_stack.apis.inference import (
     ChatCompletionResponse,
     ChatCompletionResponseEventType,
@@ -33,6 +31,9 @@ from llama_stack.apis.inference import (
     UserMessage,
 )
 from llama_stack.apis.models import Model
+
+from pydantic import BaseModel, ValidationError
+
 from .utils import group_chunks
diff --git a/llama_stack/providers/tests/manifest.json b/llama_stack/providers/tests/manifest.json
new file mode 100644
index 000000000..97af0375e
--- /dev/null
+++ b/llama_stack/providers/tests/manifest.json
@@ -0,0 +1,21 @@
+{
+  "files": [
+    {
+      "basepath": "test_text_inference.py",
+      "functions": [
+        "test_structured_output",
+        "test_chat_completion_streaming",
+        "test_chat_completion_non_streaming",
+        "test_chat_completion_with_tool_calling",
+        "test_chat_completion_with_tool_calling_streaming"
+      ]
+    },
+    {
+      "basepath": "test_vision_inference.py",
+      "functions": [
+        "test_vision_chat_completion_streaming",
+        "test_vision_chat_completion_non_streaming"
+      ]
+    }
+  ]
+}
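
A possible follow-up (not implemented in this diff): the collection hook in
conftest.py could also enforce the per-function lists in this manifest. A
minimal sketch, assuming parametrized test ids like
"test_chat_completion_streaming[fireworks]":

    def is_selected(item, mapping) -> bool:
        functions = mapping.get(item.fspath.basename)
        if functions is None:
            return False
        # Strip the parametrization suffix from the test id.
        return item.name.split("[")[0] in functions
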
diff --git a/llama_stack/providers/tests/report_config.yml b/llama_stack/providers/tests/report_config.yml
new file mode 100644
index 000000000..b952598ee
--- /dev/null
+++ b/llama_stack/providers/tests/report_config.yml
@@ -0,0 +1,49 @@
+inference:
+  providers:
+    - id: ollama
+    - id: together
+      env_param: TOGETHER_API_KEY
+    - id: fireworks
+      env_param: FIREWORKS_API_KEY
+    - id: tgi
+    - id: vllm
+  apis:
+    - chat_completion
+    - embeddings
+  capabilities:
+    - streaming
+    - tool_calling
+    - structured_output
+  model_tests:
+    - model_type: vision
+      model_name: meta-llama/Llama-3.2-11B-Vision-Instruct
+      test_path: /llama_stack/providers/tests/inference/test_vision_inference.py
+      test_api:
+        - chat_completion
+      test_capabilities:
+        - streaming
+    - model_type: text
+      model_name: meta-llama/Llama-3.1-8B-Instruct
+      test_path: /llama_stack/providers/tests/inference/test_text_inference.py
+      test_api:
+        - chat_completion
+      test_capabilities:
+        - streaming
+        - tool_calling
+        - structured_output
+    - model_type: embedding
+      model_name: sentence-transformers/all-MiniLM-L6-v2
+      test_path: /llama_stack/providers/tests/inference/test_embeddings.py
+      test_api:
+        - embeddings
+      test_capabilities: ~  # none
+
+memory:
+  providers:
+    - faiss
+    - weaviate
+    - pgvector
+    - chroma
+  test_path:
+    - /llama_stack/providers/tests/memory/test_memory.py
diff --git a/llama_stack/providers/tests/test.py b/llama_stack/providers/tests/test.py
new file mode 100644
index 000000000..59a9ee6b4
--- /dev/null
+++ b/llama_stack/providers/tests/test.py
@@ -0,0 +1,36 @@
+import os
+import subprocess
+
+import yaml
+
+TEST_CONFIG_YAML = "test-config.yaml"
+OUTPUT_FILE = "run_tests.sh"
+
+
+def get_data(yaml_file_name):
+    with open(yaml_file_name, "r") as f:
+        return yaml.safe_load(f)
+
+
+def main():
+    test_config_yaml_path = os.path.join(os.path.dirname(__file__), TEST_CONFIG_YAML)
+    data = get_data(test_config_yaml_path)
+    output_file_path = os.path.join(os.path.dirname(__file__), OUTPUT_FILE)
+    with open(output_file_path, "w") as f:
+        print("Started writing to {}".format(OUTPUT_FILE))
+        # Emit one pytest invocation per (provider, inference model) pair.
+        for provider in data["providers"].split(" "):
+            for model in data["inference_models"]:
+                # Each entry is "<model name>, <test file>".
+                inference_model, test_file = (
+                    s.strip() for s in data["inference_models"][model].split(",")
+                )
+                f.write(
+                    'pytest -v -s -k "{}" --inference-model="{}" ./llama_stack/providers/tests/inference/{}\n'.format(
+                        provider, inference_model, test_file
+                    )
+                )
+        print("Finished writing to {}".format(OUTPUT_FILE))
+    subprocess.run(["chmod", "+x", output_file_path])
+    subprocess.run(["bash", output_file_path])
+
+
+if __name__ == "__main__":
+    main()
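
test.py expects a test-config.yaml (not part of this diff) with a
space-separated "providers" string and "inference_models" entries of the
form "<model>, <test file>"; each pair expands to one line of run_tests.sh,
roughly (values illustrative):

    pytest -v -s -k "fireworks" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py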