mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-17 18:09:51 +00:00
Testing - Memory provider fakes
Summary: Implementing Memory provider fakes as discussed in this draft https://github.com/meta-llama/llama-stack/pull/490#issuecomment-2492877393. High level changes: * Fake provider is specified via the "fake" mark * Test config will set up a fake fixture for the run of the test * Test resolver checks fixtures and upon finding a fake provider it injects InlineProviderSpec for fake provider * Fake provider gets resolved through path/naming convention * Fake provider implementation is contained to the tests/ directory and implements stubs and method fakes with minimal functionality to simulate real provider Instructions for creating a fake * Create the "fakes" module inside the provider test directory * Inside the module implement `get_provider_impl` that will return fake implementation object * Name the fake implementation class to match the fake provider id (e.g. memory_fake -> MemoryFakeImpl) * Same rule for the config (e.g. memory_fake -> MemoryFakeConfig) * Add fake fixture (in the fixtures.py) and set up method stubs there Test Plan: Run memory tests ``` pytest llama_stack/providers/tests/memory/test_memory.py -m "fake" -v -s --tb=short ====================================================================================================== test session starts ====================================================================================================== platform darwin -- Python 3.11.10, pytest-8.3.3, pluggy-1.5.0 -- /opt/homebrew/Caskroom/miniconda/base/envs/llama-stack/bin/python cachedir: .pytest_cache rootdir: /Users/vivic/Code/llama-stack configfile: pyproject.toml plugins: asyncio-0.24.0, anyio-4.6.2.post1 asyncio: mode=Mode.STRICT, default_loop_scope=None collected 18 items / 15 deselected / 3 selected llama_stack/providers/tests/memory/test_memory.py::TestMemory::test_banks_list[fake] PASSED llama_stack/providers/tests/memory/test_memory.py::TestMemory::test_banks_register[fake] PASSED 
llama_stack/providers/tests/memory/test_memory.py::TestMemory::test_query_documents[fake] The scores are: [0.5] PASSED ========================================================================================= 3 passed, 15 deselected, 10 warnings in 0.46s ========================================================================================= ```
This commit is contained in:
parent
4e6c984c26
commit
e2d1b712e2
3 changed files with 189 additions and 3 deletions
|
|
@ -10,6 +10,7 @@ import tempfile
|
|||
import pytest
|
||||
import pytest_asyncio
|
||||
|
||||
from llama_stack.apis.memory import * # noqa: F403
|
||||
from llama_stack.distribution.datatypes import Api, Provider, RemoteProviderConfig
|
||||
from llama_stack.providers.inline.memory.faiss import FaissImplConfig
|
||||
from llama_stack.providers.remote.memory.pgvector import PGVectorConfig
|
||||
|
|
@ -18,6 +19,48 @@ from llama_stack.providers.tests.resolver import construct_stack_for_test
|
|||
from llama_stack.providers.utils.kvstore import SqliteKVStoreConfig
|
||||
from ..conftest import ProviderFixture, remote_stack_fixture
|
||||
from ..env import get_env_or_fail
|
||||
from .fakes import InlineMemoryFakeImpl
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def memory_fake() -> ProviderFixture:
    """Session-scoped provider fixture backed by the in-tree fake memory provider.

    Stubs ``query_documents`` on ``InlineMemoryFakeImpl`` so that known query
    strings map to canned ``QueryDocumentsResponse`` objects — no real vector
    store is involved. Returns a ``ProviderFixture`` whose single provider
    resolves to the fake via the ``test::fake`` provider type.
    """
    # Canned responses keyed by query string; the exact matching semantics
    # (equality vs. substring) live in the fake implementation — see
    # InlineMemoryFakeImpl.stub_method in the tests' fakes module.
    InlineMemoryFakeImpl.stub_method(
        method_name="query_documents",
        return_value_matchers={
            "programming language": QueryDocumentsResponse(
                chunks=[Chunk(content="Python", token_count=1, document_id="")],
                scores=[0.1],
            ),
            "AI and brain-inspired computing": QueryDocumentsResponse(
                chunks=[
                    Chunk(content="neural networks", token_count=2, document_id="")
                ],
                scores=[0.1],
            ),
            "computer": QueryDocumentsResponse(
                chunks=[
                    Chunk(content="test-chunk-1", token_count=1, document_id=""),
                    Chunk(content="test-chunk-2", token_count=1, document_id=""),
                ],
                scores=[0.1, 0.5],
            ),
            "quantum computing": QueryDocumentsResponse(
                chunks=[Chunk(content="Python", token_count=1, document_id="")],
                scores=[0.5],
            ),
        },
    )

    # The fake is injected by the test resolver via this provider_type;
    # provider_id must match the fakes-module naming convention
    # (inline_memory_fake -> InlineMemoryFakeImpl).
    fixture = ProviderFixture(
        providers=[
            Provider(
                provider_id="inline_memory_fake",
                provider_type="test::fake",
                config={},
            )
        ],
    )
    return fixture
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
|
|
@ -93,7 +136,7 @@ def memory_chroma() -> ProviderFixture:
|
|||
)
|
||||
|
||||
|
||||
# All selectable memory-provider fixture ids; "fake" runs against the
# in-tree fake provider, the others against real backends.
MEMORY_FIXTURES = ["fake", "faiss", "pgvector", "weaviate", "remote", "chroma"]
|
||||
|
||||
|
||||
@pytest_asyncio.fixture(scope="session")
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue