# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import os
import tempfile

import pytest
import pytest_asyncio

from llama_stack.distribution.datatypes import Api, Provider, RemoteProviderConfig
from llama_stack.providers.inline.memory.faiss import FaissImplConfig
from llama_stack.providers.remote.memory.pgvector import PGVectorConfig
from llama_stack.providers.remote.memory.weaviate import WeaviateConfig
from llama_stack.providers.tests.resolver import resolve_impls_for_test_v2
from llama_stack.providers.utils.kvstore import SqliteKVStoreConfig

from ..conftest import ProviderFixture, remote_stack_fixture
from ..env import get_env_or_fail
|
@pytest.fixture(scope="session")
def memory_remote() -> ProviderFixture:
    """Provider fixture that points the memory API at a remote test stack."""
    return remote_stack_fixture()
@pytest.fixture(scope="session")
def memory_faiss() -> ProviderFixture:
    """Inline FAISS memory provider backed by a throwaway sqlite kvstore.

    The temp file is created with delete=False so it outlives this function
    call and stays available for the whole test session.
    """
    db_file = tempfile.NamedTemporaryFile(delete=False, suffix=".db")
    kvstore = SqliteKVStoreConfig(db_path=db_file.name)
    faiss_provider = Provider(
        provider_id="faiss",
        provider_type="inline::faiss",
        config=FaissImplConfig(kvstore=kvstore.model_dump()).model_dump(),
    )
    return ProviderFixture(providers=[faiss_provider])
@pytest.fixture(scope="session")
def memory_pgvector() -> ProviderFixture:
    """Remote PGVector memory provider configured from PGVECTOR_* env vars.

    PGVECTOR_DB, PGVECTOR_USER, and PGVECTOR_PASSWORD are required (the
    fixture fails fast if they are unset); host and port fall back to
    localhost:5432.
    """
    return ProviderFixture(
        providers=[
            Provider(
                provider_id="pgvector",
                provider_type="remote::pgvector",
                config=PGVectorConfig(
                    host=os.getenv("PGVECTOR_HOST", "localhost"),
                    # os.getenv returns a str when the variable is set but the
                    # int default 5432 otherwise; cast so `port` is always int.
                    port=int(os.getenv("PGVECTOR_PORT", 5432)),
                    db=get_env_or_fail("PGVECTOR_DB"),
                    user=get_env_or_fail("PGVECTOR_USER"),
                    password=get_env_or_fail("PGVECTOR_PASSWORD"),
                ).model_dump(),
            )
        ],
    )
@pytest.fixture(scope="session")
def memory_weaviate() -> ProviderFixture:
    """Remote Weaviate memory provider; credentials travel via provider_data.

    WEAVIATE_API_KEY and WEAVIATE_CLUSTER_URL must be set in the environment.
    """
    weaviate_provider = Provider(
        provider_id="weaviate",
        provider_type="remote::weaviate",
        config=WeaviateConfig().model_dump(),
    )
    credentials = {
        "weaviate_api_key": get_env_or_fail("WEAVIATE_API_KEY"),
        "weaviate_cluster_url": get_env_or_fail("WEAVIATE_CLUSTER_URL"),
    }
    return ProviderFixture(
        providers=[weaviate_provider],
        provider_data=credentials,
    )
@pytest.fixture(scope="session")
def memory_chroma() -> ProviderFixture:
    """Remote Chroma memory provider; CHROMA_HOST and CHROMA_PORT must be set."""
    chroma_config = RemoteProviderConfig(
        host=get_env_or_fail("CHROMA_HOST"),
        port=get_env_or_fail("CHROMA_PORT"),
    )
    return ProviderFixture(
        providers=[
            Provider(
                provider_id="chroma",
                provider_type="remote::chromadb",
                config=chroma_config.model_dump(),
            )
        ]
    )
# Names accepted by the memory_stack parametrization; each maps to a
# "memory_<name>" fixture defined above.
MEMORY_FIXTURES = ["faiss", "pgvector", "weaviate", "remote", "chroma"]
@pytest_asyncio.fixture(scope="session")
async def memory_stack(request):
    """Resolve a memory stack for the provider selected by parametrization.

    ``request.param`` names one entry of MEMORY_FIXTURES; the matching
    ``memory_<name>`` fixture supplies providers and optional provider_data.

    Returns:
        Tuple of (memory impl, memory_banks impl).
    """
    provider_fixture = request.getfixturevalue(f"memory_{request.param}")

    impls = await resolve_impls_for_test_v2(
        [Api.memory],
        {"memory": provider_fixture.providers},
        provider_fixture.provider_data,
    )

    return impls[Api.memory], impls[Api.memory_banks]