Mirror of https://github.com/meta-llama/llama-stack.git
## What does this PR do?

This is a long-pending change and particularly important to get done now. Specifically:

- We cannot "localize" (aka download) any URLs from media attachments anywhere near our modeling code. It must be done within llama-stack.
- `PIL.Image` is infesting all our APIs via `ImageMedia -> InterleavedTextMedia`, and that cannot be right at all. Anything in the API surface must be "naturally serializable". We need a standard `{ type: "image", image_url: "<...>" }`, which is more extensible (see the sketch below, after the test plan).
- `UserMessage`, `SystemMessage`, etc. are moved completely to llama-stack from the llama-models repository.

See https://github.com/meta-llama/llama-models/pull/244 for the corresponding PR in llama-models.

## Test Plan

```bash
cd llama_stack/providers/tests

pytest -s -v -k "fireworks or ollama or together" inference/test_vision_inference.py
pytest -s -v -k "(fireworks or ollama or together) and llama_3b" inference/test_text_inference.py

pytest -s -v -k chroma memory/test_memory.py \
  --env EMBEDDING_DIMENSION=384 --env CHROMA_DB_PATH=/tmp/foobar

pytest -s -v -k fireworks agents/test_agents.py \
  --safety-shield=meta-llama/Llama-Guard-3-8B \
  --inference-model=meta-llama/Llama-3.1-8B-Instruct
```

Updated the client SDK (see PR ...), installed the SDK in the same environment, and then ran the SDK tests:

```bash
cd tests/client-sdk
LLAMA_STACK_CONFIG=together pytest -s -v agents/test_agents.py
LLAMA_STACK_CONFIG=ollama pytest -s -v memory/test_memory.py

# this one needed a bit of hacking in the run.yaml to ensure I could register the vision model correctly
INFERENCE_MODEL=llama3.2-vision:latest LLAMA_STACK_CONFIG=ollama pytest -s -v inference/test_inference.py
```
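To make the "naturally serializable" point concrete, here is a minimal sketch of what such a content item can look like. This is illustrative only: the type names are assumptions, it presumes Pydantic v2, and the actual llama-stack definitions may differ in detail.

```python
# Sketch only: illustrative names, assuming Pydantic v2; not necessarily
# the exact types llama-stack ended up with.
from typing import Literal, Union

from pydantic import BaseModel


class TextContentItem(BaseModel):
    type: Literal["text"] = "text"
    text: str


class ImageContentItem(BaseModel):
    type: Literal["image"] = "image"
    image_url: str  # a plain URL (or data URI); no PIL.Image in the API surface


# Interleaved content stays naturally serializable: strings and tagged items.
InterleavedContent = Union[str, list[Union[str, TextContentItem, ImageContentItem]]]

item = ImageContentItem(image_url="https://example.com/cat.png")
print(item.model_dump_json())  # {"type":"image","image_url":"https://example.com/cat.png"}
```

The key design property is that every item carries a discriminating `type` tag and only JSON-friendly fields, so the whole message shape can cross the wire without any localization or image decoding near the modeling code.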
The `conftest.py` for the memory provider tests touched by this change (95 lines, 2.4 KiB, Python):
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import pytest

from ..conftest import get_provider_fixture_overrides
from ..inference.fixtures import INFERENCE_FIXTURES
from .fixtures import MEMORY_FIXTURES

# Default (inference, memory) provider pairings; each carries a pytest marker
# so a single backend can be selected with `-k <id>` or `-m <id>`.
DEFAULT_PROVIDER_COMBINATIONS = [
    pytest.param(
        {
            "inference": "sentence_transformers",
            "memory": "faiss",
        },
        id="sentence_transformers",
        marks=pytest.mark.sentence_transformers,
    ),
    pytest.param(
        {
            "inference": "ollama",
            "memory": "faiss",
        },
        id="ollama",
        marks=pytest.mark.ollama,
    ),
    pytest.param(
        {
            "inference": "sentence_transformers",
            "memory": "chroma",
        },
        id="chroma",
        marks=pytest.mark.chroma,
    ),
    pytest.param(
        {
            "inference": "bedrock",
            "memory": "qdrant",
        },
        id="qdrant",
        marks=pytest.mark.qdrant,
    ),
    pytest.param(
        {
            "inference": "fireworks",
            "memory": "weaviate",
        },
        id="weaviate",
        marks=pytest.mark.weaviate,
    ),
]


def pytest_addoption(parser):
    parser.addoption(
        "--embedding-model",
        action="store",
        default=None,
        help="Specify the embedding model to use for testing",
    )


def pytest_configure(config):
    # Register one marker per memory fixture so marker-based selection
    # does not trigger "unknown marker" warnings.
    for fixture_name in MEMORY_FIXTURES:
        config.addinivalue_line(
            "markers",
            f"{fixture_name}: marks tests as {fixture_name} specific",
        )


def pytest_generate_tests(metafunc):
    if "embedding_model" in metafunc.fixturenames:
        model = metafunc.config.getoption("--embedding-model")
        if model:
            params = [pytest.param(model, id="")]
        else:
            # Fall back to a small default sentence-transformers model.
            params = [pytest.param("all-MiniLM-L6-v2", id="")]

        metafunc.parametrize("embedding_model", params, indirect=True)

    if "memory_stack" in metafunc.fixturenames:
        available_fixtures = {
            "inference": INFERENCE_FIXTURES,
            "memory": MEMORY_FIXTURES,
        }
        # Provider overrides given on the command line win; otherwise fall
        # back to the default combinations defined above.
        combinations = (
            get_provider_fixture_overrides(metafunc.config, available_fixtures)
            or DEFAULT_PROVIDER_COMBINATIONS
        )
        metafunc.parametrize("memory_stack", combinations, indirect=True)
```
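Putting the hooks together: `pytest_generate_tests` parametrizes `memory_stack` with one of the provider combinations, and the `--embedding-model` option (registered in `pytest_addoption`) overrides the default `all-MiniLM-L6-v2`. An illustrative invocation in the style of the test plan above (not a command taken from the PR; the `--env` values mirror the Chroma run in the test plan):

```bash
cd llama_stack/providers/tests
pytest -s -v -k chroma memory/test_memory.py \
  --embedding-model all-MiniLM-L6-v2 \
  --env EMBEDDING_DIMENSION=384 --env CHROMA_DB_PATH=/tmp/foobar
```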