# What does this PR do?

This PR kills the notion of "pure passthrough" remote providers. You can no longer specify a single provider as remote; you must specify a whole distribution (stack) as remote.

This PR also significantly fixes / upgrades the testing infrastructure, so you can now test against a remotely hosted stack server by just doing:

```bash
pytest -s -v -m remote test_agents.py \
  --inference-model=Llama3.1-8B-Instruct --safety-shield=Llama-Guard-3-1B \
  --env REMOTE_STACK_URL=http://localhost:5001
```

Also fixed `test_agents_persistence.py` (which was broken) and killed some deprecated testing functions.

## Test Plan

All the tests.
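For a quick manual check against the same remote server, something like the following works. This is a minimal sketch assuming the separate `llama-stack-client` package and its `LlamaStackClient` class (neither is part of this PR); the `base_url` mirrors the `REMOTE_STACK_URL` used above.

```python
# Minimal sketch: talk to a remotely hosted stack server directly.
# Assumes `pip install llama-stack-client`; base_url mirrors the
# REMOTE_STACK_URL passed to pytest above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")

# Listing models is a cheap way to confirm the remote stack is reachable.
for model in client.models.list():
    print(model)
```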
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_stack.distribution.datatypes import *  # noqa: F403


EMBEDDING_DEPS = [
    "blobfile",
    "chardet",
    "pypdf",
    "tqdm",
    "numpy",
    "scikit-learn",
    "scipy",
    "nltk",
    "sentencepiece",
    "transformers",
    # this happens to work because special dependencies are always installed last
    # so if there was a regular torch installed first, this would be ignored
    # we need a better way to do this to identify potential conflicts, etc.
    # for now, this lets us significantly reduce the size of the container which
    # does not have any "local" inference code (and hence does not need GPU-enabled torch)
    "torch --index-url https://download.pytorch.org/whl/cpu",
    "sentence-transformers --no-deps",
]


def available_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.memory,
            provider_type="inline::meta-reference",
            pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
            module="llama_stack.providers.inline.memory.faiss",
            config_class="llama_stack.providers.inline.memory.faiss.FaissImplConfig",
            deprecation_warning="Please use the `inline::faiss` provider instead.",
        ),
        InlineProviderSpec(
            api=Api.memory,
            provider_type="inline::faiss",
            pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
            module="llama_stack.providers.inline.memory.faiss",
            config_class="llama_stack.providers.inline.memory.faiss.FaissImplConfig",
        ),
        remote_provider_spec(
            Api.memory,
            AdapterSpec(
                adapter_type="chromadb",
                pip_packages=EMBEDDING_DEPS + ["chromadb-client"],
                module="llama_stack.providers.remote.memory.chroma",
                config_class="llama_stack.distribution.datatypes.RemoteProviderConfig",
            ),
        ),
        remote_provider_spec(
            Api.memory,
            AdapterSpec(
                adapter_type="pgvector",
                pip_packages=EMBEDDING_DEPS + ["psycopg2-binary"],
                module="llama_stack.providers.remote.memory.pgvector",
                config_class="llama_stack.providers.remote.memory.pgvector.PGVectorConfig",
            ),
        ),
        remote_provider_spec(
            Api.memory,
            AdapterSpec(
                adapter_type="weaviate",
                pip_packages=EMBEDDING_DEPS + ["weaviate-client"],
                module="llama_stack.providers.remote.memory.weaviate",
                config_class="llama_stack.providers.remote.memory.weaviate.WeaviateConfig",
                provider_data_validator="llama_stack.providers.remote.memory.weaviate.WeaviateRequestProviderData",
            ),
        ),
        remote_provider_spec(
            api=Api.memory,
            adapter=AdapterSpec(
                adapter_type="sample",
                pip_packages=[],
                module="llama_stack.providers.remote.memory.sample",
                config_class="llama_stack.providers.remote.memory.sample.SampleConfig",
            ),
        ),
        remote_provider_spec(
            Api.memory,
            AdapterSpec(
                adapter_type="qdrant",
                pip_packages=EMBEDDING_DEPS + ["qdrant-client"],
                module="llama_stack.providers.remote.memory.qdrant",
                config_class="llama_stack.providers.remote.memory.qdrant.QdrantConfig",
            ),
        ),
    ]
```
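As a quick sanity check, the registry above can be inspected directly. A minimal sketch, assuming this file is importable as `llama_stack.providers.registry.memory` (the import path is inferred, not stated in this diff):

```python
# Sketch: enumerate the memory providers registered in this file.
# The import path below is an assumption about where this module lives.
from llama_stack.providers.registry.memory import available_providers

for spec in available_providers():
    # Each spec carries a provider_type and the pip packages needed
    # to build a distribution that includes it.
    print(spec.provider_type, spec.pip_packages)
```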