forked from phoenix-oss/llama-stack-mirror
# What does this PR do? - as title, cleaning up `import *`'s - upgrade tests to make them more robust to bad model outputs - remove import *'s in llama_stack/apis/* (skip __init__ modules) <img width="465" alt="image" src="https://github.com/user-attachments/assets/d8339c13-3b40-4ba5-9c53-0d2329726ee2" /> - run `sh run_openapi_generator.sh`, no types gets affected ## Test Plan ### Providers Tests **agents** ``` pytest -v -s llama_stack/providers/tests/agents/test_agents.py -m "together" --safety-shield meta-llama/Llama-Guard-3-8B --inference-model meta-llama/Llama-3.1-405B-Instruct-FP8 ``` **inference** ```bash # meta-reference torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py # together pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py pytest ./llama_stack/providers/tests/inference/test_prompt_adapter.py ``` **safety** ``` pytest -v -s llama_stack/providers/tests/safety/test_safety.py -m together --safety-shield meta-llama/Llama-Guard-3-8B ``` **memory** ``` pytest -v -s llama_stack/providers/tests/memory/test_memory.py -m "sentence_transformers" --env EMBEDDING_DIMENSION=384 ``` **scoring** ``` pytest -v -s -m llm_as_judge_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py --judge-model meta-llama/Llama-3.2-3B-Instruct pytest -v -s -m basic_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py pytest -v -s -m braintrust_scoring_together_inference 
llama_stack/providers/tests/scoring/test_scoring.py ``` **datasetio** ``` pytest -v -s -m localfs llama_stack/providers/tests/datasetio/test_datasetio.py pytest -v -s -m huggingface llama_stack/providers/tests/datasetio/test_datasetio.py ``` **eval** ``` pytest -v -s -m meta_reference_eval_together_inference llama_stack/providers/tests/eval/test_eval.py pytest -v -s -m meta_reference_eval_together_inference_huggingface_datasetio llama_stack/providers/tests/eval/test_eval.py ``` ### Client-SDK Tests ``` LLAMA_STACK_BASE_URL=http://localhost:5000 pytest -v ./tests/client-sdk ``` ### llama-stack-apps ``` PORT=5000 LOCALHOST=localhost python -m examples.agents.hello $LOCALHOST $PORT python -m examples.agents.inflation $LOCALHOST $PORT python -m examples.agents.podcast_transcript $LOCALHOST $PORT python -m examples.agents.rag_as_attachments $LOCALHOST $PORT python -m examples.agents.rag_with_memory_bank $LOCALHOST $PORT python -m examples.safety.llama_guard_demo_mm $LOCALHOST $PORT python -m examples.agents.e2e_loop_with_custom_tools $LOCALHOST $PORT # Vision model python -m examples.interior_design_assistant.app python -m examples.agent_store.app $LOCALHOST $PORT ``` ### CLI ``` which llama llama model prompt-format -m Llama3.2-11B-Vision-Instruct llama model list llama stack list-apis llama stack list-providers inference llama stack build --template ollama --image-type conda ``` ### Distributions Tests **ollama** ``` llama stack build --template ollama --image-type conda ollama run llama3.2:1b-instruct-fp16 llama stack run ./llama_stack/templates/ollama/run.yaml --env INFERENCE_MODEL=meta-llama/Llama-3.2-1B-Instruct ``` **fireworks** ``` llama stack build --template fireworks --image-type conda llama stack run ./llama_stack/templates/fireworks/run.yaml ``` **together** ``` llama stack build --template together --image-type conda llama stack run ./llama_stack/templates/together/run.yaml ``` **tgi** ``` llama stack run ./llama_stack/templates/tgi/run.yaml --env 
TGI_URL=http://0.0.0.0:5009 --env INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct ``` ## Sources Please link relevant resources if necessary. ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Ran pre-commit to handle lint / formatting issues. - [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [ ] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests.
203 lines
6.7 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
import json
|
|
import logging
|
|
|
|
from typing import Any, Dict, List, Optional
|
|
|
|
import weaviate
|
|
import weaviate.classes as wvc
|
|
from numpy.typing import NDArray
|
|
from weaviate.classes.init import Auth
|
|
from weaviate.classes.query import Filter
|
|
|
|
from llama_stack.apis.common.content_types import InterleavedContent
|
|
from llama_stack.apis.memory import (
|
|
Chunk,
|
|
Memory,
|
|
MemoryBankDocument,
|
|
QueryDocumentsResponse,
|
|
)
|
|
from llama_stack.apis.memory_banks import MemoryBank, MemoryBankType
|
|
from llama_stack.distribution.request_headers import NeedsRequestProviderData
|
|
from llama_stack.providers.datatypes import Api, MemoryBanksProtocolPrivate
|
|
from llama_stack.providers.utils.memory.vector_store import (
|
|
BankWithIndex,
|
|
EmbeddingIndex,
|
|
)
|
|
|
|
from .config import WeaviateConfig, WeaviateRequestProviderData
|
|
|
|
# Module-level logger named after this module, per stdlib convention.
log = logging.getLogger(__name__)
|
|
|
|
|
|
class WeaviateIndex(EmbeddingIndex):
    """EmbeddingIndex backed by a single Weaviate collection.

    Each chunk is stored as one Weaviate object: the serialized chunk JSON
    goes into the ``chunk_content`` text property and the embedding is
    attached as the object's vector.
    """

    def __init__(self, client: weaviate.Client, collection_name: str):
        self.client = client
        self.collection_name = collection_name

    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
        """Insert chunks with their precomputed embeddings.

        :param chunks: chunks to store; serialized to JSON per object.
        :param embeddings: one embedding row per chunk, same order.
        :raises ValueError: if the two sequences differ in length.
        """
        # Raise (not assert) so the check survives `python -O`.
        if len(chunks) != len(embeddings):
            raise ValueError(
                f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
            )

        data_objects = [
            wvc.data.DataObject(
                properties={
                    "chunk_content": chunk.json(),
                },
                vector=embeddings[i].tolist(),
            )
            for i, chunk in enumerate(chunks)
        ]

        # Inserting chunks into a prespecified Weaviate collection
        collection = self.client.collections.get(self.collection_name)

        # TODO: make this async friendly
        collection.data.insert_many(data_objects)

    async def query(
        self, embedding: NDArray, k: int, score_threshold: float
    ) -> QueryDocumentsResponse:
        """Return the k nearest chunks to `embedding` with inverse-distance scores.

        NOTE(review): `score_threshold` is currently ignored — no filtering is
        applied to the results; confirm whether callers rely on it.
        """
        collection = self.client.collections.get(self.collection_name)

        results = collection.query.near_vector(
            near_vector=embedding.tolist(),
            limit=k,
            return_metadata=wvc.query.MetadataQuery(distance=True),
        )

        chunks = []
        scores = []
        for doc in results.objects:
            chunk_json = doc.properties["chunk_content"]
            try:
                chunk_dict = json.loads(chunk_json)
                chunk = Chunk(**chunk_dict)
            except Exception:
                # Skip unparseable objects rather than failing the whole query.
                log.exception(f"Failed to parse document: {chunk_json}")
                continue

            chunks.append(chunk)
            # Score is inverse distance; guard the exact-match case
            # (distance == 0) which previously raised ZeroDivisionError.
            distance = doc.metadata.distance
            scores.append(1.0 / distance if distance else float("inf"))

        return QueryDocumentsResponse(chunks=chunks, scores=scores)

    async def delete(self, chunk_ids: List[str]) -> None:
        """Delete objects whose `id` property matches any of `chunk_ids`.

        NOTE(review): `add_chunks` only writes a `chunk_content` property, so
        filtering on an `id` property may match nothing — verify the schema
        actually carries `id` before relying on this.
        """
        collection = self.client.collections.get(self.collection_name)
        collection.data.delete_many(
            where=Filter.by_property("id").contains_any(chunk_ids)
        )
|
|
|
|
|
|
class WeaviateMemoryAdapter(
    Memory,
    NeedsRequestProviderData,
    MemoryBanksProtocolPrivate,
):
    """Memory provider backed by Weaviate Cloud.

    Connection credentials arrive per-request via `NeedsRequestProviderData`
    (a `WeaviateRequestProviderData` with cluster URL and API key); clients
    are cached per credential pair, and bank indices are cached by bank id.
    """

    # NOTE(review): `Api.inference` is an enum member, not a class, so this
    # annotation does not describe a real type — presumably the inference
    # API implementation is meant; confirm against the provider registry.
    def __init__(self, config: WeaviateConfig, inference_api: Api.inference) -> None:
        self.config = config
        self.inference_api = inference_api
        # Maps "cluster_url::api_key" -> connected weaviate client.
        self.client_cache = {}
        # Maps bank id -> BankWithIndex.
        self.cache = {}

    def _get_client(self) -> weaviate.Client:
        """Return a (cached) Weaviate client for the current request's credentials."""
        provider_data = self.get_request_provider_data()
        assert provider_data is not None, "Request provider data must be set"
        assert isinstance(provider_data, WeaviateRequestProviderData)

        key = f"{provider_data.weaviate_cluster_url}::{provider_data.weaviate_api_key}"
        if key in self.client_cache:
            return self.client_cache[key]

        client = weaviate.connect_to_weaviate_cloud(
            cluster_url=provider_data.weaviate_cluster_url,
            auth_credentials=Auth.api_key(provider_data.weaviate_api_key),
        )
        self.client_cache[key] = client
        return client

    async def initialize(self) -> None:
        # No eager setup: clients are created lazily per request.
        pass

    async def shutdown(self) -> None:
        # Close every cached client connection.
        for client in self.client_cache.values():
            client.close()

    async def register_memory_bank(
        self,
        memory_bank: MemoryBank,
    ) -> None:
        """Create the backing collection (if absent) and cache an index for the bank."""
        assert (
            memory_bank.memory_bank_type == MemoryBankType.vector.value
        ), f"Only vector banks are supported {memory_bank.memory_bank_type}"

        client = self._get_client()

        # Create collection if it doesn't exist
        if not client.collections.exists(memory_bank.identifier):
            client.collections.create(
                name=memory_bank.identifier,
                # Vectors are supplied by us (via inference_api), not Weaviate.
                vectorizer_config=wvc.config.Configure.Vectorizer.none(),
                properties=[
                    wvc.config.Property(
                        name="chunk_content",
                        data_type=wvc.config.DataType.TEXT,
                    ),
                ],
            )

        self.cache[memory_bank.identifier] = BankWithIndex(
            memory_bank,
            WeaviateIndex(client=client, collection_name=memory_bank.identifier),
            self.inference_api,
        )

    async def _get_and_cache_bank_index(self, bank_id: str) -> Optional[BankWithIndex]:
        """Return the cached index for `bank_id`, rebuilding it from the store if needed.

        :raises ValueError: if the bank is unknown or its collection is missing.
        """
        if bank_id in self.cache:
            return self.cache[bank_id]

        # NOTE(review): `self.memory_bank_store` is never assigned in
        # __init__ — presumably injected by the distribution framework
        # after construction; verify before calling this path.
        bank = await self.memory_bank_store.get_memory_bank(bank_id)
        if not bank:
            raise ValueError(f"Bank {bank_id} not found")

        client = self._get_client()
        if not client.collections.exists(bank.identifier):
            raise ValueError(f"Collection with name `{bank.identifier}` not found")

        index = BankWithIndex(
            bank=bank,
            index=WeaviateIndex(client=client, collection_name=bank_id),
            inference_api=self.inference_api,
        )
        self.cache[bank_id] = index
        return index

    async def insert_documents(
        self,
        bank_id: str,
        documents: List[MemoryBankDocument],
        ttl_seconds: Optional[int] = None,
    ) -> None:
        """Insert documents into the bank's index.

        `ttl_seconds` is accepted but not forwarded to the index here.
        """
        index = await self._get_and_cache_bank_index(bank_id)
        if not index:
            raise ValueError(f"Bank {bank_id} not found")

        await index.insert_documents(documents)

    async def query_documents(
        self,
        bank_id: str,
        query: InterleavedContent,
        params: Optional[Dict[str, Any]] = None,
    ) -> QueryDocumentsResponse:
        """Query the bank's index, delegating scoring/retrieval to BankWithIndex."""
        index = await self._get_and_cache_bank_index(bank_id)
        if not index:
            raise ValueError(f"Bank {bank_id} not found")

        return await index.query_documents(query, params)
|