Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-09 13:14:39 +00:00.
fix(vector-io): unify score calculation to use cosine and normalize to [0,1]
This commit is contained in:
parent
9618adba89
commit
a0e0c7030b
9 changed files with 166 additions and 42 deletions
|
@ -222,3 +222,63 @@ def test_query_returns_valid_object_when_identical_to_embedding_in_vdb(
|
|||
assert len(response.chunks) > 0
|
||||
assert response.chunks[0].metadata["document_id"] == "doc1"
|
||||
assert response.chunks[0].metadata["source"] == "precomputed"
|
||||
|
||||
|
||||
def test_vector_similarity_scores_are_normalized(
    client_with_empty_registry, embedding_model_id, embedding_dimension, sample_chunks
):
    """Every vector provider must return similarity scores normalized to [0, 1].

    Registers a fresh vector DB, inserts the fixture chunks, then issues
    queries spanning high, medium, and near-zero similarity. For each
    response it asserts that chunks and scores pair up, that every score is
    a number inside [0, 1], and that results are ranked most-similar-first.
    """
    vector_db_name = "test_score_normalization_db"

    registration = client_with_empty_registry.vector_dbs.register(
        vector_db_id=vector_db_name,
        embedding_model=embedding_model_id,
        embedding_dimension=embedding_dimension,
    )
    actual_vector_db_id = registration.identifier

    # Load the fixture chunks so the queries below have content to match.
    client_with_empty_registry.vector_io.insert(
        vector_db_id=actual_vector_db_id,
        chunks=sample_chunks,
    )

    # Queries chosen to exercise normalization across the similarity spectrum.
    test_queries = [
        # High similarity query that should match Python doc chunk
        "Python programming language with readable code",
        # Medium similarity query
        "artificial intelligence and machine learning systems",
        # Lower similarity query
        "What is the capital of France?",
        # High similarity query that should match neural networks chunk
        "biological neural networks and artificial neurons",
        # Very low similarity query to test edge case normalization
        "xyzabc random nonsense gibberish qwerty asdfgh",
    ]

    for query in test_queries:
        response = client_with_empty_registry.vector_io.query(
            vector_db_id=actual_vector_db_id,
            query=query,
        )

        # The response must be well-formed, with exactly one score per chunk.
        assert response is not None, f"Query '{query}' returned None response"
        assert len(response.chunks) > 0, f"Query '{query}' returned no chunks"
        assert len(response.scores) > 0, f"Query '{query}' returned no scores"
        assert len(response.chunks) == len(response.scores), "Mismatch between chunks and scores count"

        # Each score must be numeric and lie inside the normalized [0, 1] interval.
        for i, score in enumerate(response.scores):
            assert isinstance(score, (int, float)), f"Score at index {i} is not numeric: {type(score)}"
            assert 0.0 <= score <= 1.0, (
                f"Score at index {i} is not normalized: {score} (should be in [0,1] range) for query '{query}'"
            )

        # Providers must rank results by similarity, highest score first.
        for i in range(1, len(response.scores)):
            assert response.scores[i - 1] >= response.scores[i], (
                f"Scores not in descending order at indices {i - 1} and {i}: "
                f"{response.scores[i - 1]} >= {response.scores[i]} for query '{query}'"
            )
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue