forked from phoenix-oss/llama-stack-mirror
# What does this PR do? Change the Telemetry API to be able to support different use cases like returning traces for the UI and ability to export for Evals. Other changes: * Add a new trace_protocol decorator to decorate all our API methods so that any call to them will automatically get traced across all impls. * There is some issue with the decorator pattern of span creation when using async generators, where there are multiple yields with in the same context. I think its much more explicit by using the explicit context manager pattern using with. I moved the span creations in agent instance to be using with * Inject session id at the turn level, which should quickly give us all traces across turns for a given session Addresses #509 ## Test Plan ``` llama stack run /Users/dineshyv/.llama/distributions/llamastack-together/together-run.yaml PYTHONPATH=. python -m examples.agents.rag_with_memory_bank localhost 5000 curl -X POST 'http://localhost:5000/alpha/telemetry/query-traces' \ -H 'Content-Type: application/json' \ -d '{ "attribute_filters": [ { "key": "session_id", "op": "eq", "value": "dd667b87-ca4b-4d30-9265-5a0de318fc65" }], "limit": 100, "offset": 0, "order_by": ["start_time"] }' | jq . 
[ { "trace_id": "6902f54b83b4b48be18a6f422b13e16f", "root_span_id": "5f37b85543afc15a", "start_time": "2024-12-04T08:08:30.501587", "end_time": "2024-12-04T08:08:36.026463" }, { "trace_id": "92227dac84c0615ed741be393813fb5f", "root_span_id": "af7c5bb46665c2c8", "start_time": "2024-12-04T08:08:36.031170", "end_time": "2024-12-04T08:08:41.693301" }, { "trace_id": "7d578a6edac62f204ab479fba82f77b6", "root_span_id": "1d935e3362676896", "start_time": "2024-12-04T08:08:41.695204", "end_time": "2024-12-04T08:08:47.228016" }, { "trace_id": "dbd767d76991bc816f9f078907dc9ff2", "root_span_id": "f5a7ee76683b9602", "start_time": "2024-12-04T08:08:47.234578", "end_time": "2024-12-04T08:08:53.189412" } ] curl -X POST 'http://localhost:5000/alpha/telemetry/get-span-tree' \ -H 'Content-Type: application/json' \ -d '{ "span_id" : "6cceb4b48a156913", "max_depth": 2, "attributes_to_return": ["input"] }' | jq . % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 875 100 790 100 85 18462 1986 --:--:-- --:--:-- --:--:-- 20833 { "span_id": "6cceb4b48a156913", "trace_id": "dafa796f6aaf925f511c04cd7c67fdda", "parent_span_id": "892a66d726c7f990", "name": "retrieve_rag_context", "start_time": "2024-12-04T09:28:21.781995", "end_time": "2024-12-04T09:28:21.913352", "attributes": { "input": [ "{\"role\":\"system\",\"content\":\"You are a helpful assistant\"}", "{\"role\":\"user\",\"content\":\"What are the top 5 topics that were explained in the documentation? 
Only list succinct bullet points.\",\"context\":null}" ] }, "children": [ { "span_id": "1a2df181854064a8", "trace_id": "dafa796f6aaf925f511c04cd7c67fdda", "parent_span_id": "6cceb4b48a156913", "name": "MemoryRouter.query_documents", "start_time": "2024-12-04T09:28:21.787620", "end_time": "2024-12-04T09:28:21.906512", "attributes": { "input": null }, "children": [], "status": "ok" } ], "status": "ok" } ``` <img width="1677" alt="Screenshot 2024-12-04 at 9 42 56 AM" src="https://github.com/user-attachments/assets/4d3cea93-05ce-415a-93d9-4b1628631bf8">
121 lines
3.9 KiB
Python
121 lines
3.9 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
from typing import Any, Dict, List, Optional
|
|
|
|
from llama_stack.apis.datasetio import * # noqa: F403
|
|
|
|
|
|
import datasets as hf_datasets
|
|
|
|
from llama_stack.providers.datatypes import DatasetsProtocolPrivate
|
|
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_url
|
|
from llama_stack.providers.utils.kvstore import kvstore_impl
|
|
|
|
from .config import HuggingfaceDatasetIOConfig
|
|
|
|
# Key prefix under which dataset definitions are persisted in the kvstore;
# a range scan over [prefix, prefix + "\xff") enumerates all stored datasets.
DATASETS_PREFIX = "datasets:"
|
def load_hf_dataset(dataset_def: Dataset):
    """Materialize a Hugging Face dataset from a dataset definition.

    If the definition's metadata carries a "path" entry, the metadata dict is
    forwarded verbatim as keyword arguments to ``hf_datasets.load_dataset``
    (hub-backed dataset). Otherwise the dataset is fetched from
    ``dataset_def.url`` as a dataframe and converted to an in-memory Dataset.

    Raises:
        ValueError: if the URL could not be loaded into a dataframe.
    """
    metadata = dataset_def.metadata
    if metadata.get("path"):
        # Hub-backed: metadata holds the load_dataset kwargs directly.
        return hf_datasets.load_dataset(**metadata)

    frame = get_dataframe_from_url(dataset_def.url)
    if frame is None:
        raise ValueError(f"Failed to load dataset from {dataset_def.url}")

    return hf_datasets.Dataset.from_pandas(frame)
class HuggingfaceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
    """DatasetIO provider backed by Hugging Face datasets.

    Dataset definitions are persisted in a kvstore (under ``DATASETS_PREFIX``)
    so they survive restarts, and mirrored in an in-memory registry for fast
    lookup at request time.
    """

    def __init__(self, config: HuggingfaceDatasetIOConfig) -> None:
        self.config = config
        # local registry for keeping track of datasets within the provider
        self.dataset_infos: Dict[str, Dataset] = {}
        # Set in initialize(); None until then.
        self.kvstore = None

    async def initialize(self) -> None:
        """Open the kvstore and rehydrate previously registered datasets."""
        self.kvstore = await kvstore_impl(self.config.kvstore)
        # Load existing datasets from kvstore: range-scan every key that
        # starts with our prefix ("\xff" is the upper sentinel).
        start_key = DATASETS_PREFIX
        end_key = f"{DATASETS_PREFIX}\xff"
        stored_datasets = await self.kvstore.range(start_key, end_key)

        for serialized in stored_datasets:
            dataset = Dataset.model_validate_json(serialized)
            self.dataset_infos[dataset.identifier] = dataset

    async def shutdown(self) -> None: ...

    async def register_dataset(
        self,
        dataset_def: Dataset,
    ) -> None:
        """Persist a dataset definition and add it to the local registry."""
        # Store in kvstore. Use model_dump_json() for consistency with the
        # pydantic-v2 model_validate_json() used in initialize(); .json() is
        # the deprecated v1 spelling.
        key = f"{DATASETS_PREFIX}{dataset_def.identifier}"
        await self.kvstore.set(
            key=key,
            value=dataset_def.model_dump_json(),
        )
        self.dataset_infos[dataset_def.identifier] = dataset_def

    async def unregister_dataset(self, dataset_id: str) -> None:
        """Remove a dataset from the kvstore and the local registry.

        Raises:
            KeyError: if the dataset is not in the local registry.
        """
        key = f"{DATASETS_PREFIX}{dataset_id}"
        await self.kvstore.delete(key=key)
        del self.dataset_infos[dataset_id]

    async def get_rows_paginated(
        self,
        dataset_id: str,
        rows_in_page: int,
        page_token: Optional[str] = None,
        filter_condition: Optional[str] = None,
    ) -> PaginatedRowsResult:
        """Return a page of rows from a registered dataset.

        Args:
            dataset_id: identifier of a previously registered dataset.
            rows_in_page: page size; -1 means "all remaining rows".
            page_token: stringified start index from a previous call, or
                None/"" to start from the beginning.
            filter_condition: accepted for API compatibility; not applied
                by this provider.

        Raises:
            ValueError: if page_token is non-empty and not numeric.
            KeyError: if dataset_id is not registered.
        """
        dataset_def = self.dataset_infos[dataset_id]
        # NOTE: the full dataset is (re)loaded on every page request.
        loaded_dataset = load_hf_dataset(dataset_def)

        if page_token and not page_token.isnumeric():
            raise ValueError("Invalid page_token")

        # Empty/None token means "first page".
        start = int(page_token) if page_token else 0

        if rows_in_page == -1:
            end = len(loaded_dataset)
        else:
            end = min(start + rows_in_page, len(loaded_dataset))

        rows = [loaded_dataset[i] for i in range(start, end)]

        return PaginatedRowsResult(
            rows=rows,
            total_count=len(rows),
            next_page_token=str(end),
        )

    async def append_rows(self, dataset_id: str, rows: List[Dict[str, Any]]) -> None:
        """Append rows to a hub-backed dataset and push the result.

        Raises:
            NotImplementedError: for URL-based (non-hub) datasets.
            KeyError: if dataset_id is not registered.
        """
        dataset_def = self.dataset_infos[dataset_id]

        # Only hub-backed ("path" in metadata) datasets can be written back;
        # fail fast before the expensive load/concatenate work below.
        if not dataset_def.metadata.get("path", None):
            raise NotImplementedError(
                "Uploading to URL-based datasets is not supported yet"
            )

        loaded_dataset = load_hf_dataset(dataset_def)

        # Convert rows to HF Dataset format
        new_dataset = hf_datasets.Dataset.from_list(rows)

        # Concatenate the new rows with existing dataset
        updated_dataset = hf_datasets.concatenate_datasets(
            [loaded_dataset, new_dataset]
        )

        updated_dataset.push_to_hub(dataset_def.metadata["path"])