Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-13 05:17:26 +00:00)
# What does this PR do?

`AgentEventLogger` only supports streaming responses, so I suggest adding a comment near the bottom of `demo_script.py` letting the user know this, e.g., if they change the `stream` value to `False` in the call to `create_turn`, they need to comment out the logging lines. See https://github.com/llamastack/llama-stack-client-python/issues/15

---------

Signed-off-by: Dean Wampler <dean.wampler@ibm.com>
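For reference, here is the constraint the PR is calling out, as a minimal sketch; the `agent`, `prompt`, and `AgentEventLogger` names are the ones used in `demo_script.py` below:

```python
# Streaming turn: `response` is an event stream that AgentEventLogger can log.
response = agent.create_turn(
    messages=[{"role": "user", "content": prompt}],
    session_id=agent.create_session("rag_session"),
    stream=True,
)
for log in AgentEventLogger().log(response):
    log.print()

# Non-streaming turn: `response` is a completed turn, not a stream, so the
# logging lines above must be commented out; print the response directly.
response = agent.create_turn(
    messages=[{"role": "user", "content": prompt}],
    session_id=agent.create_session("rag_session"),
    stream=False,
)
print(response)
```

The updated script below folds both cases behind a single `use_stream` flag.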
demo_script.py · 67 lines · 1.8 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack_client import Agent, AgentEventLogger, RAGDocument, LlamaStackClient

vector_db_id = "my_demo_vector_db"
client = LlamaStackClient(base_url="http://localhost:8321")

models = client.models.list()

# Select the first LLM and the first embedding model
model_id = next(m for m in models if m.model_type == "llm").identifier
embedding_model_id = (
    em := next(m for m in models if m.model_type == "embedding")
).identifier
embedding_dimension = em.metadata["embedding_dimension"]

# Register a vector DB, backed by the faiss provider, to hold the document chunks.
_ = client.vector_dbs.register(
    vector_db_id=vector_db_id,
    embedding_model=embedding_model_id,
    embedding_dimension=embedding_dimension,
    provider_id="faiss",
)

source = "https://www.paulgraham.com/greatwork.html"
print("rag_tool> Ingesting document:", source)
document = RAGDocument(
    document_id="document_1",
    content=source,
    mime_type="text/html",
    metadata={},
)
# Chunk, embed, and store the document in the vector DB.
client.tool_runtime.rag_tool.insert(
    documents=[document],
    vector_db_id=vector_db_id,
    chunk_size_in_tokens=50,
)

# Create an agent that can search the vector DB via the built-in RAG tool.
agent = Agent(
    client,
    model=model_id,
    instructions="You are a helpful assistant",
    tools=[
        {
            "name": "builtin::rag/knowledge_search",
            "args": {"vector_db_ids": [vector_db_id]},
        }
    ],
)

prompt = "How do you do great work?"
print("prompt>", prompt)

use_stream = True
response = agent.create_turn(
    messages=[{"role": "user", "content": prompt}],
    session_id=agent.create_session("rag_session"),
    stream=use_stream,
)

# Only call `AgentEventLogger().log(response)` for streaming responses.
if use_stream:
    for log in AgentEventLogger().log(response):
        log.print()
else:
    print(response)