Removing example section

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
Francisco Javier Arceo 2025-03-13 20:19:32 -04:00
parent 7eb7c94888
commit 9ba2e17032
3 changed files with 0 additions and 162 deletions


@@ -1,33 +0,0 @@
ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
OS := linux
ifeq ($(shell uname -s), Darwin)
	OS = osx
endif

PYTHON_VERSION = $(shell python --version | grep -Eo '[0-9]\.[0-9]+')
PYTHON_VERSIONS := 3.10 3.11

build-dev:
	uv sync --extra dev --extra test
	uv pip install -e .
	# NOTE: each recipe line runs in its own shell, so the activation must
	# share a line with the install that depends on it.
	. .venv/bin/activate && uv pip install sqlite-vec chardet datasets sentence_transformers pypdf

build-ollama: fix-line-endings
	llama stack build --template ollama --image-type venv

fix-line-endings:
	sed -i '' 's/\r$$//' llama_stack/distribution/common.sh
	sed -i '' 's/\r$$//' llama_stack/distribution/build_venv.sh

test-sqlite-vec:
	pytest llama_stack/providers/tests/vector_io/test_sqlite_vec.py \
		-v -s --tb=short --disable-warnings --asyncio-mode=auto

test-ollama-vector-integration:
	INFERENCE_MODEL=llama3.2:3b-instruct-fp16 LLAMA_STACK_CONFIG=ollama \
		pytest -s -v tests/client-sdk/vector_io/test_vector_io.py

serve-ollama:
	ollama run llama3.2:3b-instruct-fp16 --keepalive 24h


@@ -42,23 +42,6 @@ And other important items outlined more in depth in the [GitHub documentation](h
## Nomination Process for Triage-role

The process for nomination for the triage role should be simple and at the discretion of the maintainers.

## Example

We tested this functionality using the @feast-dev repository and have provided screenshots outlining how to make this change; a scripted alternative is sketched after these steps.

Step 1:
![Figure 1: Select Repository Settings](./_static/triage-role-config-1.png)

Step 2:
![Figure 2: Invite Outside Collaborator](./_static/triage-role-config-2.png)

Step 3:
![Figure 3: Select Triage Role](./_static/triage-role-config-3.png)

Step 4:
![Figure 4: User Receives Triage Role](./_static/triage-role-config-4.png)
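
For maintainers who prefer to script this, GitHub's REST API exposes the same setting through the "add repository collaborator" endpoint. The snippet below is a minimal sketch, not part of the proposal itself: it assumes a `GITHUB_TOKEN` environment variable holding a token with admin rights on the repository, and `OWNER`, `REPO`, and `USERNAME` are placeholders.

```python
# Minimal sketch: grant the "triage" role via
# PUT /repos/{owner}/{repo}/collaborators/{username}
import os

import requests

OWNER = "example-org"     # placeholder organization
REPO = "example-repo"     # placeholder repository
USERNAME = "contributor"  # placeholder collaborator

response = requests.put(
    f"https://api.github.com/repos/{OWNER}/{REPO}/collaborators/{USERNAME}",
    headers={
        # Assumes GITHUB_TOKEN holds a token with admin access to the repo.
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
        "Accept": "application/vnd.github+json",
    },
    json={"permission": "triage"},  # the role selected in Step 3 above
    timeout=30,
)
# 201 means an invitation was created; 204 means an existing
# collaborator's permission was updated in place.
response.raise_for_status()
print(response.status_code)
```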

## Thank you

Thank you in advance for your feedback and support, and we look forward to collaborating on this great project!


@@ -1,112 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import os
import uuid

from termcolor import cprint

# Set environment variables
os.environ["INFERENCE_MODEL"] = "llama3.2:3b-instruct-fp16"
os.environ["LLAMA_STACK_CONFIG"] = "ollama"

# Import libraries after setting environment variables
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger
from llama_stack_client.types import Document
from llama_stack_client.types.agent_create_params import AgentConfig

from llama_stack.distribution.library_client import LlamaStackAsLibraryClient


def main():
    # Initialize the client
    client = LlamaStackAsLibraryClient("ollama")
    vector_db_id = f"test-vector-db-{uuid.uuid4().hex}"
    _ = client.initialize()

    model_id = "llama3.2:3b-instruct-fp16"

    # Define the list of document URLs and create Document objects
    urls = [
        "chat.rst",
        "llama3.rst",
        "memory_optimizations.rst",
        "lora_finetune.rst",
    ]
    documents = [
        Document(
            document_id=f"num-{i}",
            content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}",
            mime_type="text/plain",
            metadata={},
        )
        for i, url in enumerate(urls)
    ]

    # Register a sqlite_vec-backed vector database and index the documents
    client.vector_dbs.register(
        provider_id="sqlite_vec",
        vector_db_id=vector_db_id,
        embedding_model="all-MiniLM-L6-v2",
        embedding_dimension=384,
    )
    client.tool_runtime.rag_tool.insert(
        documents=documents,
        vector_db_id=vector_db_id,
        chunk_size_in_tokens=512,
    )

    # Create agent configuration
    agent_config = AgentConfig(
        model=model_id,
        instructions="You are a helpful assistant",
        enable_session_persistence=False,
        toolgroups=[
            {
                "name": "builtin::rag",
                "args": {
                    "vector_db_ids": [vector_db_id],
                },
            }
        ],
    )

    # Instantiate the Agent
    agent = Agent(client, agent_config)

    # List of user prompts
    user_prompts = [
        "What are the top 5 topics that were explained in the documentation? Only list succinct bullet points.",
        "Was anything related to 'Llama3' discussed, if so what?",
        "Tell me how to use LoRA",
        "What about Quantization?",
    ]

    # Create a session for the agent
    session_id = agent.create_session("test-session")

    # Process each prompt and display the output
    for prompt in user_prompts:
        cprint(f"User> {prompt}", "green")
        response = agent.create_turn(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            session_id=session_id,
        )
        # Log and print events from the response
        for log in EventLogger().log(response):
            log.print()


if __name__ == "__main__":
    main()