From 68b70d1b1fdd2c74d2643f180a673ef2671de34f Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Wed, 27 Nov 2024 15:27:10 -0800
Subject: [PATCH] playground

---
 llama_stack/distribution/ui/app.py                 |  6 +-
 .../distribution/ui/page/playground/chat.py        | 10 +++-
 .../distribution/ui/page/playground/rag.py         | 60 +++++++++++++++++++
 3 files changed, 74 insertions(+), 2 deletions(-)
 create mode 100644 llama_stack/distribution/ui/page/playground/rag.py

diff --git a/llama_stack/distribution/ui/app.py b/llama_stack/distribution/ui/app.py
index 2943e709c..77bb5ff6d 100644
--- a/llama_stack/distribution/ui/app.py
+++ b/llama_stack/distribution/ui/app.py
@@ -19,9 +19,13 @@ def main():
     chat_page = st.Page(
         "page/playground/chat.py", title="Chat", icon="💬", default=True
     )
+    rag_page = st.Page("page/playground/rag.py", title="RAG", icon="💬", default=False)
 
     pg = st.navigation(
-        {"Evaluations": [application_evaluation_page], "Playground": [chat_page]}
+        {
+            "Evaluations": [application_evaluation_page],
+            "Playground": [chat_page, rag_page],
+        }
     )
     pg.run()
 
diff --git a/llama_stack/distribution/ui/page/playground/chat.py b/llama_stack/distribution/ui/page/playground/chat.py
index 6e96125ca..b27e6b768 100644
--- a/llama_stack/distribution/ui/page/playground/chat.py
+++ b/llama_stack/distribution/ui/page/playground/chat.py
@@ -54,6 +54,11 @@ with st.sidebar:
     )
 
     stream = st.checkbox("Stream", value=True)
+    system_prompt = st.text_area(
+        "System Prompt",
+        value="You are a helpful AI assistant.",
+        help="Initial instructions given to the AI to set its behavior and context",
+    )
 
 
 # Main chat interface
@@ -83,7 +88,10 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
 
         full_response = ""
         response = llama_stack_api.client.inference.chat_completion(
-            messages=[{"role": "user", "content": prompt}],
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt},
+            ],
             model_id=selected_model,
             stream=stream,
             sampling_params={
diff --git a/llama_stack/distribution/ui/page/playground/rag.py b/llama_stack/distribution/ui/page/playground/rag.py
new file mode 100644
index 000000000..cacdfb068
--- /dev/null
+++ b/llama_stack/distribution/ui/page/playground/rag.py
@@ -0,0 +1,60 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import streamlit as st
+
+
+def rag_chat_page():
+    st.title("RAG")
+
+    # File/Directory Upload Section
+    st.sidebar.subheader("Upload Documents")
+    uploaded_files = st.sidebar.file_uploader(
+        "Upload file(s) or directory",
+        accept_multiple_files=True,
+        type=["txt", "pdf", "doc", "docx"],  # Add more file types as needed
+    )
+
+    # Process uploaded files
+    if uploaded_files:
+        st.sidebar.success(f"Successfully uploaded {len(uploaded_files)} files")
+
+    # Chat Interface
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    # Display chat history
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # Chat input
+    if prompt := st.chat_input("Ask a question about your documents"):
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+
+        # Display user message
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        # Here you would add your RAG logic to:
+        # 1. Process the uploaded documents
+        # 2. Create embeddings
+        # 3. Perform similarity search
+        # 4. Generate response using LLM
+
+        # For now, just echo a placeholder response
+        response = f"I received your question: {prompt}\nThis is where the RAG response would go."
+
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": response})
+
+        # Display assistant response
+        with st.chat_message("assistant"):
+            st.markdown(response)
+
+
+rag_chat_page()
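
Note for reviewers: the four numbered placeholder steps in rag.py are intentionally left unimplemented by this patch. As a rough illustration only, the sketch below shows one way they could be wired up. A naive keyword-overlap retriever stands in for real embeddings and vector search; the helper names (chunk_text, retrieve, answer_with_rag) are hypothetical and not part of llama_stack. Only the client.inference.chat_completion() call and its message shape mirror what chat.py already does; the completion_message.content accessor on the non-streaming response is likewise an assumption.

    # Hypothetical sketch only -- not part of this patch.
    # Assumes `client` is the same llama_stack client chat.py uses and
    # `model_id` comes from a sidebar model selector like chat.py's.


    def chunk_text(text: str, chunk_size: int = 500) -> list[str]:
        # Step 1: process documents by splitting them into fixed-size chunks
        return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]


    def retrieve(query: str, chunks: list[str], k: int = 3) -> list[str]:
        # Steps 2-3: keyword-overlap scoring stands in for embeddings +
        # similarity search; a real implementation would use a vector store
        query_words = set(query.lower().split())
        ranked = sorted(
            chunks,
            key=lambda c: len(query_words & set(c.lower().split())),
            reverse=True,
        )
        return ranked[:k]


    def answer_with_rag(client, model_id: str, prompt: str, raw_texts: list[str]) -> str:
        # Step 4: generate a response, prepending retrieved context as a
        # system message (the same message shape chat.py now sends)
        chunks = [c for text in raw_texts for c in chunk_text(text)]
        context = "\n\n".join(retrieve(prompt, chunks))
        response = client.inference.chat_completion(
            messages=[
                {"role": "system", "content": f"Answer using this context:\n{context}"},
                {"role": "user", "content": prompt},
            ],
            model_id=model_id,
            stream=False,
        )
        # Accessor assumed for the non-streaming response shape
        return response.completion_message.content

Inside rag_chat_page(), the raw texts could come straight from the uploader, e.g. raw_texts = [f.read().decode("utf-8", errors="ignore") for f in uploaded_files], which would also replace the placeholder echo with a real answer.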