# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import streamlit as st


def rag_chat_page():
    st.title("RAG")

    # File/Directory Upload Section
    st.sidebar.subheader("Upload Documents")
    uploaded_files = st.sidebar.file_uploader(
        "Upload file(s) or directory",
        accept_multiple_files=True,
        type=["txt", "pdf", "doc", "docx"],  # Add more file types as needed
    )

    # Process uploaded files
    if uploaded_files:
        st.sidebar.success(f"Successfully uploaded {len(uploaded_files)} files")
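        # To make the uploads usable by the RAG steps further down, their
        # contents could be read out here; UploadedFile.name and .getvalue()
        # are part of Streamlit's file-uploader API. A minimal sketch (the
        # "documents" session key is our own choice, not an existing
        # convention in this app):
        #
        #     st.session_state.documents = {
        #         f.name: f.getvalue() for f in uploaded_files
        #     }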

    # Chat Interface
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat input
    if prompt := st.chat_input("Ask a question about your documents"):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.markdown(prompt)

        # Here you would add your RAG logic to:
        # 1. Process the uploaded documents
        # 2. Create embeddings
        # 3. Perform similarity search
        # 4. Generate a response using an LLM
        # (A minimal, end-to-end sketch of these four steps is given in the
        # _toy_rag_answer helper after this function.)

        # For now, just echo a placeholder response
        response = f"I received your question: {prompt}\nThis is where the RAG response would go."

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})

        # Display assistant response
        with st.chat_message("assistant"):
            st.markdown(response)
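

# --- Illustrative sketch, not wired into the page ---------------------------
# A minimal, self-contained stand-in for the four RAG steps described in the
# comments above: process documents, create embeddings, run a similarity
# search, and generate a response. Bag-of-words term counts and cosine
# similarity stand in for real embeddings, and "generation" just quotes the
# best-matching chunk instead of calling an LLM. Every name below is a
# hypothetical helper for illustration, not a llama-stack API.
import math
from collections import Counter


def _embed(text: str) -> Counter:
    # Toy "embedding": lowercase bag-of-words term counts.
    return Counter(text.lower().split())


def _cosine(a: Counter, b: Counter) -> float:
    # Cosine similarity between two sparse term-count vectors.
    dot = sum(a[t] * b[t] for t in a.keys() & b.keys())
    norm_a = math.sqrt(sum(v * v for v in a.values()))
    norm_b = math.sqrt(sum(v * v for v in b.values()))
    return dot / (norm_a * norm_b) if norm_a and norm_b else 0.0


def _toy_rag_answer(question: str, documents: list[str]) -> str:
    # 1. Process the documents: split each into rough paragraph-sized chunks.
    chunks = [c for doc in documents for c in doc.split("\n\n") if c.strip()]
    if not chunks:
        return "No documents to search."
    # 2. Create (toy) embeddings for every chunk and for the question.
    chunk_vecs = [_embed(c) for c in chunks]
    q_vec = _embed(question)
    # 3. Similarity search: pick the chunk most similar to the question.
    best = max(range(len(chunks)), key=lambda i: _cosine(q_vec, chunk_vecs[i]))
    # 4. Generate a response: a real page would prompt an LLM with the
    #    retrieved chunk; here we simply quote it.
    return f"Most relevant passage:\n\n> {chunks[best]}"
# -----------------------------------------------------------------------------

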
rag_chat_page()
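
# To preview this page on its own, run it with the Streamlit CLI
# (assuming this file is saved as rag_chat.py; the filename is illustrative):
#
#   streamlit run rag_chat.py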