Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-01 16:24:44 +00:00
playground

commit 68b70d1b1f (parent 125d98c9bd)

3 changed files with 74 additions and 2 deletions
@@ -19,9 +19,13 @@ def main():
     chat_page = st.Page(
         "page/playground/chat.py", title="Chat", icon="💬", default=True
     )
+    rag_page = st.Page("page/playground/rag.py", title="RAG", icon="💬", default=False)
 
     pg = st.navigation(
-        {"Evaluations": [application_evaluation_page], "Playground": [chat_page]}
+        {
+            "Evaluations": [application_evaluation_page],
+            "Playground": [chat_page, rag_page],
+        }
     )
     pg.run()
 
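The hunk above relies on Streamlit's multipage API, where st.Page wraps a page script and st.navigation renders sidebar navigation from a mapping of section names to pages. A minimal standalone sketch of the same pattern (the page paths and titles here are hypothetical, and it assumes Streamlit >= 1.36, where this API is available):

import streamlit as st

# Each st.Page points at a standalone Streamlit script; `default` marks the
# landing page. The paths below are hypothetical stand-ins.
home = st.Page("pages/home.py", title="Home", icon="🏠", default=True)
about = st.Page("pages/about.py", title="About", icon="ℹ️")

# A dict groups pages into labeled sidebar sections, as the hunk above does
# with "Evaluations" and "Playground".
pg = st.navigation({"Main": [home, about]})
pg.run()  # executes the script of whichever page the user selected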
@@ -54,6 +54,11 @@ with st.sidebar:
     )
 
     stream = st.checkbox("Stream", value=True)
+    system_prompt = st.text_area(
+        "System Prompt",
+        value="You are a helpful AI assistant.",
+        help="Initial instructions given to the AI to set its behavior and context",
+    )
 
 
 # Main chat interface
@@ -83,7 +88,10 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
         full_response = ""
 
         response = llama_stack_api.client.inference.chat_completion(
-            messages=[{"role": "user", "content": prompt}],
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt},
+            ],
             model_id=selected_model,
             stream=stream,
             sampling_params={
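For completeness, a hedged sketch of consuming `response` in both modes; the streaming chunk schema varies across llama-stack client versions, so the event.event_type / event.delta and completion_message.content fields below are assumptions to verify against the installed client:

message_placeholder = st.empty()
full_response = ""
if stream:
    for chunk in response:
        # Assumed schema: progress events carry a text delta (some client
        # versions expose it as chunk.event.delta.text instead).
        if chunk.event.event_type == "progress":
            full_response += chunk.event.delta
            message_placeholder.markdown(full_response + "▌")
    message_placeholder.markdown(full_response)
else:
    # Assumed non-streaming shape: a completion message with string content.
    full_response = response.completion_message.content
    message_placeholder.markdown(full_response)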
llama_stack/distribution/ui/page/playground/rag.py (new file, 60 lines)

@@ -0,0 +1,60 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import streamlit as st
+
+
+def rag_chat_page():
+    st.title("RAG")
+
+    # File/Directory Upload Section
+    st.sidebar.subheader("Upload Documents")
+    uploaded_files = st.sidebar.file_uploader(
+        "Upload file(s) or directory",
+        accept_multiple_files=True,
+        type=["txt", "pdf", "doc", "docx"],  # Add more file types as needed
+    )
+
+    # Process uploaded files
+    if uploaded_files:
+        st.sidebar.success(f"Successfully uploaded {len(uploaded_files)} files")
+
+    # Chat Interface
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    # Display chat history
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # Chat input
+    if prompt := st.chat_input("Ask a question about your documents"):
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+
+        # Display user message
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        # Here you would add your RAG logic to:
+        # 1. Process the uploaded documents
+        # 2. Create embeddings
+        # 3. Perform similarity search
+        # 4. Generate response using LLM
+
+        # For now, just echo a placeholder response
+        response = f"I received your question: {prompt}\nThis is where the RAG response would go."
+
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": response})
+
+        # Display assistant response
+        with st.chat_message("assistant"):
+            st.markdown(response)
+
+
+rag_chat_page()
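The numbered placeholder comments in rag.py are left unimplemented by this commit. One hedged sketch of filling them in, reusing the llama_stack_api.client.inference.chat_completion call the commit already adds to chat.py: the retrieval below is a naive term-overlap ranking rather than real embeddings, and llama_stack_api plus the model id are assumed to be available exactly as in chat.py.

def naive_retrieve(question: str, chunks: list[str], k: int = 3) -> list[str]:
    # Rank chunks by how many words they share with the question
    # (a stand-in for steps 2-3: embeddings + similarity search).
    terms = set(question.lower().split())
    return sorted(chunks, key=lambda c: len(terms & set(c.lower().split())), reverse=True)[:k]


def rag_answer(question: str, uploaded_files, model_id: str) -> str:
    # Step 1: read uploaded text files (UploadedFile.read() returns bytes).
    chunks = [f.read().decode("utf-8", errors="ignore") for f in uploaded_files]
    context = "\n\n".join(naive_retrieve(question, chunks))
    # Step 4: generate the answer, grounding the model with a system message.
    response = llama_stack_api.client.inference.chat_completion(
        messages=[
            {"role": "system", "content": f"Answer using only this context:\n\n{context}"},
            {"role": "user", "content": question},
        ],
        model_id=model_id,
        stream=False,  # non-streaming keeps the sketch simple
    )
    return response.completion_message.content  # assumed response shape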