Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-03 09:21:45 +00:00.
Playground UI: hide tool output under an expander widget.
This commit is contained in:
parent e4d001c4e4
commit cf42b6f801

1 changed file with 16 additions and 9 deletions
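The commit routes tool output through a collapsed st.expander instead of rendering it inline, so retrieval logs no longer crowd the chat transcript. A minimal standalone sketch of the pattern (the message dict below is illustrative, not taken from the diff):

import streamlit as st

# Hypothetical message, mirroring the shape this commit stores in
# st.session_state.displayed_messages.
message = {
    "role": "assistant",
    "content": "Answer grounded in the retrieved context.",
    "tool_output": "knowledge_search returned 3 chunks ...",
}

with st.chat_message(message["role"]):
    if message.get("tool_output"):
        # Collapsed by default; the user expands it only when needed.
        with st.expander(label="Tool Output", expanded=False, icon="🛠"):
            st.write(message["tool_output"])
    st.markdown(message["content"])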
@@ -24,6 +24,13 @@ def rag_chat_page():
     def should_disable_input():
         return "displayed_messages" in st.session_state and len(st.session_state.displayed_messages) > 0
 
+    def log_message(message):
+        with st.chat_message(message["role"]):
+            if "tool_output" in message and message["tool_output"]:
+                with st.expander(label="Tool Output", expanded=False, icon="🛠"):
+                    st.write(message["tool_output"])
+            st.markdown(message["content"])
+
     with st.sidebar:
         # File/Directory Upload Section
         st.subheader("Upload Documents", divider=True)
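The new log_message() helper gives history replay and freshly streamed turns a single rendering path. The `"tool_output" in message and message["tool_output"]` guard keeps user turns (which never carry the key) and tool-free assistant turns rendering exactly as before; only turns with recorded tool output get the collapsed expander.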
@@ -146,8 +153,7 @@ def rag_chat_page():
 
     # Display chat history
     for message in st.session_state.displayed_messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
+        log_message(message)
 
     if temperature > 0.0:
         strategy = {
@@ -201,7 +207,7 @@ def rag_chat_page():
 
         # Display assistant response
         with st.chat_message("assistant"):
-            retrieval_message_placeholder = st.empty()
+            retrieval_message_placeholder = st.expander(label="Tool Output", expanded=False, icon="🛠")
             message_placeholder = st.empty()
             full_response = ""
             retrieval_response = ""
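Replacing st.empty() with st.expander() as the streaming target also changes the container's semantics, which is why the next hunk switches from .info() to .write(). A small sketch of the difference, with illustrative labels:

import streamlit as st

# st.empty() is a single-element container: each call replaces its content.
placeholder = st.empty()
placeholder.info("tool step 1")  # rendered as a visible callout
placeholder.info("tool step 2")  # replaces "tool step 1" in place

# st.expander() is a regular container: each call appends a child element,
# and the whole group stays hidden behind the collapsed header.
box = st.expander(label="Tool Output", expanded=False, icon="🛠")
box.write("tool step 1")  # appended inside the expander
box.write("tool step 2")  # appended below the previous write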
@@ -209,14 +215,16 @@ def rag_chat_page():
                 log.print()
                 if log.role == "tool_execution":
                     retrieval_response += log.content.replace("====", "").strip()
-                    retrieval_message_placeholder.info(retrieval_response)
+                    retrieval_message_placeholder.write(retrieval_response)
                 else:
                     full_response += log.content
                     message_placeholder.markdown(full_response + "▌")
             message_placeholder.markdown(full_response)
 
             st.session_state.messages.append({"role": "assistant", "content": full_response})
-            st.session_state.displayed_messages.append({"role": "assistant", "content": full_response})
+            st.session_state.displayed_messages.append(
+                {"role": "assistant", "content": full_response, "tool_output": retrieval_response}
+            )
 
     def direct_process_prompt(prompt):
         # Add the system prompt in the beginning of the conversation
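Persisting retrieval_response under a "tool_output" key is what lets log_message() rebuild the same collapsed expander when the history is replayed on the next rerun. An empty string is falsy, so responses that executed no tool skip the expander entirely.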
@@ -230,15 +238,14 @@ def rag_chat_page():
         prompt_context = rag_response.content
 
         with st.chat_message("assistant"):
+            with st.expander(label="Retrieval Output", expanded=False):
+                st.write(prompt_context)
+
             retrieval_message_placeholder = st.empty()
             message_placeholder = st.empty()
             full_response = ""
             retrieval_response = ""
 
-            # Display the retrieved content
-            retrieval_response += str(prompt_context)
-            retrieval_message_placeholder.info(retrieval_response)
-
             # Construct the extended prompt
             extended_prompt = f"Please answer the following query using the context below.\n\nCONTEXT:\n{prompt_context}\n\nQUERY:\n{prompt}"
 
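In the direct (non-agent) path the retrieved context is known before generation starts, so it is written straight into a collapsed expander rather than streamed into a placeholder; the removed lines were the old always-visible .info() rendering. The retrieval_message_placeholder and retrieval_response assignments survive as unchanged context lines even though nothing appears to write to them in this path anymore.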