Fix precommit check after moving to ruff (#927)

The lint check on the main branch is failing. This PR fixes the lint check after we moved to ruff in https://github.com/meta-llama/llama-stack/pull/921. It requires moving to a `ruff.toml` file, as well as fixing some additional checks and ignoring others.

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
Yuan Tang, 2025-02-02 09:46:45 -05:00 (committed by GitHub)
commit 34ab7a3b6c (parent 4773092dd1)
217 changed files with 981 additions and 2681 deletions
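
The commit message above mentions moving lint configuration into a `ruff.toml` file and ignoring some additional checks. The actual configuration file is not shown in this excerpt; below is a minimal sketch of what such a file typically looks like. The rule selections and the 120-character line length are illustrative assumptions, not the values from this PR (the collapsed long lines in the diff below do suggest a line length above ruff's 88-character default).

```toml
# ruff.toml: illustrative sketch; rule codes and line length are assumptions,
# not the actual configuration from this PR.
line-length = 120

[lint]
# Enable pycodestyle errors (E), Pyflakes (F), and isort-style import sorting (I).
select = ["E", "F", "I"]
# Ignore selected checks; E501 (line-too-long) is a placeholder example.
ignore = ["E501"]
```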


@@ -74,9 +74,7 @@ def rag_chat_page():
         )
         available_models = llama_stack_api.client.models.list()
-        available_models = [
-            model.identifier for model in available_models if model.model_type == "llm"
-        ]
+        available_models = [model.identifier for model in available_models if model.model_type == "llm"]
         selected_model = st.selectbox(
             "Choose a model",
             available_models,
@@ -137,9 +135,7 @@ def rag_chat_page():
                 dict(
                     name="builtin::rag",
                     args={
-                        "vector_db_ids": [
-                            vector_db_id for vector_db_id in selected_vector_dbs
-                        ],
+                        "vector_db_ids": [vector_db_id for vector_db_id in selected_vector_dbs],
                     },
                 )
             ],
@@ -186,9 +182,7 @@ def rag_chat_page():
                 message_placeholder.markdown(full_response + "▌")
             message_placeholder.markdown(full_response)
-            st.session_state.messages.append(
-                {"role": "assistant", "content": full_response}
-            )
+            st.session_state.messages.append({"role": "assistant", "content": full_response})
 
 rag_chat_page()
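
For context on the failing check itself: ruff is commonly wired into pre-commit via the astral-sh/ruff-pre-commit hooks. A sketch of the relevant `.pre-commit-config.yaml` entry is below; the pinned `rev` is an assumption, not the version used in this repository.

```yaml
# .pre-commit-config.yaml: illustrative sketch; the rev is an assumption.
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.9.4
    hooks:
      - id: ruff        # linter (ruff check), with autofix enabled
        args: [--fix]
      - id: ruff-format # formatter (ruff format)
```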