From 56798fbddaae50886694e1d59a02b057ecc42c41 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Fri, 28 Feb 2025 08:14:18 +0000
Subject: [PATCH 1/4] Release candidate 0.1.5rc3

---
 pyproject.toml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index dc5659f06..031a7cbee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.1.4"
+version = "0.1.5rc3"
 authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
 description = "Llama Stack"
 readme = "README.md"
@@ -26,8 +26,8 @@ dependencies = [
     "httpx",
     "huggingface-hub",
     "jsonschema",
-    "llama-models>=0.1.4",
-    "llama-stack-client>=0.1.4",
+    "llama-models>=0.1.5rc3",
+    "llama-stack-client>=0.1.5rc3",
     "prompt-toolkit",
     "python-dotenv",
     "pydantic>=2",

From 31c9c6c62febc7e8d741b7f3dcf29065eb8ab432 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Fri, 28 Feb 2025 11:10:45 -0800
Subject: [PATCH 2/4] fix: replace eval with json decoding (#1327)

# What does this PR do?

- Using `eval` on the server is a security risk
- Replace `eval` with `json.loads`

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan

```
pytest -v -s --nbval-lax ./llama-stack/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
```

[//]: # (## Documentation)
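To illustrate the risk with a standalone sketch (the payload string below is hypothetical, not taken from any dataset): `eval` executes whatever expression a row contains, while `json.loads` only parses data and raises on anything that is not valid JSON.

```python
import json

# Hypothetical attacker-controlled row value: valid Python, not valid JSON.
payload = "__import__('os').getcwd()"

# eval(payload) would execute code on the server (here, a call to os.getcwd()).

# json.loads never executes anything; a non-JSON payload simply fails to parse.
try:
    json.loads(payload)
except json.JSONDecodeError as err:
    print(f"rejected: {err}")

# A well-formed chat_completion_input row decodes to plain data instead.
messages = json.loads('[{"role": "user", "content": "hi"}]')
print(messages[0]["role"])  # user
```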
---
 .../providers/inline/eval/meta_reference/eval.py   | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py
index 18d408a31..7afc1089c 100644
--- a/llama_stack/providers/inline/eval/meta_reference/eval.py
+++ b/llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -3,6 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+import json
 from typing import Any, Dict, List, Optional
 
 from tqdm import tqdm
@@ -117,7 +118,7 @@ class MetaReferenceEvalImpl(
         generations = []
         for i, x in tqdm(enumerate(input_rows)):
             assert ColumnName.chat_completion_input.value in x, "Invalid input row"
-            input_messages = eval(str(x[ColumnName.chat_completion_input.value]))
+            input_messages = json.loads(x[ColumnName.chat_completion_input.value])
             input_messages = [UserMessage(**x) for x in input_messages]
 
             # NOTE: only single-turn agent generation is supported. Create a new session for each input row
@@ -159,7 +160,7 @@ class MetaReferenceEvalImpl(
         generations = []
         for x in tqdm(input_rows):
             if ColumnName.completion_input.value in x:
-                input_content = eval(str(x[ColumnName.completion_input.value]))
+                input_content = json.loads(x[ColumnName.completion_input.value])
                 response = await self.inference_api.completion(
                     model=candidate.model,
                     content=input_content,
@@ -167,9 +168,8 @@ class MetaReferenceEvalImpl(
                 )
                 generations.append({ColumnName.generated_answer.value: response.completion_message.content})
             elif ColumnName.chat_completion_input.value in x:
-                chat_completion_input_str = str(x[ColumnName.chat_completion_input.value])
-                input_messages = eval(chat_completion_input_str)
-                input_messages = [UserMessage(**x) for x in input_messages]
+                chat_completion_input_json = json.loads(x[ColumnName.chat_completion_input.value])
+                input_messages = [UserMessage(**x) for x in chat_completion_input_json]
                 messages = []
                 if candidate.system_message:
                     messages.append(candidate.system_message)

From 75cda30df7e85501b9834632ccfaaecbbae87e02 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Fri, 28 Feb 2025 11:25:23 -0800
Subject: [PATCH 3/4] fix: replace eval with json decoding for format_adapter (#1328)

# What does this PR do?

- using `eval` is a security risk

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan

- see https://github.com/meta-llama/llama-stack/pull/1327

cc @SLR722 we will need to update the corresponding dataset via

```python
def update_to_json_str():
    dataset = datasets.load_dataset(...)
    processed_dataset = dataset[split].map(
        lambda x: {
            "column": json.dumps(eval(x["column"]))
        }
    )
    processed_dataset.push_to_hub(...)
```

[//]: # (## Documentation)
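For reference, a runnable sketch of that round trip on a single made-up value (the column contents here are hypothetical; the real migration maps over the actual dataset as above):

```python
import json

# Legacy cell stored as a Python repr: single quotes make it invalid JSON.
legacy = "[{'role': 'user', 'content': 'What is 2 + 2?'}]"

# One-time migration step: eval the repr (acceptable only because we control
# the dataset being migrated), then re-serialize as a proper JSON string.
migrated = json.dumps(eval(legacy))
print(migrated)  # [{"role": "user", "content": "What is 2 + 2?"}]

# After the migration, servers decode with json.loads instead of eval.
assert json.loads(migrated)[0]["role"] == "user"
```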
---
 .../post_training/torchtune/datasets/format_adapter.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py b/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py
index 884977803..6b607f1c7 100644
--- a/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py
+++ b/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py
@@ -10,16 +10,19 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+import json
 from typing import Any, Mapping
 
 from llama_stack.providers.utils.common.data_schema_validator import ColumnName
 
 
-def llama_stack_instruct_to_torchtune_instruct(sample: Mapping[str, Any]) -> Mapping[str, Any]:
+def llama_stack_instruct_to_torchtune_instruct(
+    sample: Mapping[str, Any],
+) -> Mapping[str, Any]:
     assert ColumnName.chat_completion_input.value in sample and ColumnName.expected_answer.value in sample, (
         "Invalid input row"
     )
-    input_messages = eval(str(sample[ColumnName.chat_completion_input.value]))
+    input_messages = json.loads(sample[ColumnName.chat_completion_input.value])
 
     assert len(input_messages) == 1, "llama stack intruct dataset format only supports 1 user message"
     input_message = input_messages[0]
@@ -37,7 +40,7 @@ def llama_stack_instruct_to_torchtune_instruct(sample: Mapping[str, Any]) -> Map
 def llama_stack_chat_to_torchtune_chat(sample: Mapping[str, Any]) -> Mapping[str, Any]:
     assert ColumnName.dialog.value in sample, "Invalid input row"
     role_map = {"user": "human", "assistant": "gpt"}
-    dialog = eval(str(sample[ColumnName.dialog.value]))
+    dialog = json.loads(sample[ColumnName.dialog.value])
 
     assert len(dialog) > 1, "dialog must have at least 2 messagse"
     roles = []

From 5e244091898db03959fb8fe29edde10c3c6aa95d Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Fri, 28 Feb 2025 22:13:25 +0000
Subject: [PATCH 4/4] Bump version to 0.1.5.1

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 031a7cbee..73b4e1b55 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.1.5rc3"
+version = "0.1.5.1"
 authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
 description = "Llama Stack"
 readme = "README.md"
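Aside on the `role_map` in PATCH 3/4: once a dialog column is decoded with `json.loads`, the adapter remaps llama-stack roles to the torchtune/ShareGPT-style names. A minimal sketch of that remapping follows; the output shape is an assumption for illustration, not the adapter's exact return value.

```python
import json

role_map = {"user": "human", "assistant": "gpt"}

# A decoded dialog column (two turns, as required by the len(dialog) > 1 assert).
dialog = json.loads(
    '[{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]'
)

# Remap each turn's role; the {"from": ..., "value": ...} shape is assumed here.
turns = [{"from": role_map[m["role"]], "value": m["content"]} for m in dialog]
print(turns)  # [{'from': 'human', 'value': 'hi'}, {'from': 'gpt', 'value': 'hello'}]
```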