From 68c976a2d8636d95bc2869df3710425d42a367e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=AA=20Nam=20Kh=C3=A1nh?= <55955273+khanhkhanhlele@users.noreply.github.com> Date: Fri, 7 Nov 2025 22:07:46 +0700 Subject: [PATCH] docs: fix typos in some files (#4101) This PR fixes typos in the files using codespell. --- .../providers/inline/inference/meta_reference/inference.py | 2 +- .../inline/post_training/torchtune/common/checkpointer.py | 2 +- .../inline/post_training/torchtune/datasets/format_adapter.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/llama_stack/providers/inline/inference/meta_reference/inference.py b/src/llama_stack/providers/inline/inference/meta_reference/inference.py index 286335a7d..76d3fdd50 100644 --- a/src/llama_stack/providers/inline/inference/meta_reference/inference.py +++ b/src/llama_stack/providers/inline/inference/meta_reference/inference.py @@ -146,7 +146,7 @@ class MetaReferenceInferenceImpl( def check_model(self, request) -> None: if self.model_id is None or self.llama_model is None: raise RuntimeError( - "No avaible model yet, please register your requested model or add your model in the resouces first" + "No available model yet, please register your requested model or add your model in the resources first" ) elif request.model != self.model_id: raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}") diff --git a/src/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py b/src/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py index af8bd2765..43e206490 100644 --- a/src/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +++ b/src/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py @@ -91,7 +91,7 @@ class TorchtuneCheckpointer: if checkpoint_format == "meta" or checkpoint_format is None: self._save_meta_format_checkpoint(model_file_path, state_dict, adapter_only) 
elif checkpoint_format == "huggingface": - # Note: for saving hugging face format checkpoints, we only suppport saving adapter weights now + # Note: for saving hugging face format checkpoints, we only support saving adapter weights now self._save_hf_format_checkpoint(model_file_path, state_dict) else: raise ValueError(f"Unsupported checkpoint format: {format}") diff --git a/src/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py b/src/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py index 96dd8b8dd..47452efa4 100644 --- a/src/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +++ b/src/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py @@ -25,7 +25,7 @@ def llama_stack_instruct_to_torchtune_instruct( ) input_messages = json.loads(sample[ColumnName.chat_completion_input.value]) - assert len(input_messages) == 1, "llama stack intruct dataset format only supports 1 user message" + assert len(input_messages) == 1, "llama stack instruct dataset format only supports 1 user message" input_message = input_messages[0] assert "content" in input_message, "content not found in input message"