mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
docs: fix typos in some files (#4101)
This PR fixes typos in some files using codespell.
This commit is contained in:
parent
b68a25d377
commit
68c976a2d8
3 changed files with 3 additions and 3 deletions
|
|
@ -146,7 +146,7 @@ class MetaReferenceInferenceImpl(
|
||||||
def check_model(self, request) -> None:
|
def check_model(self, request) -> None:
|
||||||
if self.model_id is None or self.llama_model is None:
|
if self.model_id is None or self.llama_model is None:
|
||||||
raise RuntimeError(
|
raise RuntimeError(
|
||||||
"No avaible model yet, please register your requested model or add your model in the resouces first"
|
"No available model yet, please register your requested model or add your model in the resources first"
|
||||||
)
|
)
|
||||||
elif request.model != self.model_id:
|
elif request.model != self.model_id:
|
||||||
raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}")
|
raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}")
|
||||||
|
|
|
||||||
|
|
@ -91,7 +91,7 @@ class TorchtuneCheckpointer:
|
||||||
if checkpoint_format == "meta" or checkpoint_format is None:
|
if checkpoint_format == "meta" or checkpoint_format is None:
|
||||||
self._save_meta_format_checkpoint(model_file_path, state_dict, adapter_only)
|
self._save_meta_format_checkpoint(model_file_path, state_dict, adapter_only)
|
||||||
elif checkpoint_format == "huggingface":
|
elif checkpoint_format == "huggingface":
|
||||||
# Note: for saving hugging face format checkpoints, we only suppport saving adapter weights now
|
# Note: for saving hugging face format checkpoints, we only support saving adapter weights now
|
||||||
self._save_hf_format_checkpoint(model_file_path, state_dict)
|
self._save_hf_format_checkpoint(model_file_path, state_dict)
|
||||||
else:
|
else:
|
||||||
raise ValueError(f"Unsupported checkpoint format: {format}")
|
raise ValueError(f"Unsupported checkpoint format: {format}")
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ def llama_stack_instruct_to_torchtune_instruct(
|
||||||
)
|
)
|
||||||
input_messages = json.loads(sample[ColumnName.chat_completion_input.value])
|
input_messages = json.loads(sample[ColumnName.chat_completion_input.value])
|
||||||
|
|
||||||
assert len(input_messages) == 1, "llama stack intruct dataset format only supports 1 user message"
|
assert len(input_messages) == 1, "llama stack instruct dataset format only supports 1 user message"
|
||||||
input_message = input_messages[0]
|
input_message = input_messages[0]
|
||||||
|
|
||||||
assert "content" in input_message, "content not found in input message"
|
assert "content" in input_message, "content not found in input message"
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue