forked from phoenix-oss/llama-stack-mirror
chore: fix mypy violations in post_training modules (#1548)
# What does this PR do?

Fixes a bunch of mypy violations.

Note: this patch touches all files except post_training.py, which will be significantly changed by #1437; it is therefore left out of the picture for now.

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan

Testing with https://github.com/meta-llama/llama-stack/pull/1543

Also checked that GPU training works with the change:

```
INFO:     ::1:53316 - "POST /v1/post-training/supervised-fine-tune HTTP/1.1" 200 OK
INFO:     ::1:53316 - "GET /v1/post-training/job/status?job_uuid=test-jobb5ca2d84-d541-42f8-883b-762828b4c0e7 HTTP/1.1" 200 OK
INFO:     ::1:53316 - "GET /v1/post-training/job/artifacts?job_uuid=test-jobb5ca2d84-d541-42f8-883b-762828b4c0e7 HTTP/1.1" 200 OK
21:24:01.161 [END] /v1/post-training/supervised-fine-tune [StatusCode.OK] (32526.75ms)
21:23:28.769 [DEBUG] Setting manual seed to local seed 3918872849. Local seed is seed + rank = 3918872849 + 0
21:23:28.996 [INFO] Identified model_type = Llama3_2. Ignoring output.weight in checkpoint in favor of the tok_embedding.weight tied weights.
21:23:29.933 [INFO] Memory stats after model init:
        GPU peak memory allocation: 6.05 GiB
        GPU peak memory reserved: 6.10 GiB
        GPU peak memory active: 6.05 GiB
21:23:29.934 [INFO] Model is initialized with precision torch.bfloat16.
21:23:30.115 [INFO] Tokenizer is initialized.
21:23:30.118 [INFO] Optimizer is initialized.
21:23:30.119 [INFO] Loss is initialized.
21:23:30.896 [INFO] Dataset and Sampler are initialized.
21:23:30.898 [INFO] Learning rate scheduler is initialized.
21:23:31.618 [INFO] Memory stats after model init:
        GPU peak memory allocation: 6.24 GiB
        GPU peak memory reserved: 6.30 GiB
        GPU peak memory active: 6.24 GiB
21:23:31.620 [INFO] Starting checkpoint save...
21:23:59.428 [INFO] Model checkpoint of size 6.43 GB saved to /home/ec2-user/.llama/checkpoints/meta-llama/Llama-3.2-3B-Instruct-sft-0/consolidated.00.pth
21:23:59.445 [INFO] Adapter checkpoint of size 0.00 GB saved to /home/ec2-user/.llama/checkpoints/meta-llama/Llama-3.2-3B-Instruct-sft-0/adapter/adapter.pth
```

[//]: # (## Documentation)

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
This commit is contained in:
parent
f86f3cf878
commit
0cbb7f7f21
9 changed files with 56 additions and 69 deletions
docs/_static/llama-stack-spec.html (vendored, 26 changes)

```diff
@@ -9847,23 +9847,6 @@
             ],
             "title": "ScoreBatchResponse"
         },
-        "AlgorithmConfig": {
-            "oneOf": [
-                {
-                    "$ref": "#/components/schemas/LoraFinetuningConfig"
-                },
-                {
-                    "$ref": "#/components/schemas/QATFinetuningConfig"
-                }
-            ],
-            "discriminator": {
-                "propertyName": "type",
-                "mapping": {
-                    "LoRA": "#/components/schemas/LoraFinetuningConfig",
-                    "QAT": "#/components/schemas/QATFinetuningConfig"
-                }
-            }
-        },
         "LoraFinetuningConfig": {
             "type": "object",
             "properties": {
@@ -9999,7 +9982,14 @@
                     "type": "string"
                 },
                 "algorithm_config": {
-                    "$ref": "#/components/schemas/AlgorithmConfig"
+                    "oneOf": [
+                        {
+                            "$ref": "#/components/schemas/LoraFinetuningConfig"
+                        },
+                        {
+                            "$ref": "#/components/schemas/QATFinetuningConfig"
+                        }
+                    ]
                 }
             },
             "additionalProperties": false,
```
docs/_static/llama-stack-spec.yaml (vendored, 13 changes)

```diff
@@ -6678,15 +6678,6 @@ components:
       required:
         - results
       title: ScoreBatchResponse
-    AlgorithmConfig:
-      oneOf:
-        - $ref: '#/components/schemas/LoraFinetuningConfig'
-        - $ref: '#/components/schemas/QATFinetuningConfig'
-      discriminator:
-        propertyName: type
-        mapping:
-          LoRA: '#/components/schemas/LoraFinetuningConfig'
-          QAT: '#/components/schemas/QATFinetuningConfig'
     LoraFinetuningConfig:
       type: object
       properties:
@@ -6770,7 +6761,9 @@ components:
         checkpoint_dir:
           type: string
         algorithm_config:
-          $ref: '#/components/schemas/AlgorithmConfig'
+          oneOf:
+            - $ref: '#/components/schemas/LoraFinetuningConfig'
+            - $ref: '#/components/schemas/QATFinetuningConfig'
       additionalProperties: false
       required:
         - job_uuid
```
llama_stack/apis/post_training/post_training.py

```diff
@@ -6,7 +6,7 @@
 
 from datetime import datetime
 from enum import Enum
-from typing import Any, Dict, List, Literal, Optional, Protocol, Union
+from typing import Any, Dict, List, Literal, Optional, Protocol
 
 from pydantic import BaseModel, Field
 from typing_extensions import Annotated
@@ -89,7 +89,7 @@ class QATFinetuningConfig(BaseModel):
 
 
 AlgorithmConfig = register_schema(
-    Annotated[Union[LoraFinetuningConfig, QATFinetuningConfig], Field(discriminator="type")],
+    Annotated[LoraFinetuningConfig | QATFinetuningConfig, Field(discriminator="type")],
     name="AlgorithmConfig",
 )
 
@@ -184,7 +184,7 @@ class PostTraining(Protocol):
             description="Model descriptor from `llama model list`",
         ),
         checkpoint_dir: Optional[str] = None,
-        algorithm_config: Optional[AlgorithmConfig] = None,
+        algorithm_config: Optional[LoraFinetuningConfig | QATFinetuningConfig] = None,
    ) -> PostTrainingJob: ...
 
     @webmethod(route="/post-training/preference-optimize", method="POST")
```
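For readers unfamiliar with the pattern the API hunks above rely on, here is a minimal, self-contained sketch of a Pydantic discriminated union driven by a `type` literal. It uses plain Pydantic v2 only; the class names, fields, and `TypeAdapter` usage below are illustrative and are not code from the repository.

```python
# Minimal sketch of the discriminated-union pattern (assumes Pydantic v2).
# Class and field names here are made up for illustration.
from typing import Literal, Union

from pydantic import BaseModel, Field, TypeAdapter
from typing_extensions import Annotated


class LoraConfigExample(BaseModel):
    type: Literal["LoRA"] = "LoRA"
    rank: int = 8


class QATConfigExample(BaseModel):
    type: Literal["QAT"] = "QAT"
    quantizer_name: str = "int4"


# Field(discriminator="type") tells Pydantic (and downstream schema
# generators) to pick the variant by the "type" field, which is what
# produced the LoRA/QAT mapping the spec hunks above inline.
AlgorithmConfigExample = Annotated[
    Union[LoraConfigExample, QATConfigExample], Field(discriminator="type")
]

adapter = TypeAdapter(AlgorithmConfigExample)
parsed = adapter.validate_python({"type": "LoRA", "rank": 16})
assert isinstance(parsed, LoraConfigExample)
```

The `X | Y` spelling inside `Annotated[...]` in the hunk is equivalent to `Union[X, Y]` on Python 3.10+.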
llama_stack/providers/inline/post_training/common/validator.py

```diff
@@ -9,6 +9,9 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from typing import Any
+
 from llama_stack.apis.common.type_system import (
     ChatCompletionInputType,
     DialogType,
@@ -20,7 +23,7 @@ from llama_stack.providers.utils.common.data_schema_validator import (
     validate_dataset_schema,
 )
 
-EXPECTED_DATASET_SCHEMA = {
+EXPECTED_DATASET_SCHEMA: dict[str, list[dict[str, Any]]] = {
     "instruct": [
         {
             ColumnName.chat_completion_input.value: ChatCompletionInputType(),
@@ -41,6 +44,9 @@ async def validate_input_dataset_schema(
     dataset_type: str,
 ) -> None:
     dataset_def = await datasets_api.get_dataset(dataset_id=dataset_id)
+    if not dataset_def:
+        raise ValueError(f"Dataset {dataset_id} does not exist.")
+
     if not dataset_def.dataset_schema or len(dataset_def.dataset_schema) == 0:
         raise ValueError(f"Dataset {dataset_id} does not have a schema defined.")
 
```
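The added `if not dataset_def:` guard is the usual way to keep mypy happy when an awaited lookup may return `None`: without it, attribute access on an `Optional` value is a type error. Below is a standalone sketch of the same narrowing pattern; `DatasetDefExample` and `get_dataset_example` are hypothetical stand-ins, not the real `Datasets` API.

```python
# Hypothetical stand-ins illustrating Optional narrowing; not repo code.
import asyncio
from typing import Optional


class DatasetDefExample:
    def __init__(self, dataset_schema: dict) -> None:
        self.dataset_schema = dataset_schema


async def get_dataset_example(dataset_id: str) -> Optional[DatasetDefExample]:
    return None  # pretend the dataset was not found


async def validate_example(dataset_id: str) -> None:
    dataset_def = await get_dataset_example(dataset_id)
    if not dataset_def:
        # After this guard, mypy narrows dataset_def to DatasetDefExample,
        # so the attribute access below type-checks.
        raise ValueError(f"Dataset {dataset_id} does not exist.")
    if not dataset_def.dataset_schema:
        raise ValueError(f"Dataset {dataset_id} does not have a schema defined.")


if __name__ == "__main__":
    try:
        asyncio.run(validate_example("missing-dataset"))
    except ValueError as err:
        print(err)
```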
llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py

```diff
@@ -37,7 +37,7 @@ class TorchtuneCheckpointer:
         checkpoint_files: List[str],
         output_dir: str,
         model_type: str,
-    ) -> None:
+    ):
         # Fail fast if ``checkpoint_files`` is invalid
         # TODO: support loading more than one file
         if len(checkpoint_files) != 1:
@@ -58,7 +58,7 @@ class TorchtuneCheckpointer:
         """
         Load Meta checkpoint from file. Currently only loading from a single file is supported.
         """
-        state_dict: Dict[str:Any] = {}
+        state_dict: Dict[str, Any] = {}
         model_state_dict = safe_torch_load(self._checkpoint_path)
         if self._model_type == ModelType.LLAMA3_VISION:
             from torchtune.models.llama3_2_vision._convert_weights import (
@@ -85,10 +85,10 @@ class TorchtuneCheckpointer:
         state_dict: Dict[str, Any],
         epoch: int,
         adapter_only: bool = False,
-        checkpoint_format: str = "meta",
+        checkpoint_format: str | None = None,
     ) -> str:
         model_file_path = Path(self._output_dir) / f"{self._model_id}-{self._training_algorithm}-{epoch}"
-        if checkpoint_format == "meta":
+        if checkpoint_format == "meta" or checkpoint_format is None:
             self._save_meta_format_checkpoint(model_file_path, state_dict, adapter_only)
         elif checkpoint_format == "huggingface":
             # Note: for saving hugging face format checkpoints, we only suppport saving adapter weights now
```
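The `Dict[str:Any]` fix above is worth a note: inside square brackets a colon builds a `slice` object rather than the `(key, value)` parameter pair that `Dict[...]` expects, so the old annotation was meaningless to type checkers (it only went unnoticed because local variable annotations are never evaluated at runtime). A tiny standalone illustration of the subscript difference:

```python
# Demonstrates what the interpreter passes to __getitem__ for the two
# spellings; this probe class exists purely for illustration.
class SubscriptProbe:
    def __getitem__(self, item):
        return item


probe = SubscriptProbe()
print(probe[str:int])   # slice(<class 'str'>, <class 'int'>, None)
print(probe[str, int])  # (<class 'str'>, <class 'int'>)
```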
llama_stack/providers/inline/post_training/torchtune/common/utils.py

```diff
@@ -10,7 +10,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Callable, Dict
+from typing import Callable, Dict
 
 import torch
 from pydantic import BaseModel
@@ -25,10 +25,13 @@ from llama_stack.apis.post_training import DatasetFormat
 from llama_stack.models.llama.datatypes import Model
 from llama_stack.models.llama.sku_list import resolve_model
 
+BuildLoraModelCallable = Callable[..., torch.nn.Module]
+BuildTokenizerCallable = Callable[..., Llama3Tokenizer]
+
 
 class ModelConfig(BaseModel):
-    model_definition: Any
-    tokenizer_type: Any
+    model_definition: BuildLoraModelCallable
+    tokenizer_type: BuildTokenizerCallable
     checkpoint_type: str
 
 
@@ -51,10 +54,6 @@ DATA_FORMATS: Dict[str, Transform] = {
 }
 
 
-BuildLoraModelCallable = Callable[..., torch.nn.Module]
-BuildTokenizerCallable = Callable[..., Llama3Tokenizer]
-
-
 def _validate_model_id(model_id: str) -> Model:
     model = resolve_model(model_id)
     if model is None or model.core_model_id.value not in MODEL_CONFIGS:
```
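Replacing `Any` with `Callable[...]` aliases gives mypy an actual contract to check where the config fields are called. A minimal sketch of the same idea with plain Pydantic; `ExampleModelConfig` and the alias names below are illustrative, not the repository's definitions.

```python
# Illustrative only: typed factory-callable fields on a Pydantic model.
from typing import Callable

from pydantic import BaseModel

BuildModelCallableExample = Callable[..., object]
BuildTokenizerCallableExample = Callable[..., object]


class ExampleModelConfig(BaseModel):
    # Pydantic accepts Callable field types and validates that the
    # provided value is callable.
    model_definition: BuildModelCallableExample
    tokenizer_type: BuildTokenizerCallableExample
    checkpoint_type: str


cfg = ExampleModelConfig(
    model_definition=lambda: object(),
    tokenizer_type=lambda: object(),
    checkpoint_type="meta",
)
print(callable(cfg.model_definition), cfg.checkpoint_type)
```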
llama_stack/providers/inline/post_training/torchtune/datasets/sft.py

```diff
@@ -55,7 +55,7 @@ class SFTDataset(Dataset):
         if "messages" in transformed_sample:
             validate_messages(transformed_sample["messages"])
 
-        tokenized_dict = self._model_transform(transformed_sample)
+        tokenized_dict: dict[str, Any] = self._model_transform(transformed_sample)
 
         if not ("tokens" in tokenized_dict and "mask" in tokenized_dict):
             keys_str = ", ".join(tokenized_dict.keys())
```
llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py

```diff
@@ -37,10 +37,10 @@ from llama_stack.apis.common.training_types import PostTrainingMetric
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.post_training import (
-    AlgorithmConfig,
     Checkpoint,
     LoraFinetuningConfig,
     OptimizerConfig,
+    QATFinetuningConfig,
     TrainingConfig,
 )
 from llama_stack.distribution.utils.config_dirs import DEFAULT_CHECKPOINT_DIR
@@ -73,6 +73,9 @@ class LoraFinetuningSingleDevice:
 
     # Currently logging only logs limited training metrics to local disk
     # will figure out more loggings and how it works with telemetry in future PRs
+
+    _checkpointer: TorchtuneCheckpointer
+
     def __init__(
         self,
         config: TorchtunePostTrainingConfig,
@@ -82,7 +85,7 @@ class LoraFinetuningSingleDevice:
         logger_config: Dict[str, Any],
         model: str,
         checkpoint_dir: Optional[str],
-        algorithm_config: Optional[AlgorithmConfig],
+        algorithm_config: LoraFinetuningConfig | QATFinetuningConfig | None,
         datasetio_api: DatasetIO,
         datasets_api: Datasets,
     ) -> None:
@@ -109,12 +112,12 @@ class LoraFinetuningSingleDevice:
                 return str(checkpoint_dir)
 
         if checkpoint_dir and checkpoint_dir != "null":
-            self.checkpoint_dir = config.checkpoint_dir
+            self.checkpoint_dir = checkpoint_dir
         else:
-            model = resolve_model(self.model_id)
-            if model is None:
+            model_obj = resolve_model(self.model_id)
+            if model_obj is None:
                 raise ValueError(f"{self.model_id} not found. Your model id should be in the llama models SKU list")
-            self.checkpoint_dir = model_checkpoint_dir(model)
+            self.checkpoint_dir = model_checkpoint_dir(model_obj)
 
         self._output_dir = str(DEFAULT_CHECKPOINT_DIR)
         self._checkpoint_format = config.checkpoint_format
@@ -135,16 +138,16 @@ class LoraFinetuningSingleDevice:
         self.max_validation_steps = training_config.max_validation_steps
 
         self._clip_grad_norm = 1.0
-        self._enable_activation_checkpointing = (
-            (training_config.efficiency_config.enable_activation_checkpointing)
-            if training_config.efficiency_config
-            else False
-        )
-        self._enable_activation_offloading = (
-            (training_config.efficiency_config.enable_activation_offloading)
-            if training_config.efficiency_config
-            else False
-        )
+        self._enable_activation_checkpointing = False
+        self._enable_activation_offloading = False
+        if training_config.efficiency_config:
+            if training_config.efficiency_config.enable_activation_checkpointing:
+                self._enable_activation_checkpointing = (
+                    training_config.efficiency_config.enable_activation_checkpointing
+                )
+            if training_config.efficiency_config.enable_activation_offloading:
+                self._enable_activation_offloading = training_config.efficiency_config.enable_activation_offloading
 
         self.datasetio_api = datasetio_api
         self.datasets_api = datasets_api
@@ -451,12 +454,12 @@ class LoraFinetuningSingleDevice:
         """
         # Initialize tokens count and running loss (for grad accumulation)
         t0 = time.perf_counter()
-        running_loss = 0
+        running_loss: float = 0.0
         num_tokens = 0
 
         # training artifacts
         checkpoints = []
-        memory_stats = {}
+        memory_stats: Dict[str, Any] = {}
 
         # self.epochs_run should be non-zero when we're resuming from a checkpoint
         for curr_epoch in range(self.epochs_run, self.total_epochs):
@@ -484,7 +487,7 @@ class LoraFinetuningSingleDevice:
                 # Loss is normalized by default so we multiply by the number of tokens
                 # This way we can normalize by the total number of tokens if we're accumulating gradients
                 current_loss = await self._loss_step(batch) * current_num_tokens
-                running_loss += current_loss
+                running_loss += current_loss.detach().item()
                 current_loss.backward()
 
                 # Step with optimizer
@@ -500,7 +503,7 @@ class LoraFinetuningSingleDevice:
                     # Update the number of steps when the weights are updated
                     self.global_step += 1
 
-                    loss_to_log = running_loss.item() / num_tokens
+                    loss_to_log = running_loss / num_tokens
 
                     pbar.update(1)
                     pbar.set_description(f"{curr_epoch + 1}|{self.global_step}|Loss: {loss_to_log}")
@@ -523,7 +526,7 @@ class LoraFinetuningSingleDevice:
                     )
 
                     # Reset running stats for the next step
-                    running_loss = 0
+                    running_loss = 0.0
                     num_tokens = 0
                     t0 = time.perf_counter()
 
```
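The `running_loss` hunks above change more than the annotation: by accumulating `current_loss.detach().item()`, the recipe keeps a plain Python float, so the accumulator holds no autograd graph references and the later `running_loss / num_tokens` no longer needs `.item()`. A toy standalone comparison of the two accumulation styles (dummy tensors, not the recipe's real loop):

```python
# Toy comparison, assuming torch is installed; the losses are dummies.
import torch

losses = [torch.tensor(2.0, requires_grad=True) * 3, torch.tensor(1.5, requires_grad=True) * 2]
num_tokens = 5

# Old style: the accumulator stays a tensor (and keeps graph references),
# so logging required running_loss.item().
running_loss_tensor = torch.zeros(())
for loss in losses:
    running_loss_tensor = running_loss_tensor + loss
print(running_loss_tensor.item() / num_tokens)

# New style: detach and convert each step's loss immediately; the
# accumulator is a plain float and divides directly.
running_loss: float = 0.0
for loss in losses:
    running_loss += loss.detach().item()
print(running_loss / num_tokens)
```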
pyproject.toml

```diff
@@ -228,10 +228,6 @@ exclude = [
     "^llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers\\.py$",
     "^llama_stack/providers/inline/inference/vllm/",
     "^llama_stack/providers/inline/post_training/common/validator\\.py$",
-    "^llama_stack/providers/inline/post_training/torchtune/common/checkpointer\\.py$",
-    "^llama_stack/providers/inline/post_training/torchtune/common/utils\\.py$",
-    "^llama_stack/providers/inline/post_training/torchtune/datasets/sft\\.py$",
-    "^llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device\\.py$",
     "^llama_stack/providers/inline/post_training/torchtune/post_training\\.py$",
     "^llama_stack/providers/inline/safety/code_scanner/",
     "^llama_stack/providers/inline/safety/llama_guard/",
```