"""
|
|
2026.2.1
|
|
2026.2.1
|
|
4.57.6
|
|
0.24.0
|
|
__UNSLOTH_VERSIONING__
|
|
"""

# Unsloth auto generated code
# Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import functional as F
from unsloth_zoo.temporary_patches.common import torch_compile
from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable
from trl.trainer.sft_trainer import (
    Any, AutoProcessor, BaseTrainer, Callable, DataCollator,
    DataCollatorForLanguageModeling, DataCollatorForVisionLanguageModeling,
    Dataset, EvalPrediction, FLASH_ATTENTION_VARIANTS, IterableDataset,
    Optional, Path, PeftConfig, PreTrainedModel, PreTrainedTokenizerBase,
    ProcessorMixin, SFTConfig, SFTTrainer, TrainerCallback, TrainingArguments,
    Union, clone_chat_template, contextlib, create_model_from_path, dataclass,
    defaultdict, dft_loss, get_act_offloading_ctx_manager, is_conversational,
    logger, logging, nn, os, pack_dataset, pad, selective_log_softmax, torch,
)

import os
from typing import *
from dataclasses import dataclass, field
from packaging.version import Version
import torch
import numpy as np
from contextlib import nullcontext
from torch.nn import functional as F
import inspect
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
from transformers.training_args import ParallelMode
from unsloth_zoo.device_type import DEVICE_TYPE, device_synchronize

# Wrap trainer with padding to right and enable training mode
# Also patches W&B since multiple runs must use wandb.finish()
import functools
from types import MethodType
try:
    from unsloth_zoo.gradient_checkpointing import reset_unsloth_gradient_checkpointing_buffers
except:
    def reset_unsloth_gradient_checkpointing_buffers(): pass


def prepare_for_training_mode(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        # Enable training mode
        _was_training = None
        # Get gradient checkpointing setting from training arguments
        use_gc = getattr(self.args, 'gradient_checkpointing', True)
        if hasattr(self, 'model') and hasattr(self.model, "training"):
            _was_training = self.model.training
        if hasattr(self, 'model') and hasattr(self.model, "for_training"):
            self.model.for_training(use_gradient_checkpointing=use_gc)
        output = f(self, *args, **kwargs)
        # Restore previous mode when possible
        if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
            if _was_training is False:
                self.model.for_inference()
            elif _was_training is True and hasattr(self.model, "for_training"):
                self.model.for_training(use_gradient_checkpointing=use_gc)
        # Reset gradient checkpointing buffers to free memory while staying ready for the next run
        try:
            reset_unsloth_gradient_checkpointing_buffers()
        except:
            pass
        # Patch W&B to enable logging on future runs, otherwise it'll overwrite the first run
        try:
            import wandb
            wandb.finish()
        except:
            pass
        return output
    return wrapper
pass
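
# Illustrative usage (a minimal sketch, not part of the generated trainer): the
# decorator above is intended to wrap a trainer's `train` method, e.g.
# `SFTTrainer.train = prepare_for_training_mode(SFTTrainer.train)`, so the model
# is flipped into training mode before `train()` runs and restored afterwards.
# The dummy class below is hypothetical: only `args.gradient_checkpointing` and
# `model` are read by the wrapper. Runs only when this module is executed directly.
if __name__ == "__main__":
    class _DummyTrainer:
        args = type("Args", (), {"gradient_checkpointing": True})()
        model = None  # no `for_training`/`for_inference`, so mode toggling is skipped
        @prepare_for_training_mode
        def train(self):
            return "trained"
    assert _DummyTrainer().train() == "trained"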


torch_compile_options = {
    "epilogue_fusion"   : True,
    "max_autotune"      : False,
    "shape_padding"     : True,
    "trace.enabled"     : False,
    "triton.cudagraphs" : False,
}


@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def chunked_hidden_states_selective_log_softmax(
    hidden_states: torch.Tensor,
    lm_head: torch.Tensor,
    index: torch.Tensor,
    chunks: int = 4,
    logit_scale_multiply: float = 0.0,
    logit_scale_divide: float = 0.0,
    logit_softcapping: float = 0.0,
    temperature: float = 1.0,
) -> torch.Tensor:
    # All Unsloth Zoo code licensed under AGPL3
    flat_hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
    flat_index = index.reshape(-1)

    chunked_hidden_states = torch.chunk(flat_hidden_states, chunks=chunks, dim=0)
    chunked_index = torch.chunk(flat_index, chunks=chunks, dim=0)

    all_per_token_logps = []

    for chunk_hidden_states, chunk_index in zip(chunked_hidden_states, chunked_index):
        chunk_logits = chunk_hidden_states.to(lm_head.dtype) @ lm_head.t()

        if logit_scale_multiply != 0.0:
            chunk_logits = chunk_logits * logit_scale_multiply
        if logit_scale_divide != 0.0:
            chunk_logits = chunk_logits / logit_scale_divide
        if logit_softcapping != 0.0:
            # Softcapping: cap * tanh(logits / cap)
            chunk_logits = logit_softcapping * torch.tanh(chunk_logits / logit_softcapping)

        chunk_logits = chunk_logits.to(torch.float32)

        if temperature != 1.0:
            chunk_logits = chunk_logits / temperature

        selected_logits = torch.gather(chunk_logits, dim=-1, index=chunk_index.unsqueeze(-1)).squeeze(-1)
        logsumexp_values = torch.logsumexp(chunk_logits, dim=-1)
        per_token_logps = selected_logits - logsumexp_values
        all_per_token_logps.append(per_token_logps)

    all_per_token_logps = torch.concat(all_per_token_logps)

    all_per_token_logps = all_per_token_logps.reshape((hidden_states.shape[0], hidden_states.shape[1]))
    return all_per_token_logps
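
# A minimal eager-mode reference for what the chunked kernel computes (an
# illustrative sketch, not part of the trainer). With the default scaling,
# softcapping and temperature settings, the result equals a plain log-softmax
# over `hidden_states @ lm_head.T` gathered at `index`; chunking only changes
# how much of the logit matrix is materialized at once.
if __name__ == "__main__":
    hs  = torch.randn(1, 8, 16)
    W   = torch.randn(32, 16)            # lm_head weight: [vocab_size, hidden_size]
    idx = torch.randint(0, 32, (1, 8))
    ref = torch.log_softmax((hs @ W.t()).float(), dim=-1)
    ref = ref.gather(-1, idx.unsqueeze(-1)).squeeze(-1)  # shape [1, 8]
    # chunked_hidden_states_selective_log_softmax(hs, W, idx) should match `ref`
    # up to compile/precision differences.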


@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def chunked_selective_log_softmax(logits, index):
    # Split into 4 chunks only
    chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0)
    chunked_index  = torch.chunk(index.reshape(-1), chunks = 4, dim = 0)
    all_per_token_logps = []
    # Below loop does the same as selective_log_softmax(chunk_logits, chunk_index)
    for chunk_logits, chunk_index in zip(chunked_logits, chunked_index):
        chunk_logits = chunk_logits.to(torch.float32)
        selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1)
        logsumexp_values = torch.logsumexp(chunk_logits, dim = -1)
        per_token_logps = selected_logits - logsumexp_values
        all_per_token_logps.append(per_token_logps)
    pass
    all_per_token_logps = torch.concat(all_per_token_logps)
    all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1]))
    return all_per_token_logps
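
# Same idea for the logits-only variant (illustrative sketch): it agrees with
# TRL's eager `selective_log_softmax`, but materializes the float32 logits in
# 4 chunks to cap peak memory.
if __name__ == "__main__":
    logits = torch.randn(2, 6, 40)
    index  = torch.randint(0, 40, (2, 6))
    ref = selective_log_softmax(logits, index)  # TRL's eager reference
    # chunked_selective_log_softmax(logits, index) should agree with `ref`.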


def calculate_pad_tokens_in_prompt(
    input_ids: torch.Tensor,
    logits_to_keep: int,
    pad_token_id: int,
) -> torch.Tensor:
    """
    Given a prompt tensor, returns the number of left-padding tokens in each
    sequence, e.g. [pad, pad, pad, cat] -> 3 tokens.
    """
    if logits_to_keep >= input_ids.shape[1]:
        raise ValueError("logits_to_keep must be smaller than the sequence length.")

    prompt_section = input_ids[:, :-logits_to_keep]

    padding_mask = (prompt_section == pad_token_id)

    pad_token_counts = padding_mask.sum(dim=1)

    return pad_token_counts
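
# Example (illustrative): with 2 tokens kept as completion logits, only the
# first 4 columns count as prompt, so the left-pad counts are [2, 1].
if __name__ == "__main__":
    _inp = torch.tensor([[0, 0, 7, 8, 9, 9],
                         [0, 7, 7, 8, 9, 9]])
    assert calculate_pad_tokens_in_prompt(_inp, logits_to_keep=2, pad_token_id=0).tolist() == [2, 1]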


def create_completion_attention_mask(
    completion_input_ids: torch.Tensor,
    left_pad_tokens_per_prompt: torch.Tensor,
    max_left_pad: int,
    pad_token_id: int,
) -> torch.Tensor:
    """
    Given a sequence [p, p, p, c, c, c, pad, pad, pad], where p are extra prompt
    tokens left over from slicing the tensor, c are completion tokens, and pad
    are padding tokens, build a completion mask that zeroes out the p and pad
    tokens. For this example the mask is [0, 0, 0, 1, 1, 1, 0, 0, 0].
    """
    batch_size, completion_len = completion_input_ids.shape
    device = completion_input_ids.device

    num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt

    indices = torch.arange(completion_len, device=device).unsqueeze(0)
    shift_mask = indices >= num_tokens_to_mask.unsqueeze(1)

    non_padding_mask = (completion_input_ids != pad_token_id)

    final_mask = shift_mask & non_padding_mask

    return final_mask
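
# Example (illustrative): this prompt had 1 left-pad token while the widest in
# the batch had 3, so the first 3 - 1 = 2 completion slots are leftover prompt
# tokens; trailing pads are masked out as well.
if __name__ == "__main__":
    _comp = torch.tensor([[11, 12, 13, 0, 0]])
    _mask = create_completion_attention_mask(
        _comp, left_pad_tokens_per_prompt=torch.tensor([1]), max_left_pad=3, pad_token_id=0,
    )
    assert _mask.tolist() == [[False, False, True, False, False]]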


def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor:
    """
    Packs the non-padding tokens of each sequence in a batch to the left,
    moving all padding tokens to the right.
    """
    mask = (tensor != pad_id)
    # Must use stable=True since a binary mask is otherwise unordered
    sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True)
    packed_tensor = torch.gather(tensor, 1, sorted_indices)
    return packed_tensor
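
# Example (illustrative): non-pad tokens are packed to the left, preserving
# their relative order, and pads collect on the right.
if __name__ == "__main__":
    _t = torch.tensor([[0, 0, 5, 6],
                       [7, 0, 8, 0]])
    assert left_pack_padding(_t, pad_id=0).tolist() == [[5, 6, 0, 0], [7, 8, 0, 0]]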


def align_logprobs_with_mask(
    logprob_tensor: torch.Tensor,
    attention_mask: torch.Tensor,
    pad_value: float = 0.0,
) -> torch.Tensor:
    """
    Aligns a log probability tensor with a given attention mask.
    """

    device = logprob_tensor.device
    batch_size, logprob_seq_len = logprob_tensor.shape
    mask_seq_len = attention_mask.shape[1]

    padded_logprobs = torch.full(
        attention_mask.shape,
        fill_value=pad_value,
        dtype=logprob_tensor.dtype,
        device=device,
    )

    left_pad_counts = torch.argmax(attention_mask, dim=1)

    cols = torch.arange(logprob_seq_len, device=device)
    dest_indices = left_pad_counts.unsqueeze(1) + cols

    # Create destination row indices
    # Shape: [batch_size, logprob_seq_len]
    row_indices = torch.arange(batch_size, device=device).unsqueeze(1).expand_as(dest_indices)

    # Filter out-of-bounds indices and perform the assignment.
    # Create a mask to identify only the indices that are within the bounds
    # of the target tensor's sequence length.
    valid_mask = dest_indices < mask_seq_len

    # Use this mask to select only the valid row indices, column indices,
    # and the corresponding values from the logprob tensor.
    # This flattens the selected elements into 1D tensors.
    valid_rows = row_indices[valid_mask]
    valid_cols = dest_indices[valid_mask]
    valid_vals = logprob_tensor[valid_mask]

    # Place the valid values into their correct positions in the padded tensor
    # using a single, efficient advanced indexing operation.
    padded_logprobs[valid_rows, valid_cols] = valid_vals

    return padded_logprobs
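
# Example (illustrative): the attention mask has 1 leading pad, so the two
# logprobs are shifted right by one position and the remaining slots take
# `pad_value` (0.75 is used since it is exactly representable in float32).
if __name__ == "__main__":
    _lp   = torch.tensor([[0.5, 0.75]])
    _mask = torch.tensor([[0, 1, 1, 1]])
    assert align_logprobs_with_mask(_lp, _mask).tolist() == [[0.0, 0.5, 0.75, 0.0]]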


def autotune_batch_and_chunks(
    total_input_rows,
    seq_len,
    hidden_size,
    vocab_size,
    dtype_bytes=16,
    multiplier=None,
):
    if multiplier is None:
        final_m = max(4, seq_len // 4096)
    else:
        final_m = multiplier

    if torch.cuda.is_available():
        free_bytes, _ = torch.cuda.mem_get_info()
        limit_gb = (free_bytes / (1024**3)) * 0.80
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        # For XPU: estimate free memory from total - reserved
        total_mem = torch.xpu.get_device_properties(0).total_memory
        reserved_mem = torch.xpu.memory_reserved()
        free_bytes = total_mem - reserved_mem
        limit_gb = (free_bytes / (1024**3)) * 0.80
    else:
        # Fallback: assume 8GB available
        limit_gb = 8.0

    bytes_to_gb = 1024**3

    b_vals = torch.arange(total_input_rows, 0, -1, device='cpu', dtype=torch.float32)

    hidden_gb = (b_vals * seq_len * hidden_size * dtype_bytes) / bytes_to_gb

    base_logits = ((b_vals / total_input_rows) * b_vals * seq_len * vocab_size * dtype_bytes) / bytes_to_gb
    logits_gb = base_logits / final_m

    total_mem_gb = hidden_gb + logits_gb

    valid_mask = total_mem_gb <= limit_gb
    valid_indices = torch.nonzero(valid_mask, as_tuple=False)

    if valid_indices.shape[0] == 0:
        # No batch size fits within the memory budget - the GPU will likely OOM
        return 4, final_m

    best_idx = valid_indices[0].item()
    final_b = int(b_vals[best_idx].item())

    return final_b, final_m
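
# Example (illustrative): on a machine without CUDA/XPU this falls back to an
# assumed 8GB budget and returns the largest row count whose hidden-state plus
# chunked-logit memory estimate fits, along with the logit chunk multiplier.
if __name__ == "__main__":
    _b, _m = autotune_batch_and_chunks(
        total_input_rows=8, seq_len=4096, hidden_size=4096, vocab_size=152064,
    )
    print(f"rows per forward pass: {_b}, logit chunk multiplier: {_m}")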


@dataclass
class UnslothSFTConfig(SFTConfig):
    """

    Configuration class for the [`SFTTrainer`].

    This class includes only the parameters that are specific to SFT training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        > Parameters that control the model

        model_init_kwargs (`dict[str, Any]`, *optional*):
            Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model`
            argument of the [`SFTTrainer`] is provided as a string. If you're training a MoE architecture and want to
            include the load balancing/auxiliary loss as a part of the final loss, remember to set
            `output_router_logits=True` in this dictionary.
        chat_template_path (`str`, *optional*):
            If specified, sets the model's chat template. This can either be the path to a tokenizer (local directory
            or Hugging Face Hub model) or a direct path to a Jinja template file. When using a Jinja file, you must
            ensure that any special tokens referenced in the template are added to the tokenizer and that the model's
            embedding layer is resized accordingly.

        > Parameters that control the data preprocessing

        dataset_text_field (`str`, *optional*, defaults to `"text"`):
            Name of the column that contains text data in the dataset.
        dataset_kwargs (`dict[str, Any]`, *optional*):
            Dictionary of optional keyword arguments for the dataset preparation. The only supported key is
            `skip_prepare_dataset`. When the model is a VLM, `skip_prepare_dataset` is automatically treated as `True`
            regardless of the provided value, since preprocessing is done on the fly.
        dataset_num_proc (`int`, *optional*):
            Number of processes to use for processing the dataset.
        eos_token (`str`, *optional*):
            Token used to indicate the end of a turn or sequence. If `None`, it defaults to
            `processing_class.eos_token`.
        pad_token (`str`, *optional*):
            Token used for padding. If `None`, it defaults to `processing_class.pad_token`, or if that is also `None`,
            it falls back to `processing_class.eos_token`.
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the tokenized sequence. Sequences longer than `max_length` are truncated from the right.
            If `None`, no truncation is applied. When packing is enabled, this value sets the sequence length.
        packing (`bool`, *optional*, defaults to `False`):
            Whether to group multiple sequences into fixed-length blocks to improve computational efficiency and reduce
            padding. Uses `max_length` to define sequence length.
        packing_strategy (`str`, *optional*, defaults to `"bfd"`):
            Strategy for packing sequences. Can be either `"bfd"` (best-fit decreasing, default), or `"wrapped"`.
        padding_free (`bool`, *optional*, defaults to `False`):
            Whether to perform forward passes without padding by flattening all sequences in the batch into a single
            continuous sequence. This reduces memory usage by eliminating padding overhead. Currently, this is only
            supported with FlashAttention 2 or 3, which can efficiently handle the flattened batch structure. When
            packing is enabled with strategy `"bfd"`, padding-free is enabled, regardless of the value of this
            parameter.
        pad_to_multiple_of (`int`, *optional*):
            If set, the sequences will be padded to a multiple of this value.
        eval_packing (`bool`, *optional*):
            Whether to pack the eval dataset. If `None`, uses the same value as `packing`.

        > Parameters that control the training

        completion_only_loss (`bool`, *optional*):
            Whether to compute loss only on the completion part of the sequence. If set to `True`, loss is computed
            only on the completion, which is supported only for [prompt-completion](#prompt-completion) datasets. If
            `False`, loss is computed on the entire sequence. If `None` (default), the behavior depends on the dataset:
            loss is computed on the completion for [prompt-completion](#prompt-completion) datasets, and on the full
            sequence for [language modeling](#language-modeling) datasets.
        assistant_only_loss (`bool`, *optional*, defaults to `False`):
            Whether to compute loss only on the assistant part of the sequence. If set to `True`, loss is computed only
            on the assistant responses, which is supported only for [conversational](#conversational) datasets. If
            `False`, loss is computed on the entire sequence.
        loss_type (`str`, *optional*, defaults to `"nll"`):
            Type of loss to use. Possible values are `"nll"` (negative log-likelihood, default) and `"dft"` (Dynamic
            Fine-Tuning, as described in [this paper](https://huggingface.co/papers/2508.05629)).
        activation_offloading (`bool`, *optional*, defaults to `False`):
            Whether to offload the activations to the CPU.
    """
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    unsloth_logit_chunk_multiplier : Optional[int] = field(
        default = None,
        metadata = {'help': 'Multiplier for chunked logit computations.'},
    )
    unsloth_grpo_mini_batch : Optional[int] = field(
        default = None,
        metadata = {'help': 'Mini batch size for GRPO hidden state accumulation. Default is None unless user defines it.'},
    )
    max_seq_length : Optional[int] = field(
        default = None,
        metadata = {'help': 'Maximum sequence length to truncate to.'},
    )
    def __init__(
        self,
        output_dir = None, overwrite_output_dir = None,
        do_train = False, do_eval = False, do_predict = False,
        eval_strategy = 'no', prediction_loss_only = False,
        per_device_train_batch_size = 4, per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None, per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2, eval_accumulation_steps = 2, eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05, weight_decay = 0.01,
        adam_beta1 = 0.9, adam_beta2 = 0.999, adam_epsilon = 1e-08,
        max_grad_norm = 1.0, num_train_epochs = 3.0, max_steps = -1,
        lr_scheduler_type = 'linear', lr_scheduler_kwargs = None,
        warmup_ratio = 0.1, warmup_steps = 0,
        log_level = 'passive', log_level_replica = 'warning', log_on_each_node = True,
        logging_dir = None, logging_strategy = 'steps', logging_first_step = False,
        logging_steps = 1, logging_nan_inf_filter = False,
        save_strategy = 'steps', save_steps = 500, save_total_limit = None,
        save_safetensors = True, save_on_each_node = False, save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False, use_cpu = False, use_mps_device = False,
        seed = 3407, data_seed = 3407, jit_mode_eval = False,
        bf16 = False, fp16 = False, fp16_opt_level = 'O1',
        half_precision_backend = 'auto', bf16_full_eval = False, fp16_full_eval = False,
        tf32 = None, local_rank = -1, ddp_backend = None,
        tpu_num_cores = None, tpu_metrics_debug = False, debug = '',
        dataloader_drop_last = False, eval_steps = None, dataloader_num_workers = 0,
        dataloader_prefetch_factor = None, past_index = -1, run_name = None,
        disable_tqdm = None, remove_unused_columns = True, label_names = None,
        load_best_model_at_end = False, metric_for_best_model = None, greater_is_better = None,
        ignore_data_skip = False,
        fsdp = None, fsdp_min_num_params = 0, fsdp_config = None,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None, parallelism_config = None, deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit', optim_args = None, adafactor = False,
        group_by_length = False, length_column_name = 'length',
        report_to = 'none', project = 'huggingface', trackio_space_id = 'trackio',
        ddp_find_unused_parameters = None, ddp_bucket_cap_mb = None, ddp_broadcast_buffers = None,
        dataloader_pin_memory = True, dataloader_persistent_workers = False,
        skip_memory_metrics = True, use_legacy_prediction_loop = False,
        push_to_hub = False, resume_from_checkpoint = None,
        hub_model_id = None, hub_strategy = 'every_save', hub_token = None,
        hub_private_repo = None, hub_always_push = False, hub_revision = None,
        gradient_checkpointing = True, gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False, eval_do_concat_batches = True,
        fp16_backend = 'auto',
        push_to_hub_model_id = None, push_to_hub_organization = None, push_to_hub_token = None,
        mp_parameters = '', auto_find_batch_size = False, full_determinism = False,
        torchdynamo = None, ray_scope = 'last', ddp_timeout = 1800,
        torch_compile = False, torch_compile_backend = None, torch_compile_mode = None,
        include_tokens_per_second = False, include_num_input_tokens_seen = False,
        neftune_noise_alpha = None, optim_target_modules = None,
        batch_eval_metrics = False, eval_on_start = False,
        use_liger_kernel = False, liger_kernel_config = None,
        eval_use_gather_object = False, average_tokens_across_devices = True,
        model_init_kwargs = None, chat_template_path = None,
        dataset_text_field = 'text', dataset_kwargs = None, dataset_num_proc = None,
        eos_token = None, pad_token = None, max_length = 1024,
        packing = False, packing_strategy = 'bfd', padding_free = False,
        pad_to_multiple_of = None, eval_packing = None,
        completion_only_loss = None, assistant_only_loss = False,
        loss_type = 'nll', activation_offloading = False,
        vllm_sampling_params = None, unsloth_num_chunks = -1,
        unsloth_logit_chunk_multiplier = None, unsloth_grpo_mini_batch = None,
        max_seq_length = None,
        **kwargs,
    ):
        if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        if num_train_epochs is None:
            num_train_epochs = 3.0  # Default to 3 epochs if None; max_steps will override
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        import multiprocessing as _mp
        if _mp.get_start_method() != 'fork':
            dataset_num_proc = None
        elif dataset_num_proc is None:
            import psutil
            dataset_num_proc = min(max((psutil.cpu_count() or 1)+4, 2), 64)
            memory_gb_left = psutil.virtual_memory().available / (1024**3)
            if memory_gb_left <= 2: dataset_num_proc = 1
            else: dataset_num_proc = min(dataset_num_proc, int(memory_gb_left))
        if os.environ.get('UNSLOTH_ENABLE_FLEX_ATTENTION', '0') == '1':
            from unsloth_zoo.flex_attention import HAS_FLEX_ATTENTION
            if HAS_FLEX_ATTENTION and pad_to_multiple_of is None:
                from unsloth_zoo.flex_attention import FLEX_ATTENTION_BLOCK_SIZE
                pad_to_multiple_of = FLEX_ATTENTION_BLOCK_SIZE

        super().__init__(
            output_dir = output_dir, overwrite_output_dir = overwrite_output_dir,
            do_train = do_train, do_eval = do_eval, do_predict = do_predict,
            eval_strategy = eval_strategy, prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps, eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate, weight_decay = weight_decay,
            adam_beta1 = adam_beta1, adam_beta2 = adam_beta2, adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm, num_train_epochs = num_train_epochs, max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type, lr_scheduler_kwargs = lr_scheduler_kwargs,
            warmup_ratio = warmup_ratio, warmup_steps = warmup_steps,
            log_level = log_level, log_level_replica = log_level_replica, log_on_each_node = log_on_each_node,
            logging_dir = logging_dir, logging_strategy = logging_strategy,
            logging_first_step = logging_first_step, logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy, save_steps = save_steps, save_total_limit = save_total_limit,
            save_safetensors = save_safetensors, save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda, use_cpu = use_cpu, use_mps_device = use_mps_device,
            seed = seed, data_seed = data_seed, jit_mode_eval = jit_mode_eval,
            bf16 = bf16, fp16 = fp16, fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval, fp16_full_eval = fp16_full_eval, tf32 = tf32,
            local_rank = local_rank, ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores, tpu_metrics_debug = tpu_metrics_debug, debug = debug,
            dataloader_drop_last = dataloader_drop_last, eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index, run_name = run_name, disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns, label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model, greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp, fsdp_min_num_params = fsdp_min_num_params, fsdp_config = fsdp_config,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config, parallelism_config = parallelism_config,
            deepspeed = deepspeed, label_smoothing_factor = label_smoothing_factor,
            optim = optim, optim_args = optim_args, adafactor = adafactor,
            group_by_length = group_by_length, length_column_name = length_column_name,
            report_to = report_to, project = project, trackio_space_id = trackio_space_id,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb, ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub, resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id, hub_strategy = hub_strategy, hub_token = hub_token,
            hub_private_repo = hub_private_repo, hub_always_push = hub_always_push,
            hub_revision = hub_revision,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches, fp16_backend = fp16_backend,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters, auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism, torchdynamo = torchdynamo,
            ray_scope = ray_scope, ddp_timeout = ddp_timeout,
            torch_compile = torch_compile, torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha, optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics, eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel, liger_kernel_config = liger_kernel_config,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            model_init_kwargs = model_init_kwargs, chat_template_path = chat_template_path,
            dataset_text_field = dataset_text_field, dataset_kwargs = dataset_kwargs,
            dataset_num_proc = dataset_num_proc,
            eos_token = eos_token, pad_token = pad_token, max_length = max_length,
            packing = packing, packing_strategy = packing_strategy, padding_free = padding_free,
            pad_to_multiple_of = pad_to_multiple_of, eval_packing = eval_packing,
            completion_only_loss = completion_only_loss, assistant_only_loss = assistant_only_loss,
            loss_type = loss_type, activation_offloading = activation_offloading,
            **kwargs,
        )
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
        if unsloth_grpo_mini_batch is not None:
            if self.generation_batch_size >= unsloth_grpo_mini_batch:
                self.unsloth_grpo_mini_batch = unsloth_grpo_mini_batch
            else:
                raise ValueError(
                    "Unsloth GRPO mini batch size needs to be less than or equal to the effective generation "
                    "batch size, which is `per_device_train_batch_size * gradient_accumulation_steps`."
                )
        self.unsloth_logit_chunk_multiplier = unsloth_logit_chunk_multiplier
        self.max_seq_length = max_seq_length
    pass


class _UnslothSFTTrainer(BaseTrainer):
    """"""

    _tag_names = ["trl", "sft"]
    _name = "SFT"

    def __init__(
        self,
        model: Union[str, PreTrainedModel],
        args: Optional[Union[SFTConfig, TrainingArguments]] = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        processing_class: Optional[Union[PreTrainedTokenizerBase, ProcessorMixin]] = None,
        compute_loss_func: Optional[Callable] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None),
        optimizer_cls_and_kwargs: Optional[tuple[type[torch.optim.Optimizer], dict[str, Any]]] = None,
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        peft_config: Optional["PeftConfig"] = None,
        formatting_func: Optional[Callable[[dict], str]] = None,
    ):
        # Args
        if args is None:
            model_name = model if isinstance(model, str) else model.config._name_or_path
            model_name = model_name.split("/")[-1]
            args = SFTConfig(f"{model_name}-SFT")
        elif isinstance(args, TrainingArguments) and not isinstance(args, SFTConfig):
            dict_args = args.to_dict()
            dict_args["hub_token"] = args.hub_token  # to_dict hides the hub_token
            dict_args.pop("push_to_hub_token", None)
            args = SFTConfig(**dict_args)

        # Model
        if isinstance(model, str):
            model = create_model_from_path(model, **args.model_init_kwargs or {})
        else:
            if args.model_init_kwargs is not None:
                logger.warning(
                    "You passed `model_init_kwargs` to the `SFTConfig`, but your model is already instantiated. "
                    "The `model_init_kwargs` will be ignored."
                )
        model_id = model.config._name_or_path

        # Processing class
        if processing_class is None:
            processing_class = AutoProcessor.from_pretrained(model_id)

        # Handle pad token for processors or tokenizers
        if isinstance(processing_class, ProcessorMixin):
            tokenizer = processing_class.tokenizer
            self._is_vlm = True
        elif isinstance(processing_class, PreTrainedTokenizerBase):
            tokenizer = processing_class
            self._is_vlm = False
        else:
            raise TypeError("The `processing_class` must be either a `PreTrainedTokenizerBase` or a `ProcessorMixin`")

        if args.eos_token is not None:
            eos_token = args.eos_token
            eos_token_id = tokenizer.convert_tokens_to_ids(eos_token)
            if eos_token_id is None:
                raise ValueError(
                    f"The specified `eos_token` ('{eos_token}') is not found in the vocabulary of the given "
                    f"`processing_class` ({processing_class.__class__.__name__}). Ensure that the `eos_token` exists "
                    "in the vocabulary before using it as an EOS token."
                )
            tokenizer.eos_token_id = eos_token_id

        if args.chat_template_path is not None:
            if os.path.isfile(args.chat_template_path) and args.chat_template_path.endswith((".jinja", ".j2")):
                with open(args.chat_template_path, encoding="utf-8") as chat_template_file:
                    processing_class.chat_template = chat_template_file.read()
                added_tokens = []
            else:
                model, processing_class, added_tokens = clone_chat_template(
                    model, processing_class, args.chat_template_path
                )
        else:
            added_tokens = []

        # Catch some wrong configurations related to VLMs
        if self._is_vlm and args.packing:
            raise ValueError(
                "Packing is not supported for vision-language models. Please set `packing=False` in the SFTConfig."
            )
        if self._is_vlm and args.padding_free:
            raise ValueError(
                "Padding-free training is not yet supported for vision-language models. Please set "
                "`padding_free=False` in the `SFTConfig`."
            )
        if self._is_vlm and args.assistant_only_loss:
            raise ValueError(
                "Assistant-only loss is not yet supported for vision-language models. Please set "
                "`assistant_only_loss=False` in the `SFTConfig`."
            )

        # PEFT configuration and model wrapping
        if False:
            if added_tokens:
                # Ensure that the added tokens are trainable
                if peft_config.trainable_token_indices is None:
                    peft_config.trainable_token_indices = {"embed_tokens": added_tokens}
                elif "embed_tokens" not in peft_config.trainable_token_indices:
                    peft_config.trainable_token_indices["embed_tokens"] = added_tokens
                else:
                    peft_config.trainable_token_indices["embed_tokens"].extend(added_tokens)

                # Ensure that the lm_head is trainable
                if peft_config.modules_to_save is None or "lm_head" not in peft_config.modules_to_save:
                    logger.warning(
                        "Cloning chat template added new tokens to the tokenizer, but 'lm_head' is not in PEFT's "
                        "`modules_to_save`. As a result, the model may not learn to generate outputs with these new "
                        "tokens, leading to degraded generation quality. To fix this, add "
                        "`modules_to_save=['lm_head']` to your PEFT configuration."
                    )

                    if peft_config.modules_to_save is None:
                        peft_config.modules_to_save = ["lm_head"]
                    else:
                        peft_config.modules_to_save.append("lm_head")

        # In Prompt Tuning a small set of trainable virtual tokens [continuous prompt embeddings] is prepended to the
        # input. We store the number of these tokens so we can account for them correctly when calculating accuracy.
        self.num_virtual_tokens = 0

        if False:
            pass
        # Guard with hasattr so non-PEFT models (which lack `peft_config`) skip this check
        if hasattr(model, "peft_config") and model.active_adapter in model.peft_config:
            peft_model_config = model.peft_config[model.active_adapter]
            self.num_virtual_tokens = getattr(peft_model_config, "num_virtual_tokens", 0)

        # Data collator
        # BFD packing requires padding-free mode; otherwise, the collator outputs padded attention masks, causing
        # FlashAttention to ignore position_ids and recompute them incorrectly from the padded attention mask.
        self.padding_free = args.padding_free or (args.packing and args.packing_strategy == "bfd")
        use_flash_attention = model.config._attn_implementation in FLASH_ATTENTION_VARIANTS
        if self.padding_free:
            if data_collator is not None:
                raise ValueError("Passing a custom data collator is not supported when using padding-free.")
            if args.packing and args.packing_strategy == "wrapped":
                logger.warning(
                    "You are passing `padding_free=True` with the 'wrapped' packing strategy, which is not "
                    "recommended. Please refer to the documentation to understand why this is not recommended."
                )
            if not use_flash_attention:
                logger.warning(
                    "Padding-free training is enabled, but the attention implementation is not set to a supported "
                    "flash attention variant. Padding-free training flattens batches into a single sequence, and only "
                    "the following implementations are known to reliably support this: "
                    f"{', '.join(sorted(FLASH_ATTENTION_VARIANTS))}. Using other implementations may lead to "
                    "unexpected behavior. To ensure compatibility, set `attn_implementation` in the model "
                    "configuration to one of these supported options or verify that your attention mechanism can "
                    "handle flattened sequences."
                )
        # Decide whether to use completion-only loss: if not specified, then it is set to True if the dataset format
        # is prompt-completion, and False if the dataset format is language modeling.
        dataset_sample = next(iter(train_dataset))
        if args.completion_only_loss is None:
            self.completion_only_loss = "prompt" in dataset_sample and "completion" in dataset_sample
        else:
            self.completion_only_loss = args.completion_only_loss

        self._is_vision_dataset = "image" in dataset_sample or "images" in dataset_sample
        # Unsloth: override _is_vlm for VLM models that pass a bare tokenizer
        if not self._is_vlm and self._is_vision_dataset:
            _m = model
            if hasattr(_m, "model"): _m = _m.model
            if hasattr(getattr(_m, "config", None), "vision_config") or\
                _m.__class__.__name__.endswith("ForConditionalGeneration"):
                self._is_vlm = True
        if self._is_vision_dataset and not self._is_vlm:
            raise ValueError(
                "The dataset appears to be vision-related (contains 'image' or 'images' keys), but the provided "
                "model does not seem to be a vision-language model. Please check your model and dataset."
            )

        if data_collator is None and not self._is_vision_dataset:
            # Get the pad token: if not provided, use the one from the processing class or the eos token
            # if the processing class does not have a pad token.
            pad_token = args.pad_token or tokenizer.pad_token or tokenizer.eos_token
            pad_token_id = tokenizer.convert_tokens_to_ids(pad_token)
            if pad_token_id is None:
                raise ValueError(
                    f"The specified `pad_token` ('{pad_token}') is not found in the vocabulary of the given "
                    f"`processing_class` ({processing_class.__class__.__name__}). Ensure that the `pad_token` exists "
                    "in the vocabulary before using it as a padding token."
                )
            data_collator = DataCollatorForLanguageModeling(
                pad_token_id=pad_token_id,
                completion_only_loss=self.completion_only_loss,
                padding_free=self.padding_free,
                pad_to_multiple_of=args.pad_to_multiple_of,
            )
        elif data_collator is None and self._is_vision_dataset:
            data_collator = DataCollatorForVisionLanguageModeling(
                processor=processing_class,
                max_length=args.max_length,
                completion_only_loss=self.completion_only_loss,
                pad_to_multiple_of=args.pad_to_multiple_of,
                dataset_text_field=args.dataset_text_field,
            )

        if args.packing and args.packing_strategy == "bfd" and not use_flash_attention:
            logger.warning(
                "You are using packing, but the attention implementation is not set to a supported flash attention "
                "variant. Packing gathers multiple samples into a single sequence, and only the following "
                f"implementations are known to reliably support this: {', '.join(sorted(FLASH_ATTENTION_VARIANTS))}. "
                "Using other implementations may lead to cross-contamination between samples. To avoid this, either "
                "disable packing by setting `packing=False`, or set `attn_implementation` in the model configuration "
                "to one of these supported options."
            )
        if args.assistant_only_loss and not is_conversational(dataset_sample):
            raise ValueError(
                "You set `assistant_only_loss=True`, but the dataset is not conversational. This option is only "
                "supported for conversational datasets."
            )

        # Dataset
        # Skip dataset preparation if `skip_prepare_dataset=True` in `dataset_kwargs`, or if it's a VLM, where
        # preprocessing [e.g., image-to-pixel conversion] is too costly and done on the fly instead.
        skip_prepare_dataset = (
            args.dataset_kwargs is not None
            and args.dataset_kwargs.get("skip_prepare_dataset", False)
            or self._is_vision_dataset
        )
        if not skip_prepare_dataset:
            if self.completion_only_loss and formatting_func:
                raise ValueError(
                    "A formatting function was provided while `completion_only_loss=True`, which is incompatible. "
                    "Using a formatter converts the dataset to a language modeling type, conflicting with "
                    "completion-only loss. To resolve this, apply your formatting function before passing the "
                    "dataset, or disable `completion_only_loss` in `SFTConfig`."
                )
            self._unsloth_model_ref = model
            train_dataset = self._prepare_dataset(
                train_dataset, processing_class, args, args.packing, formatting_func, "train"
            )
            if eval_dataset is not None:
                packing = args.packing if args.eval_packing is None else args.eval_packing
                if isinstance(eval_dataset, dict):
                    eval_dataset = {
                        key: self._prepare_dataset(dataset, processing_class, args, packing, formatting_func, key)
                        for key, dataset in eval_dataset.items()
                    }
                else:
                    eval_dataset = self._prepare_dataset(
                        eval_dataset, processing_class, args, packing, formatting_func, "eval"
                    )

        # Loss function
        if args.loss_type == "nll":
            pass  # use the default loss
        elif args.loss_type == "dft":
            if compute_loss_func is not None:
                raise ValueError(
                    "You passed a `compute_loss_func` together with `loss_type='dft'` to the `SFTTrainer`. "
                    "When using `loss_type='dft'`, the loss function is internally set to the DFT loss, so passing a "
                    "`compute_loss_func` is not allowed."
                )
            compute_loss_func = dft_loss
        else:
            raise ValueError(f"Invalid `loss_type` {args.loss_type} passed. Supported values are 'nll' and 'dft'.")

        # Initialize the metrics
        self._metrics = {"train": defaultdict(list), "eval": defaultdict(list)}
        self._total_train_tokens = 0

        # Initialize the Trainer. Parent class will handle:
        # - DeepSpeed configuration [through create_accelerator_and_postprocess]
        # - FSDP setup
        # - Distributed training setup
        # - Optimizer and scheduler creation

        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            compute_loss_func=compute_loss_func,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            optimizer_cls_and_kwargs=optimizer_cls_and_kwargs,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )

        # Initialize activation offloading context
        if self.args.activation_offloading:
            self.maybe_activation_offload_context = get_act_offloading_ctx_manager(model=self.model)
        else:
            self.maybe_activation_offload_context = contextlib.nullcontext()

        # Add tags for models that have been loaded with the correct transformers version
        if hasattr(self.model, "add_model_tags"):
            self.model.add_model_tags(self._tag_names)

        self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)

    def _prepare_dataset(
        self,
        dataset: Union[Dataset, IterableDataset],
        processing_class,
        args,
        packing: bool,
        formatting_func: Optional[Callable[[dict], str]],
        dataset_name: str,
    ) -> Union[Dataset, IterableDataset]:
        # All Unsloth Zoo code licensed under LGPLv3
        try:
            if isinstance(dataset, ConstantLengthDataset): return dataset
        except:
            pass

        map_kwargs = {}
        use_desc = isinstance(dataset, Dataset)
        is_vlm = hasattr(processing_class, "tokenizer")
        tokenizer = processing_class
        if is_vlm: tokenizer = processing_class.tokenizer

        # Dynamic detection: check if the model's module defines a function
        # that requires token_type_ids when is_training=True
        import sys as _sys
        _needs_token_type_ids = False
        # Split to avoid compiler substring match on masking_utils names
        _ccm = 'create_' + 'causal_mask_mapping'
        _model = getattr(self, '_unsloth_model_ref', None) or getattr(self, 'model', None)
        if _model is not None:
            for _m in (_model, getattr(_model, 'model', None)):
                if _m is None: continue
                _mod = _sys.modules.get(type(_m).__module__)
                if _mod is not None and hasattr(_mod, _ccm):
                    _needs_token_type_ids = True
                    break

        if not _needs_token_type_ids:
            # Fallback: model not yet available, check the processor class MRO
            for _base in type(processing_class).__mro__:
                _base_mod = getattr(_base, '__module__', '')
                if 'transformers.models.' in _base_mod:
                    _modeling_mod = _base_mod.replace('.processing_', '.modeling_')
                    _mod = _sys.modules.get(_modeling_mod)
                    if _mod is not None and hasattr(_mod, _ccm):
                        _needs_token_type_ids = True
                        break
        if _needs_token_type_ids and hasattr(args, 'remove_unused_columns'):
            args.remove_unused_columns = False

        # Get max length
        max_seq_length = getattr(args, "max_length", 0)
        if max_seq_length == 0: max_seq_length = getattr(args, "max_seq_length", 0)
        if max_seq_length == 0: max_seq_length = getattr(self, "max_seq_length", 0)
        if max_seq_length == 0: max_seq_length = getattr(self, "max_seq", 0)
        if max_seq_length == 0: raise RuntimeError("Unsloth: max_seq_length is 0! Please specify one!")
        dataset_text_field = getattr(args, "dataset_text_field", "text")
        do_truncation = max_seq_length != 0
        do_formatting_func = False
        do_tokenize = True

        # Get correct column names
        column_names = set(next(iter(dataset)).keys())
        used_column_names = ["input_ids"]
        if "attention_mask" in column_names:
            used_column_names.append("attention_mask")
        if _needs_token_type_ids:
            used_column_names.append("token_type_ids")

        # Check if the dataset is already tokenized so we can skip tokenization
        from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling
        if "labels" in column_names:
            # Most likely the user forgot the data collator!
            if is_vlm and not hasattr(tokenizer, "pad"):
                # Check if processing_class has a .pad; if not, use tokenizer.tokenizer
                raise RuntimeError(f"Unsloth: {processing_class.__class__} does not have .pad!")
            self.data_collator = DataCollatorForSeq2Seq(tokenizer)
            used_column_names.append("labels")
            do_tokenize = False
        elif "input_ids" in column_names:
            # Skip dataset prep, and set the data collator
            if is_vlm and not hasattr(tokenizer, "pad"):
                # Check if processing_class has a .pad; if not, use tokenizer.tokenizer
                raise RuntimeError(f"Unsloth: {processing_class.__class__} does not have .pad!")
            self.data_collator = DataCollatorForLanguageModeling(tokenizer, mlm = False)
            do_tokenize = False
        elif dataset_text_field not in column_names:
            do_formatting_func = True
            if formatting_func is None:
                raise RuntimeError("Unsloth: You must specify a `formatting_func`")
        pass

        if do_tokenize:
            # Check double BOS tokens
            if do_formatting_func:
                test_text = formatting_func(next(iter(dataset)))
                if not isinstance(test_text, list):
                    raise ValueError(
                        "Unsloth: The `formatting_func` should return a list of processed strings."
                    )
                test_text = test_text[0]
            else:
                test_text = next(iter(dataset))[dataset_text_field][0]

            # Get chat template
            chat_template = getattr(processing_class, 'chat_template', '')
            if chat_template == '' and is_vlm:
                chat_template = getattr(tokenizer, 'chat_template', '')
            if chat_template is None:
                chat_template = ''

            # Get bos_token
            add_special_tokens = True
            bos_token_1 = getattr(processing_class, 'bos_token', None)
            bos_token_2 = getattr(tokenizer, 'bos_token', None)
            bos_token = bos_token_1 or bos_token_2

            if bos_token is not None:
                if test_text.startswith(bos_token) or bos_token in chat_template:
                    add_special_tokens = False
                    print("Unsloth: We found double BOS tokens - we shall remove one automatically.")
            pass

            # Create tokenize function
            def _tokenize(example):
                return tokenizer(
                    example[dataset_text_field] if not do_formatting_func else formatting_func(example),
                    truncation = do_truncation,
                    max_length = max_seq_length,
                    return_token_type_ids = _needs_token_type_ids,
                    add_special_tokens = add_special_tokens,
                )
            pass

            if not isinstance(dataset, IterableDataset):
                import multiprocessing as _mp
                if _mp.get_start_method() != 'fork':
                    dataset_num_proc = None
                else:
                    dataset_num_proc = getattr(args, "dataset_num_proc", None)
                    if dataset_num_proc is None:
                        import psutil
                        dataset_num_proc = min(max((psutil.cpu_count() or 1)+4, 2), 64)
                        memory_gb_left = psutil.virtual_memory().available / (1024**3)
                        if memory_gb_left <= 2:
                            dataset_num_proc = 1
                        else:
                            dataset_num_proc = min(dataset_num_proc, int(memory_gb_left))
                map_kwargs["num_proc"] = dataset_num_proc
            else:
                map_kwargs["batch_size"] = dataset._ex_iterable.batch_size

            if use_desc: map_kwargs["desc"] = f'Unsloth: Tokenizing ["{dataset_text_field}"]'
            import warnings as _w
            with _w.catch_warnings():
                _w.filterwarnings("ignore", message=".*couldn't be hashed properly.*")
                dataset = dataset.map(_tokenize, batched = True, remove_columns = list(column_names), **map_kwargs)

            # If VLM, switch the data collator since .pad is needed!
            if is_vlm and not hasattr(processing_class, "pad"):
                data_collator = DataCollatorForLanguageModeling(tokenizer, mlm = False)
                self.data_collator = data_collator
            pass
        pass
        if packing:
            # Try using the new packing, which works in TRL
            try:
                pack_dataset
            except:
                print("Unsloth: Hugging Face's packing is currently buggy - we're disabling it for now!")
                return dataset

            if max_seq_length == 0:
                raise ValueError("When packing is enabled, `max_seq_length` can't be `None`.")

            if use_desc: map_kwargs["desc"] = f"Unsloth: Packing {dataset_name} dataset"
            dataset = pack_dataset(
                dataset.select_columns(used_column_names),
                max_seq_length,
                getattr(args, "packing_strategy", "bfd"),
                map_kwargs,
            )
        pass
        return dataset

    def _set_signature_columns_if_needed(self):
        # If `self.args.remove_unused_columns` is True, non-signature columns are removed.
        # By default, this method sets `self._signature_columns` to the model's expected inputs (usually, "input_ids"
        # and "attention_mask"). When using `train_on_completion_only` we add a "completion_mask" column to the
        # dataset. So we need to override the default signature columns to include "completion_mask" as well.
        if self._signature_columns is None:
            if self._is_vision_dataset:
                self._signature_columns = ["messages", "prompt", "completion", "images", "input_ids", "labels", "attention_mask", "seq_lengths", "completion_mask", "assistant_masks"]
            else:
                self._signature_columns = ["input_ids", "labels", "seq_lengths", "completion_mask", "assistant_masks"]

    def compute_loss(
        self, model, inputs, return_outputs = False, num_items_in_batch = None
    ):
        outputs = super().compute_loss(
            model,
            inputs,
            return_outputs = return_outputs,
            num_items_in_batch = num_items_in_batch,
        )
        return outputs

    # Override training step to add the activation offloading context.
    def training_step(self, *args, **kwargs):
        with self.maybe_activation_offload_context:
            return super().training_step(*args, **kwargs)

    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
        mode = "train" if self.model.training else "eval"
        metrics = {key: sum(val) / len(val) for key, val in self._metrics[mode].items()}  # average the metrics

        # This method can be called both in training and evaluation. When called in evaluation, the keys in `logs`
        # start with "eval_". We need to add the prefix "eval_" to the keys in `metrics` to match the format.
        if mode == "eval":
            metrics = {f"eval_{key}": val for key, val in metrics.items()}

        logs.update(metrics)
        super().log(logs, start_time)
        self._metrics[mode].clear()

    # Ensure the model card is saved along with the checkpoint
    def _save_checkpoint(self, model, trial):
        if self.args.hub_model_id is None:
            model_name = Path(self.args.output_dir).name
        else:
            model_name = self.args.hub_model_id.split("/")[-1]
        self.create_model_card(model_name=model_name)
        super()._save_checkpoint(model, trial)

class UnslothSFTTrainer(_UnslothSFTTrainer):
    """
    Trainer for Supervised Fine-Tuning (SFT) method.

    This class is a wrapper around the [`~transformers.Trainer`] class and inherits all of its attributes and methods.

    Example:

    ```python
    from datasets import load_dataset
    from trl import SFTTrainer

    dataset = load_dataset("roneneldan/TinyStories", split="train[:1%]")

    trainer = SFTTrainer(model="Qwen/Qwen2-0.5B-Instruct", train_dataset=dataset)
    trainer.train()
    ```

    Args:
        model (`Union[str, PreTrainedModel]`):
            Model to be trained. Can be either:

            - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or
              a path to a *directory* containing model weights saved using
              [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is
              loaded using `<ModelArchitecture>.from_pretrained` (where `<ModelArchitecture>` is derived from the
              model config) with the keyword arguments in `args.model_init_kwargs`.
            - A [`~transformers.PreTrainedModel`] object.

            If you're training a model with an MoE architecture and want to include the load balancing/auxiliary
            loss as a part of the final loss, remember to set the `output_router_logits` config of the model to
            `True`.
        args ([`SFTConfig`], *optional*):
            Configuration for this trainer. If `None`, a default configuration is used.
        data_collator ([`~transformers.DataCollator`], *optional*):
            Function to use to form a batch from a list of elements of the processed `train_dataset` or
            `eval_dataset`. Will default to [`~trainer.sft_trainer.DataCollatorForLanguageModeling`] if the model
            is a language model and [`~trainer.sft_trainer.DataCollatorForVisionLanguageModeling`] if the model is
            a vision-language model.
        train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]):
            Dataset to use for training. SFT supports both [language modeling](#language-modeling) type and
            [prompt-completion](#prompt-completion) type. The format of the samples can be either:

            - [Standard](dataset_formats#standard): Each sample contains plain text.
            - [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g.,
              role and content).

            The trainer also supports processed datasets (tokenized) as long as they contain an `input_ids` field.
        eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Union[Dataset, IterableDataset]]`):
            Dataset to use for evaluation. It must meet the same requirements as `train_dataset`.
        processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.ProcessorMixin`], *optional*):
            Processing class used to process the data. If `None`, the processing class is loaded from the model's
            name with [`~transformers.AutoProcessor.from_pretrained`]. A padding token, `tokenizer.pad_token`, must
            be set. If the processing class has not set a padding token, `tokenizer.eos_token` will be used as the
            default.
        compute_loss_func (`Callable`, *optional*):
            A function that accepts the raw model outputs, labels, and the number of items in the entire
            accumulated batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the
            default [loss function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618)
            used by [`Trainer`].
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function that will be used to compute metrics at evaluation. Must take a
            [`~transformers.EvalPrediction`] and return a dictionary mapping metric names to metric values. When
            passing [`SFTConfig`] with `batch_eval_metrics` set to `True`, your `compute_metrics` function must
            take a boolean `compute_result` argument. This will be triggered after the last eval batch to signal
            that the function needs to calculate and return the global summary statistics rather than accumulating
            the batch-level statistics.
        callbacks (list of [`~transformers.TrainerCallback`], *optional*):
            List of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed [here](https://huggingface.co/docs/transformers/main_classes/callback).

            If you want to remove one of the default callbacks used, use the
            [`~transformers.Trainer.remove_callback`] method.
        optimizers (`tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of `AdamW` on
            your model and a scheduler given by [`~transformers.get_linear_schedule_with_warmup`] controlled by
            `args`.
        optimizer_cls_and_kwargs (`tuple[Type[torch.optim.Optimizer], Dict[str, Any]]`, *optional*):
            A tuple containing the optimizer class and keyword arguments to use. Overrides `optim` and `optim_args`
            in `args`. Incompatible with the `optimizers` argument.

            Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices
            before initializing the Trainer.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
            A function that preprocesses the logits right before caching them at each evaluation step. Must take
            two tensors, the logits and the labels, and return the logits once processed as desired. The
            modifications made by this function will be reflected in the predictions received by `compute_metrics`.

            Note that the labels (second parameter) will be `None` if the dataset does not have them.
        peft_config ([`~peft.PeftConfig`], *optional*):
            PEFT configuration used to wrap the model. If `None`, the model is not wrapped.
        formatting_func (`Callable`, *optional*):
            Formatting function applied to the dataset before tokenization. Applying the formatting function
            explicitly converts the dataset into a [language modeling](#language-modeling) type.
    """
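
    # A minimal usage sketch of the PEFT path (dataset and LoRA hyperparameters
    # are hypothetical, mirroring the docstring example above):
    #
    #     from datasets import load_dataset
    #     from peft import LoraConfig
    #
    #     dataset = load_dataset("roneneldan/TinyStories", split="train[:1%]")
    #     trainer = UnslothSFTTrainer(
    #         model = "Qwen/Qwen2-0.5B-Instruct",
    #         train_dataset = dataset,
    #         peft_config = LoraConfig(r = 16, lora_alpha = 16),
    #     )
    #     trainer.train()
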
    def __init__(
        self,
        model,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        compute_loss_func = None,
        compute_metrics = None,
        callbacks = None,
        optimizer_cls_and_kwargs = None,
        preprocess_logits_for_metrics = None,
        peft_config = None,
        formatting_func = None,
        **kwargs
    ):
        if args is None: args = UnslothSFTConfig()
        use_bf16 = getattr(args, 'bf16', False)
        if type(use_bf16) is not bool: use_bf16 = False
        use_fp16 = getattr(args, 'fp16', False)
        if type(use_fp16) is not bool: use_fp16 = False
        force_float32 = False
        full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1'
        if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'):
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().weight.dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            # Forced float32 training
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
            if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
            # args.mixed_precision is a new argument which needs to be set now
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Mixed precision training
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
            if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16'
            # args.mixed_precision is a new argument which needs to be set now
        elif mixed_precision_dtype == 'bfloat16':
            # Both False since bfloat16 full finetuning doesn't do any autocasting.
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
            if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'
            # args.mixed_precision is a new argument which needs to be set now
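
        # Summary of the precision selection above:
        #   force_float32                      -> fp16=False, bf16=False, no autocast
        #   neither flag set, env == 'float32' -> autocast matching the model dtype
        #                                         (fp16 for float16 models, else bf16)
        #   env == 'bfloat16'                  -> pure bf16 weights, no autocast
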
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        if type(fp16_full_eval) is not bool: fp16_full_eval = False
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        if type(bf16_full_eval) is not bool: bf16_full_eval = False
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
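
        # For example (hypothetical values): if evaluation is enabled with
        # per_device_train_batch_size = 2 and gradient_accumulation_steps = 4 but
        # no explicit eval settings, the block above picks
        # per_device_eval_batch_size = 2 and eval_accumulation_steps = 4 so that
        # evaluation stays within the training memory budget.
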
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
            elif args_max_seq_length is not None and model_max_seq_length is not None:
                if args_max_seq_length > model_max_seq_length:
                    print('Unsloth: You set `max_seq_length` as ' + str(args_max_seq_length) + ' but '
                          'the maximum the model supports is ' + str(model_max_seq_length) + '. We shall reduce it.')
                    args.max_seq_length = model_max_seq_length
        if 'max_length' not in locals() and not hasattr(args, 'max_length'):
            pass
        else:
            if hasattr(args, 'max_seq_length') and args.max_seq_length is not None and args.max_seq_length > 0:
                if hasattr(args, 'max_length'):
                    args.max_length = args.max_seq_length
                    max_length = args.max_length
            else:
                model_max_length = getattr(model, 'max_seq_length', None)
                if model_max_length is None: model_max_length = getattr(model, 'max_length', None)
                if model_max_length is not None:
                    args.max_length = model_max_length
                    max_length = args.max_length
                elif hasattr(args, 'max_length') and args.max_length is not None:
                    max_length = args.max_length
                    # If we are here, then we are in a weird case where max_length is set but max_seq_length is not set
                    setattr(model, 'max_seq_length', max_length)
                else:
                    print('Unsloth: We did not find `max_seq_length` or `max_length` in the model or args. We will set it to 1024.')
                    args.max_length = 1024
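
        # For example (hypothetical values): a model loaded with
        # max_seq_length = 2048 but args.max_seq_length = 4096 is reduced to
        # 2048, and args.max_length is synced to the same value so the TRL-style
        # and Unsloth-style length arguments agree.
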
        if model is not None and hasattr(model, 'for_training'):
            model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = TransformersDataCollatorForLanguageModeling(
                    __tokenizer,
                    mlm = False,
                    mlm_probability = 0.0,
                    pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
                )
            elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(
                    __tokenizer,
                    pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
                )
        else:
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(
                        __tokenizer.tokenizer,
                        pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
                    )
                else:
                    data_collator = TransformersDataCollatorForLanguageModeling(
                        __tokenizer.tokenizer,
                        mlm = False,
                        mlm_probability = 0.0,
                        pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),
                    )
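
        # The swaps above keep label handling consistent: DataCollatorForSeq2Seq
        # pads a precomputed 'labels' column, while
        # DataCollatorForLanguageModeling(mlm=False) derives labels from
        # input_ids, so each collator is matched to whether the dataset already
        # carries 'labels'.
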
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('sft_trainer', other_metrics)
        IGNORED_TOKENIZER_NAMES = os.environ.get('UNSLOTH_IGNORED_TOKENIZER_NAMES', '').split('\n')
        from unsloth_zoo.tokenizer_utils import fix_untrained_tokens
        from unsloth_zoo.training_utils import fix_zero_training_loss
        if 'tokenizer' not in locals(): tokenizer = processing_class
        fix_untrained_tokens(model, tokenizer, train_dataset, IGNORED_TOKENIZER_NAMES, eps = 1e-16)
        fix_zero_training_loss(model, tokenizer, train_dataset)
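
        # Roughly speaking (see unsloth_zoo for the exact behaviour):
        # `fix_untrained_tokens` re-initialises embedding rows that are still
        # effectively zero (below `eps`) for tokens the dataset actually uses,
        # and `fix_zero_training_loss` guards against datasets whose labels are
        # fully masked, which would otherwise train at a constant zero loss.
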
        # [TODO] Fix up DataParallel multiplying batch sizes
        # [TODO] DDP works, but DP seems to not work? [TODO]
        if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1:
            if getattr(args, "_n_gpu", 1) != 1:
                args._n_gpu = 1
        if "model" in locals() and hasattr(model, "for_training"):
            model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
        super().__init__(
            model = model,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            compute_loss_func = compute_loss_func,
            compute_metrics = compute_metrics,
            callbacks = callbacks,
            optimizer_cls_and_kwargs = optimizer_cls_and_kwargs,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            peft_config = peft_config,
            formatting_func = formatting_func,
            **kwargs,
        )
if "model" in locals() and hasattr(model, "for_inference"):
|
|
model.for_inference()
|
|
if hasattr(self, 'neftune_hook_handle'):
|
|
self.neftune_hook_handle.remove()
|
|
if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
|
|
if getattr(args, 'neftune_noise_alpha', None) is not None:
|
|
model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
|
|
pass
|
|
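
        # NEFTune normally works through a forward hook on the input embeddings;
        # the stale handle is removed above and `neftune_noise_alpha` is stored
        # directly on the embedding module, which Unsloth's own training path is
        # expected to read instead.
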
        if hasattr(self, 'accelerator'):
            scaler = self.accelerator.scaler
            current_model = model
            while hasattr(current_model, 'model'):
                current_model.accelerator_scaler = scaler
                current_model = current_model.model
            current_model.accelerator_scaler = scaler
        pass
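
        # For example (hypothetical wrapper chain): for a PEFT-wrapped model,
        # `model`, `model.model`, `model.model.model`, ... all receive the same
        # `accelerator_scaler` reference, so inner modules can reach the
        # GradScaler without a pointer back to the trainer.
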
        if hasattr(self, 'train'):
            self.train = MethodType(prepare_for_training_mode(self.__class__.train), self)
        pass
        if hasattr(self, 'llm') and self.llm is not None and hasattr(self.llm, 'get_tokenizer'):
            _vllm_tok = self.llm.get_tokenizer()
            _pc = getattr(self, 'processing_class', None) or getattr(self, 'tokenizer', None)
            if _vllm_tok is not None and _pc is not None and getattr(_pc, 'chat_template', None) is not None and getattr(_vllm_tok, 'chat_template', None) is None:
                _vllm_tok.chat_template = _pc.chat_template
        pass
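
        # This sync only fills in a *missing* chat template on the vLLM-side
        # tokenizer; an existing vLLM template is never overwritten, so prompts
        # rendered for generation match the template used in training.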

pass

if hasattr(logger, "addFilter"):
    import logging
    class HideLoggingMessage(logging.Filter):
        def __init__(self, text): super().__init__(); self.text = text
        def filter(self, x): return not (self.text in x.getMessage())
    pass
    logger.addFilter(HideLoggingMessage("`use_cache=True`"))
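
# The filter above silences log records containing "`use_cache=True`" (most
# commonly the transformers notice that use_cache is incompatible with gradient
# checkpointing), keeping training logs readable.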