LLM-FineTuning-Large-Language-Models

import os
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GPTQConfig,
    HfArgumentParser,
    TrainingArguments,
)
from trl import SFTTrainer

# Fine-tunes a Llama 2 model on the Guanaco dataset using GPTQ quantization and PEFT (LoRA).

@dataclass
class ScriptArguments:
    local_rank: Optional[int] = field(default=-1, metadata={"help": "Used for multi-GPU training."})
    per_device_train_batch_size: Optional[int] = field(default=1, metadata={"help": "The training batch size per GPU. Increase for better speed."})
    per_device_eval_batch_size: Optional[int] = field(default=1)
    gradient_accumulation_steps: Optional[int] = field(default=16, metadata={"help": "How many batches to accumulate gradients over before performing an optimizer step."})
    learning_rate: Optional[float] = field(default=0.0002, metadata={"help": "The learning rate."})
    max_grad_norm: Optional[float] = field(default=0.3, metadata={"help": "Gradient clipping max norm. This value works well for all models tested."})
    weight_decay: Optional[float] = field(default=0.0, metadata={"help": "The L2 weight decay rate of AdamW."})  # use LoRA dropout instead for regularization if needed
    lora_alpha: Optional[int] = field(default=16)
    lora_dropout: Optional[float] = field(default=0.1)
    lora_r: Optional[int] = field(default=64, metadata={"help": "LoRA rank (r) dimension."})
    max_seq_length: Optional[int] = field(default=512)

    model_name: Optional[str] = field(
        default="TheBloke/Llama-2-7B-GPTQ",
        metadata={
            "help": "The model to fine-tune from the Hugging Face Hub."
        }
    )

    dataset_name: Optional[str] = field(
        default="timdettmers/openassistant-guanaco",
        metadata={"help": "The dataset to use."},
    )
    num_train_epochs: Optional[int] = field(
        default=1,
        metadata={"help": "The number of training epochs."},
    )
    fp16: Optional[bool] = field(
        default=False,
        metadata={"help": "Enables fp16 training."},
    )
    bf16: Optional[bool] = field(
        default=False,
        metadata={"help": "Enables bf16 training."},
    )
    packing: Optional[bool] = field(
        default=False,
        metadata={"help": "Use packing when building the training dataset."},
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True,
        metadata={"help": "Enables gradient checkpointing."},
    )
    optim: Optional[str] = field(
        default="adamw_hf",
        metadata={"help": "The optimizer to use."},
    )
    lr_scheduler_type: str = field(
        default="constant",
        metadata={"help": "Learning rate schedule. Constant is slightly better than cosine and is easier to analyze."},
    )
    max_steps: int = field(default=10000, metadata={"help": "How many optimizer update steps to take."})
    warmup_ratio: float = field(default=0.03, metadata={"help": "Fraction of steps to warm up for."})
    group_by_length: bool = field(
        default=True,
        metadata={
            "help": "Group sequences into batches of the same length. Saves memory and speeds up training considerably."
        },
    )
    save_steps: int = field(default=10, metadata={"help": "Save a checkpoint every X update steps."})
    logging_steps: int = field(default=10, metadata={"help": "Log every X update steps."})
    merge_and_push: Optional[bool] = field(
        default=False,
        metadata={"help": "Merge and push weights after training."},
    )
    output_dir: str = field(
        default="./results",
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
    )

###########################################################

parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
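
# Example invocation (illustrative; the script filename below is hypothetical -- HfArgumentParser
# exposes every ScriptArguments field above as a corresponding --flag):
#
#   python finetune_llama2_gptq.py \
#       --model_name TheBloke/Llama-2-7B-GPTQ \
#       --dataset_name timdettmers/openassistant-guanaco \
#       --max_steps 1000 \
#       --merge_and_push True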


def prepare_lora_model(args):
    """Load the GPTQ-quantized base model, build the LoRA config, and prepare the tokenizer."""
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("=" * 80)
        print("Your GPU supports bfloat16; you can accelerate training with the argument --bf16")
        print("=" * 80)

    # Load the entire model on GPU 0.
    device_map = {"": 0}
    # Switch to `device_map = "auto"` for multi-GPU setups.
    # device_map = "auto"

    # The exllama kernels must be disabled: they are not stable for training.
    model = AutoModelForCausalLM.from_pretrained(
        args.model_name,
        device_map=device_map,
        quantization_config=GPTQConfig(bits=4, use_exllama=False),
    )

    # See https://github.com/huggingface/transformers/pull/24906: for fine-tuning Llama 2
    # models that have config.pretraining_tp > 1, set it back to 1.
    model.config.pretraining_tp = 1
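
    # NOTE: target_modules is left unset below, so PEFT falls back to its built-in defaults for
    # the Llama architecture (typically the q_proj / v_proj attention projections); list the
    # modules explicitly if you want LoRA applied more broadly.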
    lora_config = LoraConfig(
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        r=args.lora_r,
        bias="none",
        task_type="CAUSAL_LM",
    )

    tokenizer = AutoTokenizer.from_pretrained(args.model_name, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token

    return model, lora_config, tokenizer


training_arguments = TrainingArguments(
    output_dir=script_args.output_dir,
    per_device_train_batch_size=script_args.per_device_train_batch_size,
    gradient_accumulation_steps=script_args.gradient_accumulation_steps,
    optim=script_args.optim,
    save_steps=script_args.save_steps,
    logging_steps=script_args.logging_steps,
    learning_rate=script_args.learning_rate,
    fp16=script_args.fp16,
    bf16=script_args.bf16,
    max_grad_norm=script_args.max_grad_norm,
    max_steps=script_args.max_steps,
    warmup_ratio=script_args.warmup_ratio,
    group_by_length=script_args.group_by_length,
    lr_scheduler_type=script_args.lr_scheduler_type,
)
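
# NOTE: because max_steps is positive it overrides num_train_epochs in the Trainer;
# ScriptArguments fields that are not forwarded here (e.g. num_train_epochs, weight_decay,
# gradient_checkpointing) keep the TrainingArguments defaults.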

####################################################
model, lora_config, tokenizer = prepare_lora_model(script_args)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, lora_config)
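
# Optional sanity check: with LoRA attached, only a small fraction of the parameters should
# be trainable.
# model.print_trainable_parameters()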

model.config.use_cache = False
dataset = load_dataset(script_args.dataset_name, split="train")

# Right-padding avoids an overflow issue seen with fp16 training.
tokenizer.padding_side = "right"
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=script_args.max_seq_length,
    tokenizer=tokenizer,
    args=training_arguments,
    packing=script_args.packing,
)
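
# The "text" column of timdettmers/openassistant-guanaco holds the full prompt/response
# conversations, which is what dataset_text_field="text" refers to above.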

trainer.train()

if script_args.merge_and_push:
    output_dir = os.path.join(script_args.output_dir, "final_checkpoints")
    trainer.model.save_pretrained(output_dir)

    # Free memory for merging weights
    del model
    torch.cuda.empty_cache()
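
    # NOTE: despite the flag name, the script ends after saving the LoRA adapter above;
    # merging LoRA weights directly into GPTQ-quantized layers is generally not supported by
    # PEFT. A hypothetical follow-up (not part of the original script) would be to push the
    # saved adapter to the Hub:
    # from peft import AutoPeftModelForCausalLM
    # model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto")
    # model.push_to_hub("your-username/llama2-7b-guanaco-gptq-lora")  # hypothetical repo id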
