LLM-FineTuning-Large-Language-Models

Falcon-7B_FineTuning_with_PEFT_and_QLORA.ipynb 
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Check out my [Twitter (@rohanpaul_ai)](https://twitter.com/rohanpaul_ai) for daily LLM bits"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Falcon fine-tuning on openassistant-guanaco\n",
    "\n",
    "# [Link to my YouTube video explaining this whole notebook](https://www.youtube.com/watch?v=fEzuBFi35J4&list=PLxqBkZuBynVTzqUQCQFgetR97y1X_1uCI&index=11&ab_channel=Rohan-Paul-AI)\n",
    "\n",
    "[![Imgur](https://imgur.com/DGiAiTI.png)](https://www.youtube.com/watch?v=fEzuBFi35J4&list=PLxqBkZuBynVTzqUQCQFgetR97y1X_1uCI&index=11&ab_channel=Rohan-Paul-AI)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from datasets import load_dataset\n",
    "from peft import LoraConfig\n",
    "from transformers import (\n",
    "    AutoModelForCausalLM,\n",
    "    AutoTokenizer,\n",
    "    BitsAndBytesConfig,\n",
    "    HfArgumentParser,\n",
    "    TrainingArguments,\n",
    ")\n",
    "from peft.tuners.lora import LoraLayer\n",
    "from trl import SFTTrainer\n",
    "from dataclasses import dataclass, field\n",
    "from typing import Optional\n",
    "\n",
    "@dataclass\n",
    "class ModelArguments:\n",
    "    \"\"\"\n",
    "    Arguments for creating and preparing the model.\n",
    "    \"\"\"\n",
    "    model_name: str = field(\n",
    "        default=\"tiiuae/falcon-7b\",\n",
    "        metadata={\"help\": \"The model name or path from the Hugging Face hub.\"},\n",
    "    )\n",
    "    use_4bit: bool = field(\n",
    "        default=True,\n",
    "        metadata={\"help\": \"Activate 4-bit precision base model loading.\"},\n",
    "    )\n",
    "    use_nested_quant: bool = field(\n",
    "        default=False,\n",
    "        metadata={\"help\": \"Activate nested (double) quantization for 4-bit base models.\"},\n",
    "    )\n",
    "    bnb_4bit_compute_dtype: str = field(\n",
    "        default=\"float16\",\n",
    "        metadata={\"help\": \"Compute dtype for 4-bit base models.\"},\n",
    "    )\n",
    "    bnb_4bit_quant_type: str = field(\n",
    "        default=\"nf4\",\n",
    "        metadata={\"help\": \"Quantization type: fp4 or nf4.\"},\n",
    "    )\n",
    "    lora_alpha: int = field(default=16)\n",
    "    lora_dropout: float = field(default=0.1)\n",
    "    lora_r: int = field(default=64)\n",
    "\n",
    "@dataclass\n",
    "class ScriptArguments:\n",
    "    \"\"\"\n",
    "    Arguments for model training and data handling.\n",
    "    \"\"\"\n",
    "    local_rank: int = field(default=-1, metadata={\"help\": \"Used for multi-GPU training.\"})\n",
    "    per_device_train_batch_size: int = field(default=4)\n",
    "    per_device_eval_batch_size: Optional[int] = field(default=1)\n",
    "    gradient_accumulation_steps: Optional[int] = field(default=4)\n",
    "    learning_rate: Optional[float] = field(default=2e-4)\n",
    "    max_grad_norm: Optional[float] = field(default=0.3)\n",
    "    weight_decay: Optional[float] = field(default=0.001)\n",
    "    max_seq_length: Optional[int] = field(default=512)\n",
    "    dataset_name: Optional[str] = field(\n",
    "        default=\"timdettmers/openassistant-guanaco\",\n",
    "        metadata={\"help\": \"The instruction dataset to use.\"},\n",
    "    )\n",
    "    num_train_epochs: Optional[int] = field(\n",
    "        default=1,\n",
    "        metadata={\"help\": \"The number of training epochs.\"},\n",
    "    )\n",
    "    fp16: Optional[bool] = field(\n",
    "        default=False,\n",
    "        metadata={\"help\": \"Enables fp16 training.\"},\n",
    "    )\n",
    "    bf16: Optional[bool] = field(\n",
    "        default=False,\n",
    "        metadata={\"help\": \"Enables bf16 training.\"},\n",
    "    )\n",
    "    packing: Optional[bool] = field(\n",
    "        default=False,\n",
    "        metadata={\"help\": \"Use packing when creating the dataset.\"},\n",
    "    )\n",
    "    gradient_checkpointing: Optional[bool] = field(\n",
    "        default=True,\n",
    "        metadata={\"help\": \"Enables gradient checkpointing.\"},\n",
    "    )\n",
    "    optim: Optional[str] = field(\n",
    "        default=\"paged_adamw_32bit\",\n",
    "        metadata={\"help\": \"The optimizer to use.\"},\n",
    "    )\n",
    "    lr_scheduler_type: str = field(\n",
    "        default=\"constant\",\n",
    "        metadata={\"help\": \"Learning rate schedule. Constant is slightly better than cosine and simplifies analysis.\"},\n",
    "    )\n",
    "    max_steps: int = field(default=10000, metadata={\"help\": \"How many optimizer update steps to take.\"})\n",
    "    warmup_ratio: float = field(default=0.03, metadata={\"help\": \"Fraction of steps to do a warmup for.\"})\n",
    "    group_by_length: bool = field(\n",
    "        default=True,\n",
    "        metadata={\n",
    "            \"help\": \"Group sequences into batches of the same length. Saves memory and speeds up training considerably.\"\n",
    "        },\n",
    "    )\n",
    "    save_steps: int = field(default=10, metadata={\"help\": \"Save a checkpoint every X update steps.\"})\n",
    "    logging_steps: int = field(default=10, metadata={\"help\": \"Log every X update steps.\"})"
   ]
  },
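  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick note on configuration (a sketch added for convenience, not part of the original script): when running interactively, the two dataclasses above can also be instantiated directly instead of going through `HfArgumentParser`, which makes it easy to override a few defaults, e.g. a shorter `max_steps` for a smoke test. The values below are illustrative only."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative overrides of the defaults defined above (hypothetical values).\n",
    "example_model_args = ModelArguments(bnb_4bit_compute_dtype=\"bfloat16\")\n",
    "example_script_args = ScriptArguments(max_steps=100, logging_steps=5)\n",
    "\n",
    "print(example_model_args.bnb_4bit_compute_dtype, example_script_args.max_steps)"
   ]
  },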
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_model_peftconfig_tokenizer(args: ModelArguments):\n",
    "    \"\"\"\n",
    "    Create the model, tokenizer, and peft_config based on provided arguments.\n",
    "    \"\"\"\n",
    "    compute_dtype = getattr(torch, args.bnb_4bit_compute_dtype)\n",
    "\n",
    "    # Configure BitsAndBytes for model quantization\n",
    "    bnb_config = BitsAndBytesConfig(\n",
    "        load_in_4bit=args.use_4bit,\n",
    "        bnb_4bit_quant_type=args.bnb_4bit_quant_type,\n",
    "        bnb_4bit_compute_dtype=compute_dtype,\n",
    "        bnb_4bit_use_double_quant=args.use_nested_quant,\n",
    "    )\n",
    "\n",
    "    # Alert for bfloat16 acceleration support\n",
    "    if compute_dtype == torch.float16 and args.use_4bit:\n",
    "        major, _ = torch.cuda.get_device_capability()\n",
    "        if major >= 8:\n",
    "            print(\"=\" * 80)\n",
    "            print(\"Your GPU supports bfloat16, you can accelerate training with --bf16\")\n",
    "            print(\"=\" * 80)\n",
    "\n",
    "    # Load the model with quantization configuration\n",
    "    model = AutoModelForCausalLM.from_pretrained(\n",
    "        args.model_name, quantization_config=bnb_config, device_map={\"\": 0}, trust_remote_code=True\n",
    "    )\n",
    "\n",
    "    # Define Lora Configuration\n",
    "    peft_config = LoraConfig(\n",
    "        lora_alpha=args.lora_alpha,\n",
    "        lora_dropout=args.lora_dropout,\n",
    "        r=args.lora_r,\n",
    "        bias=\"none\",\n",
    "        task_type=\"CAUSAL_LM\",\n",
    "        target_modules=[\n",
    "            \"query_key_value\",\n",
    "            \"dense\",\n",
    "            \"dense_h_to_4h\",\n",
    "            \"dense_4h_to_h\",\n",
    "        ],\n",
    "    )\n",
    "\n",
    "    # Load the tokenizer and set padding token\n",
    "    tokenizer = AutoTokenizer.from_pretrained(args.model_name, trust_remote_code=True)\n",
    "\n",
    "    # Models like Falcon-7B and GPT-2 do not define an official pad token,\n",
    "    # so reuse the EOS token for padding.\n",
    "    tokenizer.pad_token = tokenizer.eos_token\n",
    "\n",
    "    return model, peft_config, tokenizer"
   ]
  },
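  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optional sanity check (a minimal sketch added here, not part of the original flow): rebuild the same `BitsAndBytesConfig` and `LoraConfig` from a default `ModelArguments` instance and print them, so the quantization and LoRA settings can be inspected without downloading the 7B checkpoint."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assumes the imports and ModelArguments from the cells above have been run.\n",
    "preview_args = ModelArguments()\n",
    "\n",
    "preview_bnb_config = BitsAndBytesConfig(\n",
    "    load_in_4bit=preview_args.use_4bit,\n",
    "    bnb_4bit_quant_type=preview_args.bnb_4bit_quant_type,\n",
    "    bnb_4bit_compute_dtype=getattr(torch, preview_args.bnb_4bit_compute_dtype),\n",
    "    bnb_4bit_use_double_quant=preview_args.use_nested_quant,\n",
    ")\n",
    "preview_lora_config = LoraConfig(\n",
    "    lora_alpha=preview_args.lora_alpha,\n",
    "    lora_dropout=preview_args.lora_dropout,\n",
    "    r=preview_args.lora_r,\n",
    "    bias=\"none\",\n",
    "    task_type=\"CAUSAL_LM\",\n",
    ")\n",
    "\n",
    "print(preview_bnb_config)\n",
    "print(preview_lora_config)"
   ]
  },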
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_arguments():\n",
    "    \"\"\"\n",
    "    Parse Model and Script Arguments.\n",
    "    Returns:\n",
    "        ModelArguments, ScriptArguments\n",
    "    \"\"\"\n",
    "    parser = HfArgumentParser((ModelArguments, ScriptArguments))\n",
    "    # Parse an empty argument list so the defaults are used when running inside\n",
    "    # a notebook (Jupyter injects its own flags into sys.argv).\n",
    "    return parser.parse_args_into_dataclasses(args=[])\n",
    "\n",
    "def load_training_data(dataset_name: str):\n",
    "    \"\"\"\n",
    "    Load dataset for training.\n",
    "    Args:\n",
    "        dataset_name (str): Name or path of the dataset.\n",
    "    Returns:\n",
    "        Dataset object\n",
    "    \"\"\"\n",
    "    return load_dataset(dataset_name, split=\"train\")\n",
    "\n",
    "def get_training_args(script_args: ScriptArguments):\n",
    "    \"\"\"\n",
    "    Get Training Arguments from ScriptArguments.\n",
    "    Args:\n",
    "        script_args (ScriptArguments): Parsed ScriptArguments.\n",
    "    Returns:\n",
    "        TrainingArguments\n",
    "    \"\"\"\n",
    "    return TrainingArguments(\n",
    "        output_dir=\"./results\",\n",
    "        per_device_train_batch_size=script_args.per_device_train_batch_size,\n",
    "        gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n",
    "        optim=script_args.optim,\n",
    "        save_steps=script_args.save_steps,\n",
    "        logging_steps=script_args.logging_steps,\n",
    "        learning_rate=script_args.learning_rate,\n",
    "        weight_decay=script_args.weight_decay,\n",
    "        fp16=script_args.fp16,\n",
    "        bf16=script_args.bf16,\n",
    "        max_grad_norm=script_args.max_grad_norm,\n",
    "        max_steps=script_args.max_steps,\n",
    "        warmup_ratio=script_args.warmup_ratio,\n",
    "        group_by_length=script_args.group_by_length,\n",
    "        lr_scheduler_type=script_args.lr_scheduler_type,\n",
    "        gradient_checkpointing=script_args.gradient_checkpointing,\n",
    "    )\n",
    "\n",
    "def adjust_model_for_bf16(trainer, bf16: bool):\n",
    "    \"\"\"\n",
    "    Adjust Model Layers for bf16.\n",
    "    Args:\n",
    "        trainer (SFTTrainer): Initialized SFTTrainer object.\n",
    "        bf16 (bool): Flag to indicate usage of bf16.\n",
    "    \"\"\"\n",
    "    for name, module in trainer.model.named_modules():\n",
    "        if isinstance(module, LoraLayer) and bf16:\n",
    "            module = module.to(torch.bfloat16)\n",
    "        if \"norm\" in name:\n",
    "            module = module.to(torch.float32)\n",
    "        if \"lm_head\" in name or \"embed_tokens\" in name:\n",
    "            if hasattr(module, \"weight\") and bf16 and module.weight.dtype == torch.float32:\n",
    "                module = module.to(torch.bfloat16)\n",
    "\n",
    "# Main Execution:\n",
    "\n",
    "model_args, script_args = parse_arguments()\n",
    "\n",
    "model, peft_config, tokenizer = get_model_peftconfig_tokenizer(model_args)\n",
    "model.config.use_cache = False\n",
    "\n",
    "dataset = load_training_data(script_args.dataset_name)\n",
    "\n",
    "training_arguments = get_training_args(script_args)\n",
    "\n",
    "trainer = SFTTrainer(\n",
    "    model=model,\n",
    "    train_dataset=dataset,\n",
    "    peft_config=peft_config,\n",
    "    dataset_text_field=\"text\",\n",
    "    max_seq_length=script_args.max_seq_length,\n",
    "    tokenizer=tokenizer,\n",
    "    args=training_arguments,\n",
    "    packing=script_args.packing,\n",
    ")\n",
    "\n",
    "adjust_model_for_bf16(trainer, script_args.bf16)\n",
    "\n",
    "# Train the Model\n",
    "trainer.train()"
   ]
  },
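  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "After training, the LoRA adapter can be persisted (a minimal sketch; the output path `./results/final_adapter` is an assumption, not part of the original notebook). `trainer.model` is the PEFT-wrapped model, so `save_pretrained` stores only the small adapter weights rather than the full 7B base model, and saving the tokenizer alongside keeps the padding setup reproducible."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical output directory - adjust as needed.\n",
    "adapter_dir = \"./results/final_adapter\"\n",
    "\n",
    "# Saves only the LoRA adapter weights and config, not the quantized base model.\n",
    "trainer.model.save_pretrained(adapter_dir)\n",
    "tokenizer.save_pretrained(adapter_dir)"
   ]
  }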
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py10env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
