{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Finetune codellama-34B with QLoRA\n",
    "\n",
    "### Checkout my [Twitter(@rohanpaul_ai)](https://twitter.com/rohanpaul_ai) for daily LLM bits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig\n",
    "from datasets import load_dataset\n",
    "from trl import SFTTrainer\n",
    "from peft import AutoPeftModelForCausalLM, LoraConfig, get_peft_model, prepare_model_for_kbit_training\n",
    "import bitsandbytes as bnb\n",
    "\n",
    "def find_all_linear_names(model):\n",
    "    cls = bnb.nn.Linear4bit\n",
    "    lora_module_names = set()\n",
    "    for name, module in model.named_modules():\n",
    "        if isinstance(module, cls):\n",
    "            names = name.split('.')\n",
    "            lora_module_names.add(names[0] if len(names) == 1 else names[-1])\n",
    "\n",
    "    return list(lora_module_names)\n",
    "\n",
    "\n",
    "def print_trainable_parameters(model):\n",
    "  \"\"\"\n",
    "  Prints the number of trainable parameters in the model.\n",
    "  \"\"\"\n",
    "  trainable_params = 0\n",
    "  all_param = 0\n",
    "  for _, param in model.named_parameters():\n",
    "    all_param += param.numel()\n",
    "    if param.requires_grad:\n",
    "      trainable_params += param.numel()\n",
    "  print(\n",
    "      f\"trainable params: {trainable_params} || all params: {all_param} || trainables%: {100 * trainable_params / all_param}\"\n",
    "  )\n",
    "\n",
    "def setup_environment():\n",
    "    \"\"\" Sets up necessary imports and configurations. \"\"\"\n",
    "    output_dir = \"./results\"\n",
    "    model_name = \"codellama/CodeLlama-34b-hf\"\n",
    "    return output_dir, model_name\n",
    "\n",
    "def load_and_prepare_dataset():\n",
    "    \"\"\" Loads the dataset and prepares it for training. \"\"\"\n",
    "    return load_dataset('timdettmers/openassistant-guanaco', split=\"train\")\n",
    "\n",
    "def initialize_tokenizer(model_name):\n",
    "    \"\"\" Initializes and configures the tokenizer. \"\"\"\n",
    "    tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "    tokenizer.pad_token = tokenizer.eos_token\n",
    "    tokenizer.padding_side = \"right\"\n",
    "    return tokenizer\n",
    "\n",
    "def create_base_model(model_name):\n",
    "    \"\"\" Creates and configures the base model with low-bit quantization. \"\"\"\n",
    "    bnb_config = BitsAndBytesConfig(\n",
    "        load_in_4bit=True,\n",
    "        bnb_4bit_quant_type=\"nf4\",\n",
    "        bnb_4bit_compute_dtype=torch.bfloat16,\n",
    "        bnb_4bit_use_double_quant=True,\n",
    "    )\n",
    "    base_model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, quantization_config=bnb_config)\n",
    "    base_model.config.use_cache = False\n",
    "    return prepare_model_for_kbit_training(base_model)\n",
    "\n",
    "def apply_peft_to_model(base_model):\n",
81
    "    \"\"\" Applies prompt engineering for fine-tuning (PeFT) using LoRA to the base model. \"\"\"\n",
82
    "    peft_config = LoraConfig(\n",
83
    "        r=32,\n",
84
    "        lora_alpha=16,\n",
85
    "        target_modules=find_all_linear_names(base_model),\n",
86
    "        lora_dropout=0.05,\n",
87
    "        bias=\"none\",\n",
88
    "        task_type=\"CAUSAL_LM\",\n",
89
    "    )\n",
90
    "    return get_peft_model(base_model, peft_config)\n",
91
    "\n",
92
    "def setup_training(base_model, dataset, tokenizer):\n",
93
    "    \"\"\" Configures training arguments and initializes the trainer. \"\"\"\n",
94
    "    training_args = TrainingArguments(\n",
95
    "        per_device_train_batch_size=1,\n",
96
    "        gradient_accumulation_steps=1,\n",
97
    "        gradient_checkpointing=True,\n",
98
    "        max_grad_norm=0.3,\n",
99
    "        num_train_epochs=3,\n",
100
    "        learning_rate=1e-4,\n",
101
    "        bf16=True,\n",
102
    "        save_total_limit=3,\n",
103
    "        logging_steps=300,\n",
104
    "        output_dir=output_dir,\n",
105
    "        optim=\"paged_adamw_32bit\",\n",
106
    "        lr_scheduler_type=\"constant\",\n",
107
    "        warmup_ratio=0.05,\n",
108
    "    )\n",
109
    "    return SFTTrainer(\n",
110
    "        base_model,\n",
111
    "        train_dataset=dataset,\n",
112
    "        dataset_text_field=\"text\",\n",
113
    "        tokenizer=tokenizer,\n",
114
    "        max_seq_length=512,\n",
115
    "        args=training_args\n",
116
    "    )\n",
117
    "\n",
118
    "def train_and_save_model(trainer, output_dir):\n",
119
    "    \"\"\" Handles the training process and saves the model. \"\"\"\n",
120
    "    trainer.train()\n",
121
    "    trainer.save_model(output_dir)\n",
122
    "    final_output_dir = os.path.join(output_dir, \"final_checkpoint\")\n",
123
    "    trainer.model.save_pretrained(final_output_dir)\n",
124
    "    tokenizer.save_pretrained(final_output_dir)\n",
125
    "\n"
126
   ]
127
  },
128
  {
129
   "cell_type": "code",
130
   "execution_count": null,
131
   "metadata": {},
132
   "outputs": [],
133
   "source": [
134
    "output_dir, model_name = setup_environment()\n",
135
    "\n",
136
    "dataset = load_and_prepare_dataset()\n",
137
    "\n",
138
    "tokenizer = initialize_tokenizer(model_name)\n",
139
    "\n",
140
    "base_model = create_base_model(model_name)\n",
141
    "\n",
142
    "base_model = apply_peft_to_model(base_model)\n",
143
    "\n",
144
    "print_trainable_parameters(base_model)\n",
145
    "\n",
146
    "trainer = setup_training(base_model, dataset, tokenizer)\n",
147
    "\n",
148
    "train_and_save_model(trainer, output_dir)"
149
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "------------------\n",
    "\n",
    "### Explanations of the Key terms of the BitsAndBytesConfig\n",
158
    "\n",
159
    "\n",
160
    "```py\n",
161
    "bnb_config = BitsAndBytesConfig(\n",
162
    "        load_in_4bit=True,\n",
163
    "        bnb_4bit_quant_type=\"nf4\",\n",
164
    "        bnb_4bit_compute_dtype=torch.bfloat16,\n",
165
    "        bnb_4bit_use_double_quant=True,\n",
166
    "    )\n",
167
    "\n",
168
    "```\n",
169
    "\n",
170
    "👉 **`load_in_4bit` parameter** is for loading the model in 4 bits precision\n",
171
    "\n",
172
    "This means that the weights and activations of the model are represented using 4 bits instead of the usual 32 bits. This can significantly reduce the memory footprint of the model. 4-bit precision models can use up to 16x less memory than full precision models and can be up to 2x faster than full precision models.\n",
173
    "\n",
174
    "However, if you need the highest possible accuracy, then you may want to use full precision models.\n",
175
    "\n",
    "--------------\n",
    "\n",
    "👉 `bnb_4bit_use_double_quant=True` : This parameter enables double quantization (also called nested quantization), which applies a second quantization pass to the quantization constants produced by the first one. It saves roughly an additional 0.4 bits per parameter.\n",
    "\n",
    "--------------\n",
    "\n",
    "👉 `use_nested_quant` : A flag that determines whether nested (or double) quantization is applied, i.e. the same setting as `bnb_4bit_use_double_quant` above.\n",
    "\n",
184
    "--------------\n",
185
    "\n",
186
    "👉 `bnb_4bit_quant_type=\"nf4\"` : This parameter specifies the type of 4-bit quantization to be used. In this case, \"nf4\" refers to normalized float 4, which is the default quantization type.\n",
187
    "\n",
    "--------------\n",
    "\n",
    "👉 `bnb_4bit_compute_dtype=torch.bfloat16` : This parameter determines the data type used during computation. Here it selects bfloat16 for faster training; the compute dtype can also be set to float16, float32, etc.\n",
    "\n",
    "This configuration is needed because, while 4-bit bitsandbytes stores weights in 4 bits, the computation still happens in 16- or 32-bit, and any of these dtypes can be chosen (float16, bfloat16, float32, etc.).\n",
    "\n",
    "Matrix multiplication and training will be faster with a 16-bit compute dtype (the default value for this parameter is torch.float32).\n",
    "\n",
    "--------------\n",
    "\n",
    "Does 4-bit floating-point quantization have any hardware requirements?\n",
    "\n",
    "Note that this method is only compatible with GPUs, so it is not possible to quantize models in 4-bit on a CPU. Among GPUs there is no particular hardware requirement: any CUDA GPU can be used for 4-bit quantization as long as CUDA >= 11.2 is installed. Also keep in mind that the computation is not done in 4-bit: only the weights are stored in the compressed format, and the computation still happens in the desired or native dtype.\n",
    "\n",
    "=====================\n",
    "\n",
    "FP8 and FP4 stand for Floating Point 8-bit and 4-bit precision, respectively. They are part of the minifloats family of floating point values (among other precisions, the minifloats family also includes bfloat16 and float16).\n",
    "\n",
    "----------------"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Further possibilities for improving / re-organizing the code\n",
    "\n",
    "### Performance and Memory Optimization\n",
    "\n",
    "1. **Batch Size and Gradient Accumulation**: we're using a batch size of 1 with gradient accumulation. If hardware permits, increasing the batch size can improve training efficiency. Balancing between batch size and gradient accumulation steps is key for optimal GPU utilization.\n",
218
    "\n",
219
    "3. **Model Parallelism**: If we're working with very large models and have access to multiple GPUs, implementing model parallelism can be beneficial. This involves splitting the model across different GPUs.\n",
220
    "\n",
221
    "4. **Data Loading Optimization**: Optimizing data loading can have a significant impact on training speed. Consider using techniques like prefetching, multi-threaded data loading, and ensuring your dataset is stored in a fast-access storage medium.\n",
222
    "\n",
    "### Hyperparameter Tuning\n",
    "\n",
    "1. **Learning Rate Scheduler**: we're using a constant learning rate. Experimenting with different learning rate schedules like linear decay or cyclical learning rates might yield better results.\n",
226
    "\n",
227
    "2. **Optimizer Tweaks**: While we are using `paged_adamw_32bit`, exploring other optimizers like `AdamW` or `SGD` with momentum could offer different performance characteristics.\n",
228
    "\n",
    "### Advanced Techniques\n",
    "\n",
    "1. **Regularization Techniques**: Implementing regularization methods like dropout, weight decay, or more advanced techniques like data augmentation (if applicable to your task) can prevent overfitting.\n",
    "\n",
    "2. **Evaluation Strategy**: Ensure a robust evaluation strategy is in place, including validation during training and possibly more nuanced evaluation metrics tailored to your specific application.\n",
    "\n",
    "3. **Experiment Tracking**: If not already in place, integrating an experiment tracking system like Weights & Biases or TensorBoard can be very helpful for monitoring training progress and comparing different training runs. A combined sketch covering these points follows below.\n",
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}