Ingesting_New_Corpus.ipynb
1
{
2
 "cells": [
3
  {
4
   "cell_type": "markdown",
5
   "id": "eb799929",
6
   "metadata": {},
7
   "source": [
8
    "## GPT for style completion"
9
   ]
10
  },
11
  {
12
   "cell_type": "code",
13
   "execution_count": 5,
14
   "id": "65cd4e7d",
15
   "metadata": {},
16
   "outputs": [],
17
   "source": [
18
    "from transformers import GPT2Tokenizer, TextDataset, DataCollatorForLanguageModeling, GPT2LMHeadModel, pipeline, \\\n",
19
    "                         Trainer, TrainingArguments\n"
20
   ]
21
  },
22
  {
23
   "cell_type": "code",
24
   "execution_count": 2,
25
   "id": "09b74db5",
26
   "metadata": {
27
    "scrolled": true
28
   },
29
   "outputs": [],
30
   "source": [
31
    "tokenizer = GPT2Tokenizer.from_pretrained('gpt2')  # load up a standard gpt2 model\n",
32
    "\n",
33
    "tokenizer.pad_token = tokenizer.eos_token  \n",
34
    "# set our pad token to be the eos token. This lets gpt know how to fill space\n"
35
   ]
36
  },
37
  {
38
   "cell_type": "code",
39
   "execution_count": 3,
40
   "id": "47ea384e",
41
   "metadata": {},
42
   "outputs": [
43
    {
44
     "name": "stderr",
45
     "output_type": "stream",
46
     "text": [
47
      "/Users/sinanozdemir/opt/anaconda3/lib/python3.9/site-packages/transformers/data/datasets/language_modeling.py:54: FutureWarning: This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets library. You can have a look at this example script for pointers: https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py\n",
48
      "  warnings.warn(\n"
49
     ]
50
    }
51
   ],
52
   "source": [
53
    "# load up our data into a dataset\n",
54
    "pds_data = TextDataset(\n",
55
    "    tokenizer=tokenizer,\n",
56
    "    file_path='../data/PDS2.txt',  # Principles of Data Science - Sinan Ozdemir\n",
57
    "    block_size=64  # length of each chunk of text to use as a datapoint\n",
58
    ")"
59
   ]
60
  },
61
  {
62
   "cell_type": "code",
63
   "execution_count": 4,
64
   "id": "fe52e42c",
65
   "metadata": {},
66
   "outputs": [
67
    {
68
     "data": {
69
      "text/plain": [
70
       "(tensor([  200, 47231,  6418,   286,  6060,  5800,   198, 12211,  5061,   198,\n",
71
       "           198,    32, 31516,   338,  5698,   284, 13905,  7605,   290,  4583,\n",
72
       "           284,   198, 11249,   304,   171,   105,   222, 13967,  1366,    12,\n",
73
       "         15808,  5479,   198,   198, 46200,   272, 18024,  9536,   343,   198,\n",
74
       "         16012,   346, 31250,   671,   198,   198,  3483, 29138,  2751, 33363,\n",
75
       "           532,   337,  5883,  4339,    40,   628,   200, 47231,  6418,   286,\n",
76
       "          6060,  5800,   198, 12211]),\n",
77
       " torch.Size([64]))"
78
      ]
79
     },
80
     "execution_count": 4,
81
     "metadata": {},
82
     "output_type": "execute_result"
83
    }
84
   ],
85
   "source": [
86
    "pds_data[0], pds_data[0].shape  # inspect the first point"
87
   ]
88
  },
89
  {
90
   "cell_type": "code",
91
   "execution_count": 255,
92
   "id": "32b488fa",
93
   "metadata": {},
94
   "outputs": [
95
    {
96
     "name": "stdout",
97
     "output_type": "stream",
98
     "text": [
99
      "\f",
100
      "Principles of Data Science\n",
101
      "Second Edition\n",
102
      "\n",
103
      "A beginner's guide to statistical techniques and theory to\n",
104
      "build effective data-driven applications\n",
105
      "\n",
106
      "Sinan Ozdemir\n",
107
      "Sunil Kakade\n",
108
      "\n",
109
      "BIRMINGHAM - MUMBAI\n",
110
      "\n",
111
      "\f",
112
      "Principles of Data Science\n",
113
      "Second\n"
114
     ]
115
    }
116
   ],
117
   "source": [
118
    "print(tokenizer.decode(pds_data[0]))"
119
   ]
120
  },
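  {
   "cell_type": "markdown",
   "id": "a1f3d9c0",
   "metadata": {},
   "source": [
    "Optional sketch (added, not part of the original notebook): the `FutureWarning` above notes that `TextDataset` will be removed and points to the 🤗 Datasets library. Something along these lines should produce equivalent fixed-size blocks of token ids; the helper names `tokenize` and `group_into_blocks` are illustrative, not library functions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f3d9c1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# hedged alternative to TextDataset using the 🤗 Datasets library (unexecuted sketch)\n",
    "from datasets import load_dataset\n",
    "\n",
    "raw = load_dataset('text', data_files={'train': '../data/PDS2.txt'})\n",
    "\n",
    "def tokenize(batch):\n",
    "    return tokenizer(batch['text'])\n",
    "\n",
    "def group_into_blocks(batch, block_size=64):\n",
    "    # concatenate all token ids, then split into fixed-size blocks (dropping the remainder)\n",
    "    ids = sum(batch['input_ids'], [])\n",
    "    total = (len(ids) // block_size) * block_size\n",
    "    return {'input_ids': [ids[i:i + block_size] for i in range(0, total, block_size)]}\n",
    "\n",
    "tokenized = raw['train'].map(tokenize, batched=True, remove_columns=['text'])\n",
    "blocks = tokenized.map(group_into_blocks, batched=True, remove_columns=tokenized.column_names)"
   ]
  },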
121
  {
122
   "cell_type": "code",
123
   "execution_count": 6,
124
   "id": "6914d0d4",
125
   "metadata": {},
126
   "outputs": [],
127
   "source": [
128
    "data_collator = DataCollatorForLanguageModeling(\n",
129
    "    tokenizer=tokenizer, mlm=False,  \n",
130
    "    # MLM is Masked Language Modelling (for BERT + auto-encoding tasks)\n",
131
    ")"
132
   ]
133
  },
134
  {
135
   "cell_type": "code",
136
   "execution_count": 7,
137
   "id": "083de66d",
138
   "metadata": {},
139
   "outputs": [
140
    {
141
     "data": {
142
      "text/plain": [
143
       "{'input_ids': tensor([[   40,   716,   281,  5128],\n",
144
       "        [ 2396,   716,   314, 50256]]), 'attention_mask': tensor([[1, 1, 1, 1],\n",
145
       "        [1, 1, 1, 0]]), 'labels': tensor([[  40,  716,  281, 5128],\n",
146
       "        [2396,  716,  314, -100]])}"
147
      ]
148
     },
149
     "execution_count": 7,
150
     "metadata": {},
151
     "output_type": "execute_result"
152
    }
153
   ],
154
   "source": [
155
    "# example of how collator pads data dynamically\n",
156
    "collator_example = data_collator([tokenizer('I am an input'), tokenizer('So am I')])\n",
157
    "\n",
158
    "collator_example"
159
   ]
160
  },
161
  {
162
   "cell_type": "code",
163
   "execution_count": 8,
164
   "id": "395848aa",
165
   "metadata": {},
166
   "outputs": [
167
    {
168
     "data": {
169
      "text/plain": [
170
       "tensor([[   40,   716,   281,  5128],\n",
171
       "        [ 2396,   716,   314, 50256]])"
172
      ]
173
     },
174
     "execution_count": 8,
175
     "metadata": {},
176
     "output_type": "execute_result"
177
    }
178
   ],
179
   "source": [
180
    "collator_example.input_ids  # 50256 is our pad token id"
181
   ]
182
  },
183
  {
184
   "cell_type": "code",
185
   "execution_count": 9,
186
   "id": "cb23c58b",
187
   "metadata": {},
188
   "outputs": [
189
    {
190
     "data": {
191
      "text/plain": [
192
       "50256"
193
      ]
194
     },
195
     "execution_count": 9,
196
     "metadata": {},
197
     "output_type": "execute_result"
198
    }
199
   ],
200
   "source": [
201
    "tokenizer.pad_token_id"
202
   ]
203
  },
204
  {
205
   "cell_type": "code",
206
   "execution_count": 10,
207
   "id": "b6f0c2ea",
208
   "metadata": {},
209
   "outputs": [
210
    {
211
     "data": {
212
      "text/plain": [
213
       "tensor([[1, 1, 1, 1],\n",
214
       "        [1, 1, 1, 0]])"
215
      ]
216
     },
217
     "execution_count": 10,
218
     "metadata": {},
219
     "output_type": "execute_result"
220
    }
221
   ],
222
   "source": [
223
    "collator_example.attention_mask  # Note the 0 in the attention mask where we have a pad token"
224
   ]
225
  },
226
  {
227
   "cell_type": "code",
228
   "execution_count": 11,
229
   "id": "59808749",
230
   "metadata": {},
231
   "outputs": [
232
    {
233
     "data": {
234
      "text/plain": [
235
       "tensor([[  40,  716,  281, 5128],\n",
236
       "        [2396,  716,  314, -100]])"
237
      ]
238
     },
239
     "execution_count": 11,
240
     "metadata": {},
241
     "output_type": "execute_result"
242
    }
243
   ],
244
   "source": [
245
    "collator_example.labels  # note the -100 to ignore loss calculation for the padded token\n",
246
    "# Labels are shifted inside the GPT model so we don't need to worry about that"
247
   ]
248
  },
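  {
   "cell_type": "markdown",
   "id": "b2c4e6a0",
   "metadata": {},
   "source": [
    "Quick sanity check (added sketch): decoding the collated `input_ids` makes the dynamic padding visible; the shorter sequence ends with the `<|endoftext|>` pad token."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2c4e6a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# decode the dynamically padded batch; the shorter sequence ends in <|endoftext|>\n",
    "tokenizer.batch_decode(collator_example['input_ids'])"
   ]
  },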
249
  {
250
   "cell_type": "code",
251
   "execution_count": null,
252
   "id": "7b9701db",
253
   "metadata": {},
254
   "outputs": [],
255
   "source": []
256
  },
257
  {
258
   "cell_type": "code",
259
   "execution_count": 12,
260
   "id": "f65c372b",
261
   "metadata": {
262
    "scrolled": true
263
   },
264
   "outputs": [],
265
   "source": [
266
    "model = GPT2LMHeadModel.from_pretrained('gpt2')  # load up a GPT2 model\n",
267
    "\n",
268
    "pretrained_generator = pipeline(  # create a generator with built in params\n",
269
    "    'text-generation', model=model, tokenizer='gpt2',\n",
270
    "    config={'max_length': 200, 'do_sample': True, 'top_p': 0.9, 'temperature': 0.7, 'top_k': 10}\n",
271
    ")"
272
   ]
273
  },
274
  {
275
   "cell_type": "code",
276
   "execution_count": 187,
277
   "id": "18101335",
278
   "metadata": {},
279
   "outputs": [
280
    {
281
     "name": "stderr",
282
     "output_type": "stream",
283
     "text": [
284
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
285
     ]
286
    },
287
    {
288
     "name": "stdout",
289
     "output_type": "stream",
290
     "text": [
291
      "----------\n",
292
      "This dataset shows the relationship between the number of years that a student has left the room.\n",
293
      "In this blog post, we will look at how to use this data to create user profiles for other departments\n",
294
      "based on their use of KPI.\n",
295
      "\n",
296
      "----------\n",
297
      "This dataset shows the relationship between\n",
298
      "weighting and each value for the model\n",
299
      "Let's look at some basic relationships to predict the probability of a certain type of event:\n",
300
      "data['id'] = z_test.mean()\n",
301
      "# predict a\n",
302
      "----------\n",
303
      "This dataset shows the relationship between gender (as shown by the bar chart) and each major quantitative measure of medical attention\n",
304
      "about the population:\n",
305
      "\n",
306
      "[ 463 ]\n",
307
      "\n",
308
      "\f",
309
      "Basic Statistics\n",
310
      "\n",
311
      "Chapter 18\n",
312
      "\n",
313
      "Now let's look at statistics\n",
314
      "----------\n"
315
     ]
316
    }
317
   ],
318
   "source": [
319
    "print('----------')\n",
320
    "for generated_sequence in pretrained_generator('This dataset shows the relationship', num_return_sequences=3):\n",
321
    "    print(generated_sequence['generated_text'])\n",
322
    "    print('----------')"
323
   ]
324
  },
325
  {
326
   "cell_type": "code",
327
   "execution_count": 14,
328
   "id": "0f775e4c",
329
   "metadata": {},
330
   "outputs": [
331
    {
332
     "name": "stderr",
333
     "output_type": "stream",
334
     "text": [
335
      "***** Running Evaluation *****\n",
336
      "  Num examples = 470\n",
337
      "  Batch size = 32\n"
338
     ]
339
    },
340
    {
341
     "data": {
342
      "text/html": [
343
       "\n",
344
       "    <div>\n",
345
       "      \n",
346
       "      <progress value='30' max='15' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
347
       "      [15/15 47:47]\n",
348
       "    </div>\n",
349
       "    "
350
      ],
351
      "text/plain": [
352
       "<IPython.core.display.HTML object>"
353
      ]
354
     },
355
     "metadata": {},
356
     "output_type": "display_data"
357
    },
358
    {
359
     "data": {
360
      "text/plain": [
361
       "{'eval_loss': 4.5039801597595215,\n",
362
       " 'eval_runtime': 194.2009,\n",
363
       " 'eval_samples_per_second': 2.42,\n",
364
       " 'eval_steps_per_second': 0.077}"
365
      ]
366
     },
367
     "execution_count": 14,
368
     "metadata": {},
369
     "output_type": "execute_result"
370
    }
371
   ],
372
   "source": [
373
    "training_args = TrainingArguments(\n",
374
    "    output_dir=\"./gpt2_pds\", #The output directory\n",
375
    "    overwrite_output_dir=True, #overwrite the content of the output directory\n",
376
    "    num_train_epochs=3, # number of training epochs\n",
377
    "    per_device_train_batch_size=32, # batch size for training\n",
378
    "    per_device_eval_batch_size=32,  # batch size for evaluation\n",
379
    "    logging_steps=10,\n",
380
    "    load_best_model_at_end=True,\n",
381
    "    evaluation_strategy='epoch',\n",
382
    "    save_strategy='epoch'\n",
383
    ")\n",
384
    "\n",
385
    "trainer = Trainer(\n",
386
    "    model=model,\n",
387
    "    args=training_args,\n",
388
    "    data_collator=data_collator,\n",
389
    "    train_dataset=pds_data.examples[:int(len(pds_data.examples)*.8)],\n",
390
    "    eval_dataset=pds_data.examples[int(len(pds_data.examples)*.8):]\n",
391
    ")\n",
392
    "\n",
393
    "trainer.evaluate()"
394
   ]
395
  },
396
  {
397
   "cell_type": "code",
398
   "execution_count": 15,
399
   "id": "5a280a0e",
400
   "metadata": {},
401
   "outputs": [
402
    {
403
     "name": "stderr",
404
     "output_type": "stream",
405
     "text": [
406
      "/Users/sinanozdemir/opt/anaconda3/lib/python3.9/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
407
      "  warnings.warn(\n",
408
      "***** Running training *****\n",
409
      "  Num examples = 1878\n",
410
      "  Num Epochs = 3\n",
411
      "  Instantaneous batch size per device = 32\n",
412
      "  Total train batch size (w. parallel, distributed & accumulation) = 32\n",
413
      "  Gradient Accumulation steps = 1\n",
414
      "  Total optimization steps = 177\n"
415
     ]
416
    },
417
    {
418
     "data": {
419
      "text/html": [
420
       "\n",
421
       "    <div>\n",
422
       "      \n",
423
       "      <progress value='177' max='177' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
424
       "      [177/177 2:12:56, Epoch 3/3]\n",
425
       "    </div>\n",
426
       "    <table border=\"1\" class=\"dataframe\">\n",
427
       "  <thead>\n",
428
       " <tr style=\"text-align: left;\">\n",
429
       "      <th>Epoch</th>\n",
430
       "      <th>Training Loss</th>\n",
431
       "      <th>Validation Loss</th>\n",
432
       "    </tr>\n",
433
       "  </thead>\n",
434
       "  <tbody>\n",
435
       "    <tr>\n",
436
       "      <td>1</td>\n",
437
       "      <td>3.316500</td>\n",
438
       "      <td>3.481012</td>\n",
439
       "    </tr>\n",
440
       "    <tr>\n",
441
       "      <td>2</td>\n",
442
       "      <td>3.056100</td>\n",
443
       "      <td>3.446980</td>\n",
444
       "    </tr>\n",
445
       "    <tr>\n",
446
       "      <td>3</td>\n",
447
       "      <td>2.896000</td>\n",
448
       "      <td>3.451081</td>\n",
449
       "    </tr>\n",
450
       "  </tbody>\n",
451
       "</table><p>"
452
      ],
453
      "text/plain": [
454
       "<IPython.core.display.HTML object>"
455
      ]
456
     },
457
     "metadata": {},
458
     "output_type": "display_data"
459
    },
460
    {
461
     "name": "stderr",
462
     "output_type": "stream",
463
     "text": [
464
      "***** Running Evaluation *****\n",
465
      "  Num examples = 470\n",
466
      "  Batch size = 32\n",
467
      "Saving model checkpoint to ./gpt2_pds/checkpoint-59\n",
468
      "Configuration saved in ./gpt2_pds/checkpoint-59/config.json\n",
469
      "Model weights saved in ./gpt2_pds/checkpoint-59/pytorch_model.bin\n",
470
      "***** Running Evaluation *****\n",
471
      "  Num examples = 470\n",
472
      "  Batch size = 32\n",
473
      "Saving model checkpoint to ./gpt2_pds/checkpoint-118\n",
474
      "Configuration saved in ./gpt2_pds/checkpoint-118/config.json\n",
475
      "Model weights saved in ./gpt2_pds/checkpoint-118/pytorch_model.bin\n",
476
      "***** Running Evaluation *****\n",
477
      "  Num examples = 470\n",
478
      "  Batch size = 32\n",
479
      "Saving model checkpoint to ./gpt2_pds/checkpoint-177\n",
480
      "Configuration saved in ./gpt2_pds/checkpoint-177/config.json\n",
481
      "Model weights saved in ./gpt2_pds/checkpoint-177/pytorch_model.bin\n",
482
      "\n",
483
      "\n",
484
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
485
      "\n",
486
      "\n",
487
      "Loading best model from ./gpt2_pds/checkpoint-118 (score: 3.4469802379608154).\n"
488
     ]
489
    },
490
    {
491
     "data": {
492
      "text/plain": [
493
       "TrainOutput(global_step=177, training_loss=3.169361955028469, metrics={'train_runtime': 8020.7923, 'train_samples_per_second': 0.702, 'train_steps_per_second': 0.022, 'total_flos': 184014913536000.0, 'train_loss': 3.169361955028469, 'epoch': 3.0})"
494
      ]
495
     },
496
     "execution_count": 15,
497
     "metadata": {},
498
     "output_type": "execute_result"
499
    }
500
   ],
501
   "source": [
502
    "trainer.train()"
503
   ]
504
  },
505
  {
506
   "cell_type": "code",
507
   "execution_count": 16,
508
   "id": "f477e12c",
509
   "metadata": {},
510
   "outputs": [
511
    {
512
     "name": "stderr",
513
     "output_type": "stream",
514
     "text": [
515
      "***** Running Evaluation *****\n",
516
      "  Num examples = 470\n",
517
      "  Batch size = 32\n"
518
     ]
519
    },
520
    {
521
     "data": {
522
      "text/html": [
523
       "\n",
524
       "    <div>\n",
525
       "      \n",
526
       "      <progress value='15' max='15' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
527
       "      [15/15 02:53]\n",
528
       "    </div>\n",
529
       "    "
530
      ],
531
      "text/plain": [
532
       "<IPython.core.display.HTML object>"
533
      ]
534
     },
535
     "metadata": {},
536
     "output_type": "display_data"
537
    },
538
    {
539
     "data": {
540
      "text/plain": [
541
       "{'eval_loss': 3.4469802379608154,\n",
542
       " 'eval_runtime': 186.6424,\n",
543
       " 'eval_samples_per_second': 2.518,\n",
544
       " 'eval_steps_per_second': 0.08,\n",
545
       " 'epoch': 3.0}"
546
      ]
547
     },
548
     "execution_count": 16,
549
     "metadata": {},
550
     "output_type": "execute_result"
551
    }
552
   ],
553
   "source": [
554
    "trainer.evaluate()  # loss decrease is slowing down so we are hitting our limit"
555
   ]
556
  },
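  {
   "cell_type": "markdown",
   "id": "c3d5f7a0",
   "metadata": {},
   "source": [
    "Side note (added, not in the original notebook): the eval loss is a per-token cross-entropy, so `exp(loss)` gives perplexity, which can be easier to interpret: roughly 90 before fine-tuning versus roughly 31 after."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3d5f7a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# perplexity = exp(cross-entropy loss); losses taken from the two evaluate() calls above\n",
    "import math\n",
    "print(math.exp(4.5039801597595215))  # before fine-tuning, ~90.4\n",
    "print(math.exp(3.4469802379608154))  # after fine-tuning, ~31.4"
   ]
  },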
557
  {
558
   "cell_type": "code",
559
   "execution_count": 17,
560
   "id": "b2f1abe6",
561
   "metadata": {},
562
   "outputs": [
563
    {
564
     "name": "stderr",
565
     "output_type": "stream",
566
     "text": [
567
      "Saving model checkpoint to ./gpt2_pds\n",
568
      "Configuration saved in ./gpt2_pds/config.json\n",
569
      "Model weights saved in ./gpt2_pds/pytorch_model.bin\n"
570
     ]
571
    }
572
   ],
573
   "source": [
574
    "trainer.save_model()"
575
   ]
576
  },
577
  {
578
   "cell_type": "code",
579
   "execution_count": 18,
580
   "id": "9079151c",
581
   "metadata": {
582
    "scrolled": true
583
   },
584
   "outputs": [
585
    {
586
     "name": "stderr",
587
     "output_type": "stream",
588
     "text": [
589
      "loading configuration file ./gpt2_pds/config.json\n",
590
      "Model config GPT2Config {\n",
591
      "  \"_name_or_path\": \"gpt2\",\n",
592
      "  \"activation_function\": \"gelu_new\",\n",
593
      "  \"architectures\": [\n",
594
      "    \"GPT2LMHeadModel\"\n",
595
      "  ],\n",
596
      "  \"attn_pdrop\": 0.1,\n",
597
      "  \"bos_token_id\": 50256,\n",
598
      "  \"do_sample\": true,\n",
599
      "  \"embd_pdrop\": 0.1,\n",
600
      "  \"eos_token_id\": 50256,\n",
601
      "  \"initializer_range\": 0.02,\n",
602
      "  \"layer_norm_epsilon\": 1e-05,\n",
603
      "  \"max_length\": 50,\n",
604
      "  \"model_type\": \"gpt2\",\n",
605
      "  \"n_ctx\": 1024,\n",
606
      "  \"n_embd\": 768,\n",
607
      "  \"n_head\": 12,\n",
608
      "  \"n_inner\": null,\n",
609
      "  \"n_layer\": 12,\n",
610
      "  \"n_positions\": 1024,\n",
611
      "  \"reorder_and_upcast_attn\": false,\n",
612
      "  \"resid_pdrop\": 0.1,\n",
613
      "  \"scale_attn_by_inverse_layer_idx\": false,\n",
614
      "  \"scale_attn_weights\": true,\n",
615
      "  \"summary_activation\": null,\n",
616
      "  \"summary_first_dropout\": 0.1,\n",
617
      "  \"summary_proj_to_labels\": true,\n",
618
      "  \"summary_type\": \"cls_index\",\n",
619
      "  \"summary_use_proj\": true,\n",
620
      "  \"task_specific_params\": {\n",
621
      "    \"text-generation\": {\n",
622
      "      \"do_sample\": true,\n",
623
      "      \"max_length\": 50\n",
624
      "    }\n",
625
      "  },\n",
626
      "  \"torch_dtype\": \"float32\",\n",
627
      "  \"transformers_version\": \"4.19.4\",\n",
628
      "  \"use_cache\": true,\n",
629
      "  \"vocab_size\": 50257\n",
630
      "}\n",
631
      "\n",
632
      "loading weights file ./gpt2_pds/pytorch_model.bin\n",
633
      "All model checkpoint weights were used when initializing GPT2LMHeadModel.\n",
634
      "\n",
635
      "All the weights of GPT2LMHeadModel were initialized from the model checkpoint at ./gpt2_pds.\n",
636
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.\n"
637
     ]
638
    }
639
   ],
640
   "source": [
641
    "loaded_model = GPT2LMHeadModel.from_pretrained('./gpt2_pds')\n",
642
    "\n",
643
    "finetuned_generator = pipeline(\n",
644
    "    'text-generation', model=loaded_model, tokenizer=tokenizer,\n",
645
    "    config={'max_length': 200, 'do_sample': True, 'top_p': 0.9, 'temperature': 0.7, 'top_k': 10}\n",
646
    ")"
647
   ]
648
  },
649
  {
650
   "cell_type": "code",
651
   "execution_count": 186,
652
   "id": "60aee57d",
653
   "metadata": {},
654
   "outputs": [
655
    {
656
     "name": "stderr",
657
     "output_type": "stream",
658
     "text": [
659
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
660
     ]
661
    },
662
    {
663
     "name": "stdout",
664
     "output_type": "stream",
665
     "text": [
666
      "----------\n",
667
      "This dataset shows the relationship between education and age.\n",
668
      "\n",
669
      "This data was obtained from\n",
670
      "pf2scs.kdf using the KdfTree method of classification, which is cross-validated with Python and the\n",
671
      "PdfFrame utility\n",
672
      "----------\n",
673
      "This dataset shows the relationship between COS and our sample distribution, we can easily see on the graph:\n",
674
      "This leads us to the next big thing: we can visualize variables as a single line graph. This graph has a cross-sectional length of\n",
675
      "----------\n",
676
      "This dataset shows the relationship between\n",
677
      "height and length of the data set:\n",
678
      "\n",
679
      "(b_height = height[1] == 0)\n",
680
      "So, that in our case is about 4200\n",
681
      "people in height, which means that\n",
682
      "I\n",
683
      "----------\n"
684
     ]
685
    }
686
   ],
687
   "source": [
688
    "# examples are now sustainably about data\n",
689
    "print('----------')\n",
690
    "for generated_sequence in finetuned_generator('This dataset shows the relationship', num_return_sequences=3):\n",
691
    "    print(generated_sequence['generated_text'])\n",
692
    "    print('----------')"
693
   ]
694
  },
695
  {
696
   "cell_type": "code",
697
   "execution_count": null,
698
   "id": "ae0a3279",
699
   "metadata": {},
700
   "outputs": [],
701
   "source": []
702
  },
703
  {
704
   "cell_type": "code",
705
   "execution_count": null,
706
   "id": "282bee9d",
707
   "metadata": {},
708
   "outputs": [],
709
   "source": []
710
  },
711
  {
712
   "cell_type": "markdown",
713
   "id": "e044664f",
714
   "metadata": {},
715
   "source": [
716
    "## GPT for code dictation"
717
   ]
718
  },
719
  {
720
   "cell_type": "code",
721
   "execution_count": 8,
722
   "id": "ebb28b49",
723
   "metadata": {},
724
   "outputs": [],
725
   "source": [
726
    "from transformers import GPT2Tokenizer, DataCollatorForLanguageModeling, TrainingArguments, Trainer, \\\n",
727
    "                         GPT2LMHeadModel, pipeline\n",
728
    "from datasets import Dataset\n",
729
    "import pandas as pd"
730
   ]
731
  },
732
  {
733
   "cell_type": "code",
734
   "execution_count": 9,
735
   "id": "cc4a8564",
736
   "metadata": {},
737
   "outputs": [
738
    {
739
     "name": "stdout",
740
     "output_type": "stream",
741
     "text": [
742
      "(50, 2)\n"
743
     ]
744
    },
745
    {
746
     "data": {
747
      "text/html": [
748
       "<div>\n",
749
       "<style scoped>\n",
750
       "    .dataframe tbody tr th:only-of-type {\n",
751
       "        vertical-align: middle;\n",
752
       "    }\n",
753
       "\n",
754
       "    .dataframe tbody tr th {\n",
755
       "        vertical-align: top;\n",
756
       "    }\n",
757
       "\n",
758
       "    .dataframe thead th {\n",
759
       "        text-align: right;\n",
760
       "    }\n",
761
       "</style>\n",
762
       "<table border=\"1\" class=\"dataframe\">\n",
763
       "  <thead>\n",
764
       "    <tr style=\"text-align: right;\">\n",
765
       "      <th></th>\n",
766
       "      <th>English</th>\n",
767
       "      <th>LaTeX</th>\n",
768
       "    </tr>\n",
769
       "  </thead>\n",
770
       "  <tbody>\n",
771
       "    <tr>\n",
772
       "      <th>0</th>\n",
773
       "      <td>integral from a to b of x squared</td>\n",
774
       "      <td>\\int_{a}^{b} x^2 \\,dx</td>\n",
775
       "    </tr>\n",
776
       "    <tr>\n",
777
       "      <th>1</th>\n",
778
       "      <td>integral from negative 1 to 1 of x squared</td>\n",
779
       "      <td>\\int_{-1}^{1} x^2 \\,dx</td>\n",
780
       "    </tr>\n",
781
       "  </tbody>\n",
782
       "</table>\n",
783
       "</div>"
784
      ],
785
      "text/plain": [
786
       "                                      English                   LaTeX\n",
787
       "0           integral from a to b of x squared   \\int_{a}^{b} x^2 \\,dx\n",
788
       "1  integral from negative 1 to 1 of x squared  \\int_{-1}^{1} x^2 \\,dx"
789
      ]
790
     },
791
     "execution_count": 9,
792
     "metadata": {},
793
     "output_type": "execute_result"
794
    }
795
   ],
796
   "source": [
797
    "data = pd.read_csv('../data/english_to_latex.csv')\n",
798
    "\n",
799
    "print(data.shape)\n",
800
    "\n",
801
    "data.head(2)"
802
   ]
803
  },
804
  {
805
   "cell_type": "code",
806
   "execution_count": 10,
807
   "id": "8a166ea6",
808
   "metadata": {},
809
   "outputs": [
810
    {
811
     "data": {
812
      "text/html": [
813
       "<div>\n",
814
       "<style scoped>\n",
815
       "    .dataframe tbody tr th:only-of-type {\n",
816
       "        vertical-align: middle;\n",
817
       "    }\n",
818
       "\n",
819
       "    .dataframe tbody tr th {\n",
820
       "        vertical-align: top;\n",
821
       "    }\n",
822
       "\n",
823
       "    .dataframe thead th {\n",
824
       "        text-align: right;\n",
825
       "    }\n",
826
       "</style>\n",
827
       "<table border=\"1\" class=\"dataframe\">\n",
828
       "  <thead>\n",
829
       "    <tr style=\"text-align: right;\">\n",
830
       "      <th></th>\n",
831
       "      <th>English</th>\n",
832
       "      <th>LaTeX</th>\n",
833
       "    </tr>\n",
834
       "  </thead>\n",
835
       "  <tbody>\n",
836
       "    <tr>\n",
837
       "      <th>0</th>\n",
838
       "      <td>integral from a to b of x squared</td>\n",
839
       "      <td>\\int_{a}^{b} x^2 \\,dx</td>\n",
840
       "    </tr>\n",
841
       "    <tr>\n",
842
       "      <th>1</th>\n",
843
       "      <td>integral from negative 1 to 1 of x squared</td>\n",
844
       "      <td>\\int_{-1}^{1} x^2 \\,dx</td>\n",
845
       "    </tr>\n",
846
       "    <tr>\n",
847
       "      <th>2</th>\n",
848
       "      <td>integral from negative 1 to infinity of x cubed</td>\n",
849
       "      <td>\\int_{-1}^{\\inf} x^3 \\,dx</td>\n",
850
       "    </tr>\n",
851
       "    <tr>\n",
852
       "      <th>3</th>\n",
853
       "      <td>integral from 0 to infinity of x squared</td>\n",
854
       "      <td>\\int_{0}^{\\inf} x^2 \\,dx</td>\n",
855
       "    </tr>\n",
856
       "    <tr>\n",
857
       "      <th>4</th>\n",
858
       "      <td>integral from 0 to infinity of y squared</td>\n",
859
       "      <td>\\int_{0}^{\\inf} y^2 \\,dy</td>\n",
860
       "    </tr>\n",
861
       "    <tr>\n",
862
       "      <th>5</th>\n",
863
       "      <td>integral from 1 to 2 of x over 2</td>\n",
864
       "      <td>\\int_{1}^{2} \\frac{x}{2} \\,dx</td>\n",
865
       "    </tr>\n",
866
       "    <tr>\n",
867
       "      <th>6</th>\n",
868
       "      <td>f of x equals x squared</td>\n",
869
       "      <td>f(x) = x^2</td>\n",
870
       "    </tr>\n",
871
       "    <tr>\n",
872
       "      <th>7</th>\n",
873
       "      <td>h of x equals x squared</td>\n",
874
       "      <td>h(x) = x^2</td>\n",
875
       "    </tr>\n",
876
       "    <tr>\n",
877
       "      <th>8</th>\n",
878
       "      <td>g of x equals x squared</td>\n",
879
       "      <td>g(x) = x^2</td>\n",
880
       "    </tr>\n",
881
       "    <tr>\n",
882
       "      <th>9</th>\n",
883
       "      <td>g of x equals x to the eighth power</td>\n",
884
       "      <td>g(x) = x^8</td>\n",
885
       "    </tr>\n",
886
       "  </tbody>\n",
887
       "</table>\n",
888
       "</div>"
889
      ],
890
      "text/plain": [
891
       "                                           English  \\\n",
892
       "0                integral from a to b of x squared   \n",
893
       "1       integral from negative 1 to 1 of x squared   \n",
894
       "2  integral from negative 1 to infinity of x cubed   \n",
895
       "3         integral from 0 to infinity of x squared   \n",
896
       "4         integral from 0 to infinity of y squared   \n",
897
       "5                 integral from 1 to 2 of x over 2   \n",
898
       "6                          f of x equals x squared   \n",
899
       "7                          h of x equals x squared   \n",
900
       "8                          g of x equals x squared   \n",
901
       "9              g of x equals x to the eighth power   \n",
902
       "\n",
903
       "                           LaTeX  \n",
904
       "0          \\int_{a}^{b} x^2 \\,dx  \n",
905
       "1         \\int_{-1}^{1} x^2 \\,dx  \n",
906
       "2      \\int_{-1}^{\\inf} x^3 \\,dx  \n",
907
       "3       \\int_{0}^{\\inf} x^2 \\,dx  \n",
908
       "4       \\int_{0}^{\\inf} y^2 \\,dy  \n",
909
       "5  \\int_{1}^{2} \\frac{x}{2} \\,dx  \n",
910
       "6                     f(x) = x^2  \n",
911
       "7                     h(x) = x^2  \n",
912
       "8                     g(x) = x^2  \n",
913
       "9                     g(x) = x^8  "
914
      ]
915
     },
916
     "execution_count": 10,
917
     "metadata": {},
918
     "output_type": "execute_result"
919
    }
920
   ],
921
   "source": [
922
    "data.head(10)"
923
   ]
924
  },
925
  {
926
   "cell_type": "code",
927
   "execution_count": null,
928
   "id": "f747b7f0",
929
   "metadata": {},
930
   "outputs": [],
931
   "source": []
932
  },
933
  {
934
   "cell_type": "code",
935
   "execution_count": null,
936
   "id": "c138752d",
937
   "metadata": {},
938
   "outputs": [],
939
   "source": []
940
  },
941
  {
942
   "cell_type": "code",
943
   "execution_count": 11,
944
   "id": "bc988ea9",
945
   "metadata": {
946
    "scrolled": true
947
   },
948
   "outputs": [],
949
   "source": [
950
    "tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n",
951
    "\n",
952
    "tokenizer.pad_token = tokenizer.eos_token\n",
953
    "\n",
954
    "# Add our singular prompt\n",
955
    "CONVERSION_PROMPT = 'LCT\\n'  # LaTeX conversion task\n",
956
    "\n",
957
    "CONVERSION_TOKEN = 'LaTeX:'\n"
958
   ]
959
  },
960
  {
961
   "cell_type": "code",
962
   "execution_count": null,
963
   "id": "3e4aa061",
964
   "metadata": {},
965
   "outputs": [],
966
   "source": []
967
  },
968
  {
969
   "cell_type": "code",
970
   "execution_count": 12,
971
   "id": "db5ebf1e",
972
   "metadata": {},
973
   "outputs": [
974
    {
975
     "name": "stdout",
976
     "output_type": "stream",
977
     "text": [
978
      "LCT\n",
979
      "English: integral from a to b of x squared\n",
980
      "LaTeX: \\int_{a}^{b} x^2 \\,dx\n"
981
     ]
982
    }
983
   ],
984
   "source": [
985
    "# This is our \"training prompt\" that we want GPT2 to recognize and learn\n",
986
    "training_examples = f'{CONVERSION_PROMPT}English: ' + data['English'] + '\\n' + CONVERSION_TOKEN + ' ' + data['LaTeX'].astype(str)\n",
987
    "\n",
988
    "print(training_examples[0])\n"
989
   ]
990
  },
991
  {
992
   "cell_type": "code",
993
   "execution_count": 13,
994
   "id": "beb1b918",
995
   "metadata": {},
996
   "outputs": [
997
    {
998
     "data": {
999
      "text/html": [
1000
       "<div>\n",
1001
       "<style scoped>\n",
1002
       "    .dataframe tbody tr th:only-of-type {\n",
1003
       "        vertical-align: middle;\n",
1004
       "    }\n",
1005
       "\n",
1006
       "    .dataframe tbody tr th {\n",
1007
       "        vertical-align: top;\n",
1008
       "    }\n",
1009
       "\n",
1010
       "    .dataframe thead th {\n",
1011
       "        text-align: right;\n",
1012
       "    }\n",
1013
       "</style>\n",
1014
       "<table border=\"1\" class=\"dataframe\">\n",
1015
       "  <thead>\n",
1016
       "    <tr style=\"text-align: right;\">\n",
1017
       "      <th></th>\n",
1018
       "      <th>text</th>\n",
1019
       "    </tr>\n",
1020
       "  </thead>\n",
1021
       "  <tbody>\n",
1022
       "    <tr>\n",
1023
       "      <th>0</th>\n",
1024
       "      <td>LCT\\nEnglish: integral from a to b of x square...</td>\n",
1025
       "    </tr>\n",
1026
       "    <tr>\n",
1027
       "      <th>1</th>\n",
1028
       "      <td>LCT\\nEnglish: integral from negative 1 to 1 of...</td>\n",
1029
       "    </tr>\n",
1030
       "  </tbody>\n",
1031
       "</table>\n",
1032
       "</div>"
1033
      ],
1034
      "text/plain": [
1035
       "                                                text\n",
1036
       "0  LCT\\nEnglish: integral from a to b of x square...\n",
1037
       "1  LCT\\nEnglish: integral from negative 1 to 1 of..."
1038
      ]
1039
     },
1040
     "execution_count": 13,
1041
     "metadata": {},
1042
     "output_type": "execute_result"
1043
    }
1044
   ],
1045
   "source": [
1046
    "task_df = pd.DataFrame({'text': training_examples})\n",
1047
    "\n",
1048
    "task_df.head(2)"
1049
   ]
1050
  },
1051
  {
1052
   "cell_type": "code",
1053
   "execution_count": null,
1054
   "id": "c98cd94a",
1055
   "metadata": {},
1056
   "outputs": [],
1057
   "source": []
1058
  },
1059
  {
1060
   "cell_type": "code",
1061
   "execution_count": 14,
1062
   "id": "b7617abd",
1063
   "metadata": {},
1064
   "outputs": [],
1065
   "source": [
1066
    "# adding the EOS token at the end so the model knows when to stop predicting\n",
1067
    "\n",
1068
    "task_df['text'] = task_df['text'].map(lambda x: f'{x}{tokenizer.eos_token}')"
1069
   ]
1070
  },
1071
  {
1072
   "cell_type": "code",
1073
   "execution_count": null,
1074
   "id": "76552d13",
1075
   "metadata": {},
1076
   "outputs": [],
1077
   "source": []
1078
  },
1079
  {
1080
   "cell_type": "code",
1081
   "execution_count": 15,
1082
   "id": "074754cb",
1083
   "metadata": {},
1084
   "outputs": [
1085
    {
1086
     "data": {
1087
      "application/vnd.jupyter.widget-view+json": {
1088
       "model_id": "574e112410714ec1b21ae25afa1efca9",
1089
       "version_major": 2,
1090
       "version_minor": 0
1091
      },
1092
      "text/plain": [
1093
       "  0%|          | 0/1 [00:00<?, ?ba/s]"
1094
      ]
1095
     },
1096
     "metadata": {},
1097
     "output_type": "display_data"
1098
    }
1099
   ],
1100
   "source": [
1101
    "latex_data = Dataset.from_pandas(task_df)  # turn a pandas DataFrame into a Dataset\n",
1102
    "\n",
1103
    "def preprocess(examples):  \n",
1104
    "    # tokenize our text but don't pad because our collator will pad for us dynamically\n",
1105
    "    return tokenizer(examples['text'], truncation=True)\n",
1106
    "\n",
1107
    "latex_data = latex_data.map(preprocess, batched=True)\n",
1108
    "\n",
1109
    "latex_data = latex_data.train_test_split(train_size=.8)"
1110
   ]
1111
  },
1112
  {
1113
   "cell_type": "code",
1114
   "execution_count": 16,
1115
   "id": "16bb726f",
1116
   "metadata": {
1117
    "scrolled": true
1118
   },
1119
   "outputs": [
1120
    {
1121
     "data": {
1122
      "text/plain": [
1123
       "{'text': 'LCT\\nEnglish: integral from a to b of x squared\\nLaTeX: \\\\int_{a}^{b} x^2 \\\\,dx<|endoftext|>',\n",
1124
       " 'input_ids': [43,\n",
1125
       "  4177,\n",
1126
       "  198,\n",
1127
       "  15823,\n",
1128
       "  25,\n",
1129
       "  19287,\n",
1130
       "  422,\n",
1131
       "  257,\n",
1132
       "  284,\n",
1133
       "  275,\n",
1134
       "  286,\n",
1135
       "  2124,\n",
1136
       "  44345,\n",
1137
       "  198,\n",
1138
       "  14772,\n",
1139
       "  49568,\n",
1140
       "  25,\n",
1141
       "  3467,\n",
1142
       "  600,\n",
1143
       "  23330,\n",
1144
       "  64,\n",
1145
       "  92,\n",
1146
       "  36796,\n",
1147
       "  65,\n",
1148
       "  92,\n",
1149
       "  2124,\n",
1150
       "  61,\n",
1151
       "  17,\n",
1152
       "  3467,\n",
1153
       "  11,\n",
1154
       "  34350,\n",
1155
       "  50256],\n",
1156
       " 'attention_mask': [1,\n",
1157
       "  1,\n",
1158
       "  1,\n",
1159
       "  1,\n",
1160
       "  1,\n",
1161
       "  1,\n",
1162
       "  1,\n",
1163
       "  1,\n",
1164
       "  1,\n",
1165
       "  1,\n",
1166
       "  1,\n",
1167
       "  1,\n",
1168
       "  1,\n",
1169
       "  1,\n",
1170
       "  1,\n",
1171
       "  1,\n",
1172
       "  1,\n",
1173
       "  1,\n",
1174
       "  1,\n",
1175
       "  1,\n",
1176
       "  1,\n",
1177
       "  1,\n",
1178
       "  1,\n",
1179
       "  1,\n",
1180
       "  1,\n",
1181
       "  1,\n",
1182
       "  1,\n",
1183
       "  1,\n",
1184
       "  1,\n",
1185
       "  1,\n",
1186
       "  1,\n",
1187
       "  1]}"
1188
      ]
1189
     },
1190
     "execution_count": 16,
1191
     "metadata": {},
1192
     "output_type": "execute_result"
1193
    }
1194
   ],
1195
   "source": [
1196
    "latex_data['train'][0]"
1197
   ]
1198
  },
1199
  {
1200
   "cell_type": "code",
1201
   "execution_count": 221,
1202
   "id": "4f1f30f4",
1203
   "metadata": {},
1204
   "outputs": [],
1205
   "source": [
1206
    "data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)"
1207
   ]
1208
  },
1209
  {
1210
   "cell_type": "code",
1211
   "execution_count": 222,
1212
   "id": "e44dd292",
1213
   "metadata": {
1214
    "scrolled": true
1215
   },
1216
   "outputs": [
1217
    {
1218
     "name": "stderr",
1219
     "output_type": "stream",
1220
     "text": [
1221
      "loading configuration file https://huggingface.co/gpt2/resolve/main/config.json from cache at /Users/sinanozdemir/.cache/huggingface/transformers/fc674cd6907b4c9e933cb42d67662436b89fa9540a1f40d7c919d0109289ad01.7d2e0efa5ca20cef4fb199382111e9d3ad96fd77b849e1d4bed13a66e1336f51\n",
1222
      "Model config GPT2Config {\n",
1223
      "  \"activation_function\": \"gelu_new\",\n",
1224
      "  \"architectures\": [\n",
1225
      "    \"GPT2LMHeadModel\"\n",
1226
      "  ],\n",
1227
      "  \"attn_pdrop\": 0.1,\n",
1228
      "  \"bos_token_id\": 50256,\n",
1229
      "  \"embd_pdrop\": 0.1,\n",
1230
      "  \"eos_token_id\": 50256,\n",
1231
      "  \"initializer_range\": 0.02,\n",
1232
      "  \"layer_norm_epsilon\": 1e-05,\n",
1233
      "  \"model_type\": \"gpt2\",\n",
1234
      "  \"n_ctx\": 1024,\n",
1235
      "  \"n_embd\": 768,\n",
1236
      "  \"n_head\": 12,\n",
1237
      "  \"n_inner\": null,\n",
1238
      "  \"n_layer\": 12,\n",
1239
      "  \"n_positions\": 1024,\n",
1240
      "  \"reorder_and_upcast_attn\": false,\n",
1241
      "  \"resid_pdrop\": 0.1,\n",
1242
      "  \"scale_attn_by_inverse_layer_idx\": false,\n",
1243
      "  \"scale_attn_weights\": true,\n",
1244
      "  \"summary_activation\": null,\n",
1245
      "  \"summary_first_dropout\": 0.1,\n",
1246
      "  \"summary_proj_to_labels\": true,\n",
1247
      "  \"summary_type\": \"cls_index\",\n",
1248
      "  \"summary_use_proj\": true,\n",
1249
      "  \"task_specific_params\": {\n",
1250
      "    \"text-generation\": {\n",
1251
      "      \"do_sample\": true,\n",
1252
      "      \"max_length\": 50\n",
1253
      "    }\n",
1254
      "  },\n",
1255
      "  \"transformers_version\": \"4.19.4\",\n",
1256
      "  \"use_cache\": true,\n",
1257
      "  \"vocab_size\": 50257\n",
1258
      "}\n",
1259
      "\n",
1260
      "loading weights file https://huggingface.co/gpt2/resolve/main/pytorch_model.bin from cache at /Users/sinanozdemir/.cache/huggingface/transformers/752929ace039baa8ef70fe21cdf9ab9445773d20e733cf693d667982e210837e.323c769945a351daa25546176f8208b3004b6f563438a7603e7932bae9025925\n",
1261
      "All model checkpoint weights were used when initializing GPT2LMHeadModel.\n",
1262
      "\n",
1263
      "All the weights of GPT2LMHeadModel were initialized from the model checkpoint at gpt2.\n",
1264
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.\n"
1265
     ]
1266
    }
1267
   ],
1268
   "source": [
1269
    "latex_gpt2 = GPT2LMHeadModel.from_pretrained('gpt2')"
1270
   ]
1271
  },
1272
  {
1273
   "cell_type": "code",
1274
   "execution_count": 17,
1275
   "id": "0712e12d",
1276
   "metadata": {},
1277
   "outputs": [
1278
    {
1279
     "data": {
1280
      "text/plain": [
1281
       "DatasetDict({\n",
1282
       "    train: Dataset({\n",
1283
       "        features: ['text', 'input_ids', 'attention_mask'],\n",
1284
       "        num_rows: 40\n",
1285
       "    })\n",
1286
       "    test: Dataset({\n",
1287
       "        features: ['text', 'input_ids', 'attention_mask'],\n",
1288
       "        num_rows: 10\n",
1289
       "    })\n",
1290
       "})"
1291
      ]
1292
     },
1293
     "execution_count": 17,
1294
     "metadata": {},
1295
     "output_type": "execute_result"
1296
    }
1297
   ],
1298
   "source": [
1299
    "latex_data"
1300
   ]
1301
  },
1302
  {
1303
   "cell_type": "code",
1304
   "execution_count": 223,
1305
   "id": "7d7582bd",
1306
   "metadata": {
1307
    "scrolled": true
1308
   },
1309
   "outputs": [
1310
    {
1311
     "name": "stderr",
1312
     "output_type": "stream",
1313
     "text": [
1314
      "PyTorch: setting up devices\n",
1315
      "The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).\n",
1316
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1317
      "***** Running Evaluation *****\n",
1318
      "  Num examples = 10\n",
1319
      "  Batch size = 20\n"
1320
     ]
1321
    },
1322
    {
1323
     "data": {
1324
      "text/html": [
1325
       "\n",
1326
       "    <div>\n",
1327
       "      \n",
1328
       "      <progress value='2' max='1' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
1329
       "      [1/1 00:34]\n",
1330
       "    </div>\n",
1331
       "    "
1332
      ],
1333
      "text/plain": [
1334
       "<IPython.core.display.HTML object>"
1335
      ]
1336
     },
1337
     "metadata": {},
1338
     "output_type": "display_data"
1339
    },
1340
    {
1341
     "data": {
1342
      "text/plain": [
1343
       "{'eval_loss': 4.891348361968994,\n",
1344
       " 'eval_runtime': 2.9897,\n",
1345
       " 'eval_samples_per_second': 3.345,\n",
1346
       " 'eval_steps_per_second': 0.334}"
1347
      ]
1348
     },
1349
     "execution_count": 223,
1350
     "metadata": {},
1351
     "output_type": "execute_result"
1352
    }
1353
   ],
1354
   "source": [
1355
    "training_args = TrainingArguments(\n",
1356
    "    output_dir=\"./english_to_latex\",\n",
1357
    "    overwrite_output_dir=True, # overwrite the content of the output directory\n",
1358
    "    num_train_epochs=5, # number of training epochs\n",
1359
    "    per_device_train_batch_size=4, # batch size for training\n",
1360
    "    per_device_eval_batch_size=20,  # batch size for evaluation\n",
1361
    "    load_best_model_at_end=True,\n",
1362
    "    logging_steps=5,\n",
1363
    "    log_level='info',\n",
1364
    "    evaluation_strategy='epoch',\n",
1365
    "    save_strategy='epoch'\n",
1366
    ")\n",
1367
    "\n",
1368
    "trainer = Trainer(\n",
1369
    "    model=latex_gpt2,\n",
1370
    "    args=training_args,\n",
1371
    "    train_dataset=latex_data[\"train\"],\n",
1372
    "    eval_dataset=latex_data[\"test\"],\n",
1373
    "    data_collator=data_collator,\n",
1374
    ")\n",
1375
    "\n",
1376
    "trainer.evaluate()"
1377
   ]
1378
  },
1379
  {
1380
   "cell_type": "code",
1381
   "execution_count": 224,
1382
   "id": "b78c75d0",
1383
   "metadata": {
1384
    "scrolled": true
1385
   },
1386
   "outputs": [
1387
    {
1388
     "name": "stderr",
1389
     "output_type": "stream",
1390
     "text": [
1391
      "The following columns in the training set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1392
      "/Users/sinanozdemir/opt/anaconda3/lib/python3.9/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
1393
      "  warnings.warn(\n",
1394
      "***** Running training *****\n",
1395
      "  Num examples = 40\n",
1396
      "  Num Epochs = 5\n",
1397
      "  Instantaneous batch size per device = 4\n",
1398
      "  Total train batch size (w. parallel, distributed & accumulation) = 4\n",
1399
      "  Gradient Accumulation steps = 1\n",
1400
      "  Total optimization steps = 50\n"
1401
     ]
1402
    },
1403
    {
1404
     "data": {
1405
      "text/html": [
1406
       "\n",
1407
       "    <div>\n",
1408
       "      \n",
1409
       "      <progress value='50' max='50' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
1410
       "      [50/50 02:57, Epoch 5/5]\n",
1411
       "    </div>\n",
1412
       "    <table border=\"1\" class=\"dataframe\">\n",
1413
       "  <thead>\n",
1414
       " <tr style=\"text-align: left;\">\n",
1415
       "      <th>Epoch</th>\n",
1416
       "      <th>Training Loss</th>\n",
1417
       "      <th>Validation Loss</th>\n",
1418
       "    </tr>\n",
1419
       "  </thead>\n",
1420
       "  <tbody>\n",
1421
       "    <tr>\n",
1422
       "      <td>1</td>\n",
1423
       "      <td>2.518400</td>\n",
1424
       "      <td>1.832018</td>\n",
1425
       "    </tr>\n",
1426
       "    <tr>\n",
1427
       "      <td>2</td>\n",
1428
       "      <td>1.231000</td>\n",
1429
       "      <td>1.002905</td>\n",
1430
       "    </tr>\n",
1431
       "    <tr>\n",
1432
       "      <td>3</td>\n",
1433
       "      <td>0.842700</td>\n",
1434
       "      <td>0.916263</td>\n",
1435
       "    </tr>\n",
1436
       "    <tr>\n",
1437
       "      <td>4</td>\n",
1438
       "      <td>0.649000</td>\n",
1439
       "      <td>0.909049</td>\n",
1440
       "    </tr>\n",
1441
       "    <tr>\n",
1442
       "      <td>5</td>\n",
1443
       "      <td>0.704200</td>\n",
1444
       "      <td>0.881874</td>\n",
1445
       "    </tr>\n",
1446
       "  </tbody>\n",
1447
       "</table><p>"
1448
      ],
1449
      "text/plain": [
1450
       "<IPython.core.display.HTML object>"
1451
      ]
1452
     },
1453
     "metadata": {},
1454
     "output_type": "display_data"
1455
    },
1456
    {
1457
     "name": "stderr",
1458
     "output_type": "stream",
1459
     "text": [
1460
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1461
      "***** Running Evaluation *****\n",
1462
      "  Num examples = 10\n",
1463
      "  Batch size = 20\n",
1464
      "Saving model checkpoint to ./english_to_latex/checkpoint-10\n",
1465
      "Configuration saved in ./english_to_latex/checkpoint-10/config.json\n",
1466
      "Model weights saved in ./english_to_latex/checkpoint-10/pytorch_model.bin\n",
1467
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1468
      "***** Running Evaluation *****\n",
1469
      "  Num examples = 10\n",
1470
      "  Batch size = 20\n",
1471
      "Saving model checkpoint to ./english_to_latex/checkpoint-20\n",
1472
      "Configuration saved in ./english_to_latex/checkpoint-20/config.json\n",
1473
      "Model weights saved in ./english_to_latex/checkpoint-20/pytorch_model.bin\n",
1474
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1475
      "***** Running Evaluation *****\n",
1476
      "  Num examples = 10\n",
1477
      "  Batch size = 20\n",
1478
      "Saving model checkpoint to ./english_to_latex/checkpoint-30\n",
1479
      "Configuration saved in ./english_to_latex/checkpoint-30/config.json\n",
1480
      "Model weights saved in ./english_to_latex/checkpoint-30/pytorch_model.bin\n",
1481
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1482
      "***** Running Evaluation *****\n",
1483
      "  Num examples = 10\n",
1484
      "  Batch size = 20\n",
1485
      "Saving model checkpoint to ./english_to_latex/checkpoint-40\n",
1486
      "Configuration saved in ./english_to_latex/checkpoint-40/config.json\n",
1487
      "Model weights saved in ./english_to_latex/checkpoint-40/pytorch_model.bin\n",
1488
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1489
      "***** Running Evaluation *****\n",
1490
      "  Num examples = 10\n",
1491
      "  Batch size = 20\n",
1492
      "Saving model checkpoint to ./english_to_latex/checkpoint-50\n",
1493
      "Configuration saved in ./english_to_latex/checkpoint-50/config.json\n",
1494
      "Model weights saved in ./english_to_latex/checkpoint-50/pytorch_model.bin\n",
1495
      "\n",
1496
      "\n",
1497
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
1498
      "\n",
1499
      "\n",
1500
      "Loading best model from ./english_to_latex/checkpoint-50 (score: 0.8818739056587219).\n"
1501
     ]
1502
    },
1503
    {
1504
     "data": {
1505
      "text/plain": [
1506
       "TrainOutput(global_step=50, training_loss=1.459311113357544, metrics={'train_runtime': 180.7542, 'train_samples_per_second': 1.106, 'train_steps_per_second': 0.277, 'total_flos': 3529483776000.0, 'train_loss': 1.459311113357544, 'epoch': 5.0})"
1507
      ]
1508
     },
1509
     "execution_count": 224,
1510
     "metadata": {},
1511
     "output_type": "execute_result"
1512
    }
1513
   ],
1514
   "source": [
1515
    "trainer.train()"
1516
   ]
1517
  },
1518
  {
1519
   "cell_type": "code",
1520
   "execution_count": 225,
1521
   "id": "d3c90137",
1522
   "metadata": {},
1523
   "outputs": [
1524
    {
1525
     "name": "stderr",
1526
     "output_type": "stream",
1527
     "text": [
1528
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1529
      "***** Running Evaluation *****\n",
1530
      "  Num examples = 10\n",
1531
      "  Batch size = 20\n"
1532
     ]
1533
    },
1534
    {
1535
     "data": {
1536
      "text/html": [
1537
       "\n",
1538
       "    <div>\n",
1539
       "      \n",
1540
       "      <progress value='1' max='1' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
1541
       "      [1/1 : < :]\n",
1542
       "    </div>\n",
1543
       "    "
1544
      ],
1545
      "text/plain": [
1546
       "<IPython.core.display.HTML object>"
1547
      ]
1548
     },
1549
     "metadata": {},
1550
     "output_type": "display_data"
1551
    },
1552
    {
1553
     "data": {
1554
      "text/plain": [
1555
       "{'eval_loss': 0.8818739056587219,\n",
1556
       " 'eval_runtime': 2.9323,\n",
1557
       " 'eval_samples_per_second': 3.41,\n",
1558
       " 'eval_steps_per_second': 0.341,\n",
1559
       " 'epoch': 5.0}"
1560
      ]
1561
     },
1562
     "execution_count": 225,
1563
     "metadata": {},
1564
     "output_type": "execute_result"
1565
    }
1566
   ],
1567
   "source": [
1568
    "trainer.evaluate()  # best loss of 0.8818739"
1569
   ]
1570
  },
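  {
   "cell_type": "markdown",
   "id": "d4e6a8b0",
   "metadata": {},
   "source": [
    "A minimal inference sketch (added, not from the original notebook): prompt the fine-tuned `latex_gpt2` with the same `LCT` format it was trained on. The `latex_generator` name and the English sentence below are illustrative assumptions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d4e6a8b1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# hypothetical usage: build a prompt in the training format and let the model complete the LaTeX\n",
    "latex_generator = pipeline('text-generation', model=latex_gpt2, tokenizer=tokenizer)\n",
    "\n",
    "text_sample = 'g of x equals integral from 0 to 1 of x squared'\n",
    "prompt = f'{CONVERSION_PROMPT}English: {text_sample}\\n{CONVERSION_TOKEN}'\n",
    "\n",
    "print(latex_generator(\n",
    "    prompt, num_return_sequences=1, max_length=len(tokenizer.encode(prompt)) + 20\n",
    ")[0]['generated_text'])"
   ]
  },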
1571
  {
1572
   "cell_type": "code",
1573
   "execution_count": null,
1574
   "id": "6010cf42",
1575
   "metadata": {},
1576
   "outputs": [],
1577
   "source": []
1578
  },
1579
  {
1580
   "cell_type": "code",
1581
   "execution_count": 143,
1582
   "id": "7e795393",
1583
   "metadata": {},
1584
   "outputs": [],
1585
   "source": [
1586
    "# Let's try fine-tuning it again but first let's have the model read a math book"
1587
   ]
1588
  },
1589
  {
1590
   "cell_type": "code",
1591
   "execution_count": 165,
1592
   "id": "173901dd",
1593
   "metadata": {
1594
    "scrolled": true
1595
   },
1596
   "outputs": [
1597
    {
1598
     "name": "stderr",
1599
     "output_type": "stream",
1600
     "text": [
1601
      "/Users/sinanozdemir/opt/anaconda3/lib/python3.9/site-packages/transformers/data/datasets/language_modeling.py:54: FutureWarning: This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets library. You can have a look at this example script for pointers: https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py\n",
1602
      "  warnings.warn(\n",
1603
      "Creating features from dataset file at ../data\n",
1604
      "Saving features into cached file ../data/cached_lm_GPT2Tokenizer_128_latex_cheat_sheet.tex [took 0.000 s]\n",
1605
      "loading configuration file https://huggingface.co/gpt2/resolve/main/config.json from cache at /Users/sinanozdemir/.cache/huggingface/transformers/fc674cd6907b4c9e933cb42d67662436b89fa9540a1f40d7c919d0109289ad01.7d2e0efa5ca20cef4fb199382111e9d3ad96fd77b849e1d4bed13a66e1336f51\n",
1606
      "Model config GPT2Config {\n",
1607
      "  \"activation_function\": \"gelu_new\",\n",
1608
      "  \"architectures\": [\n",
1609
      "    \"GPT2LMHeadModel\"\n",
1610
      "  ],\n",
1611
      "  \"attn_pdrop\": 0.1,\n",
1612
      "  \"bos_token_id\": 50256,\n",
1613
      "  \"embd_pdrop\": 0.1,\n",
1614
      "  \"eos_token_id\": 50256,\n",
1615
      "  \"initializer_range\": 0.02,\n",
1616
      "  \"layer_norm_epsilon\": 1e-05,\n",
1617
      "  \"model_type\": \"gpt2\",\n",
1618
      "  \"n_ctx\": 1024,\n",
1619
      "  \"n_embd\": 768,\n",
1620
      "  \"n_head\": 12,\n",
1621
      "  \"n_inner\": null,\n",
1622
      "  \"n_layer\": 12,\n",
1623
      "  \"n_positions\": 1024,\n",
1624
      "  \"reorder_and_upcast_attn\": false,\n",
1625
      "  \"resid_pdrop\": 0.1,\n",
1626
      "  \"scale_attn_by_inverse_layer_idx\": false,\n",
1627
      "  \"scale_attn_weights\": true,\n",
1628
      "  \"summary_activation\": null,\n",
1629
      "  \"summary_first_dropout\": 0.1,\n",
1630
      "  \"summary_proj_to_labels\": true,\n",
1631
      "  \"summary_type\": \"cls_index\",\n",
1632
      "  \"summary_use_proj\": true,\n",
1633
      "  \"task_specific_params\": {\n",
1634
      "    \"text-generation\": {\n",
1635
      "      \"do_sample\": true,\n",
1636
      "      \"max_length\": 50\n",
1637
      "    }\n",
1638
      "  },\n",
1639
      "  \"transformers_version\": \"4.19.4\",\n",
1640
      "  \"use_cache\": true,\n",
1641
      "  \"vocab_size\": 50257\n",
1642
      "}\n",
1643
      "\n",
1644
      "loading weights file https://huggingface.co/gpt2/resolve/main/pytorch_model.bin from cache at /Users/sinanozdemir/.cache/huggingface/transformers/752929ace039baa8ef70fe21cdf9ab9445773d20e733cf693d667982e210837e.323c769945a351daa25546176f8208b3004b6f563438a7603e7932bae9025925\n",
1645
      "All model checkpoint weights were used when initializing GPT2LMHeadModel.\n",
1646
      "\n",
1647
      "All the weights of GPT2LMHeadModel were initialized from the model checkpoint at gpt2.\n",
1648
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.\n",
1649
      "PyTorch: setting up devices\n",
1650
      "The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).\n"
1651
     ]
1652
    }
1653
   ],
1654
   "source": [
1655
    "# Linear Algebra book by Jim Hefferon written in LaTeX for free - https://joshua.smcvt.edu/linearalgebra\n",
1656
    "\n",
1657
    "book_data = TextDataset(\n",
1658
    "    tokenizer=tokenizer,\n",
1659
    "    file_path='../data/latex_cheat_sheet.tex',  # train on a LaTeX cheat sheet they made\n",
1660
    "    block_size=128\n",
1661
    ")\n",
1662
    "\n",
1663
    "data_collator = DataCollatorForLanguageModeling(\n",
1664
    "    tokenizer=tokenizer, mlm=False,  # MLM is Masked Language Modelling\n",
1665
    ")\n",
1666
    "\n",
1667
    "latex_gpt2 = GPT2LMHeadModel.from_pretrained('gpt2')\n",
1668
    "\n",
1669
    "training_args = TrainingArguments(\n",
1670
    "    output_dir=\"./math_book\",\n",
1671
    "    overwrite_output_dir=True, # overwrite the content of the output directory\n",
1672
    "    num_train_epochs=2, # number of training epochs\n",
1673
    "    per_device_train_batch_size=32, # batch size for training\n",
1674
    "    per_device_eval_batch_size=32,  # batch size for evaluation\n",
1675
    "    load_best_model_at_end=True,\n",
1676
    "    logging_steps=1,\n",
1677
    "    eval_steps=1,\n",
1678
    "    evaluation_strategy='epoch',\n",
1679
    "    save_strategy='epoch'\n",
1680
    ")\n",
1681
    "\n",
1682
    "trainer = Trainer(\n",
1683
    "    model=latex_gpt2,\n",
1684
    "    args=training_args,\n",
1685
    "    data_collator=data_collator,\n",
1686
    "    train_dataset=book_data.examples[:int(len(book_data.examples)*.8)],\n",
1687
    "    eval_dataset=book_data.examples[int(len(book_data.examples)*.8):]\n",
1688
    ")"
1689
   ]
1690
  },
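  {
   "cell_type": "markdown",
   "id": "ad0c1e2f",
   "metadata": {},
   "source": [
    "`TextDataset` works here, but it is deprecated (see the `FutureWarning` above). As a hedged sketch only: the same block-chunked dataset could be built with the 🤗 `datasets` library, which is what the warning recommends. The `load_dataset('text', ...)` call and the `group_texts` helper below are one possible replacement and were not part of the original run."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad0c1e30",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch (not executed in the original notebook): build the same kind of\n",
    "# fixed-length LM chunks with the datasets library instead of TextDataset.\n",
    "from datasets import load_dataset\n",
    "\n",
    "block_size = 128  # same chunk length used for book_data above\n",
    "\n",
    "raw_text = load_dataset('text', data_files={'train': '../data/latex_cheat_sheet.tex'})\n",
    "\n",
    "def tokenize_fn(batch):\n",
    "    return tokenizer(batch['text'])\n",
    "\n",
    "def group_texts(examples):\n",
    "    # concatenate all token ids, then split them into block_size chunks;\n",
    "    # the data collator above will create the labels for causal LM training\n",
    "    ids = sum(examples['input_ids'], [])\n",
    "    total = (len(ids) // block_size) * block_size\n",
    "    return {'input_ids': [ids[i:i + block_size] for i in range(0, total, block_size)]}\n",
    "\n",
    "tokenized = raw_text.map(tokenize_fn, batched=True, remove_columns=['text'])\n",
    "lm_chunks = tokenized.map(group_texts, batched=True,\n",
    "                          remove_columns=tokenized['train'].column_names)"
   ]
  },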
1691
  {
1692
   "cell_type": "code",
1693
   "execution_count": 166,
1694
   "id": "e466aa20",
1695
   "metadata": {
1696
    "scrolled": true
1697
   },
1698
   "outputs": [
1699
    {
1700
     "name": "stderr",
1701
     "output_type": "stream",
1702
     "text": [
1703
      "***** Running Evaluation *****\n",
1704
      "  Num examples = 21\n",
1705
      "  Batch size = 32\n"
1706
     ]
1707
    },
1708
    {
1709
     "data": {
1710
      "text/html": [
1711
       "\n",
1712
       "    <div>\n",
1713
       "      \n",
1714
       "      <progress value='2' max='1' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
1715
       "      [1/1 04:05]\n",
1716
       "    </div>\n",
1717
       "    "
1718
      ],
1719
      "text/plain": [
1720
       "<IPython.core.display.HTML object>"
1721
      ]
1722
     },
1723
     "metadata": {},
1724
     "output_type": "display_data"
1725
    },
1726
    {
1727
     "data": {
1728
      "text/plain": [
1729
       "{'eval_loss': 3.315159320831299,\n",
1730
       " 'eval_runtime': 18.102,\n",
1731
       " 'eval_samples_per_second': 1.16,\n",
1732
       " 'eval_steps_per_second': 0.055}"
1733
      ]
1734
     },
1735
     "execution_count": 166,
1736
     "metadata": {},
1737
     "output_type": "execute_result"
1738
    }
1739
   ],
1740
   "source": [
1741
    "trainer.evaluate()  # initial loss for the math book"
1742
   ]
1743
  },
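  {
   "cell_type": "markdown",
   "id": "b10c2d3e",
   "metadata": {},
   "source": [
    "A raw cross-entropy loss is easier to interpret as a perplexity (perplexity = exp(loss)). The small cell below is an added aside, not part of the original run; it just converts the evaluation loss reported above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b10c2d3f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "\n",
    "# eval_loss reported above for the not-yet-fine-tuned model on the cheat-sheet hold-out\n",
    "initial_eval_loss = 3.315159320831299\n",
    "print(f'perplexity = {math.exp(initial_eval_loss):.1f}')  # roughly 27.5"
   ]
  },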
1744
  {
1745
   "cell_type": "code",
1746
   "execution_count": 167,
1747
   "id": "388afa38",
1748
   "metadata": {
1749
    "scrolled": true
1750
   },
1751
   "outputs": [
1752
    {
1753
     "name": "stderr",
1754
     "output_type": "stream",
1755
     "text": [
1756
      "/Users/sinanozdemir/opt/anaconda3/lib/python3.9/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
1757
      "  warnings.warn(\n",
1758
      "***** Running training *****\n",
1759
      "  Num examples = 80\n",
1760
      "  Num Epochs = 2\n",
1761
      "  Instantaneous batch size per device = 32\n",
1762
      "  Total train batch size (w. parallel, distributed & accumulation) = 32\n",
1763
      "  Gradient Accumulation steps = 1\n",
1764
      "  Total optimization steps = 6\n"
1765
     ]
1766
    },
1767
    {
1768
     "data": {
1769
      "text/html": [
1770
       "\n",
1771
       "    <div>\n",
1772
       "      \n",
1773
       "      <progress value='6' max='6' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
1774
       "      [6/6 07:29, Epoch 2/2]\n",
1775
       "    </div>\n",
1776
       "    <table border=\"1\" class=\"dataframe\">\n",
1777
       "  <thead>\n",
1778
       " <tr style=\"text-align: left;\">\n",
1779
       "      <th>Epoch</th>\n",
1780
       "      <th>Training Loss</th>\n",
1781
       "      <th>Validation Loss</th>\n",
1782
       "    </tr>\n",
1783
       "  </thead>\n",
1784
       "  <tbody>\n",
1785
       "    <tr>\n",
1786
       "      <td>1</td>\n",
1787
       "      <td>2.948300</td>\n",
1788
       "      <td>2.800811</td>\n",
1789
       "    </tr>\n",
1790
       "    <tr>\n",
1791
       "      <td>2</td>\n",
1792
       "      <td>2.525500</td>\n",
1793
       "      <td>2.687646</td>\n",
1794
       "    </tr>\n",
1795
       "  </tbody>\n",
1796
       "</table><p>"
1797
      ],
1798
      "text/plain": [
1799
       "<IPython.core.display.HTML object>"
1800
      ]
1801
     },
1802
     "metadata": {},
1803
     "output_type": "display_data"
1804
    },
1805
    {
1806
     "name": "stderr",
1807
     "output_type": "stream",
1808
     "text": [
1809
      "***** Running Evaluation *****\n",
1810
      "  Num examples = 21\n",
1811
      "  Batch size = 32\n",
1812
      "Saving model checkpoint to ./math_book/checkpoint-3\n",
1813
      "Configuration saved in ./math_book/checkpoint-3/config.json\n",
1814
      "Model weights saved in ./math_book/checkpoint-3/pytorch_model.bin\n",
1815
      "***** Running Evaluation *****\n",
1816
      "  Num examples = 21\n",
1817
      "  Batch size = 32\n",
1818
      "Saving model checkpoint to ./math_book/checkpoint-6\n",
1819
      "Configuration saved in ./math_book/checkpoint-6/config.json\n",
1820
      "Model weights saved in ./math_book/checkpoint-6/pytorch_model.bin\n",
1821
      "\n",
1822
      "\n",
1823
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
1824
      "\n",
1825
      "\n",
1826
      "Loading best model from ./math_book/checkpoint-6 (score: 2.6876461505889893).\n"
1827
     ]
1828
    },
1829
    {
1830
     "data": {
1831
      "text/plain": [
1832
       "TrainOutput(global_step=6, training_loss=2.669228434562683, metrics={'train_runtime': 537.9366, 'train_samples_per_second': 0.297, 'train_steps_per_second': 0.011, 'total_flos': 10451681280000.0, 'train_loss': 2.669228434562683, 'epoch': 2.0})"
1833
      ]
1834
     },
1835
     "execution_count": 167,
1836
     "metadata": {},
1837
     "output_type": "execute_result"
1838
    }
1839
   ],
1840
   "source": [
1841
    "trainer.train()"
1842
   ]
1843
  },
1844
  {
1845
   "cell_type": "code",
1846
   "execution_count": 168,
1847
   "id": "f742df82",
1848
   "metadata": {},
1849
   "outputs": [
1850
    {
1851
     "name": "stderr",
1852
     "output_type": "stream",
1853
     "text": [
1854
      "Saving model checkpoint to ./math_book\n",
1855
      "Configuration saved in ./math_book/config.json\n",
1856
      "Model weights saved in ./math_book/pytorch_model.bin\n"
1857
     ]
1858
    }
1859
   ],
1860
   "source": [
1861
    "trainer.save_model()  # 2 epochs led to a pretty good drop in loss"
1862
   ]
1863
  },
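  {
   "cell_type": "markdown",
   "id": "c20d3e4f",
   "metadata": {},
   "source": [
    "Before moving on, a quick smoke test of the checkpoint we just saved can be useful. The cell below is an added sketch (not part of the original run, and the prompt is arbitrary): it reloads `./math_book` into a generation pipeline and prints one completion to eyeball whether the model picked up the book's register."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c20d3e50",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added smoke test: reload the math-book checkpoint and generate one sample.\n",
    "math_book_generator = pipeline(\n",
    "    'text-generation',\n",
    "    model=GPT2LMHeadModel.from_pretrained('./math_book'),\n",
    "    tokenizer=tokenizer\n",
    ")\n",
    "\n",
    "print(math_book_generator('A vector space is', max_length=40)[0]['generated_text'])"
   ]
  },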
1864
  {
1865
   "cell_type": "code",
1866
   "execution_count": null,
1867
   "id": "36706f17",
1868
   "metadata": {},
1869
   "outputs": [],
1870
   "source": []
1871
  },
1872
  {
1873
   "cell_type": "code",
1874
   "execution_count": 226,
1875
   "id": "0c820b1b",
1876
   "metadata": {
1877
    "scrolled": true
1878
   },
1879
   "outputs": [
1880
    {
1881
     "name": "stderr",
1882
     "output_type": "stream",
1883
     "text": [
1884
      "loading configuration file ./math_book/config.json\n",
1885
      "Model config GPT2Config {\n",
1886
      "  \"_name_or_path\": \"gpt2\",\n",
1887
      "  \"activation_function\": \"gelu_new\",\n",
1888
      "  \"architectures\": [\n",
1889
      "    \"GPT2LMHeadModel\"\n",
1890
      "  ],\n",
1891
      "  \"attn_pdrop\": 0.1,\n",
1892
      "  \"bos_token_id\": 50256,\n",
1893
      "  \"embd_pdrop\": 0.1,\n",
1894
      "  \"eos_token_id\": 50256,\n",
1895
      "  \"initializer_range\": 0.02,\n",
1896
      "  \"layer_norm_epsilon\": 1e-05,\n",
1897
      "  \"model_type\": \"gpt2\",\n",
1898
      "  \"n_ctx\": 1024,\n",
1899
      "  \"n_embd\": 768,\n",
1900
      "  \"n_head\": 12,\n",
1901
      "  \"n_inner\": null,\n",
1902
      "  \"n_layer\": 12,\n",
1903
      "  \"n_positions\": 1024,\n",
1904
      "  \"reorder_and_upcast_attn\": false,\n",
1905
      "  \"resid_pdrop\": 0.1,\n",
1906
      "  \"scale_attn_by_inverse_layer_idx\": false,\n",
1907
      "  \"scale_attn_weights\": true,\n",
1908
      "  \"summary_activation\": null,\n",
1909
      "  \"summary_first_dropout\": 0.1,\n",
1910
      "  \"summary_proj_to_labels\": true,\n",
1911
      "  \"summary_type\": \"cls_index\",\n",
1912
      "  \"summary_use_proj\": true,\n",
1913
      "  \"task_specific_params\": {\n",
1914
      "    \"text-generation\": {\n",
1915
      "      \"do_sample\": true,\n",
1916
      "      \"max_length\": 50\n",
1917
      "    }\n",
1918
      "  },\n",
1919
      "  \"torch_dtype\": \"float32\",\n",
1920
      "  \"transformers_version\": \"4.19.4\",\n",
1921
      "  \"use_cache\": true,\n",
1922
      "  \"vocab_size\": 50257\n",
1923
      "}\n",
1924
      "\n",
1925
      "loading weights file ./math_book/pytorch_model.bin\n",
1926
      "All model checkpoint weights were used when initializing GPT2LMHeadModel.\n",
1927
      "\n",
1928
      "All the weights of GPT2LMHeadModel were initialized from the model checkpoint at ./math_book.\n",
1929
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.\n",
1930
      "PyTorch: setting up devices\n",
1931
      "The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).\n",
1932
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
1933
      "***** Running Evaluation *****\n",
1934
      "  Num examples = 10\n",
1935
      "  Batch size = 20\n"
1936
     ]
1937
    },
1938
    {
1939
     "data": {
1940
      "text/html": [
1941
       "\n",
1942
       "    <div>\n",
1943
       "      \n",
1944
       "      <progress value='2' max='1' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
1945
       "      [1/1 00:34]\n",
1946
       "    </div>\n",
1947
       "    "
1948
      ],
1949
      "text/plain": [
1950
       "<IPython.core.display.HTML object>"
1951
      ]
1952
     },
1953
     "metadata": {},
1954
     "output_type": "display_data"
1955
    },
1956
    {
1957
     "data": {
1958
      "text/plain": [
1959
       "{'eval_loss': 4.376880168914795,\n",
1960
       " 'eval_runtime': 3.0114,\n",
1961
       " 'eval_samples_per_second': 3.321,\n",
1962
       " 'eval_steps_per_second': 0.332}"
1963
      ]
1964
     },
1965
     "execution_count": 226,
1966
     "metadata": {},
1967
     "output_type": "execute_result"
1968
    }
1969
   ],
1970
   "source": [
1971
    "math_latex_gpt2 = GPT2LMHeadModel.from_pretrained('./math_book')  # load up our gpt pre-trained\n",
1972
    "\n",
1973
    "training_args = TrainingArguments(\n",
1974
    "    output_dir=\"./math_english_to_latex\",\n",
1975
    "    overwrite_output_dir=True, #overwrite the content of the output directory\n",
1976
    "    num_train_epochs=5, # number of training epochs\n",
1977
    "    per_device_train_batch_size=4, # batch size for training\n",
1978
    "    per_device_eval_batch_size=20,  # batch size for evaluation\n",
1979
    "    load_best_model_at_end=True,\n",
1980
    "    logging_steps=5,\n",
1981
    "    log_level='info',\n",
1982
    "    evaluation_strategy='epoch',\n",
1983
    "    save_strategy='epoch'\n",
1984
    ")\n",
1985
    "\n",
1986
    "trainer = Trainer(\n",
1987
    "    model=math_latex_gpt2,\n",
1988
    "    args=training_args,\n",
1989
    "    train_dataset=latex_data[\"train\"],\n",
1990
    "    eval_dataset=latex_data[\"test\"],\n",
1991
    "    data_collator=data_collator,\n",
1992
    ")\n",
1993
    "\n",
1994
    "trainer.evaluate()  # loss is starting slightly lower than before"
1995
   ]
1996
  },
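  {
   "cell_type": "markdown",
   "id": "d30e4f5a",
   "metadata": {},
   "source": [
    "One optional refinement, shown below as a sketch only (it is not used in the original run): since `load_best_model_at_end=True` is already set, an `EarlyStoppingCallback` could stop training once the validation loss stops improving instead of always running the full number of epochs. `metric_for_best_model` has to be set so the callback knows what to watch."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d30e4f5b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: early stopping on eval_loss (not used in the original run).\n",
    "from transformers import EarlyStoppingCallback\n",
    "\n",
    "training_args_es = TrainingArguments(\n",
    "    output_dir='./math_english_to_latex',\n",
    "    overwrite_output_dir=True,\n",
    "    num_train_epochs=10,  # upper bound; early stopping may end training sooner\n",
    "    per_device_train_batch_size=4,\n",
    "    per_device_eval_batch_size=20,\n",
    "    evaluation_strategy='epoch',\n",
    "    save_strategy='epoch',\n",
    "    load_best_model_at_end=True,\n",
    "    metric_for_best_model='eval_loss',\n",
    "    greater_is_better=False\n",
    ")\n",
    "\n",
    "trainer_es = Trainer(\n",
    "    model=math_latex_gpt2,\n",
    "    args=training_args_es,\n",
    "    train_dataset=latex_data['train'],\n",
    "    eval_dataset=latex_data['test'],\n",
    "    data_collator=data_collator,\n",
    "    callbacks=[EarlyStoppingCallback(early_stopping_patience=2)]\n",
    ")"
   ]
  },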
1997
  {
1998
   "cell_type": "code",
1999
   "execution_count": 227,
2000
   "id": "67a53cd6",
2001
   "metadata": {
2002
    "scrolled": true
2003
   },
2004
   "outputs": [
2005
    {
2006
     "name": "stderr",
2007
     "output_type": "stream",
2008
     "text": [
2009
      "The following columns in the training set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2010
      "/Users/sinanozdemir/opt/anaconda3/lib/python3.9/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
2011
      "  warnings.warn(\n",
2012
      "***** Running training *****\n",
2013
      "  Num examples = 40\n",
2014
      "  Num Epochs = 5\n",
2015
      "  Instantaneous batch size per device = 4\n",
2016
      "  Total train batch size (w. parallel, distributed & accumulation) = 4\n",
2017
      "  Gradient Accumulation steps = 1\n",
2018
      "  Total optimization steps = 50\n"
2019
     ]
2020
    },
2021
    {
2022
     "data": {
2023
      "text/html": [
2024
       "\n",
2025
       "    <div>\n",
2026
       "      \n",
2027
       "      <progress value='50' max='50' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
2028
       "      [50/50 02:54, Epoch 5/5]\n",
2029
       "    </div>\n",
2030
       "    <table border=\"1\" class=\"dataframe\">\n",
2031
       "  <thead>\n",
2032
       " <tr style=\"text-align: left;\">\n",
2033
       "      <th>Epoch</th>\n",
2034
       "      <th>Training Loss</th>\n",
2035
       "      <th>Validation Loss</th>\n",
2036
       "    </tr>\n",
2037
       "  </thead>\n",
2038
       "  <tbody>\n",
2039
       "    <tr>\n",
2040
       "      <td>1</td>\n",
2041
       "      <td>2.311200</td>\n",
2042
       "      <td>1.584350</td>\n",
2043
       "    </tr>\n",
2044
       "    <tr>\n",
2045
       "      <td>2</td>\n",
2046
       "      <td>1.156600</td>\n",
2047
       "      <td>0.947031</td>\n",
2048
       "    </tr>\n",
2049
       "    <tr>\n",
2050
       "      <td>3</td>\n",
2051
       "      <td>0.774600</td>\n",
2052
       "      <td>0.905974</td>\n",
2053
       "    </tr>\n",
2054
       "    <tr>\n",
2055
       "      <td>4</td>\n",
2056
       "      <td>0.615500</td>\n",
2057
       "      <td>0.887093</td>\n",
2058
       "    </tr>\n",
2059
       "    <tr>\n",
2060
       "      <td>5</td>\n",
2061
       "      <td>0.669800</td>\n",
2062
       "      <td>0.867295</td>\n",
2063
       "    </tr>\n",
2064
       "  </tbody>\n",
2065
       "</table><p>"
2066
      ],
2067
      "text/plain": [
2068
       "<IPython.core.display.HTML object>"
2069
      ]
2070
     },
2071
     "metadata": {},
2072
     "output_type": "display_data"
2073
    },
2074
    {
2075
     "name": "stderr",
2076
     "output_type": "stream",
2077
     "text": [
2078
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2079
      "***** Running Evaluation *****\n",
2080
      "  Num examples = 10\n",
2081
      "  Batch size = 20\n",
2082
      "Saving model checkpoint to ./math_english_to_latex/checkpoint-10\n",
2083
      "Configuration saved in ./math_english_to_latex/checkpoint-10/config.json\n",
2084
      "Model weights saved in ./math_english_to_latex/checkpoint-10/pytorch_model.bin\n",
2085
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2086
      "***** Running Evaluation *****\n",
2087
      "  Num examples = 10\n",
2088
      "  Batch size = 20\n",
2089
      "Saving model checkpoint to ./math_english_to_latex/checkpoint-20\n",
2090
      "Configuration saved in ./math_english_to_latex/checkpoint-20/config.json\n",
2091
      "Model weights saved in ./math_english_to_latex/checkpoint-20/pytorch_model.bin\n",
2092
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2093
      "***** Running Evaluation *****\n",
2094
      "  Num examples = 10\n",
2095
      "  Batch size = 20\n",
2096
      "Saving model checkpoint to ./math_english_to_latex/checkpoint-30\n",
2097
      "Configuration saved in ./math_english_to_latex/checkpoint-30/config.json\n",
2098
      "Model weights saved in ./math_english_to_latex/checkpoint-30/pytorch_model.bin\n",
2099
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2100
      "***** Running Evaluation *****\n",
2101
      "  Num examples = 10\n",
2102
      "  Batch size = 20\n",
2103
      "Saving model checkpoint to ./math_english_to_latex/checkpoint-40\n",
2104
      "Configuration saved in ./math_english_to_latex/checkpoint-40/config.json\n",
2105
      "Model weights saved in ./math_english_to_latex/checkpoint-40/pytorch_model.bin\n",
2106
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2107
      "***** Running Evaluation *****\n",
2108
      "  Num examples = 10\n",
2109
      "  Batch size = 20\n",
2110
      "Saving model checkpoint to ./math_english_to_latex/checkpoint-50\n",
2111
      "Configuration saved in ./math_english_to_latex/checkpoint-50/config.json\n",
2112
      "Model weights saved in ./math_english_to_latex/checkpoint-50/pytorch_model.bin\n",
2113
      "\n",
2114
      "\n",
2115
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
2116
      "\n",
2117
      "\n",
2118
      "Loading best model from ./math_english_to_latex/checkpoint-50 (score: 0.8672950863838196).\n"
2119
     ]
2120
    },
2121
    {
2122
     "data": {
2123
      "text/plain": [
2124
       "TrainOutput(global_step=50, training_loss=1.3503571081161498, metrics={'train_runtime': 178.071, 'train_samples_per_second': 1.123, 'train_steps_per_second': 0.281, 'total_flos': 3529483776000.0, 'train_loss': 1.3503571081161498, 'epoch': 5.0})"
2125
      ]
2126
     },
2127
     "execution_count": 227,
2128
     "metadata": {},
2129
     "output_type": "execute_result"
2130
    }
2131
   ],
2132
   "source": [
2133
    "trainer.train()"
2134
   ]
2135
  },
2136
  {
2137
   "cell_type": "code",
2138
   "execution_count": 228,
2139
   "id": "a264d92c",
2140
   "metadata": {},
2141
   "outputs": [
2142
    {
2143
     "name": "stderr",
2144
     "output_type": "stream",
2145
     "text": [
2146
      "The following columns in the evaluation set don't have a corresponding argument in `GPT2LMHeadModel.forward` and have been ignored: text. If text are not expected by `GPT2LMHeadModel.forward`,  you can safely ignore this message.\n",
2147
      "***** Running Evaluation *****\n",
2148
      "  Num examples = 10\n",
2149
      "  Batch size = 20\n"
2150
     ]
2151
    },
2152
    {
2153
     "data": {
2154
      "text/html": [
2155
       "\n",
2156
       "    <div>\n",
2157
       "      \n",
2158
       "      <progress value='1' max='1' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
2159
       "      [1/1 : < :]\n",
2160
       "    </div>\n",
2161
       "    "
2162
      ],
2163
      "text/plain": [
2164
       "<IPython.core.display.HTML object>"
2165
      ]
2166
     },
2167
     "metadata": {},
2168
     "output_type": "display_data"
2169
    },
2170
    {
2171
     "data": {
2172
      "text/plain": [
2173
       "{'eval_loss': 0.8672950863838196,\n",
2174
       " 'eval_runtime': 2.9253,\n",
2175
       " 'eval_samples_per_second': 3.418,\n",
2176
       " 'eval_steps_per_second': 0.342,\n",
2177
       " 'epoch': 5.0}"
2178
      ]
2179
     },
2180
     "execution_count": 228,
2181
     "metadata": {},
2182
     "output_type": "execute_result"
2183
    }
2184
   ],
2185
   "source": [
2186
    "trainer.evaluate()  # pre-training on the book for one epoch led to a minor drop in loss"
2187
   ]
2188
  },
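  {
   "cell_type": "markdown",
   "id": "e40f5a6b",
   "metadata": {},
   "source": [
    "As an added aside (the two numbers below are copied from the evaluation outputs in this notebook), converting the losses to perplexities makes the effect of the extra math-book pre-training easier to read."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e40f5a6c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "\n",
    "# best eval losses on the English -> LaTeX task, copied from the outputs above\n",
    "for name, loss in [('without math-book pre-training', 0.8818739056587219),\n",
    "                   ('with math-book pre-training', 0.8672950863838196)]:\n",
    "    print(f'{name}: loss = {loss:.4f}, perplexity = {math.exp(loss):.2f}')"
   ]
  },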
2189
  {
2190
   "cell_type": "code",
2191
   "execution_count": 229,
2192
   "id": "530c78c5",
2193
   "metadata": {},
2194
   "outputs": [
2195
    {
2196
     "name": "stderr",
2197
     "output_type": "stream",
2198
     "text": [
2199
      "Saving model checkpoint to ./math_english_to_latex\n",
2200
      "Configuration saved in ./math_english_to_latex/config.json\n",
2201
      "Model weights saved in ./math_english_to_latex/pytorch_model.bin\n"
2202
     ]
2203
    }
2204
   ],
2205
   "source": [
2206
    "trainer.save_model()  # save this model"
2207
   ]
2208
  },
2209
  {
2210
   "cell_type": "code",
2211
   "execution_count": null,
2212
   "id": "13153c32",
2213
   "metadata": {},
2214
   "outputs": [],
2215
   "source": []
2216
  },
2217
  {
2218
   "cell_type": "code",
2219
   "execution_count": 230,
2220
   "id": "c34b7af3",
2221
   "metadata": {
2222
    "scrolled": true
2223
   },
2224
   "outputs": [
2225
    {
2226
     "name": "stderr",
2227
     "output_type": "stream",
2228
     "text": [
2229
      "loading configuration file ./math_english_to_latex/config.json\n",
2230
      "Model config GPT2Config {\n",
2231
      "  \"_name_or_path\": \"./math_book\",\n",
2232
      "  \"activation_function\": \"gelu_new\",\n",
2233
      "  \"architectures\": [\n",
2234
      "    \"GPT2LMHeadModel\"\n",
2235
      "  ],\n",
2236
      "  \"attn_pdrop\": 0.1,\n",
2237
      "  \"bos_token_id\": 50256,\n",
2238
      "  \"embd_pdrop\": 0.1,\n",
2239
      "  \"eos_token_id\": 50256,\n",
2240
      "  \"initializer_range\": 0.02,\n",
2241
      "  \"layer_norm_epsilon\": 1e-05,\n",
2242
      "  \"model_type\": \"gpt2\",\n",
2243
      "  \"n_ctx\": 1024,\n",
2244
      "  \"n_embd\": 768,\n",
2245
      "  \"n_head\": 12,\n",
2246
      "  \"n_inner\": null,\n",
2247
      "  \"n_layer\": 12,\n",
2248
      "  \"n_positions\": 1024,\n",
2249
      "  \"reorder_and_upcast_attn\": false,\n",
2250
      "  \"resid_pdrop\": 0.1,\n",
2251
      "  \"scale_attn_by_inverse_layer_idx\": false,\n",
2252
      "  \"scale_attn_weights\": true,\n",
2253
      "  \"summary_activation\": null,\n",
2254
      "  \"summary_first_dropout\": 0.1,\n",
2255
      "  \"summary_proj_to_labels\": true,\n",
2256
      "  \"summary_type\": \"cls_index\",\n",
2257
      "  \"summary_use_proj\": true,\n",
2258
      "  \"task_specific_params\": {\n",
2259
      "    \"text-generation\": {\n",
2260
      "      \"do_sample\": true,\n",
2261
      "      \"max_length\": 50\n",
2262
      "    }\n",
2263
      "  },\n",
2264
      "  \"torch_dtype\": \"float32\",\n",
2265
      "  \"transformers_version\": \"4.19.4\",\n",
2266
      "  \"use_cache\": true,\n",
2267
      "  \"vocab_size\": 50257\n",
2268
      "}\n",
2269
      "\n",
2270
      "loading weights file ./math_english_to_latex/pytorch_model.bin\n",
2271
      "All model checkpoint weights were used when initializing GPT2LMHeadModel.\n",
2272
      "\n",
2273
      "All the weights of GPT2LMHeadModel were initialized from the model checkpoint at ./math_english_to_latex.\n",
2274
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.\n"
2275
     ]
2276
    }
2277
   ],
2278
   "source": [
2279
    "loaded_model = GPT2LMHeadModel.from_pretrained('./math_english_to_latex')\n",
2280
    "latex_generator = pipeline('text-generation', model=loaded_model, tokenizer=tokenizer)"
2281
   ]
2282
  },
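  {
   "cell_type": "markdown",
   "id": "f50a6b7c",
   "metadata": {},
   "source": [
    "For convenience, the prompt construction and decoding used in the next few cells can be wrapped in a small helper. The `english_to_latex` function below is a hypothetical addition (not part of the original notebook); `CONVERSION_PROMPT` and `CONVERSION_TOKEN` are the constants defined earlier in this notebook."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f50a6b7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical convenience wrapper around the prompt format used below.\n",
    "def english_to_latex(english_text, extra_tokens=20):\n",
    "    prompt = f'{CONVERSION_PROMPT}English: {english_text}\\n{CONVERSION_TOKEN}'\n",
    "    generated = latex_generator(\n",
    "        prompt, num_beams=5, early_stopping=True,\n",
    "        max_length=len(tokenizer.encode(prompt)) + extra_tokens\n",
    "    )[0]['generated_text']\n",
    "    # keep only the first line of the completion that follows the prompt\n",
    "    return generated[len(prompt):].strip().split('\\n')[0]\n",
    "\n",
    "# example: english_to_latex('g of x equals integral from 0 to 1 of x squared')"
   ]
  },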
2283
  {
2284
   "cell_type": "code",
2285
   "execution_count": 241,
2286
   "id": "759e9b70",
2287
   "metadata": {},
2288
   "outputs": [
2289
    {
2290
     "name": "stdout",
2291
     "output_type": "stream",
2292
     "text": [
2293
      "LCT\n",
2294
      "English: g of x equals integral from 0 to 1 of x squared\n",
2295
      "LaTeX:\n"
2296
     ]
2297
    }
2298
   ],
2299
   "source": [
2300
    "text_sample = 'g of x equals integral from 0 to 1 of x squared'\n",
2301
    "conversion_text_sample = f'{CONVERSION_PROMPT}English: {text_sample}\\n{CONVERSION_TOKEN}'\n",
2302
    "\n",
2303
    "print(conversion_text_sample)"
2304
   ]
2305
  },
2306
  {
2307
   "cell_type": "code",
2308
   "execution_count": 242,
2309
   "id": "2b2cb777",
2310
   "metadata": {},
2311
   "outputs": [
2312
    {
2313
     "name": "stderr",
2314
     "output_type": "stream",
2315
     "text": [
2316
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2317
     ]
2318
    },
2319
    {
2320
     "name": "stdout",
2321
     "output_type": "stream",
2322
     "text": [
2323
      "LCT\n",
2324
      "English: g of x equals integral from 0 to 1 of x squared\n",
2325
      "LaTeX: g(x) = \\int_{0}^{1} x^2 \\,dx^\n"
2326
     ]
2327
    }
2328
   ],
2329
   "source": [
2330
    "print(latex_generator(\n",
2331
    "    conversion_text_sample, num_beams=5, early_stopping=True, temperature=0.7,\n",
2332
    "    max_length=len(tokenizer.encode(conversion_text_sample)) + 20\n",
2333
    ")[0]['generated_text'])"
2334
   ]
2335
  },
2336
  {
2337
   "cell_type": "code",
2338
   "execution_count": null,
2339
   "id": "bfd233ed",
2340
   "metadata": {},
2341
   "outputs": [],
2342
   "source": []
2343
  },
2344
  {
2345
   "cell_type": "code",
2346
   "execution_count": 257,
2347
   "id": "46d13e88",
2348
   "metadata": {},
2349
   "outputs": [
2350
    {
2351
     "name": "stderr",
2352
     "output_type": "stream",
2353
     "text": [
2354
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2355
     ]
2356
    },
2357
    {
2358
     "name": "stdout",
2359
     "output_type": "stream",
2360
     "text": [
2361
      "LCT\n",
2362
      "English: r of x is sum from 0 to x of x squared\n",
2363
      "LaTeX: r(x) = \\sum_{0}^{x} x^2 \\,dx^\n"
2364
     ]
2365
    }
2366
   ],
2367
   "source": [
2368
    "# Another example\n",
2369
    "text_sample = 'r of x is sum from 0 to x of x squared'\n",
2370
    "conversion_text_sample = f'{CONVERSION_PROMPT}English: {text_sample}\\n{CONVERSION_TOKEN}'\n",
2371
    "\n",
2372
    "print(latex_generator(\n",
2373
    "    conversion_text_sample, num_beams=5, early_stopping=True, temperature=0.7,\n",
2374
    "    max_length=len(tokenizer.encode(conversion_text_sample)) + 20\n",
2375
    ")[0]['generated_text'])"
2376
   ]
2377
  },
2378
  {
2379
   "cell_type": "code",
2380
   "execution_count": null,
2381
   "id": "af4fbaf3",
2382
   "metadata": {},
2383
   "outputs": [],
2384
   "source": []
2385
  },
2386
  {
2387
   "cell_type": "code",
2388
   "execution_count": 258,
2389
   "id": "9ac6c305",
2390
   "metadata": {},
2391
   "outputs": [
2392
    {
2393
     "name": "stderr",
2394
     "output_type": "stream",
2395
     "text": [
2396
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2397
     ]
2398
    },
2399
    {
2400
     "name": "stdout",
2401
     "output_type": "stream",
2402
     "text": [
2403
      "r of x is sum from 0 to x of x squared\n",
2404
      "LaTeX: \\int_{0}^{x}^2 \\,dx^2 \\,dx^3 \\,dx^4\n"
2405
     ]
2406
    }
2407
   ],
2408
   "source": [
2409
    "print(latex_generator(\n",
2410
    "    text_sample, num_beams=5, early_stopping=True, temperature=0.7,\n",
2411
    "    max_length=len(tokenizer.encode(conversion_text_sample)) + 20\n",
2412
    ")[0]['generated_text'])"
2413
   ]
2414
  },
2415
  {
2416
   "cell_type": "code",
2417
   "execution_count": null,
2418
   "id": "702462a6",
2419
   "metadata": {},
2420
   "outputs": [],
2421
   "source": []
2422
  },
2423
  {
2424
   "cell_type": "code",
2425
   "execution_count": null,
2426
   "id": "f5465370",
2427
   "metadata": {},
2428
   "outputs": [],
2429
   "source": []
2430
  },
2431
  {
2432
   "cell_type": "code",
2433
   "execution_count": 247,
2434
   "id": "21567e80",
2435
   "metadata": {},
2436
   "outputs": [
2437
    {
2438
     "name": "stderr",
2439
     "output_type": "stream",
2440
     "text": [
2441
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2442
     ]
2443
    },
2444
    {
2445
     "name": "stdout",
2446
     "output_type": "stream",
2447
     "text": [
2448
      "LCT\n",
2449
      "English: pi to the 8th power\n",
2450
      "LaTeX: \\pi^8 \\,dx^2 \\,dx^3 \\,dx^4 \\\n"
2451
     ]
2452
    }
2453
   ],
2454
   "source": [
2455
    "# Another example\n",
2456
    "text_sample = 'pi to the 8th power'\n",
2457
    "conversion_text_sample = f'{CONVERSION_PROMPT}English: {text_sample}\\n{CONVERSION_TOKEN}'\n",
2458
    "\n",
2459
    "print(latex_generator(\n",
2460
    "    conversion_text_sample, num_beams=5, early_stopping=True, temperature=.7,\n",
2461
    "    max_length=len(tokenizer.encode(conversion_text_sample)) + 20\n",
2462
    ")[0]['generated_text'])"
2463
   ]
2464
  },
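  {
   "cell_type": "markdown",
   "id": "a60b7c8d",
   "metadata": {},
   "source": [
    "A decoding detail worth flagging: with pure beam search (`do_sample` left at its default of `False`), the `temperature` argument passed above has no effect, because temperature only reshapes the next-token distribution when sampling is enabled. The cell below is an added comparison, not part of the original run, that re-generates from the same prompt with sampling turned on."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a60b7c8e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Added comparison: same prompt, but with sampling so temperature actually applies.\n",
    "print(latex_generator(\n",
    "    conversion_text_sample, do_sample=True, temperature=0.7, top_k=50,\n",
    "    max_length=len(tokenizer.encode(conversion_text_sample)) + 20\n",
    ")[0]['generated_text'])"
   ]
  },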
2465
  {
2466
   "cell_type": "code",
2467
   "execution_count": null,
2468
   "id": "cee3fabe",
2469
   "metadata": {},
2470
   "outputs": [],
2471
   "source": []
2472
  },
2473
  {
2474
   "cell_type": "code",
2475
   "execution_count": 248,
2476
   "id": "33e121e6",
2477
   "metadata": {
2478
    "scrolled": true
2479
   },
2480
   "outputs": [
2481
    {
2482
     "name": "stderr",
2483
     "output_type": "stream",
2484
     "text": [
2485
      "loading configuration file https://huggingface.co/gpt2/resolve/main/config.json from cache at /Users/sinanozdemir/.cache/huggingface/transformers/fc674cd6907b4c9e933cb42d67662436b89fa9540a1f40d7c919d0109289ad01.7d2e0efa5ca20cef4fb199382111e9d3ad96fd77b849e1d4bed13a66e1336f51\n",
2486
      "Model config GPT2Config {\n",
2487
      "  \"activation_function\": \"gelu_new\",\n",
2488
      "  \"architectures\": [\n",
2489
      "    \"GPT2LMHeadModel\"\n",
2490
      "  ],\n",
2491
      "  \"attn_pdrop\": 0.1,\n",
2492
      "  \"bos_token_id\": 50256,\n",
2493
      "  \"embd_pdrop\": 0.1,\n",
2494
      "  \"eos_token_id\": 50256,\n",
2495
      "  \"initializer_range\": 0.02,\n",
2496
      "  \"layer_norm_epsilon\": 1e-05,\n",
2497
      "  \"model_type\": \"gpt2\",\n",
2498
      "  \"n_ctx\": 1024,\n",
2499
      "  \"n_embd\": 768,\n",
2500
      "  \"n_head\": 12,\n",
2501
      "  \"n_inner\": null,\n",
2502
      "  \"n_layer\": 12,\n",
2503
      "  \"n_positions\": 1024,\n",
2504
      "  \"reorder_and_upcast_attn\": false,\n",
2505
      "  \"resid_pdrop\": 0.1,\n",
2506
      "  \"scale_attn_by_inverse_layer_idx\": false,\n",
2507
      "  \"scale_attn_weights\": true,\n",
2508
      "  \"summary_activation\": null,\n",
2509
      "  \"summary_first_dropout\": 0.1,\n",
2510
      "  \"summary_proj_to_labels\": true,\n",
2511
      "  \"summary_type\": \"cls_index\",\n",
2512
      "  \"summary_use_proj\": true,\n",
2513
      "  \"task_specific_params\": {\n",
2514
      "    \"text-generation\": {\n",
2515
      "      \"do_sample\": true,\n",
2516
      "      \"max_length\": 50\n",
2517
      "    }\n",
2518
      "  },\n",
2519
      "  \"transformers_version\": \"4.19.4\",\n",
2520
      "  \"use_cache\": true,\n",
2521
      "  \"vocab_size\": 50257\n",
2522
      "}\n",
2523
      "\n",
2524
      "loading weights file https://huggingface.co/gpt2/resolve/main/pytorch_model.bin from cache at /Users/sinanozdemir/.cache/huggingface/transformers/752929ace039baa8ef70fe21cdf9ab9445773d20e733cf693d667982e210837e.323c769945a351daa25546176f8208b3004b6f563438a7603e7932bae9025925\n",
2525
      "All model checkpoint weights were used when initializing GPT2LMHeadModel.\n",
2526
      "\n",
2527
      "All the weights of GPT2LMHeadModel were initialized from the model checkpoint at gpt2.\n",
2528
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use GPT2LMHeadModel for predictions without further training.\n"
2529
     ]
2530
    }
2531
   ],
2532
   "source": [
2533
    "# Sanity check that a non-finetuned model could not have done this\n",
2534
    "non_finetuned_latex_generator = pipeline(\n",
2535
    "    'text-generation', \n",
2536
    "    model=GPT2LMHeadModel.from_pretrained('gpt2'),  # not fine-tuned!\n",
2537
    "    tokenizer=tokenizer\n",
2538
    ")"
2539
   ]
2540
  },
2541
  {
2542
   "cell_type": "code",
2543
   "execution_count": null,
2544
   "id": "0185f085",
2545
   "metadata": {},
2546
   "outputs": [],
2547
   "source": []
2548
  },
2549
  {
2550
   "cell_type": "code",
2551
   "execution_count": 249,
2552
   "id": "ab316df9",
2553
   "metadata": {},
2554
   "outputs": [
2555
    {
2556
     "name": "stderr",
2557
     "output_type": "stream",
2558
     "text": [
2559
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2560
     ]
2561
    },
2562
    {
2563
     "name": "stdout",
2564
     "output_type": "stream",
2565
     "text": [
2566
      "LCT\n",
2567
      "English: f of x is sum from 0 to x of x squared\n",
2568
      "LaTeX: f(x) = \\sum_{0}^{x} x^2 \\,dx ###\n",
2569
      "LCT\n",
2570
      "English: f of x equals integral from 0 to pi of x to the fourth power\n",
2571
      "LaTeX: f(x) = \\int_{0}^{\\pi} x^4 \\,dx ###\n",
2572
      "LCT\n",
2573
      "English: pi to the 8th power\n",
2574
      "LaTeX: pi to the 8th power\n",
2575
      "LaTeX: f(x) = \\int_{0}\n"
2576
     ]
2577
    }
2578
   ],
2579
   "source": [
2580
    "few_shot_prompt = \"\"\"LCT\n",
2581
    "English: f of x is sum from 0 to x of x squared\n",
2582
    "LaTeX: f(x) = \\sum_{0}^{x} x^2 \\,dx \\\n",
2583
    "###\n",
2584
    "LCT\n",
2585
    "English: f of x equals integral from 0 to pi of x to the fourth power\n",
2586
    "LaTeX: f(x) = \\int_{0}^{\\pi} x^4 \\,dx \\\n",
2587
    "###\n",
2588
    "LCT\n",
2589
    "English: pi to the 8th power\n",
2590
    "LaTeX:\"\"\"\n",
2591
    "\n",
2592
    "print(non_finetuned_latex_generator(\n",
2593
    "    few_shot_prompt, num_beams=5, early_stopping=True, temperature=0.7,\n",
2594
    "    max_length=len(tokenizer.encode(few_shot_prompt)) + 20\n",
2595
    ")[0]['generated_text'])"
2596
   ]
2597
  },
2598
  {
2599
   "cell_type": "code",
2600
   "execution_count": null,
2601
   "id": "9243a0d9",
2602
   "metadata": {},
2603
   "outputs": [],
2604
   "source": []
2605
  },
2606
  {
2607
   "cell_type": "code",
2608
   "execution_count": 250,
2609
   "id": "d1f0418c",
2610
   "metadata": {},
2611
   "outputs": [
2612
    {
2613
     "name": "stderr",
2614
     "output_type": "stream",
2615
     "text": [
2616
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2617
     ]
2618
    },
2619
    {
2620
     "name": "stdout",
2621
     "output_type": "stream",
2622
     "text": [
2623
      "English to LaTeX\n",
2624
      "English: f of x is sum from 0 to x of x squared\n",
2625
      "LaTeX: f(x) = \\sum_{0}^{x} x^2 \\,dx ###\n",
2626
      "LCT\n",
2627
      "English: f of x equals integral from 0 to pi of x to the fourth power\n",
2628
      "LaTeX: f(x) = \\int_{0}^{\\pi} x^4 \\,dx ###\n",
2629
      "LCT\n",
2630
      "English: x to the eighth power\n",
2631
      "LaTeX: f(x) = \\int_{0}^{\\pi} x^4 \\,dx\n"
2632
     ]
2633
    }
2634
   ],
2635
   "source": [
2636
    "# try another prompt\n",
2637
    "few_shot_prompt = \"\"\"English to LaTeX\n",
2638
    "English: f of x is sum from 0 to x of x squared\n",
2639
    "LaTeX: f(x) = \\sum_{0}^{x} x^2 \\,dx \\\n",
2640
    "###\n",
2641
    "LCT\n",
2642
    "English: f of x equals integral from 0 to pi of x to the fourth power\n",
2643
    "LaTeX: f(x) = \\int_{0}^{\\pi} x^4 \\,dx \\\n",
2644
    "###\n",
2645
    "LCT\n",
2646
    "English: x to the eighth power\n",
2647
    "LaTeX:\"\"\"\n",
2648
    "\n",
2649
    "print(non_finetuned_latex_generator(\n",
2650
    "    few_shot_prompt, num_beams=5, early_stopping=True, temperature=0.7,\n",
2651
    "    max_length=len(tokenizer.encode(few_shot_prompt)) + 20\n",
2652
    ")[0]['generated_text'])"
2653
   ]
2654
  },
2655
  {
2656
   "cell_type": "code",
2657
   "execution_count": null,
2658
   "id": "29c969b2",
2659
   "metadata": {},
2660
   "outputs": [],
2661
   "source": []
2662
  },
2663
  {
2664
   "cell_type": "code",
2665
   "execution_count": 251,
2666
   "id": "c87ac747",
2667
   "metadata": {},
2668
   "outputs": [
2669
    {
2670
     "name": "stderr",
2671
     "output_type": "stream",
2672
     "text": [
2673
      "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
2674
     ]
2675
    },
2676
    {
2677
     "name": "stdout",
2678
     "output_type": "stream",
2679
     "text": [
2680
      "LCT\n",
2681
      "English: pi to the 8th power\n",
2682
      "LaTeX: pi to the 8th power\n",
2683
      "\n",
2684
      "LaTeX: pi to the 8th power\n",
2685
      "\n",
2686
      "La\n"
2687
     ]
2688
    }
2689
   ],
2690
   "source": [
2691
    "print(non_finetuned_latex_generator(\n",
2692
    "    conversion_text_sample, num_beams=5, early_stopping=True, temperature=0.7,\n",
2693
    "    max_length=len(tokenizer.encode(conversion_text_sample)) + 20\n",
2694
    ")[0]['generated_text'])"
2695
   ]
2696
  },
2697
  {
2698
   "cell_type": "code",
2699
   "execution_count": null,
2700
   "id": "87615fb5",
2701
   "metadata": {},
2702
   "outputs": [],
2703
   "source": []
2704
  },
2705
  {
2706
   "cell_type": "code",
2707
   "execution_count": null,
2708
   "id": "f280e57b",
2709
   "metadata": {},
2710
   "outputs": [],
2711
   "source": []
2712
  }
2713
 ],
2714
 "metadata": {
2715
  "kernelspec": {
2716
   "display_name": "Python 3 (ipykernel)",
2717
   "language": "python",
2718
   "name": "python3"
2719
  },
2720
  "language_info": {
2721
   "codemirror_mode": {
2722
    "name": "ipython",
2723
    "version": 3
2724
   },
2725
   "file_extension": ".py",
2726
   "mimetype": "text/x-python",
2727
   "name": "python",
2728
   "nbconvert_exporter": "python",
2729
   "pygments_lexer": "ipython3",
2730
   "version": "3.11.5"
2731
  }
2732
 },
2733
 "nbformat": 4,
2734
 "nbformat_minor": 5
2735
}
2736
