CSS-LM

finetune_roberta_sentiment_class_noaspect.py 
584 lines · 23.9 KB
import argparse
import logging
import random
import numpy as np
import os
import json
import sys

import torch
from transformers import RobertaTokenizer, RobertaForMaskedLM, RobertaForSequenceClassification
from transformers.modeling_roberta_updateRep import RobertaForMaskedLMDomainTask
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from transformers.optimization import AdamW, get_linear_schedule_with_warmup


logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids=None, attention_mask=None, segment_ids=None, label_id=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.segment_ids = segment_ids
        self.label_id = label_id


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, sentence, aspect, sentiment=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            sentence: string. The untokenized text of the input sentence.
            aspect: string. The aspect label of the example (classification target for task 1).
            sentiment: (Optional) string. The sentiment label of the example (classification
            target for task 2). This should be specified for train and dev examples.
        """
        self.guid = guid
        self.sentence = sentence
        self.aspect = aspect
        self.sentiment = sentiment



class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_json(cls, input_file):
        with open(input_file, "r", encoding='utf-8') as f:
            return json.loads(f.read())


class Processor_1(DataProcessor):
    """Processor for the aspect/sentiment classification data set (JSON files)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "train.json")), "train")
        aspect = set([x.aspect for x in examples])
        sentiment = set([x.sentiment for x in examples])
        return examples, list(aspect), list(sentiment)

    def get_dev_examples(self, data_dir):
        """See base class."""
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "dev.json")), "dev")
        aspect = set([x.aspect for x in examples])
        sentiment = set([x.sentiment for x in examples])
        return examples, list(aspect), list(sentiment)

    def get_test_examples(self, data_dir):
        """See base class."""
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "test.json")), "test")
        aspect = set([x.aspect for x in examples])
        sentiment = set([x.sentiment for x in examples])
        return examples, list(aspect), list(sentiment)

    def get_labels(self):
        """Not used by this script."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)

            sentence = line["sentence"]
            aspect = line["aspect"]
            sentiment = line["sentiment"]

            examples.append(
                InputExample(guid=guid, sentence=sentence, aspect=aspect, sentiment=sentiment))
        return examples

def convert_examples_to_features(examples, aspect_list, sentiment_list, max_seq_length, tokenizer, task_n):

    """Loads a data file into a list of `InputBatch`s."""

    #Task_1: sentence --> aspect
    #Task_2: aspect+sentence --> sentiment
    if task_n == 1:
        label_list = sorted(aspect_list)
    elif task_n == 2:
        label_list = sorted(sentiment_list)
    else:
        print("Wrong task")
    '''
    for w in label_list:
        print(w,tokenizer.encode(w))
    exit()
    '''
    label_map = {label : i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):

        #Add new special tokens
        '''
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        special_tokens_dict = {'cls_token': '<CLS>'}
        num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
        print('We have added', num_added_toks, 'tokens')
        model.resize_token_embeddings(len(tokenizer))
        '''

        '''
        print(tokenizer.all_special_tokens)
        print(tokenizer.encode(tokenizer.all_special_tokens))
        #['[PAD]', '[SEP]', '[CLS]', '[MASK]', '[UNK]']
        #[ 0, 102, 101, 103, 100]
        '''


        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.

        ###
        input_ids = tokenizer.encode(example.sentence, add_special_tokens=True)
        ###
        '''
        #print(tokenizer.convert_tokens_to_ids("<sep>")) #3
        next_input = tokenizer.encode(example.aspect, add_special_tokens=False)
        next_input = [3] + next_input + [2]
        input_ids += next_input
        '''
        ###
        segment_ids = [0] * len(input_ids)

        '''
        if task_n==2:
            #"[SEP]"
            input_ids += input_ids + [102]
            #sentiment: word (Next sentence)
            #segment_ids += [1] * (len(tokens_b) + 1)
        '''

        # The "attention mask" is simply an array of 1s and 0s indicating which tokens are padding and which aren't (including special tokens).

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        # RoBERTa's <pad> token id is 1
        padding_id = [1] * (max_seq_length - len(input_ids))
        input_ids += padding_id
        attention_mask += padding
        segment_ids += padding

        # Skip examples whose encoded length exceeds max_seq_length
        # (the padding above would be negative and the lengths would not match).
        try:
            assert len(input_ids) == max_seq_length
            assert len(attention_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
        except:
            continue

        if task_n == 1:
            label_id = label_map[example.aspect]
        elif task_n == 2:
            label_id = label_map[example.sentiment]
        else:
            print("Wrong task")


        if task_n == 1:
            features.append(
                    InputFeatures(input_ids=input_ids,
                                  attention_mask=attention_mask,
                                  segment_ids=None,
                                  label_id=label_id))
        elif task_n == 2:
            features.append(
                    InputFeatures(input_ids=input_ids,
                                  attention_mask=attention_mask,
                                  segment_ids=segment_ids,
                                  label_id=label_id))
        else:
            print("Wrong in convert_examples")


    return features
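

# --- Hedged illustration (not part of the original script) --------------------
# A minimal sketch of the feature layout produced by convert_examples_to_features()
# for a single example, assuming a RoBERTa tokenizer whose special-token ids are
# <s>=0, </s>=2 and <pad>=1. The helper name `_demo_feature` and the sample
# sentence/labels are illustrative assumptions, not values from the original repo.
def _demo_feature(tokenizer, max_seq_length=16):
    example = InputExample(guid="demo-0", sentence="the food was great",
                           aspect="food", sentiment="positive")
    feats = convert_examples_to_features([example], ["food"], ["negative", "positive"],
                                         max_seq_length, tokenizer, task_n=2)
    # Schematically: input_ids = [0, t1, ..., tk, 2, 1, 1, ...],
    # attention_mask = [1] * (k + 2) + [0] * num_padding, segment_ids all zeros.
    assert len(feats[0].input_ids) == max_seq_length
    assert feats[0].label_id == 1  # "positive" comes second in the sorted sentiment list
    return feats[0]
# -------------------------------------------------------------------------------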


def main():
    parser = argparse.ArgumentParser()
    ## Required parameters
    ###############
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .json files (train.json/dev.json/test.json) for the task.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--pretrain_model",
                        default='bert-base-uncased',
                        type=str,
                        required=True,
                        help="Pre-trained model name or path")
    parser.add_argument("--num_labels_task",
                        default=None, type=int,
                        required=True,
                        help="Number of labels for the classification task")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon",
                        default=1e-8,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=1.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument('--fp16_opt_level',
                        type=str,
                        default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--task",
                        default=None,
                        type=int,
                        required=True,
                        help="Which task to run: 1 = aspect classification, 2 = sentiment classification")
    ###############

    args = parser.parse_args()
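
    # Hedged usage sketch (not from the original repo; the data path, checkpoint
    # name and hyperparameter values below are illustrative assumptions):
    #
    #   python finetune_roberta_sentiment_class_noaspect.py \
    #       --data_dir data/restaurant \
    #       --output_dir output_finetune \
    #       --pretrain_model roberta-base \
    #       --num_labels_task 2 \
    #       --max_seq_length 128 \
    #       --train_batch_size 32 \
    #       --learning_rate 2e-5 \
    #       --num_train_epochs 3 \
    #       --do_train \
    #       --task 2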

    processors = Processor_1

    num_labels = args.num_labels_task

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {}, n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))


    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError("`do_train` must be set: this script only implements training.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)


    tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model)


    train_examples = None
    num_train_steps = None
    aspect_list = None
    sentiment_list = None
    processor = processors()
    num_labels = num_labels
    train_examples, aspect_list, sentiment_list = processor.get_train_examples(args.data_dir)

    if args.task == 1:
        num_labels = len(aspect_list)
    elif args.task == 2:
        num_labels = len(sentiment_list)
    else:
        print("Unknown task: --task must be 1 or 2")
        exit()

    num_train_steps = int(
        len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
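
    # Hedged worked example of the step arithmetic above (illustrative numbers,
    # not from the original repo): with 8,000 training examples,
    # train_batch_size=32, gradient_accumulation_steps=1 and num_train_epochs=3,
    # num_train_steps = int(8000 / 32 / 1 * 3) = 750 optimizer updates in total.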

    # Prepare model
    #model = RobertaForSequenceClassification.from_pretrained(args.pretrain_model, num_labels=args.num_labels_task, output_hidden_states=False, output_attentions=False, return_dict=True)
    model = RobertaForMaskedLMDomainTask.from_pretrained(args.pretrain_model, num_labels=args.num_labels_task, output_hidden_states=False, output_attentions=False, return_dict=True)


    # Prepare optimizer
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()

    model.to(device)

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    #no_decay = ['bias', 'LayerNorm.weight']
    no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # Note: warmup is hard-coded to 10% of t_total here; args.warmup_proportion is parsed but not used.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total*0.1), num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")

        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)


    # multi-gpu training (should be after apex fp16 initialization)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)


    global_step = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)


        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in train_features], dtype=torch.long)
        if args.task == 1:
            print("Executing task 1")
        elif args.task == 2:
            all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        else:
            print("Unknown task when building feature tensors")

        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)

        if args.task == 1:
            train_data = TensorDataset(all_input_ids, all_attention_mask, all_label_ids)
        elif args.task == 2:
            train_data = TensorDataset(all_input_ids, all_attention_mask, all_segment_ids, all_label_ids)
        else:
            print("Unknown task when building the TensorDataset")

        '''
        print("========")
        print(train_data)
        print(type(train_data))
        exit()
        '''

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        output_loss_file = os.path.join(args.output_dir, "loss")
        loss_fout = open(output_loss_file, 'w')
        model.train()


        ##########Pre-Process#########
        ##############################


        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                #batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
                batch = tuple(t.to(device) for i, t in enumerate(batch))

                if args.task == 1:
                    input_ids, attention_mask, label_ids = batch
                elif args.task == 2:
                    input_ids, attention_mask, segment_ids, label_ids = batch
                else:
                    print("Unknown task when unpacking the batch")

                if args.task == 1:
                    #loss, logits, hidden_states, attentions
                    #output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids)
                    #loss = output.loss
                    loss, logit = model(input_ids_org=input_ids, token_type_ids=None, attention_mask=attention_mask, sentence_label=label_ids, func="task_class")
                elif args.task == 2:
                    #loss, logits, hidden_states, attentions
                    #output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=attention_mask, labels=label_ids)
                    #output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=attention_mask, labels=label_ids)
                    #output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids)
                    #loss = output.loss
                    loss, logit = model(input_ids_org=input_ids, token_type_ids=None, attention_mask=attention_mask, sentence_label=label_ids, func="task_class")
                else:
                    print("Unknown task in the forward pass")

                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    ###
                    #optimizer.backward(loss)
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    ###
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    ###
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1
                    ###
            # Only save per-epoch checkpoints from the third epoch (epoch index 2) onwards.
            if epoch < 2:
                continue
            else:
                model_to_save = model.module if hasattr(model, 'module') else model
                #output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
                output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(epoch))
                torch.save(model_to_save.state_dict(), output_model_file)

        # Save a trained model
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)


if __name__ == "__main__":
    main()