import argparse
import json
import logging
import os
import random

import numpy as np
import torch
from transformers import RobertaTokenizer, RobertaForMaskedLM, RobertaForSequenceClassification
from transformers.modeling_roberta_updateRep import RobertaForMaskedLMDomainTask
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
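
# This script fine-tunes RobertaForMaskedLMDomainTask as a single-sentence classifier:
# task 1 predicts the aspect label and task 2 the sentiment label, with examples read
# from train.json in --data_dir. RobertaForMaskedLMDomainTask comes from a
# project-specific module (transformers.modeling_roberta_updateRep), not from the
# stock transformers release.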

class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids=None, attention_mask=None, segment_ids=None, label_id=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.segment_ids = segment_ids
        self.label_id = label_id

class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, sentence, aspect, sentiment=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            sentence: string. The untokenized text of the sentence.
            aspect: string. The aspect the sentence is annotated with.
            sentiment: (Optional) string. The sentiment label of the example. This should
                be specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.sentence = sentence
        self.aspect = aspect
        self.sentiment = sentiment

class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_json(cls, input_file):
        """Reads a JSON file."""
        with open(input_file, "r", encoding='utf-8') as f:
            return json.loads(f.read())
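
# Each record consumed by Processor_1 is expected to carry "sentence", "aspect", and
# "sentiment" keys, e.g. (hypothetical illustration):
#   {"sentence": "The battery lasts all day.", "aspect": "battery", "sentiment": "positive"}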

class Processor_1(DataProcessor):
    """Processor for the aspect/sentiment classification data set (JSON format)."""

    def get_train_examples(self, data_dir):
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "train.json")), "train")
        aspect = set([x.aspect for x in examples])
        sentiment = set([x.sentiment for x in examples])
        return examples, list(aspect), list(sentiment)

    def get_dev_examples(self, data_dir):
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "dev.json")), "dev")
        aspect = set([x.aspect for x in examples])
        sentiment = set([x.sentiment for x in examples])
        return examples, list(aspect), list(sentiment)

    def get_test_examples(self, data_dir):
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "test.json")), "test")
        aspect = set([x.aspect for x in examples])
        sentiment = set([x.sentiment for x in examples])
        return examples, list(aspect), list(sentiment)

    def get_labels(self):
        # Label sets are derived from the examples returned by get_*_examples above.
        raise NotImplementedError()

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            sentence = line["sentence"]
            aspect = line["aspect"]
            sentiment = line["sentiment"]
            examples.append(
                InputExample(guid=guid, sentence=sentence, aspect=aspect, sentiment=sentiment))
        return examples
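
# Note: the aspect/sentiment label lists above come from Python sets, so their order is
# not deterministic across runs; convert_examples_to_features() sorts them before
# building the label -> id map.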

def convert_examples_to_features(examples, aspect_list, sentiment_list, max_seq_length, tokenizer, task_n):
    """Loads a data file into a list of `InputFeatures`s."""

    # Task 1 classifies the aspect, task 2 the sentiment (inferred from how the label
    # maps are used below).
    if task_n == 1:
        label_list = sorted(aspect_list)
    elif task_n == 2:
        label_list = sorted(sentiment_list)

    for w in label_list:
        print(w, tokenizer.encode(w))

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        # Reference snippet (not executed): how extra special tokens would be added to a
        # GPT-2 tokenizer/model.
        # tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        # model = GPT2Model.from_pretrained('gpt2')
        # special_tokens_dict = {'cls_token': '<CLS>'}
        # num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
        # print('We have added', num_added_toks, 'tokens')
        # model.resize_token_embeddings(len(tokenizer))
        # print(tokenizer.all_special_tokens)
        # print(tokenizer.encode(tokenizer.all_special_tokens))
        # ['[PAD]', '[SEP]', '[CLS]', '[MASK]', '[UNK]']
        # [ 0, 102, 101, 103, 100]

        # <s> sentence </s>
        input_ids = tokenizer.encode(example.sentence, add_special_tokens=True)
        # print(tokenizer.convert_tokens_to_ids("<sep>"))  # 3
        next_input = tokenizer.encode(example.aspect, add_special_tokens=False)
        # 3 = <unk>, 2 = </s> in the RoBERTa vocabulary
        next_input = [3] + next_input + [2]
        input_ids += next_input

        segment_ids = [0] * len(input_ids)
        # input_ids += input_ids + [102]
        # sentiment: word (Next sentence)
        # segment_ids += [1] * (len(tokens_b) + 1)

        attention_mask = [1] * len(input_ids)

        # Pad up to max_seq_length; 1 is the RoBERTa <pad> token id.
        padding = [0] * (max_seq_length - len(input_ids))
        padding_id = [1] * (max_seq_length - len(input_ids))
        input_ids += padding_id
        attention_mask += padding
        segment_ids += padding

        assert len(input_ids) == max_seq_length
        assert len(attention_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        if task_n == 1:
            label_id = label_map[example.aspect]
        elif task_n == 2:
            label_id = label_map[example.sentiment]

        if task_n == 1:
            features.append(
                InputFeatures(input_ids=input_ids,
                              attention_mask=attention_mask,
                              label_id=label_id))
        elif task_n == 2:
            features.append(
                InputFeatures(input_ids=input_ids,
                              attention_mask=attention_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
        else:
            print("Wrong in convert_examples")

    return features
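
# The function above packs each example as
#   <s> sentence </s> <unk> aspect </s> <pad> ... <pad>
# (token ids 0 / 2 / 3 / 1 in the roberta-base vocabulary), i.e. the aspect is appended
# to the sentence as a pseudo second segment before padding to max_seq_length.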

def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", type=str, required=True,
                        help="The input data dir. Should contain the .json files (or other data files) for the task.")
    parser.add_argument("--output_dir", type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--pretrain_model", default='bert-base-uncased', type=str,
                        help="Pre-trained model")
    parser.add_argument("--num_labels_task", default=None, type=int,
                        help="num_labels_task")
    parser.add_argument("--max_seq_length", type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate", type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", default=-1, type=int,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', default=1, type=int,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--weight_decay", type=float, default=0.0,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-8,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=float, default=1.0,
                        help="Max gradient norm.")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--task", type=int, required=True,
                        help="Which classification task to train: 1 for aspect, 2 for sentiment.")

    args = parser.parse_args()
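
    # Example invocation (illustrative values only; the flag names are defined above and
    # the script filename is a placeholder):
    #   python run_task_finetune.py --data_dir ./data --output_dir ./out \
    #       --pretrain_model roberta-base --num_labels_task 3 --max_seq_length 128 \
    #       --train_batch_size 32 --learning_rate 2e-5 --num_train_epochs 3 \
    #       --task 1 --do_train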

    processors = Processor_1
    num_labels = args.num_labels_task

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which takes care of synchronizing nodes/GPUs.
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {}, n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError("`do_train` must be True, since this script only performs training.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model)

    train_examples = None
    num_train_steps = None
    aspect_list = None
    sentiment_list = None
    processor = processors()
    train_examples, aspect_list, sentiment_list = processor.get_train_examples(args.data_dir)

    if args.task == 1:
        num_labels = len(aspect_list)
    elif args.task == 2:
        num_labels = len(sentiment_list)
    else:
        print("What's task?")

    num_train_steps = int(
        len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
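
    # Worked example: 10,000 training examples, --train_batch_size 32,
    # --gradient_accumulation_steps 2, --num_train_epochs 3. The batch size was divided
    # by the accumulation steps above (32 -> 16), so
    # num_train_steps = int(10000 / 16 / 2 * 3) = 937 optimizer updates.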

    # Prepare the model
    model = RobertaForMaskedLMDomainTask.from_pretrained(args.pretrain_model, num_labels=args.num_labels_task, output_hidden_states=False, output_attentions=False, return_dict=True)
    model.to(device)

    # Prepare the optimizer and schedule
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
    param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total * 0.1), num_training_steps=t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # Multi-GPU and distributed training
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
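
    # When --local_rank is set (one process per GPU), the script is expected to be
    # started through a distributed launcher, e.g. (illustrative):
    #   python -m torch.distributed.launch --nproc_per_node=4 <this_script>.py ...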

    # Prepare the training data
    train_features = convert_examples_to_features(
        train_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task)
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_examples))
    logger.info("  Batch size = %d", args.train_batch_size)
    logger.info("  Num steps = %d", num_train_steps)

    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in train_features], dtype=torch.long)
    if args.task == 1:
        print("Executing task 1")
    elif args.task == 2:
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)

    if args.task == 1:
        train_data = TensorDataset(all_input_ids, all_attention_mask, all_label_ids)
    elif args.task == 2:
        train_data = TensorDataset(all_input_ids, all_attention_mask, all_segment_ids, all_label_ids)

    print(type(train_data))

    if args.local_rank == -1:
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = DistributedSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    output_loss_file = os.path.join(args.output_dir, "loss")
    loss_fout = open(output_loss_file, 'w')
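
    # The "loss" file in the output dir collects one scalar per micro-batch (already
    # divided by gradient_accumulation_steps); see loss_fout.write() in the loop below.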

    model.train()
    for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            batch = tuple(t.to(device) for i, t in enumerate(batch))
            if args.task == 1:
                input_ids, attention_mask, label_ids = batch
            elif args.task == 2:
                input_ids, attention_mask, segment_ids, label_ids = batch

            loss, logit = model(input_ids_org=input_ids, token_type_ids=None, attention_mask=attention_mask, sentence_label=label_ids, func="task_class")

            if n_gpu > 1:
                loss = loss.mean()  # average over GPUs when using DataParallel
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            loss_fout.write("{}\n".format(loss.item()))
            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1

            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()

        # Checkpoint after each epoch
        model_to_save = model.module if hasattr(model, 'module') else model
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(epoch))
        torch.save(model_to_save.state_dict(), output_model_file)

    # Final model
    model_to_save = model.module if hasattr(model, 'module') else model
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)
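
    # Sketch of how a saved checkpoint could later be restored (assumes the same model
    # class and constructor arguments as above):
    #   model = RobertaForMaskedLMDomainTask.from_pretrained(args.pretrain_model, num_labels=args.num_labels_task)
    #   model.load_state_dict(torch.load(os.path.join(args.output_dir, "pytorch_model.bin")))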

if __name__ == "__main__":
    main()