# %% [markdown]
# ## Text Classification with LM-BFF
# In this tutorial, we do sentiment analysis with automatic template and verbalizer generation. We use SST-2 as an example.

# %% [markdown]
# ### 1. load dataset

# %%
# import argparse
# parser = argparse.ArgumentParser("")
# parser.add_argument("--lr", type=float, default=5e-5)
# args = parser.parse_args()
from openprompt.data_utils.text_classification_dataset import SST2Processor
dataset = {}
dataset['train'] = SST2Processor().get_train_examples("../datasets/TextClassification/SST-2/16-shot/16-13")
dataset['validation'] = SST2Processor().get_dev_examples("../datasets/TextClassification/SST-2/16-shot/16-13")
dataset['test'] = SST2Processor().get_test_examples("../datasets/TextClassification/SST-2/16-shot/16-13")
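
# %% [markdown]
# A quick sanity check (added for illustration): each split is a list of `InputExample` objects, and in this 16-shot setting the train split should contain 16 examples per class.

# %%
print(len(dataset['train']), len(dataset['validation']), len(dataset['test']))
print(dataset['train'][0])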

# %% [markdown]
# ### 2. build initial verbalizer and template
# - Note that if you wish to do automatic label word generation, this verbalizer is not the final verbalizer; it is only used for template generation.
# - Note that if you wish to do automatic template generation, the template text should include `{"meta":"labelword"}` so that the label word can be filled in, and remember to use the `LMBFFTemplateGenerationTemplate` class so that `"labelword"` is handled properly. Otherwise you can just use `ManualTemplate` (a comparison cell is shown below).
# - Below is a template that expects plain text to be generated at each `{"mask"}` token position.

# %%
print('loading models...')
from openprompt.plms import load_plm
# load an MLM for the main classification task
plm, tokenizer, model_config, WrapperClass = load_plm("roberta", "roberta-large")

# load a generation model for template generation
template_generate_model, template_generate_tokenizer, template_generate_model_config, template_tokenizer_wrapper = load_plm('t5', 't5-large')

from openprompt.prompts import ManualVerbalizer, ManualTemplate
verbalizer = ManualVerbalizer(tokenizer=tokenizer, num_classes=2, label_words=[['terrible'], ['great']])

from openprompt.prompts.prompt_generator import LMBFFTemplateGenerationTemplate
template = LMBFFTemplateGenerationTemplate(tokenizer=template_generate_tokenizer, verbalizer=verbalizer, text='{"placeholder":"text_a"} {"mask"} {"meta":"labelword"} {"mask"}.')
# template = ManualTemplate(tokenizer=tokenizer, text='{"placeholder":"text_a"} It is {"mask"}.')

# view one wrapped example
wrapped_example = template.wrap_one_example(dataset['train'][0])
print(wrapped_example)
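
# %% [markdown]
# For comparison (illustrative): the plain `ManualTemplate` route mentioned in the commented line above wraps the same example without the `{"meta":"labelword"}` slot. A separate variable is used so the LM-BFF template above stays intact.

# %%
manual_template = ManualTemplate(tokenizer=tokenizer, text='{"placeholder":"text_a"} It is {"mask"}.')
print(manual_template.wrap_one_example(dataset['train'][0]))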

# %%
# parameter setting
cuda = True
auto_t = True  # whether to perform automatic template generation
auto_v = True  # whether to perform automatic label word generation

# %%
# training utility functions
from openprompt.prompts.prompt_generator import T5TemplateGenerator
from openprompt.pipeline_base import PromptDataLoader, PromptForClassification
import copy
import torch
from transformers import AdamW
import numpy as np

def fit(model, train_dataloader, val_dataloader, loss_func, optimizer):
    """Train for 5 epochs and return the best validation score seen.

    Note: only the score is tracked; the model weights are not checkpointed,
    so the returned value reflects the best epoch, not the final state.
    """
    best_score = 0.0
    for epoch in range(5):
        train_loss = train_epoch(model, train_dataloader, loss_func, optimizer)
        score = evaluate(model, val_dataloader)
        if score > best_score:
            best_score = score
        print(f"Epoch {epoch+1}: Train loss={train_loss}, Eval score={score}")
    return best_score


def train_epoch(model, train_dataloader, loss_func, optimizer):
    """Run one epoch of training and return the mean training loss."""
    model.train()
    loss_all = []
    for step, inputs in enumerate(train_dataloader):
        if cuda:
            inputs = inputs.cuda()
        logits = model(inputs)
        labels = inputs['label']
        loss = loss_func(logits, labels)
        loss.backward()
        loss_all.append(loss.item())
        optimizer.step()
        optimizer.zero_grad()
    return np.mean(loss_all)

def evaluate(model, val_dataloader):
    """Return the model's accuracy on the given dataloader."""
    model.eval()
    allpreds = []
    alllabels = []
    with torch.no_grad():
        for step, inputs in enumerate(val_dataloader):
            if cuda:
                inputs = inputs.cuda()
            logits = model(inputs)
            labels = inputs['label']
            alllabels.extend(labels.cpu().tolist())
            allpreds.extend(torch.argmax(logits, dim=-1).cpu().tolist())
    acc = sum([int(i == j) for i, j in zip(allpreds, alllabels)]) / len(allpreds)
    return acc
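
# %% [markdown]
# These utilities implement the selection protocol used below: each candidate template (and, later, each candidate set of label words) is briefly fine-tuned with `fit`, and the dev accuracy from `evaluate` is used to rank the candidates.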

# %% [markdown]
# ### 3. automatic template and verbalizer generation

# %%
from tqdm import tqdm

class ManualTemplateWithoutParse(ManualTemplate):
    """The template text produced by TemplateGenerator is already a parsed list of dicts, so no further parsing is needed."""
    def on_text_set(self):
        pass

# template generation
if auto_t:
    print('performing auto_t...')

    if cuda:
        template_generate_model = template_generate_model.cuda()
    template_generator = T5TemplateGenerator(template_generate_model, template_generate_tokenizer, template_tokenizer_wrapper, verbalizer, beam_width=5)  # beam_width is set to 5 here for efficiency; to improve performance, try a larger number

    dataloader = PromptDataLoader(dataset['train'], template, tokenizer=template_generate_tokenizer, tokenizer_wrapper_class=template_tokenizer_wrapper, batch_size=len(dataset['train']), decoder_max_length=128, max_seq_length=128, shuffle=False, teacher_forcing=False)  # register all data at once
    for data in dataloader:
        if cuda:
            data = data.cuda()
        template_generator._register_buffer(data)

    template_generate_model.eval()
    print('generating...')
    template_texts = template_generator._get_templates()

    original_template = template.text
    template_texts = [template_generator.convert_template(template_text, original_template) for template_text in template_texts]
    # template_generator._show_template()
    template_generator.release_memory()
    # show the generated candidate template texts
    print(template_texts)

    # iterate over each candidate and select the best one
    best_metrics = 0.0
    best_template_text = None
    for template_text in tqdm(template_texts):
        template = ManualTemplateWithoutParse(tokenizer, template_text)
        print(f"current template: {template_text}, wrapped example: {template.wrap_one_example(dataset['train'][0])}")

        train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass, shuffle=True)
        valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass)

        model = PromptForClassification(copy.deepcopy(plm), template, verbalizer)

        loss_func = torch.nn.CrossEntropyLoss()
        no_decay = ['bias', 'LayerNorm.weight']
        # it is good practice to exempt bias and LayerNorm parameters from weight decay
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]

        optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)
        if cuda:
            model = model.cuda()
        score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)

        if score > best_metrics:
            print('current best score:', score)
            best_metrics = score
            best_template_text = template_text
    # use the best template
    template = ManualTemplateWithoutParse(tokenizer, text=best_template_text)
    print("final best template:", best_template_text)
    print("wrapped example:", template.wrap_one_example(dataset["train"][0]))
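
# %%
# Optional (illustrative): persist the selected template so the search does not
# have to be re-run. The parsed template text is a list of dicts, so it should
# be JSON-serializable; the file name is arbitrary.
# import json
# with open('best_template.json', 'w') as f:
#     json.dump(best_template_text, f)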

# %%
# verbalizer generation
from openprompt.prompts.prompt_generator import RobertaVerbalizerGenerator
if auto_v:
    print('performing auto_v...')
    # the main MLM is reused here to propose candidate label words
    if cuda:
        plm = plm.cuda()
    verbalizer_generator = RobertaVerbalizerGenerator(model=plm, tokenizer=tokenizer, candidate_num=20, label_word_num_per_class=20)
    # to improve performance, try larger numbers

    dataloader = PromptDataLoader(dataset['train'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass, batch_size=32)
    for data in dataloader:
        if cuda:
            data = data.cuda()
        verbalizer_generator.register_buffer(data)
    label_words_list = verbalizer_generator.generate()
    verbalizer_generator.release_memory()

    # iterate over each candidate and select the best one
    current_verbalizer = copy.deepcopy(verbalizer)
    best_metrics = 0.0
    best_label_words = None
    for label_words in tqdm(label_words_list):
        current_verbalizer.label_words = label_words
        train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass, shuffle=True)
        valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass)

        model = PromptForClassification(copy.deepcopy(plm), template, current_verbalizer)

        loss_func = torch.nn.CrossEntropyLoss()
        no_decay = ['bias', 'LayerNorm.weight']
        # it is good practice to exempt bias and LayerNorm parameters from weight decay
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]

        optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)
        if cuda:
            model = model.cuda()
        score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)

        if score > best_metrics:
            best_metrics = score
            best_label_words = label_words
    # use the best verbalizer
    print("final best label words:", best_label_words)
    verbalizer = ManualVerbalizer(tokenizer, num_classes=2, label_words=best_label_words)

# %% [markdown]
# ### 4. main training loop

# %%
# main training loop with the selected template and verbalizer
train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass, shuffle=True)
valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass)
test_dataloader = PromptDataLoader(dataset['test'], template, tokenizer=tokenizer, tokenizer_wrapper_class=WrapperClass)

model = PromptForClassification(copy.deepcopy(plm), template, verbalizer)
loss_func = torch.nn.CrossEntropyLoss()
no_decay = ['bias', 'LayerNorm.weight']
# it is good practice to exempt bias and LayerNorm parameters from weight decay
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]

optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)
if cuda:
    model = model.cuda()
score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)
test_score = evaluate(model, test_dataloader)
print("Final test score:", test_score)
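
# %%
# Optional (illustrative): save the fine-tuned prompt model's weights for later
# reuse; the file name is arbitrary.
# torch.save(model.state_dict(), 'lmbff_sst2_roberta_large.ckpt')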
