Prompt-Transferability

Форк
0
338 строк · 9.4 Кб
1
# -*- coding: utf-8 -*-
2
"""pipeline.ipynb
3

4
Automatically generated by Colaboratory.
5

6
Original file is located at
7
    https://colab.research.google.com/drive/1-m2ywJVcfgCHOcEN-4agAbLz7tRGqMvM
8
"""
9

10
'''准备模型和数据'''
11
'''这里模型就用model这个变量'''
12
'''数据之后用example作为演示'''
13
'''使用的时候替换成自己的model就可以了'''
14
#import numpy as np
15
import torch
16
import config
17
#from activate_neuron.mymodel import *
18
#import activate_neuron.mymodel as mymodel
19
#from activate_neuron.utils import *
20
#import activate_neuron.utils as utils
21

22

23
#from transformers import AutoConfig, AutoModelForMaskedLM
24
#from model.modelling_roberta import RobertaForMaskedLM
25
#from reader.reader import init_dataset, init_formatter, init_test_dataset
26

27
import argparse
28
import os
29
import torch
30
import logging
31
import random
32
import numpy as np
33

34
from tools.init_tool import init_all
35
from config_parser import create_config
36
from tools.valid_tool import valid
37
from torch.autograd import Variable
38

39
# Configure root logging once for the whole script: timestamped INFO-level
# records of the form "MM/DD/YYYY HH:MM:SS - LEVEL - logger_name -   message".
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)

# Module-level logger used by the driver code below.
logger = logging.getLogger(__name__)
44

45
def set_random_seed(seed):
    """Seed every RNG in use (``random``, NumPy, torch CPU and CUDA).

    A ``seed`` of ``None`` or any value <= 0 is treated as "do not seed"
    and leaves all generators untouched.
    """
    if seed is None or seed <= 0:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
53

54

55

56
def relu(tmp):
    """Rectified linear unit: keep positive values, zero out the rest.

    Written arithmetically (boolean mask times value) so it works on plain
    Python numbers as well as NumPy arrays and torch tensors.
    """
    positive_mask = tmp > 0
    return positive_mask * tmp
58

59
def topk(obj, k):
    """Return the indices of the ``k`` largest values in ``obj``.

    Indices are ordered by descending value; ties resolve to the smaller
    index first (same tie order as the original greedy implementation).

    Fixes the original sentinel-based version, which overwrote picked
    slots with -10000 and therefore mis-ranked inputs containing values
    <= -10000; also avoids rescanning the list twice per pick.
    """
    values = list(obj)
    # sorted() is stable, so with reverse=True equal values keep their
    # original (ascending-index) order, matching the greedy picker.
    return sorted(range(len(values)), key=values.__getitem__, reverse=True)[:k]
67

68
def relu(tmp):
    """ReLU (duplicate of the definition above, kept as in the original
    file): returns ``tmp`` where positive, otherwise zero."""
    return tmp * (tmp > 0)
70

71
def topk(obj, k):
    """Greedily collect the indices of the ``k`` largest entries of ``obj``.

    Each picked slot is overwritten with a sentinel of -10000 so it is not
    picked again.  NOTE(review): inputs containing values <= -10000 can
    therefore be mis-ranked (the sentinel itself may win a later round);
    this quirk of the original implementation is preserved here.
    """
    SENTINEL = -10000
    scratch = list(obj)
    chosen = []
    for _ in range(k):
        best = scratch.index(max(scratch))
        chosen.append(best)
        scratch[best] = SENTINEL
    return chosen
79

80

81

82

83
if __name__ == "__main__":
    # Driver: extract "activated neurons" for a task-specific prompt.
    #   1. build model/datasets via the project's init_all,
    #   2. register forward hooks on the FFN input projection
    #      (DenseReluDense.wi) of all 24 decoder blocks,
    #   3. run one validation pass so the hooks record activations,
    #   4. slice/stack the recorded activations and torch.save them under
    #      task_activated_neuron/<prompt name>/.

    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c', help="specific config file", required=True)
    parser.add_argument('--gpu', '-g', help="gpu id list")
    parser.add_argument('--local_rank', type=int, help='local rank', default=-1)
    parser.add_argument('--do_test', help="do test while training or not", action="store_true")
    parser.add_argument('--checkpoint', help="checkpoint file path", type=str, default=None)
    # NOTE(review): help text copy-pasted from --checkpoint; presumably a free-form comment string.
    parser.add_argument('--comment', help="checkpoint file path", default=None)
    parser.add_argument("--seed", type=int, default=None)
    # NOTE(review): argparse type=bool makes any non-empty string truthy ("--prompt_emb_output False" is True).
    parser.add_argument("--prompt_emb_output", type=bool, default=False)
    parser.add_argument("--save_name", type=str, default=None)
    parser.add_argument("--replacing_prompt", type=str, default=None)
    parser.add_argument("--pre_train_mlm", default=False, action='store_true')
    parser.add_argument("--task_transfer_projector", default=False, action='store_true')
    parser.add_argument("--model_transfer_projector", default=False, action='store_true')
    # NOTE(review): default=True combined with store_true means this flag is always True.
    parser.add_argument("--activate_neuron", default=True, action='store_true')
    parser.add_argument("--mode", type=str, default="valid")
    parser.add_argument("--projector", type=str, default=None)


    args = parser.parse_args()
    configFilePath = args.config


    # Parse the project config file (ConfigParser-style object; see config_parser).
    config = create_config(configFilePath)



    # Resolve GPU usage from --gpu, a comma-separated list such as "0,2".
    use_gpu = True
    gpu_list = []
    if args.gpu is None:
        use_gpu = False
    else:
        use_gpu = True
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

        device_list = args.gpu.split(",")
        for a in range(0, len(device_list)):
            # Appends the loop index (0..n-1), not int(device_list[a]); these are
            # relative device ids, which work because CUDA_VISIBLE_DEVICES was
            # just set above — TODO confirm this is intentional.
            gpu_list.append(int(a))

    os.system("clear")
    config.set('distributed', 'local_rank', args.local_rank)
    # Distributed mode is forcibly disabled here, so the branch below never runs.
    config.set("distributed", "use", False)
    if config.getboolean("distributed", "use") and len(gpu_list)>1:
        torch.cuda.set_device(gpu_list[args.local_rank])
        torch.distributed.init_process_group(backend=config.get("distributed", "backend"))
        config.set('distributed', 'gpu_num', len(gpu_list))

    cuda = torch.cuda.is_available()
    logger.info("CUDA available: %s" % str(cuda))
    if not cuda and len(gpu_list) > 0:
        logger.error("CUDA is not available but specific gpu id")
        raise NotImplementedError
    set_random_seed(args.seed)


    ########
    '''
    formatter = "mlmPrompt"
    config.set("data","train_formatter_type",formatter)
    config.set("data","valid_formatter_type",formatter)
    config.set("data","test_formatter_type",formatter)
    config.set("model","model_name","mlmPrompt")
    '''
    ########



    # Build model, datasets, and output helpers from the project config.
    parameters = init_all(config, gpu_list, args.checkpoint, args.mode, local_rank = args.local_rank, args=args)
    do_test = False

    model = parameters["model"]
    valid_dataset = parameters["valid_dataset"]


    ##########################
    ##########################


    # Prepare the hooks — this is the feature-extraction code.
    # (The bare Chinese strings below are the original author's inline notes.)
    '''准备hook'''
    '''这是提取特征的代码'''
    # One collection bucket per hooked layer (24 decoder blocks).
    outputs=[[] for _ in range(24)]
    def save_ppt_outputs1_hook(n):
        # Returns a forward hook that appends layer n's output (detached,
        # moved to CPU) into outputs[n] on every forward pass.
        def fn(_,__,output):
            #print("=====")
            #print(output)
            #print("----")
            #print(output.shape) #torch.Size([1, 1, 3072])
            #print("=====")
            #exit()
            outputs[n].append(output.detach().to("cpu"))
            #outputs[n].append(output.detach())
        return fn


    for n in range(24):
        # The module hooked for feature extraction can be changed; two nested
        # "encoder" attributes are needed here because of the author's custom
        # model wrapper (original note, translated).
        #for l in model.state_dict().keys():
        #    print(l)
        #print("====")
        #exit()

        #decoder
        model.encoder.decoder.block[n].layer[2].DenseReluDense.wi.register_forward_hook(save_ppt_outputs1_hook(n))

        #encoder
        #model.encoder.encoder.block[n].layer[1].DenseReluDense.wi.register_forward_hook(save_ppt_outputs1_hook(n))






    # Run the data through the model; the hooks record the intermediate
    # activations into `outputs` as a side effect of the validation pass.
    '''将数据通过模型'''
    '''hook会自动将中间层的激活储存在outputs中'''
    model.eval()
    valid(model, parameters["valid_dataset"], 1, None, config, gpu_list, parameters["output_function"], mode=args.mode, args=args)


    #################################################
    #################################################
    #################################################


    '''
    print(len(outputs)) #12
    print(len(outputs[0])) #17 epoch
    print(len(outputs[0][0])) #64
    print(len(outputs[0][0][0])) #231
    print(len(outputs[0][0][0][0])) #3072
    #outputs[][][][][] , layer:12, epoch:17, batch_size:64, input_length:231, neuron:3072
    '''

    #merge 17 epoch
    # Concatenate each layer's per-batch tensors along the batch dimension.
    for k in range(24):
        #outputs[k] = relu(np.concatenate(outputs[k]))
        #outputs[k] = torch.relu(torch.cat(outputs[k]))
        outputs[k] = torch.cat(outputs[k])
        #print(outputs[k])
        #print(outputs[k].shape)
        #exit()


    '''
    print(len(outputs)) #12
    print(len(outputs[0])) #17 epoch
    print(len(outputs[0][0])) #64
    print(len(outputs[0][0][0])) #231
    print(len(outputs[0][0][0][0])) #3072
    #outputs[][][][][] , layer:12, epoch:17, batch_size:64, input_length:231, neuron:3072
    '''


    # This part follows the paper's code for locating a neuron's maximum
    # activation (disabled; original note translated from Chinese).
    '''这部分是根据论文里的代码找到某个neuron的最大激活'''
    '''
    #划定层数
    #layer = np.random.randint(12)
    layer = torch.randint(1,12,(1,))
    #决定neuron
    #neuron = np.random.randint(3072)
    neuron = torch.randint(1,3072,(1,))
    #这里面是得到了某层的某个neuron的所有激活
    neuron_activation = outputs[layer][:,:,neuron]
    max_activation = [neuron_activation[i,:length[i]].max() for i in range(size)]
    print(neuron_activation)
    print(max_activation)
    exit()
    '''



    # Stack per-layer tensors into one tensor: (num_layers, batch, seq, hidden).
    outputs = torch.stack(outputs)

    #decoder
    #print(outputs.shape)
    # Keep only the first batch element and first target position per layer.
    outputs = outputs[:,:1,:1,:] #12 layers, [mask]
    #print(outputs.shape)
    #exit()

    #encoder
    #print(outputs.shape)
    #outputs = outputs[:,:,100:101,:] #12 layers, [mask]
    #print(outputs.shape)
    #exit()

    #print(outputs.shape)
    # [12, 1, 1, 3072] --> 12, 1(batch_size), (target_length), 3072

    # [12, 2, 1, 3072] --> 12, 1(batch_size), (target_length), 3072


    #print(outputs)
    #print(save_dir)
    #exit()


    # Derive the save name from the replacing-prompt path: basename minus
    # extension.  NOTE(review): raises AttributeError if --replacing_prompt
    # was not supplied (it defaults to None).
    save_name = args.replacing_prompt.strip().split("/")[-1].split(".")[0]
    #print(save_name)
    #exit()
    # NOTE(review): `dir` shadows the builtin of the same name.
    dir = "task_activated_neuron"
    # Ensure task_activated_neuron/<save_name>/ exists, then save the tensor.
    if os.path.isdir(dir):
        save_dir = dir+"/"+save_name
        if os.path.isdir(save_dir):
            torch.save(outputs,save_dir+"/task_activated_neuron")
        else:
            os.mkdir(save_dir)
            torch.save(outputs,save_dir+"/task_activated_neuron")
    else:
        os.mkdir(dir)
        save_dir = dir+"/"+save_name
        os.mkdir(save_dir)
        torch.save(outputs,save_dir+"/task_activated_neuron")


    print("==Prompt emb==")
    print(outputs.shape)
    print("Save Done")
    print("==============")










    '''
    size = 8 # number of the sentences
    length = 231 #sentence length
    #Activated neuron for a task-specific prompt
    for layer in range(1,12):
        for neuron in range(1,3072):
            neuron_activation = outputs[layer][:,:,neuron]
            print(outputs[layer].shape)
            print(neuron_activation.shape)
            exit()
            max_activation = [neuron_activation[i,:length[i]].max() for i in range(size)]
            print(neuron_activation)
            print("------------")
            print(max_activation)
            print("============")
    exit()
    '''



    # Show the top few sentences for the most-activated neurons (disabled
    # demo snippet; original note translated from Chinese).
    '''选择头几个句子展示'''
    '''
    N = 4
    indexes = topk(max_activation,N)
    for ids in indexes:
        print(tokenizer.decode(example['input_ids'][ids,:length[ids]]))
    '''
339

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.