Prompt-Transferability

# -*- coding: utf-8 -*-
"""pipeline.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1-m2ywJVcfgCHOcEN-4agAbLz7tRGqMvM
"""

'''Prepare the model and the data.'''
'''The model is referred to below through the variable `model`.'''
'''`example` is used later as demo data.'''
'''Swap in your own model when using this script.'''
#import numpy as np
import torch
import config  # note: this name is rebound later by the local `config` created from the config file
#from activate_neuron.mymodel import *
#import activate_neuron.mymodel as mymodel
#from activate_neuron.utils import *
#import activate_neuron.utils as utils


#from transformers import AutoConfig, AutoModelForMaskedLM
#from model.modelling_roberta import RobertaForMaskedLM
#from reader.reader import init_dataset, init_formatter, init_test_dataset

import argparse
import os
import logging
import random
import numpy as np

from tools.init_tool import init_all
from config_parser import create_config
from tools.valid_tool import valid

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)

logger = logging.getLogger(__name__)

def set_random_seed(seed):
    """Set random seed for reproducibility."""

    if seed is not None and seed > 0:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)



def relu(tmp):
    # elementwise ReLU: zero out negative entries
    return 1*(tmp > 0)*tmp

def topk(obj, k):
    """Return the indices of the k largest values of obj (simple argsort-free top-k)."""
    M = -10000
    obj = list(obj)[:]
    idlist = []
    for i in range(k):
        idlist.append(obj.index(max(obj)))
        obj[obj.index(max(obj))] = M
    return idlist
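
# Quick sanity check for topk (illustrative, not part of the original script):
# the helper agrees with torch.topk on plain lists.
#
#   >>> topk([0.2, 0.9, 0.5, 0.7], 2)
#   [1, 3]
#   >>> torch.topk(torch.tensor([0.2, 0.9, 0.5, 0.7]), 2).indices.tolist()
#   [1, 3]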



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c', help="specific config file", required=True)
    parser.add_argument('--gpu', '-g', help="gpu id list")
    parser.add_argument('--local_rank', type=int, help='local rank', default=-1)
    parser.add_argument('--do_test', help="do test while training or not", action="store_true")
    parser.add_argument('--checkpoint', help="checkpoint file path", type=str, default=None)
    parser.add_argument('--comment', help="comment for this run", default=None)
    parser.add_argument("--seed", type=int, default=None)
    # caveat: argparse's type=bool turns any non-empty string (even "False") into True
    parser.add_argument("--prompt_emb_output", type=bool, default=False)
    parser.add_argument("--save_name", type=str, default=None)
    parser.add_argument("--replacing_prompt", type=str, default=None)
    parser.add_argument("--pre_train_mlm", default=False, action='store_true')
    parser.add_argument("--task_transfer_projector", default=False, action='store_true')
    parser.add_argument("--model_transfer_projector", default=False, action='store_true')
    # note: default=True combined with store_true makes this flag effectively always True
    parser.add_argument("--activate_neuron", default=True, action='store_true')
    parser.add_argument("--mode", type=str, default="valid")
    parser.add_argument("--projector", type=str, default=None)


    args = parser.parse_args()
    configFilePath = args.config


    config = create_config(configFilePath)



    use_gpu = True
    gpu_list = []
    if args.gpu is None:
        use_gpu = False
    else:
        use_gpu = True
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

        # after CUDA_VISIBLE_DEVICES is set above, the visible devices are
        # re-indexed from 0, so local indices 0..n-1 are collected here rather
        # than the raw ids from --gpu
        device_list = args.gpu.split(",")
        for a in range(0, len(device_list)):
            gpu_list.append(int(a))

    os.system("clear")
    config.set('distributed', 'local_rank', args.local_rank)
    # distributed mode is hard-disabled here, so the branch below never runs
    config.set("distributed", "use", False)
    if config.getboolean("distributed", "use") and len(gpu_list)>1:
        torch.cuda.set_device(gpu_list[args.local_rank])
        torch.distributed.init_process_group(backend=config.get("distributed", "backend"))
        config.set('distributed', 'gpu_num', len(gpu_list))

    cuda = torch.cuda.is_available()
    logger.info("CUDA available: %s" % str(cuda))
    if not cuda and len(gpu_list) > 0:
        logger.error("CUDA is not available but a specific GPU id was given")
        raise NotImplementedError
    set_random_seed(args.seed)


    ########
    '''
    formatter = "mlmPrompt"
    config.set("data","train_formatter_type",formatter)
    config.set("data","valid_formatter_type",formatter)
    config.set("data","test_formatter_type",formatter)
    config.set("model","model_name","mlmPrompt")
    '''
    ########


    parameters = init_all(config, gpu_list, args.checkpoint, args.mode, local_rank = args.local_rank, args=args)
    do_test = False

    model = parameters["model"]
    valid_dataset = parameters["valid_dataset"]


    ##########################
    ##########################


    '''Set up the forward hooks.'''
    '''This is the feature-extraction code.'''
    outputs=[[] for _ in range(12)]  # one activation bucket per RoBERTa layer
    def save_ppt_outputs1_hook(n):
        # hook factory: the returned hook stores layer n's output on every forward pass
        def fn(_,__,output):
            outputs[n].append(output.detach().to("cpu"))
            #outputs[n].append(output.detach())
        return fn
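
    # Minimal illustration (not from the original script) of the forward-hook
    # contract used above: register_forward_hook calls back with
    # (module, input, output) after each forward pass.
    #
    #   layer = torch.nn.Linear(4, 2)
    #   layer.register_forward_hook(lambda mod, inp, out: print(out.shape))
    #   layer(torch.randn(3, 4))   # prints torch.Size([3, 2])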


    for n in range(12):
        # The module to extract features from can be changed; because of the custom
        # model wrapper, the path goes through two levels (model.encoder.roberta).
        #for l in model.state_dict().keys():
        #    print(l)
        #print("====")
        #exit()
        model.encoder.roberta.encoder.layer[n].intermediate.register_forward_hook(save_ppt_outputs1_hook(n))




    '''Run the data through the model.'''
    '''The hooks automatically store the intermediate activations in `outputs`.'''
    model.eval()
    valid(model, parameters["valid_dataset"], 1, None, config, gpu_list, parameters["output_function"], mode=args.mode, args=args)


    #################################################
    #################################################
    #################################################


    '''
    print(len(outputs)) #12
    print(len(outputs[0])) #17 batches
    print(len(outputs[0][0])) #64
    print(len(outputs[0][0][0])) #231
    print(len(outputs[0][0][0][0])) #3072
    #outputs[layer][batch][example][position][neuron] -> layers:12, batches:17, batch_size:64, input_length:231, neurons:3072
    '''

    # merge the 17 evaluation batches along the batch dimension
    for k in range(12):
        #outputs[k] = relu(np.concatenate(outputs[k]))
        #outputs[k] = torch.relu(torch.cat(outputs[k]))
        outputs[k] = torch.cat(outputs[k])
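
    # Shape sanity check (illustrative; shapes assumed from the comments above):
    # after torch.cat, each outputs[k] is (num_examples, seq_len, 3072), e.g.
    #   chunks = [torch.zeros(64, 231, 3072) for _ in range(17)]
    #   torch.cat(chunks).shape   # torch.Size([1088, 231, 3072])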

    '''This part follows the paper's code for finding the maximum activation of a given neuron.'''
    '''
    # pick a layer
    #layer = np.random.randint(12)
    layer = torch.randint(1,12,(1,))
    # pick a neuron
    #neuron = np.random.randint(3072)
    neuron = torch.randint(1,3072,(1,))
    # this collects every activation of that neuron in that layer
    neuron_activation = outputs[layer][:,:,neuron]
    max_activation = [neuron_activation[i,:length[i]].max() for i in range(size)]
    print(neuron_activation)
    print(max_activation)
    exit()
    '''



    # Activated neurons for a task-specific prompt
    outputs = torch.stack(outputs)
    #print(outputs.shape)
    #exit()


    #outputs = outputs[11:,:,:1,:]
    #outputs = outputs[11:,:,:100,:]
    #outputs = outputs[:,:,:1,:] #12 layers, [mask]
    #print(outputs)
    #print(outputs.shape)
    #exit()
    outputs = outputs[:,:,:1,:] # 12 layers, keep only the first ([MASK]) position
    #outputs = outputs[:,:,:100,:] #12 layers, [mask]+[prompt]
    #outputs = outputs[11:,:,:100,:]

    print(outputs.shape)
    # [12, 128, 231, 3072] --> 12 layers, 128 (eval batch size), 231 (now 1 or 100) positions, 3072 neurons
    #exit()
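
    # Slicing semantics (illustrative, with the shapes assumed above): [:, :, :1, :]
    # keeps position 0, the [MASK] token, while preserving the dimension.
    #   t = torch.zeros(12, 128, 231, 3072)
    #   t[:, :, :1, :].shape   # torch.Size([12, 128, 1, 3072])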




    # derive the save name from the replacing-prompt file name, e.g. a path
    # ending in "<task>.pt" yields "<task>"
    save_name = args.replacing_prompt.strip().split("/")[-1].split(".")[0]
    save_dir = os.path.join("task_activated_neuron", save_name)
    os.makedirs(save_dir, exist_ok=True)
    torch.save(outputs, os.path.join(save_dir, "task_activated_neuron"))
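
    # Loading the saved activations back (illustrative): the file is a plain
    # torch.save'd tensor, so torch.load restores it directly.
    #   acts = torch.load("task_activated_neuron/<save_name>/task_activated_neuron")
    #   acts.shape   # (12, num_examples, 1, 3072) given the slicing above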


    print("==Prompt emb==")
    print(outputs.shape)
    print("Save Done")
    print("==============")




    '''
    size = 8 # number of sentences
    length = 231 # sentence length (note: length[i] below assumes a per-sentence list)
    # Activated neurons for a task-specific prompt
    for layer in range(1,12):
        for neuron in range(1,3072):
            neuron_activation = outputs[layer][:,:,neuron]
            print(outputs[layer].shape)
            print(neuron_activation.shape)
            exit()
            max_activation = [neuron_activation[i,:length[i]].max() for i in range(size)]
            print(neuron_activation)
            print("------------")
            print(max_activation)
            print("============")
    exit()
    '''



    '''Show the top few sentences.'''
    '''
    N = 4
    indexes = topk(max_activation,N)
    for ids in indexes:
        print(tokenizer.decode(example['input_ids'][ids,:length[ids]]))
    '''