NRE

Форк
0
/
train.cpp 
423 строки · 12.1 Кб
1
#include <cfloat>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include <assert.h>
#include <ctime>
#include <pthread.h>
#include <sys/time.h>

#include "init.h"
#include "test.h"
18
using namespace std;

// Log-likelihood accumulated over the current epoch (added to by the
// worker threads in trainMode()).
double score = 0.0;
// Effective learning rate for the current epoch; set in train() as
// alpha * rate / batch.
float alpha1;

// Wall-clock bookkeeping for time_begin()/time_end().
struct timeval t_start;
struct timeval t_end;
long start;  // timestamp captured by time_begin(), in milliseconds
long end;    // timestamp captured by time_end(), in milliseconds
26
void time_begin()
27
{
28
  
29
  gettimeofday(&t_start, NULL); 
30
  start = ((long)t_start.tv_sec)*1000+(long)t_start.tv_usec/1000; 
31
}
32
void time_end()
33
{
34
  gettimeofday(&t_end, NULL); 
35
  end = ((long)t_end.tv_sec)*1000+(long)t_end.tv_usec/1000; 
36
  cout<<"time(s):\t"<<(double(end)-double(start))/1000<<endl;
37
}
38

39

40

41
// Forward pass of the convolutional feature extractor for one sentence
// (PCNN-style: convolution followed by piecewise max pooling).
//
//   sentence            word ids for each token (row index into wordVecDao)
//   trainPositionE1/E2  per-token position-feature ids relative to the two
//                       entities (row indices into positionVecDaoE1/E2)
//   len                 number of tokens in the sentence
//   tip (output)        for each of the 3*dimensionC pooled outputs, the
//                       window offset i1 that produced the max; consumed by
//                       train_gradient() to route gradients back
//
// Returns 3*dimensionC tanh-activated features: for each of the dimensionC
// filters, one max-pooled value per sentence segment.  Only the "Dao"
// snapshot parameter arrays are read.
vector<float> train(int *sentence, int *trainPositionE1, int *trainPositionE2, int len, vector<int> &tip) {
	vector<float> r;
	//cout<<43<<endl;
	for (int i = 0; i < dimensionC; i++) {
		// Offsets of filter i's weights over the word / position windows.
		int last = i * dimension * window;
		int lastt = i * dimensionWPE * window;
		// Running max and its window offset for each of the 3 segments.
		float mx[3];
		int ti[3];
		for (int i1 = 0; i1<3; i1++)
			mx[i1] = -FLT_MAX;
		// i2 = index of the segment the sliding window is currently in.
		int i2 = 0;
		// Slide the window across the sentence; i1 starts negative so the
		// leading windows are implicitly zero-padded on the left.
		for (int i1 = -window+1; i1 < len; i1++) 
		{
			float res = 0;
			int tot = 0;
			int tot1 = 0;
			// Convolution at offset i1: dot product of filter i with the
			// word and position embeddings of the tokens in the window.
			for (int j = i1; j < i1 + window; j++)  
			if (j>=0&&j<len)
			{
				int last1 = sentence[j] * dimension;
			 	for (int k = 0; k < dimension; k++) {
			 		res += matrixW1Dao[last + tot] * wordVecDao[last1+k];
			 		tot++;
			 	}
			 	int last2 = trainPositionE1[j] * dimensionWPE;
			 	int last3 = trainPositionE2[j] * dimensionWPE;
			 	for (int k = 0; k < dimensionWPE; k++) {
			 		res += matrixW1PositionE1Dao[lastt + tot1] * positionVecDaoE1[last2+k];
			 		res += matrixW1PositionE2Dao[lastt + tot1] * positionVecDaoE2[last3+k];
			 		tot1++;
			 	}
			}
			else
			{
				// Out-of-sentence token: zero padding, but the weight
				// offsets must still advance past this window slot.
				tot+=dimension;
				tot1+=dimensionWPE;
			}
		//	for (int i2=0; i2<3; i2++)
			// Piecewise max pooling: only the current segment competes.
			if (res > mx[i2]) {
				mx[i2] = res;
				ti[i2] = i1;
			}
			// A position id equal to -PositionMinE1 / -PositionMinE2 marks
			// a segment boundary (presumably the entity's own token —
			// TODO confirm against how position features are built in init.h).
			if (i1>=0&&trainPositionE1[i1]==-PositionMinE1)
				i2++;
			if (i1>=0&&trainPositionE2[i1]==-PositionMinE2)
				i2++;
			assert(i2<3);
		}
		// Exactly two boundaries must have been crossed -> 3 segments.
		assert(i2==2);
		// Add the per-segment bias and remember where each max came from.
		for (int i1 = 0; i1<3; i1++)
		{
			r.push_back(mx[i1]+matrixB1Dao[3*i+i1]);
			tip.push_back(ti[i1]);
		}
	}
	//cout<<82<<endl;
	// Nonlinearity on every pooled feature.
	for (int i = 0; i < 3 * dimensionC; i++) {
		r[i] = CalcTanh(r[i]);
	}
	return r;
}
102

103
void train_gradient(int *sentence, int *trainPositionE1, int *trainPositionE2, int len, int e1, int e2, int r1, float alpha, vector<float> &r,vector<int> &tip, vector<float> &grad)
104
{
105
	for (int i = 0; i < 3 * dimensionC; i++) {
106
		if (fabs(grad[i])<1e-8)
107
			continue;
108
		int last = (i/3) * dimension * window;
109
		int tot = 0;
110
		int lastt = (i/3) * dimensionWPE * window;
111
		int tot1 = 0;
112
		float g1 = grad[i] * (1 -  r[i] * r[i]);
113
		for (int j = 0; j < window; j++)  
114
		if (tip[i]+j>=0&&tip[i]+j<len)
115
		{
116
			int last1 = sentence[tip[i] + j] * dimension;
117
			for (int k = 0; k < dimension; k++) {
118
				matrixW1[last + tot] -= g1 * wordVecDao[last1+k];
119
				wordVec[last1 + k] -= g1 * matrixW1Dao[last + tot];
120
				tot++;
121
			}
122
			int last2 = trainPositionE1[tip[i] + j] * dimensionWPE;
123
			int last3 = trainPositionE2[tip[i] + j] * dimensionWPE;
124
			for (int k = 0; k < dimensionWPE; k++) {
125
				matrixW1PositionE1[lastt + tot1] -= g1 * positionVecDaoE1[last2 + k];
126
				matrixW1PositionE2[lastt + tot1] -= g1 * positionVecDaoE2[last3 + k];
127
				positionVecE1[last2 + k] -= g1 * matrixW1PositionE1Dao[lastt + tot1];
128
				positionVecE2[last3 + k] -= g1 * matrixW1PositionE2Dao[lastt + tot1];
129
				tot1++;
130
			}
131
		}
132
		matrixB1[i] -= g1;
133
	}
134
}
135

136
// Trains on one bag of sentences sharing the same entity pair (and hence the
// same relation label r1).  Pipeline:
//   1. forward pass (train) per sentence, collecting features and max tips;
//   2. softmax attention weights over the bag's sentences, scored against
//      the correct relation's row of the relation matrix;
//   3. relation softmax (with a dropout mask) on the attention-weighted
//      feature vector;
//   4. gradients through the softmax, the attention weights, and finally
//      each sentence via train_gradient.
// Reads the "Dao" snapshot parameters; writes updates to the live arrays.
// Returns the log-probability of the correct relation for this bag.
float train_bags(string bags_name)
{
	int bags_size = bags_train[bags_name].size();
	vector<vector<float> > rList;     // per-sentence feature vectors
	vector<vector<int> > tipList;     // per-sentence max-pooling tips
	tipList.resize(bags_size);
	int r1 = -1;                      // shared relation label of the bag
	for (int k=0; k<bags_size; k++)
	{
		tipList[k].clear();
		int i = bags_train[bags_name][k];
		if (r1==-1)
			r1 = relationList[i];
		else
			assert(r1==relationList[i]);
		rList.push_back(train(trainLists[i], trainPositionE1[i], trainPositionE2[i], trainLength[i], tipList[k]));
	}
	
	vector<float> f_r;	
	
	// Dropout mask: each feature kept with probability 1/2.
	// NOTE(review): no 1/2 rescaling is applied here; presumably the
	// test-time code compensates — verify against test.h.
	vector<int> dropout;
	for (int i = 0; i < 3 * dimensionC; i++) 
		dropout.push_back(rand()%2);
	// Attention weight per sentence: softmax of <features, M[r1]> * wt.
	vector<float> weight;
	float weight_sum = 0;
	for (int k=0; k<bags_size; k++)
	{
		float s = 0;
		for (int i = 0; i < 3 * dimensionC; i++) 
			s += rList[k][i] * matrixRelationDao[r1 * 3 * dimensionC + i] * wt;
		s = exp(s); 
		weight.push_back(s);
		weight_sum += s;
	}
	for (int k=0; k<bags_size; k++)
		weight[k]/=weight_sum;
	// Unnormalized relation scores f_r[j] and their sum (softmax denom).
	float sum = 0;
	for (int j = 0; j < relationTotal; j++) {	
		// r = attention-weighted combination of the sentence features.
		vector<float> r;
		r.resize(3 * dimensionC);
		for (int i = 0; i < 3 * dimensionC; i++) 
			for (int k=0; k<bags_size; k++)
				r[i] += rList[k][i] * weight[k];
	
		float ss = 0;
		for (int i = 0; i < 3 * dimensionC; i++) {
			ss += dropout[i] * r[i] * matrixRelationDao[j * 3 * dimensionC + i];
		}
		ss += matrixRelationPrDao[j];
		f_r.push_back(exp(ss));
		sum+=f_r[j];
	}
	// Log-probability of the correct relation (returned as the objective).
	double rt = (log(f_r[r1]) - log(sum));
	
	// grad[k][i] = d(loss)/d(rList[k][i]); g1_tmp[i] accumulates the
	// gradient w.r.t. the combined feature r[i] over all relations.
	vector<vector<float> > grad;
	grad.resize(bags_size);
	for (int k=0; k<bags_size; k++)
		grad[k].resize(3 * dimensionC);
	vector<float> g1_tmp;
	g1_tmp.resize(3 * dimensionC);
	for (int r2 = 0; r2<relationTotal; r2++)
	{	
		vector<float> r;
		r.resize(3 * dimensionC);
		for (int i = 0; i < 3 * dimensionC; i++) 
			for (int k=0; k<bags_size; k++)
				r[i] += rList[k][i] * weight[k];
		
		// Softmax gradient: p(r2) - [r2 == r1], scaled by the lr.
		float g = f_r[r2]/sum*alpha1;
		if (r2 == r1)
			g -= alpha1;
		for (int i = 0; i < 3 * dimensionC; i++) 
		{
			float g1 = 0;
			// Dropped-out features receive no gradient.
			if (dropout[i]!=0)
			{
				g1 += g * matrixRelationDao[r2 * dimensionC * 3 + i];
				matrixRelation[r2 * 3 * dimensionC + i] -= g * r[i];
			}
			g1_tmp[i]+=g1;
		}
		matrixRelationPr[r2] -= g;
	}
	// Back-propagate through the attention: each sentence k gets the direct
	// term (weight[k]) plus the terms from d(weight)/d(parameters), since
	// the softmax weights themselves depend on rList and M[r1].
	for (int i = 0; i < 3 * dimensionC; i++) 
	{
		float g1 = g1_tmp[i];
		double tmp_sum = 0; //for rList[k][i]*weight[k]
		for (int k=0; k<bags_size; k++)
		{
			grad[k][i]+=g1*weight[k];
			grad[k][i]+=g1*rList[k][i]*weight[k]*matrixRelationDao[r1 * 3 * dimensionC + i] * wt;
			matrixRelation[r1 * 3 * dimensionC + i] += g1*rList[k][i]*weight[k]*rList[k][i] * wt;
			tmp_sum += rList[k][i]*weight[k];
		}	
		// Normalization term of the softmax derivative.
		for (int k1=0; k1<bags_size; k1++)
		{
			grad[k1][i]-=g1*tmp_sum*weight[k1]*matrixRelationDao[r1 * 3 * dimensionC + i] * wt;
			matrixRelation[r1 * 3 * dimensionC + i] -= g1*tmp_sum*weight[k1]*rList[k1][i] * wt;
		}
	}

	//cout<<241<<endl;
	// Push each sentence's gradient down through the conv/pooling layer.
	for (int k=0; k<bags_size; k++)
	{
		int i = bags_train[bags_name][k];
		train_gradient(trainLists[i], trainPositionE1[i], trainPositionE2[i], trainLength[i], headList[i], tailList[i], relationList[i], alpha1,rList[k], tipList[k], grad[k]);
		
	}

	//cout<<249<<endl;
	return rt;
}
248

249
// Index of the current training epoch (set by train()).
int turn;

// Debug counter reset by each worker thread; no longer printed.
int test_tmp = 0;

// b_train: one entry per bag (the bag's name).
// c_train: sampling pool of indices into b_train (a bag may be listed
// several times so it is sampled more often).
std::vector<std::string> b_train;
std::vector<int> c_train;

// score_tmp counts bags processed so far in the current mini-batch;
// score_max is the budget. Both are guarded by mutex1 in trainMode().
double score_tmp = 0, score_max = 0;

// BUG FIX: a statically allocated pthread mutex must be initialized before
// use (PTHREAD_MUTEX_INITIALIZER); locking an uninitialized mutex, as the
// original did, is undefined behavior per POSIX.
pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;

int tot_batch;
259
void* trainMode(void *id ) {
260
		unsigned long long next_random = (long long)id;
261
		test_tmp = 0;
262
	//	for (int k1 = batch; k1 > 0; k1--)
263
		while (true)
264
		{
265

266
			pthread_mutex_lock (&mutex1);
267
			if (score_tmp>=score_max)
268
			{
269
				pthread_mutex_unlock (&mutex1);
270
				break;
271
			}
272
			score_tmp+=1;
273
		//	cout<<score_tmp<<' '<<score_max<<endl;
274
			pthread_mutex_unlock (&mutex1);
275
			int j = getRand(0, c_train.size());
276
			//cout<<j<<'|';
277
			j = c_train[j];
278
			//cout<<j<<'|';
279
			//test_tmp+=bags_train[b_train[j]].size();
280
			//cout<<test_tmp<<' ';
281
			score += train_bags(b_train[j]);
282
		}
283
		//cout<<endl;
284
}
285

286
void train() {
287
	int tmp = 0;
288
	b_train.clear();
289
	c_train.clear();
290
	for (map<string,vector<int> >:: iterator it = bags_train.begin(); it!=bags_train.end(); it++)
291
	{
292
		int max_size = 1;//it->second.size()/2;
293
		for (int i=0; i<max(1,max_size); i++)
294
			c_train.push_back(b_train.size());
295
		b_train.push_back(it->first);
296
		tmp+=it->second.size();
297
	}
298
	cout<<c_train.size()<<endl;
299

300
	float con = sqrt(6.0/(dimensionC+relationTotal));
301
	float con1 = sqrt(6.0/((dimensionWPE+dimension)*window));
302
	matrixRelation = (float *)calloc(3 * dimensionC * relationTotal, sizeof(float));
303
	matrixRelationPr = (float *)calloc(relationTotal, sizeof(float));
304
	matrixRelationPrDao = (float *)calloc(relationTotal, sizeof(float));
305
	wordVecDao = (float *)calloc(dimension * wordTotal, sizeof(float));
306
	positionVecE1 = (float *)calloc(PositionTotalE1 * dimensionWPE, sizeof(float));
307
	positionVecE2 = (float *)calloc(PositionTotalE2 * dimensionWPE, sizeof(float));
308
	
309
	matrixW1 = (float*)calloc(dimensionC * dimension * window, sizeof(float));
310
	matrixW1PositionE1 = (float *)calloc(dimensionC * dimensionWPE * window, sizeof(float));
311
	matrixW1PositionE2 = (float *)calloc(dimensionC * dimensionWPE * window, sizeof(float));
312
	matrixB1 = (float*)calloc(3 * dimensionC, sizeof(float));
313

314
	for (int i = 0; i < dimensionC; i++) {
315
		int last = i * window * dimension;
316
		for (int j = dimension * window - 1; j >=0; j--)
317
			matrixW1[last + j] = getRandU(-con1, con1);
318
		last = i * window * dimensionWPE;
319
		float tmp1 = 0;
320
		float tmp2 = 0;
321
		for (int j = dimensionWPE * window - 1; j >=0; j--) {
322
			matrixW1PositionE1[last + j] = getRandU(-con1, con1);
323
			tmp1 += matrixW1PositionE1[last + j]  * matrixW1PositionE1[last + j] ;
324
			matrixW1PositionE2[last + j] = getRandU(-con1, con1);
325
			tmp2 += matrixW1PositionE2[last + j]  * matrixW1PositionE2[last + j] ;
326
		}
327
		for (int j=0; j<3; j++)
328
		matrixB1[i] = getRandU(-con1, con1);
329
	}
330

331
	for (int i = 0; i < relationTotal; i++) 
332
	{
333
		matrixRelationPr[i] = getRandU(-con, con);				//add
334
		for (int j = 0; j < 3 * dimensionC; j++)
335
			matrixRelation[i * 3 * dimensionC + j] = getRandU(-con, con);
336
	}
337

338
	for (int i = 0; i < PositionTotalE1; i++) {
339
		float tmp = 0;
340
		for (int j = 0; j < dimensionWPE; j++) {
341
			positionVecE1[i * dimensionWPE + j] = getRandU(-con1, con1);
342
			tmp += positionVecE1[i * dimensionWPE + j] * positionVecE1[i * dimensionWPE + j];
343
		}
344
	}
345

346
	for (int i = 0; i < PositionTotalE2; i++) {
347
		float tmp = 0;
348
		for (int j = 0; j < dimensionWPE; j++) {
349
			positionVecE2[i * dimensionWPE + j] = getRandU(-con1, con1);
350
			tmp += positionVecE2[i * dimensionWPE + j] * positionVecE2[i * dimensionWPE + j];
351
		}
352
	}
353

354
	matrixRelationDao = (float *)calloc(3 * dimensionC*relationTotal, sizeof(float));
355
	matrixW1Dao =  (float*)calloc(dimensionC * dimension * window, sizeof(float));
356
	matrixB1Dao =  (float*)calloc(3 * dimensionC, sizeof(float));
357
	
358
	positionVecDaoE1 = (float *)calloc(PositionTotalE1 * dimensionWPE, sizeof(float));
359
	positionVecDaoE2 = (float *)calloc(PositionTotalE2 * dimensionWPE, sizeof(float));
360
	matrixW1PositionE1Dao = (float *)calloc(dimensionC * dimensionWPE * window, sizeof(float));
361
	matrixW1PositionE2Dao = (float *)calloc(dimensionC * dimensionWPE * window, sizeof(float));
362
	/*time_begin();
363
	test();
364
	time_end();*/
365
//	return;
366
	for (turn = 0; turn < trainTimes; turn ++) {
367

368
	//	len = trainLists.size();
369
		len = c_train.size();
370
		npoch  =  len / (batch * num_threads);
371
		alpha1 = alpha*rate/batch;
372

373
		score = 0;
374
		score_max = 0;
375
		score_tmp = 0;
376
		double score1 = score;
377
		time_begin();
378
		for (int k = 1; k <= npoch; k++) {
379
			score_max += batch * num_threads;
380
			//cout<<k<<endl;
381
			memcpy(positionVecDaoE1, positionVecE1, PositionTotalE1 * dimensionWPE* sizeof(float));
382
			memcpy(positionVecDaoE2, positionVecE2, PositionTotalE2 * dimensionWPE* sizeof(float));
383
			memcpy(matrixW1PositionE1Dao, matrixW1PositionE1, dimensionC * dimensionWPE * window* sizeof(float));
384
			memcpy(matrixW1PositionE2Dao, matrixW1PositionE2, dimensionC * dimensionWPE * window* sizeof(float));
385
			memcpy(wordVecDao, wordVec, dimension * wordTotal * sizeof(float));
386

387
			memcpy(matrixW1Dao, matrixW1, sizeof(float) * dimensionC * dimension * window);
388
			memcpy(matrixB1Dao, matrixB1, sizeof(float) * 3 * dimensionC);
389
			memcpy(matrixRelationPrDao, matrixRelationPr, relationTotal * sizeof(float));				//add
390
			memcpy(matrixRelationDao, matrixRelation, 3 * dimensionC*relationTotal * sizeof(float));
391
			pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
392
			for (int a = 0; a < num_threads; a++)
393
				pthread_create(&pt[a], NULL, trainMode,  (void *)a);
394
			for (int a = 0; a < num_threads; a++)
395
			pthread_join(pt[a], NULL);
396
			free(pt);
397
			if (k%(npoch/5)==0)
398
			{
399
				cout<<"npoch:\t"<<k<<'/'<<npoch<<endl;
400
				time_end();
401
				time_begin();
402
				cout<<"score:\t"<<score-score1<<' '<<score_tmp<<endl;
403
				score1 = score;
404
			}
405
		}
406
		printf("Total Score:\t%f\n",score);
407
		printf("test\n");
408
		test();
409
		//if ((turn+1)%1==0) 
410
		rate=rate*reduce;
411
	}
412
	cout<<"Train End"<<endl;
413
}
414

415
int main(int argc, char ** argv) {
416
	output_model = 1;
417
	logg = fopen("log.txt","w");
418
	cout<<"Init Begin."<<endl;
419
	init();
420
	cout<<"Init End."<<endl;
421
	train();
422
	fclose(logg);
423
}
424

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.