# coding=utf-8
# Copyright 2024 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import argparse

import numpy as np
import tensorflow as tf

import randomdata  # local module: input pipelines and the cal_eo metric
import model  # local module: VGG classifier definition
# pylint: skip-file

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--real_path', default='../data/resize128')
parser.add_argument('--fake_path', default='../data/fake')
parser.add_argument('--train_label', default='../data/annotations/train_label.txt')
parser.add_argument('--test_label', default='../data/annotations/test_label.txt')
parser.add_argument('--valid_label', default='../data/annotations/val_label.txt')
parser.add_argument('--max_epoch', type=int, default=20, help='Epoch to run [default: 20]')
parser.add_argument('--batch_size', type=int, default=64, help='Batch Size during training [default: 64]')
parser.add_argument('--n_class', type=int, default=2, help='Number of classes [default: 2]')
parser.add_argument('--lr', type=float, default=0.1, help='Initial learning rate [default: 0.1]')
parser.add_argument('--momentum', type=float, default=0.9, help='Optimizer momentum [default: 0.9]')
parser.add_argument('--optimizer', default='momentum', help='adam or momentum [default: momentum]')
FLAGS = parser.parse_args()
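
# Example invocation (the script name here is an assumption; the paths follow
# the flag defaults above):
#   python train_random.py --gpu 0 --real_path ../data/resize128 \
#       --fake_path ../data/fake --max_epoch 20 --batch_size 64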


# The 40 binary CelebA face attributes, mapping each attribute name to its
# column index in the annotation files.
ATT_ID = {'5_o_Clock_Shadow': 0, 'Arched_Eyebrows': 1, 'Attractive': 2,
          'Bags_Under_Eyes': 3, 'Bald': 4, 'Bangs': 5, 'Big_Lips': 6,
          'Big_Nose': 7, 'Black_Hair': 8, 'Blond_Hair': 9, 'Blurry': 10,
          'Brown_Hair': 11, 'Bushy_Eyebrows': 12, 'Chubby': 13,
          'Double_Chin': 14, 'Eyeglasses': 15, 'Goatee': 16,
          'Gray_Hair': 17, 'Heavy_Makeup': 18, 'High_Cheekbones': 19,
          'Male': 20, 'Mouth_Slightly_Open': 21, 'Mustache': 22,
          'Narrow_Eyes': 23, 'No_Beard': 24, 'Oval_Face': 25,
          'Pale_Skin': 26, 'Pointy_Nose': 27, 'Receding_Hairline': 28,
          'Rosy_Cheeks': 29, 'Sideburns': 30, 'Smiling': 31,
          'Straight_Hair': 32, 'Wavy_Hair': 33, 'Wearing_Earrings': 34,
          'Wearing_Hat': 35, 'Wearing_Lipstick': 36,
          'Wearing_Necklace': 37, 'Wearing_Necktie': 38, 'Young': 39}
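
# Example lookup: ATT_ID['Male'] == 20. Which attribute serves as the
# classification target and which as the protected attribute for cal_eo is
# presumably decided inside randomdata when it parses the label files.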

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu  # pin the process to the requested GPU
# tf.set_random_seed(0)  # 0 for 512
tf.set_random_seed(100)

(train_images, train_labels, train_att), train_iters = randomdata.data_train(FLAGS.real_path, FLAGS.train_label, 64)
(fake_images, fake_labels, fake_att), fake_iters = randomdata.data_fake(FLAGS.fake_path, FLAGS.train_label, 64)
(valid_images, valid_labels, valid_att), valid_iters = randomdata.data_test(FLAGS.real_path, FLAGS.valid_label, FLAGS.batch_size)
# Note: the test pipeline is built but never evaluated in this script.
(test_images, test_labels, test_att), test_iters = randomdata.data_test(FLAGS.real_path, FLAGS.test_label, FLAGS.batch_size)
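
# Assumed contract for the randomdata pipelines (their implementation is not
# shown here): each call returns (image, label, attribute) tensors that yield
# one batch per sess.run, plus the number of iterations per epoch. data_train
# and data_fake use a hard-coded batch size of 64, so one real half and one
# fake half concatenate into an effective training batch of 128 below.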

batch_images = tf.placeholder(tf.float32, [None, 128, 128, 3])
batch_labels = tf.placeholder(tf.int32, [None])
is_training = tf.placeholder(tf.bool)
lr_ph = tf.placeholder(tf.float32)  # learning rate is fed per step so it can decay
lr = FLAGS.lr
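
# Assumed contract for model.vgg (defined in the local model.py, not shown):
# it takes a batch of NHWC float images, a class count, and an is_training
# flag, and returns unnormalized logits of shape [batch, n_class].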
Y_score = model.vgg(batch_images, FLAGS.n_class, is_training)
Y_hat = tf.nn.softmax(Y_score)
Y_pred = tf.argmax(Y_hat, 1)
Y_label = tf.to_float(tf.one_hot(batch_labels, FLAGS.n_class))

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Y_score, labels=Y_label)
loss_op = tf.reduce_mean(cross_entropy)
correct_prediction = tf.equal(Y_pred, tf.argmax(Y_label, 1))
acc_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Note: FLAGS.optimizer is never consulted; momentum SGD is always used.
update_op = tf.train.MomentumOptimizer(lr_ph, FLAGS.momentum).minimize(loss_op)
init = tf.global_variables_initializer()

print('================\n\ntrain_iters=%d, fake_iters=%d' % (train_iters, fake_iters))
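
# randomdata.cal_eo (called after each validation pass below) returns a
# sequence whose first four entries are printed as per-group rates and whose
# last entry is used as a scalar equalized-odds summary. Its real
# implementation lives in randomdata.py; the helper below is only a minimal
# sketch of one plausible layout, assuming a binary protected attribute and
# binary labels, and is never called by this script.
def _cal_eo_sketch(att, label, pred):
  att, label, pred = np.asarray(att), np.asarray(label), np.asarray(pred)
  rates = []
  for a in (0, 1):  # protected-attribute group
    for y in (0, 1):  # ground-truth label
      mask = (att == a) & (label == y)
      rates.append(float((pred[mask] == y).mean()) if mask.any() else 0.0)
  # Largest cross-group gap in group-conditional correct-classification rates.
  gap = max(abs(rates[0] - rates[2]), abs(rates[1] - rates[3]))
  return rates + [gap]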

with tf.Session() as sess:
  sess.run(init)
  for i in range(FLAGS.max_epoch):
    # Step decay; inert at the default --max_epoch of 20.
    if i in (30, 40):
      lr *= 0.1

    for j in range(train_iters):
      # co_images, co_labels = sess.run([train_images, train_labels])
      # Each step trains on 64 real plus 64 generated images.
      tr_images, tr_labels = sess.run([train_images, train_labels])
      fa_images, fa_labels = sess.run([fake_images, fake_labels])
      co_images = np.concatenate((tr_images, fa_images), axis=0)
      co_labels = np.concatenate((tr_labels, fa_labels), axis=0)
      loss, acc, _ = sess.run(
          [loss_op, acc_op, update_op],
          {batch_images: co_images, batch_labels: co_labels, lr_ph: lr, is_training: True})
      if j % 50 == 0:
        print('====epoch_%d====iter_%d: loss=%.4f, train_acc=%.4f' % (i, j, loss, acc))

    # Validation pass: accumulate predictions, labels, and protected attributes.
    valid_acc = 0.0
    y_pred = []
    y_label = []
    y_att = []
    for k in range(valid_iters):
      va_images, va_labels, va_att = sess.run([valid_images, valid_labels, valid_att])
      batch_acc, batch_pred = sess.run(
          [acc_op, Y_pred],
          {batch_images: va_images, batch_labels: va_labels, is_training: False})
      valid_acc += batch_acc
      y_pred += batch_pred.tolist()
      y_label += va_labels.tolist()
      y_att += va_att.tolist()
    valid_acc = valid_acc / float(valid_iters)
    valid_eo = randomdata.cal_eo(y_att, y_label, y_pred)
    print('====epoch_%d: valid_acc=%.4f, valid_eo=%.4f' % (i, valid_acc, valid_eo[-1]))
    print('eo: ', valid_eo[0], valid_eo[1])
    print('eo: ', valid_eo[2], valid_eo[3])