# GFPGAN (fork) — train_gfpgan_v1.yml — training configuration, 216 lines, 4.5 KB
# general settings
name: train_GFPGANv1_512
model_type: GFPGANModel
num_gpu: auto  # officially, we use 4 GPUs
manual_seed: 0

# dataset and data loader settings
datasets:
  train:
    name: FFHQ
    type: FFHQDegradationDataset
    # dataroot_gt: datasets/ffhq/ffhq_512.lmdb
    dataroot_gt: datasets/ffhq/ffhq_512
    io_backend:
      # type: lmdb
      type: disk

    use_hflip: true
    mean: [0.5, 0.5, 0.5]
    std: [0.5, 0.5, 0.5]
    out_size: 512

    # degradation model: blur -> downsample -> noise -> jpeg
    blur_kernel_size: 41
    kernel_list: ['iso', 'aniso']
    kernel_prob: [0.5, 0.5]
    blur_sigma: [0.1, 10]
    downsample_range: [0.8, 8]
    noise_range: [0, 20]
    jpeg_range: [60, 100]

    # color jitter and gray
    color_jitter_prob: 0.3
    color_jitter_shift: 20
    color_jitter_pt_prob: 0.3
    gray_prob: 0.01

    # If you do not want colorization, please set
    # color_jitter_prob: ~
    # color_jitter_pt_prob: ~
    # gray_prob: 0.01
    # gt_gray: True

    crop_components: true
    component_path: experiments/pretrained_models/FFHQ_eye_mouth_landmarks_512.pth
    eye_enlarge_ratio: 1.4

    # data loader
    use_shuffle: true
    num_worker_per_gpu: 6
    batch_size_per_gpu: 3
    dataset_enlarge_ratio: 1
    prefetch_mode: ~

  val:
    # Please modify accordingly to use your own validation
    # Or comment the val block if do not need validation during training
    name: validation
    type: PairedImageDataset
    dataroot_lq: datasets/faces/validation/input
    dataroot_gt: datasets/faces/validation/reference
    io_backend:
      type: disk
    mean: [0.5, 0.5, 0.5]
    std: [0.5, 0.5, 0.5]
    scale: 1

# network structures
network_g:
  type: GFPGANv1
  out_size: 512
  num_style_feat: 512
  channel_multiplier: 1
  resample_kernel: [1, 3, 3, 1]
  decoder_load_path: experiments/pretrained_models/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth
  fix_decoder: true
  num_mlp: 8
  lr_mlp: 0.01
  input_is_latent: true
  different_w: true
  narrow: 1
  sft_half: true

network_d:
  type: StyleGAN2Discriminator
  out_size: 512
  channel_multiplier: 1
  resample_kernel: [1, 3, 3, 1]

network_d_left_eye:
  type: FacialComponentDiscriminator

network_d_right_eye:
  type: FacialComponentDiscriminator

network_d_mouth:
  type: FacialComponentDiscriminator

network_identity:
  type: ResNetArcFace
  block: IRBlock
  layers: [2, 2, 2, 2]
  use_se: false

# path
path:
  pretrain_network_g: ~
  param_key_g: params_ema
  strict_load_g: ~
  pretrain_network_d: ~
  pretrain_network_d_left_eye: ~
  pretrain_network_d_right_eye: ~
  pretrain_network_d_mouth: ~
  pretrain_network_identity: experiments/pretrained_models/arcface_resnet18.pth
  # resume
  resume_state: ~
  ignore_resume_networks: ['network_identity']

# training settings
train:
  optim_g:
    type: Adam
    lr: !!float 2e-3
  optim_d:
    type: Adam
    lr: !!float 2e-3
  optim_component:
    type: Adam
    lr: !!float 2e-3

  scheduler:
    type: MultiStepLR
    milestones: [600000, 700000]
    gamma: 0.5

  total_iter: 800000
  warmup_iter: -1  # no warm up

  # losses
  # pixel loss
  pixel_opt:
    type: L1Loss
    loss_weight: !!float 1e-1
    reduction: mean
  # L1 loss used in pyramid loss, component style loss and identity loss
  L1_opt:
    type: L1Loss
    loss_weight: 1
    reduction: mean

  # image pyramid loss
  pyramid_loss_weight: 1
  remove_pyramid_loss: 50000
  # perceptual loss (content and style losses)
  perceptual_opt:
    type: PerceptualLoss
    layer_weights:
      # before relu
      'conv1_2': 0.1
      'conv2_2': 0.1
      'conv3_4': 1
      'conv4_4': 1
      'conv5_4': 1
    vgg_type: vgg19
    use_input_norm: true
    perceptual_weight: !!float 1
    style_weight: 50
    range_norm: true
    criterion: l1
  # gan loss
  gan_opt:
    type: GANLoss
    gan_type: wgan_softplus
    loss_weight: !!float 1e-1
  # r1 regularization for discriminator
  r1_reg_weight: 10
  # facial component loss
  gan_component_opt:
    type: GANLoss
    gan_type: vanilla
    real_label_val: 1.0
    fake_label_val: 0.0
    loss_weight: !!float 1
  comp_style_weight: 200
  # identity loss
  identity_weight: 10

  net_d_iters: 1
  net_d_init_iters: 0
  net_d_reg_every: 16

# validation settings
val:
  val_freq: !!float 5e3
  save_img: true

  metrics:
    psnr: # metric name
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true

# --- page-footer scrape artifact (cookie-consent banner), not part of the config ---
# Использование cookies
#
# Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.
#
# Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.
#
# Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.