google-research

Форк
0
/
inception_base_911.py 
384 строки · 16.2 Кб
1
# coding=utf-8
2
# Copyright 2024 The Google Research Authors.
3
#
4
# Licensed under the Apache License, Version 2.0 (the "License");
5
# you may not use this file except in compliance with the License.
6
# You may obtain a copy of the License at
7
#
8
#     http://www.apache.org/licenses/LICENSE-2.0
9
#
10
# Unless required by applicable law or agreed to in writing, software
11
# distributed under the License is distributed on an "AS IS" BASIS,
12
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
# See the License for the specific language governing permissions and
14
# limitations under the License.
15

16
"""No padding inception FCN base network for a 911x911 receptive field.
17

18
This is a variant of inception v3 FCN that takes a larger receptive field and
19
predicts a larger patch size.
20
"""
21
import tensorflow.compat.v1 as tf
22
import tf_slim as slim
23

24
# The downsampling factor of the network.
25
MODEL_DOWNSAMPLE_FACTOR = 2**4
26

27

28
def _trim_border_px(inputs, n):
29
  """Crop n pixels around the border of inputs.
30

31
  Args:
32
    inputs: a tensor of size [batch_size, height, width, channels].
33
    n: an integer for number of pixels to crop.
34

35
  Returns:
36
    cropped tensor.
37
  Raises:
38
    ValueError: if cropping leads to empty output tensor.
39
  """
40
  if n > min(inputs.shape[1], inputs.shape[2]) // 2:
41
    raise ValueError(
42
        'n (%d) can not be greater than or equal to half of the input shape.' %
43
        n)
44
  return inputs[:, n:-n, n:-n, :]
45

46

47
def nopad_inception_v3_base_911(inputs,
                                min_depth=16,
                                depth_multiplier=1.0,
                                num_final_1x1_conv=0,
                                scope=None):
  """Constructs a no padding Inception v3 network from inputs.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels]. Must be
      floating point. If a pretrained checkpoint is used, pixel values should be
      the same as during training.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels) for
      all convolution ops. The value must be greater than zero. Typical usage
      will be to set this value in (0, 1) to reduce the number of parameters or
      computation cost of the model.
    num_final_1x1_conv: Int, number of final 1x1 conv layers.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor.
    end_points: a set of activations for external use, for example summaries or
                losses.

  Raises:
    ValueError: if depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with tf.variable_scope(scope, 'NopadInceptionV3', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1,
                        padding='VALID'):
      # 911 x 911 x 3
      end_point = 'Conv2d_1a_3x3'
      net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      # 455 x 455 x 32
      end_point = 'Conv2d_2a_3x3'
      net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      # 453 x 453 x 32
      end_point = 'Conv2d_2b_3x3'
      net = slim.conv2d(net, depth(64), [3, 3], scope=end_point)
      end_points[end_point] = net
      # 451 x 451 x 64
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      # 225 x 225 x 64
      end_point = 'Conv2d_3b_1x1'
      net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      # 225 x 225 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      # 223 x 223 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      # 111 x 111 x 192.

    # Inception blocks
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1,
                        padding='VALID'):
      # Mixed_5b: 107 x 107 x 256.
      # With VALID padding each branch shrinks the spatial size by a
      # different amount, so wider branches are trimmed to the size of the
      # narrowest one (the 5x5 branch) before concatenation.
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 2),  # branch_0: 111 x 111 x 64
                branch_1,  # branch_1: 107 x 107 x 64
                branch_2,  # branch_2: 107 x 107 x 96
                _trim_border_px(branch_3, 1)  # branch_3: 109 x 109 x 32
            ],
            3)
      end_points[end_point] = net

      # Mixed_5c: 103 x 103 x 288.
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          # NOTE: the irregular scope names below ('Conv2d_0b_1x1',
          # 'Conv_1_0c_5x5') match the original Inception v3 checkpoint
          # layout and must not be renamed.
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 2),  # branch_0: 107 x 107 x 64
                branch_1,  # branch_1: 103 x 103 x 64
                branch_2,  # branch_2: 103 x 103 x 96
                _trim_border_px(branch_3, 1)  # branch_3: 105 x 105 x 64
            ],
            3)
      end_points[end_point] = net

      # Mixed_5d: 99 x 99 x 288.
      end_point = 'Mixed_5d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 2),  # branch_0: 103 x 103 x 64
                branch_1,  # branch_1: 99 x 99 x 64
                branch_2,  # branch_2: 99 x 99 x 96
                _trim_border_px(branch_3, 1)  # branch_3: 101 x 101 x 64
            ],
            3)

      end_points[end_point] = net

      # Mixed_6a: 49 x 49 x 768. Downsampling block (stride 2); all three
      # branches end up at the same spatial size, so no trimming is needed.
      end_point = 'Mixed_6a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net,
              depth(384), [3, 3],
              stride=2,
              padding='VALID',
              scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1,
              depth(96), [3, 3],
              stride=2,
              padding='VALID',
              scope='Conv2d_1a_1x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(
              net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
        net = tf.concat(
            [
                branch_0,  # branch_0: 49 x 49 x 384
                branch_1,  # branch_1: 49 x 49 x 96
                branch_2,  # branch_2: 49 x 49 x 288
            ],
            3)
      end_points[end_point] = net

      # Mixed_6b: 37 x 37 x 768.
      end_point = 'Mixed_6b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(
              branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 6),  # branch_0: 49 x 49 x 192
                _trim_border_px(branch_1, 3),  # branch_1: 43 x 43 x 192
                branch_2,  # branch_2: 37 x 37 x 192
                _trim_border_px(branch_3, 5)  # branch_3: 47 x 47 x 192
            ],
            3)
      end_points[end_point] = net

      # Mixed_6c: 25 x 25 x 768.
      end_point = 'Mixed_6c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 6),  # branch_0: 37 x 37 x 192
                _trim_border_px(branch_1, 3),  # branch_1: 31 x 31 x 192
                branch_2,  # branch_2: 25 x 25 x 192
                _trim_border_px(branch_3, 5)  # branch_3: 35 x 35 x 192
            ],
            3)
      end_points[end_point] = net

      # Mixed_6d: 13 x 13 x 768.
      end_point = 'Mixed_6d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 6),  # branch_0: 25 x 25 x 192
                _trim_border_px(branch_1, 3),  # branch_1: 19 x 19 x 192
                branch_2,  # branch_2: 13 x 13 x 192
                _trim_border_px(branch_3, 5)  # branch_3: 23 x 23 x 192
            ],
            3)
      end_points[end_point] = net

      # Mixed_6e: 1 x 1 x 768.
      end_point = 'Mixed_6e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = slim.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = slim.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = tf.concat(
            [
                _trim_border_px(branch_0, 6),  # branch_0: 13 x 13 x 192
                _trim_border_px(branch_1, 3),  # branch_1: 7 x 7 x 192
                branch_2,  # branch_2: 1 x 1 x 192
                _trim_border_px(branch_3, 5)  # branch_3: 11 x 11 x 192
            ],
            3)
      end_points[end_point] = net

      # Optional trailing 1x1 conv layers. The conv output must be assigned
      # back to `net` — otherwise these layers would create variables but
      # have no effect on the returned tensor.
      for i in range(num_final_1x1_conv):
        end_point = 'Final_Conv2d_{}_1x1'.format(i)
        net = slim.conv2d(net, depth(256), [1, 1], scope=end_point)
        end_points[end_point] = net
      return net, end_points
385

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.