float_part201_train.ipynb 
2397 lines · 100.3 KB
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#@title Copyright 2022 Google LLC, licensed under the Apache License, Version 2.0 (the \"License\")\n",
    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "# you may not use this file except in compliance with the License.\n",
    "# You may obtain a copy of the License at\n",
    "#\n",
    "# https://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import glob\n",
    "import math\n",
    "import sys\n",
    "import os\n",
    "import cv2\n",
    "import numpy as np\n",
    "import pickle\n",
    "import time\n",
    "import random\n",
    "import collections\n",
    "import queue\n",
    "import threading\n",
    "import functools\n",
    "from tqdm.notebook import tqdm\n",
    "from typing import Dict, Type, Any, Callable, Union, List, Optional\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "import torch\n",
    "from torch import nn\n",
    "import torch.optim as optim\n",
    "from torch.nn import functional as F\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "import torch.backends.cudnn as cudnn\n",
    "from torchinfo import summary\n",
    "import torch.utils.model_zoo as model_zoo\n",
    "from torch.nn.parallel.data_parallel import DataParallel\n",
    "from torch.nn.modules.batchnorm import _BatchNorm\n",
    "from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Synchronised Batch Norm**\n",
    "Citation: https://github.com/vacancy/Synchronized-BatchNorm-PyTorch\n",
    "Implements a Synchronised Batch Norm for distributed learning (using DataParallel here)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FutureResult(object):\n",
    "    \"\"\"A thread-safe future implementation. Used only as a one-to-one pipe.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self._result = None\n",
    "        self._lock = threading.Lock()\n",
    "        self._cond = threading.Condition(self._lock)\n",
    "\n",
    "    def put(self, result):\n",
    "        with self._lock:\n",
    "            assert self._result is None, 'Previous result hasn\\'t been fetched.'\n",
    "            self._result = result\n",
    "            self._cond.notify()\n",
    "\n",
    "    def get(self):\n",
    "        with self._lock:\n",
    "            if self._result is None:\n",
    "                self._cond.wait()\n",
    "\n",
    "            res = self._result\n",
    "            self._result = None\n",
    "            return res\n",
    "\n",
    "\n",
    "_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])\n",
    "_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])\n",
    "\n",
    "\n",
    "class SlavePipe(_SlavePipeBase):\n",
    "    \"\"\"Pipe for master-slave communication.\"\"\"\n",
    "\n",
    "    def run_slave(self, msg):\n",
    "        self.queue.put((self.identifier, msg))\n",
    "        ret = self.result.get()\n",
    "        self.queue.put(True)\n",
    "        return ret\n",
    "\n",
    "\n",
    "class SyncMaster(object):\n",
    "    \"\"\"An abstract `SyncMaster` object.\n",
    "    - During replication, as data parallel triggers a callback on each module, every slave device should\n",
    "    call `register_slave(id)` and obtain a `SlavePipe` to communicate with the master.\n",
    "    - During the forward pass, the master device invokes `run_master`; all messages from slave devices are\n",
    "    collected and passed to a registered callback.\n",
    "    - After receiving the messages, the master device gathers the information and determines the message\n",
    "    to be passed back to each slave device.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, master_callback):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            master_callback: a callback to be invoked after having collected messages from slave devices.\n",
    "        \"\"\"\n",
    "        self._master_callback = master_callback\n",
    "        self._queue = queue.Queue()\n",
    "        self._registry = collections.OrderedDict()\n",
    "        self._activated = False\n",
    "\n",
    "    def __getstate__(self):\n",
    "        return {'master_callback': self._master_callback}\n",
    "\n",
    "    def __setstate__(self, state):\n",
    "        self.__init__(state['master_callback'])\n",
    "\n",
    "    def register_slave(self, identifier):\n",
    "        \"\"\"\n",
    "        Register a slave device.\n",
    "        Args:\n",
    "            identifier: an identifier, usually the device id.\n",
    "        Returns: a `SlavePipe` object which can be used to communicate with the master device.\n",
    "        \"\"\"\n",
    "        if self._activated:\n",
    "            assert self._queue.empty(), 'Queue is not clean before next initialization.'\n",
    "            self._activated = False\n",
    "            self._registry.clear()\n",
    "        future = FutureResult()\n",
    "        self._registry[identifier] = _MasterRegistry(future)\n",
    "        return SlavePipe(identifier, self._queue, future)\n",
    "\n",
    "    def run_master(self, master_msg):\n",
    "        \"\"\"\n",
    "        Main entry for the master device in each forward pass.\n",
    "        Messages are first collected from each device (including the master device), and then\n",
    "        a callback is invoked to compute the message to be sent back to each device\n",
    "        (including the master device).\n",
    "        Args:\n",
    "            master_msg: the message that the master wants to send to itself. This will be placed as the first\n",
    "            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.\n",
    "        Returns: the message to be sent back to the master device.\n",
    "        \"\"\"\n",
    "        self._activated = True\n",
    "\n",
    "        intermediates = [(0, master_msg)]\n",
    "        for i in range(self.nr_slaves):\n",
    "            intermediates.append(self._queue.get())\n",
    "\n",
    "        results = self._master_callback(intermediates)\n",
    "        assert results[0][0] == 0, 'The first result should belong to the master.'\n",
    "\n",
    "        for i, res in results:\n",
    "            if i == 0:\n",
    "                continue\n",
    "            self._registry[i].result.put(res)\n",
    "\n",
    "        for i in range(self.nr_slaves):\n",
    "            assert self._queue.get() is True\n",
    "\n",
    "        return results[0][1]\n",
    "\n",
    "    @property\n",
    "    def nr_slaves(self):\n",
    "        return len(self._registry)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _sum_ft(tensor):\n",
    "    \"\"\"sum over the first and last dimension\"\"\"\n",
    "    return tensor.sum(dim=0).sum(dim=-1)\n",
    "\n",
    "\n",
    "def _unsqueeze_ft(tensor):\n",
    "    \"\"\"add new dimensions at the front and the tail\"\"\"\n",
    "    return tensor.unsqueeze(0).unsqueeze(-1)\n",
    "\n",
    "\n",
    "_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])\n",
    "_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])\n",
    "\n",
    "\n",
    "class _SynchronizedBatchNorm(_BatchNorm):\n",
    "    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):\n",
    "        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)\n",
    "\n",
    "        self._sync_master = SyncMaster(self._data_parallel_master)\n",
    "\n",
    "        self._is_parallel = False\n",
    "        self._parallel_id = None\n",
    "        self._slave_pipe = None\n",
    "\n",
    "    def forward(self, input):\n",
    "        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.\n",
    "        if not (self._is_parallel and self.training):\n",
    "            return F.batch_norm(\n",
    "                input, self.running_mean, self.running_var, self.weight, self.bias,\n",
    "                self.training, self.momentum, self.eps)\n",
    "\n",
    "        # Resize the input to (B, C, -1).\n",
    "        input_shape = input.size()\n",
    "        input = input.view(input.size(0), self.num_features, -1)\n",
    "\n",
    "        # Compute the sum and square-sum.\n",
    "        sum_size = input.size(0) * input.size(2)\n",
    "        input_sum = _sum_ft(input)\n",
    "        input_ssum = _sum_ft(input ** 2)\n",
    "\n",
    "        # Reduce-and-broadcast the statistics.\n",
    "        if self._parallel_id == 0:\n",
    "            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))\n",
    "        else:\n",
    "            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))\n",
    "\n",
    "        # Compute the output.\n",
    "        if self.affine:\n",
    "            # MJY:: Fuse the multiplication for speed.\n",
    "            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)\n",
    "        else:\n",
    "            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)\n",
    "\n",
    "        # Reshape it.\n",
    "        return output.view(input_shape)\n",
    "\n",
    "    def __data_parallel_replicate__(self, ctx, copy_id):\n",
    "        self._is_parallel = True\n",
    "        self._parallel_id = copy_id\n",
    "\n",
    "        # parallel_id == 0 means master device.\n",
    "        if self._parallel_id == 0:\n",
    "            ctx.sync_master = self._sync_master\n",
    "        else:\n",
    "            self._slave_pipe = ctx.sync_master.register_slave(copy_id)\n",
    "\n",
    "    def _data_parallel_master(self, intermediates):\n",
    "        \"\"\"Reduce the sum and square-sum, compute the statistics, and broadcast it.\"\"\"\n",
    "\n",
    "        # Always using the same \"device order\" makes the ReduceAdd operation faster.\n",
    "        # Thanks to:: Tete Xiao (http://tetexiao.com/)\n",
    "        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n",
    "\n",
    "        to_reduce = [i[1][:2] for i in intermediates]\n",
    "        to_reduce = [j for i in to_reduce for j in i]  # flatten\n",
    "        target_gpus = [i[1].sum.get_device() for i in intermediates]\n",
    "\n",
    "        sum_size = sum([i[1].sum_size for i in intermediates])\n",
    "        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n",
    "        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n",
    "\n",
    "        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n",
    "\n",
    "        outputs = []\n",
    "        for i, rec in enumerate(intermediates):\n",
    "            outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))\n",
    "\n",
    "        return outputs\n",
    "\n",
    "    def _compute_mean_std(self, sum_, ssum, size):\n",
    "        \"\"\"Compute the mean and standard-deviation with sum and square-sum. This method\n",
    "        also maintains the moving average on the master device.\"\"\"\n",
    "        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'\n",
    "        mean = sum_ / size\n",
    "        sumvar = ssum - sum_ * mean\n",
    "        unbias_var = sumvar / (size - 1)\n",
    "        bias_var = sumvar / size\n",
    "\n",
    "        self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data\n",
    "        self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data\n",
    "\n",
    "        return mean, bias_var.clamp(self.eps) ** -0.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):\n",
    "    r\"\"\"Applies Batch Normalization over a 4d input that is seen as a mini-batch\n",
    "    of 3d inputs\n",
    "    .. math::\n",
    "        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta\n",
    "    This module differs from the built-in PyTorch BatchNorm2d in that the mean and\n",
    "    standard-deviation are reduced across all devices during training.\n",
    "    For example, when one uses `nn.DataParallel` to wrap the network during\n",
    "    training, PyTorch's implementation normalizes the tensor on each device using\n",
    "    the statistics only on that device, which accelerates the computation and\n",
    "    is also easy to implement, but the statistics might be inaccurate.\n",
    "    Instead, in this synchronized version, the statistics will be computed\n",
    "    over all training samples distributed on multiple devices.\n",
    "    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same\n",
    "    as the built-in PyTorch implementation.\n",
    "    The mean and standard-deviation are calculated per-dimension over\n",
    "    the mini-batches and gamma and beta are learnable parameter vectors\n",
    "    of size C (where C is the input size).\n",
    "    During training, this layer keeps a running estimate of its computed mean\n",
    "    and variance. The running sum is kept with a default momentum of 0.1.\n",
    "    During evaluation, this running mean/variance is used for normalization.\n",
    "    Because the BatchNorm is done over the `C` dimension, computing statistics\n",
    "    on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm.\n",
    "    Args:\n",
    "        num_features: num_features from an expected input of\n",
    "            size batch_size x num_features x height x width\n",
    "        eps: a value added to the denominator for numerical stability.\n",
    "            Default: 1e-5\n",
    "        momentum: the value used for the running_mean and running_var\n",
    "            computation. Default: 0.1\n",
    "        affine: a boolean value that when set to ``True``, gives the layer learnable\n",
    "            affine parameters. Default: ``True``\n",
    "    Shape:\n",
    "        - Input: :math:`(N, C, H, W)`\n",
    "        - Output: :math:`(N, C, H, W)` (same shape as input)\n",
    "    Examples:\n",
    "        >>> # With Learnable Parameters\n",
    "        >>> m = SynchronizedBatchNorm2d(100)\n",
    "        >>> # Without Learnable Parameters\n",
    "        >>> m = SynchronizedBatchNorm2d(100, affine=False)\n",
    "        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))\n",
    "        >>> output = m(input)\n",
    "    \"\"\"\n",
    "\n",
    "    def _check_input_dim(self, input):\n",
    "        if input.dim() != 4:\n",
    "            raise ValueError('expected 4D input (got {}D input)'\n",
    "                             .format(input.dim()))\n",
    "        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "For handling DataParallel with Synchronised Batch Norm.\n",
    "\"\"\"\n",
    "\n",
    "class CallbackContext(object):\n",
    "    pass\n",
    "\n",
    "class DataParallelWithCallback(DataParallel):\n",
    "    \"\"\"\n",
    "    Data Parallel with a replication callback.\n",
    "    A replication callback `__data_parallel_replicate__` of each module will be invoked after being created by the\n",
    "    original `replicate` function.\n",
    "    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`\n",
    "    Examples:\n",
    "        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n",
    "        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])\n",
    "        # sync_bn.__data_parallel_replicate__ will be invoked.\n",
    "    \"\"\"\n",
    "\n",
    "    def replicate(self, module, device_ids):\n",
    "        modules = super(DataParallelWithCallback, self).replicate(module, device_ids)\n",
    "        execute_replication_callbacks(modules)\n",
    "        return modules\n",
    "\n",
    "\n",
    "def execute_replication_callbacks(modules):\n",
    "    \"\"\"\n",
    "    Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.\n",
    "    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`\n",
    "    Note that, as all modules are isomorphic, we assign each sub-module a context\n",
    "    (shared among multiple copies of this module on different devices).\n",
    "    Through this context, different copies can share some information.\n",
    "    We guarantee that the callback on the master copy (the first copy) will be called before the callback\n",
    "    of any slave copies.\n",
    "    \"\"\"\n",
    "    master_copy = modules[0]\n",
    "    nr_modules = len(list(master_copy.modules()))\n",
    "    ctxs = [CallbackContext() for _ in range(nr_modules)]\n",
    "\n",
    "    for i, module in enumerate(modules):\n",
    "        for j, m in enumerate(module.modules()):\n",
    "            if hasattr(m, '__data_parallel_replicate__'):\n",
    "                m.__data_parallel_replicate__(ctxs[j], i)\n",
    "\n",
    "\n",
    "def patch_replication_callback(data_parallel):\n",
    "    \"\"\"\n",
    "    Monkey-patch an existing `DataParallel` object. Add the replication callback.\n",
    "    Useful when you have a customized `DataParallel` implementation.\n",
    "    Examples:\n",
    "        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n",
    "        > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])\n",
    "        > patch_replication_callback(sync_bn)\n",
    "        # this is equivalent to\n",
    "        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n",
    "        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])\n",
    "    \"\"\"\n",
    "\n",
    "    assert isinstance(data_parallel, DataParallel)\n",
    "\n",
    "    old_replicate = data_parallel.replicate\n",
    "\n",
    "    @functools.wraps(old_replicate)\n",
    "    def new_replicate(module, device_ids):\n",
    "        modules = old_replicate(module, device_ids)\n",
    "        execute_replication_callbacks(modules)\n",
    "        return modules\n",
    "\n",
    "    data_parallel.replicate = new_replicate"
   ]
  },
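  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal usage sketch (added for illustration, not from the original notebook): wrapping a toy module that contains `SynchronizedBatchNorm2d` in `DataParallelWithCallback`, mirroring the docstring examples above. Two CUDA devices are assumed for `device_ids=[0, 1]`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: wrap a SyncBN module for multi-GPU DataParallel training.\n",
    "# On CPU or a single GPU, SynchronizedBatchNorm2d simply falls back to the\n",
    "# standard F.batch_norm path in its forward().\n",
    "net = nn.Sequential(\n",
    "    nn.Conv2d(3, 16, kernel_size=3, padding=1),\n",
    "    SynchronizedBatchNorm2d(16),\n",
    "    nn.ReLU(),\n",
    ")\n",
    "if torch.cuda.is_available() and torch.cuda.device_count() > 1:\n",
    "    net = DataParallelWithCallback(net.cuda(), device_ids=[0, 1])\n",
    "    out = net(torch.randn(8, 3, 32, 32).cuda())  # batch stats reduced across GPUs"
   ]
  },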
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**ResNet**\n",
    "Basic implementation of ResNet models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Bottleneck(nn.Module):\n",
    "    expansion = 4\n",
    "\n",
    "    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n",
    "        self.bn1 = BatchNorm(planes)\n",
    "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n",
    "                               dilation=dilation, padding=dilation, bias=False)\n",
    "        self.bn2 = BatchNorm(planes)\n",
    "        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n",
    "        self.bn3 = BatchNorm(planes * 4)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.downsample = downsample\n",
    "        self.stride = stride\n",
    "        self.dilation = dilation\n",
    "\n",
    "    def forward(self, x):\n",
    "        residual = x\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)\n",
    "\n",
    "        if self.downsample is not None:\n",
    "            residual = self.downsample(x)\n",
    "\n",
    "        out += residual\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "class ResNet(nn.Module):\n",
    "\n",
    "    def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True,\n",
    "                 pretrain_url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'):\n",
    "        self.inplanes = 64\n",
    "        super(ResNet, self).__init__()\n",
    "        self.pretrain_url = pretrain_url\n",
    "        blocks = [1, 2, 4]\n",
    "        if output_stride == 16:\n",
    "            strides = [1, 2, 2, 1]\n",
    "            dilations = [1, 1, 1, 2]\n",
    "        elif output_stride == 8:\n",
    "            strides = [1, 2, 1, 1]\n",
    "            dilations = [1, 1, 2, 4]\n",
    "        else:\n",
    "            raise NotImplementedError\n",
    "\n",
    "        # Modules\n",
    "        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n",
    "                                bias=False)\n",
    "        self.bn1 = BatchNorm(64)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "\n",
    "        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)\n",
    "        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)\n",
    "        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)\n",
    "        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)\n",
    "        # self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)\n",
    "        self._init_weight()\n",
    "\n",
    "        if pretrained:\n",
    "            self._load_pretrained_model()\n",
    "\n",
    "    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):\n",
    "        downsample = None\n",
    "        if stride != 1 or self.inplanes != planes * block.expansion:\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.inplanes, planes * block.expansion,\n",
    "                          kernel_size=1, stride=stride, bias=False),\n",
    "                BatchNorm(planes * block.expansion),\n",
    "            )\n",
    "\n",
    "        layers = []\n",
    "        layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))\n",
    "        self.inplanes = planes * block.expansion\n",
    "        for i in range(1, blocks):\n",
    "            layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):\n",
    "        # Multi-grid unit (DeepLab): each block of the final stage gets its own dilation multiplier.\n",
    "        downsample = None\n",
    "        if stride != 1 or self.inplanes != planes * block.expansion:\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.inplanes, planes * block.expansion,\n",
    "                          kernel_size=1, stride=stride, bias=False),\n",
    "                BatchNorm(planes * block.expansion),\n",
    "            )\n",
    "\n",
    "        layers = []\n",
    "        layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,\n",
    "                            downsample=downsample, BatchNorm=BatchNorm))\n",
    "        self.inplanes = planes * block.expansion\n",
    "        for i in range(1, len(blocks)):\n",
    "            layers.append(block(self.inplanes, planes, stride=1,\n",
    "                                dilation=blocks[i]*dilation, BatchNorm=BatchNorm))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, input):\n",
    "        x = self.conv1(input)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.maxpool(x)\n",
    "\n",
    "        x = self.layer1(x)\n",
    "        low_level_feat = x\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = self.layer4(x)\n",
    "        return x, low_level_feat\n",
    "\n",
    "    def _init_weight(self):\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
    "                m.weight.data.normal_(0, math.sqrt(2. / n))\n",
    "            elif isinstance(m, SynchronizedBatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "\n",
    "    def _load_pretrained_model(self):\n",
    "        pretrain_dict = model_zoo.load_url(self.pretrain_url)\n",
    "        model_dict = {}\n",
    "        state_dict = self.state_dict()\n",
    "        for k, v in pretrain_dict.items():\n",
    "            if k in state_dict:\n",
    "                model_dict[k] = v\n",
    "        state_dict.update(model_dict)\n",
    "        self.load_state_dict(state_dict)\n",
    "\n",
    "def ResNet101(output_stride, BatchNorm, pretrained=True):\n",
    "    \"\"\"Constructs a ResNet-101 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "    \"\"\"\n",
    "    model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, pretrained=pretrained)\n",
    "    return model\n",
    "\n",
    "def ResNet50(output_stride, BatchNorm, pretrained=True):\n",
    "    \"\"\"Constructs a ResNet-50 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "    \"\"\"\n",
    "    model = ResNet(Bottleneck, [3, 4, 6, 3], output_stride, BatchNorm, pretrained=pretrained,\n",
    "                   pretrain_url='https://download.pytorch.org/models/resnet50-19c8e357.pth')\n",
    "    return model"
   ]
  },
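  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick shape sanity check (an illustrative sketch, not part of the original training flow): with `output_stride=16`, the high-level features come out at 1/16 of the input resolution and `low_level_feat` (taken after `layer1`) at 1/4."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: inspect backbone output resolutions on CPU\n",
    "# (pretrained=False avoids downloading weights just for a shape check).\n",
    "backbone = ResNet101(output_stride=16, BatchNorm=nn.BatchNorm2d, pretrained=False)\n",
    "x, low_level_feat = backbone(torch.randn(1, 3, 224, 224))\n",
    "print(x.shape)               # torch.Size([1, 2048, 14, 14]) -> stride 16\n",
    "print(low_level_feat.shape)  # torch.Size([1, 256, 56, 56])  -> stride 4"
   ]
  },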
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**FLOAT**\n",
    "Inspired by DeepLabV3+."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Backbone**\n",
    "Function returning the ResNet backbone. Modifiable to use other backbones."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_backbone(backbone, output_stride, BatchNorm):\n",
    "    if backbone == 'resnet101':\n",
    "        return ResNet101(output_stride, BatchNorm)\n",
    "    elif backbone == 'resnet50':\n",
    "        return ResNet50(output_stride, BatchNorm)\n",
    "    else:\n",
    "        raise NotImplementedError"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**ASPP**\n",
    "Atrous Spatial Pyramid Pooling module from DeepLabV3+"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class _ASPPModule(nn.Module):\n",
    "    def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):\n",
    "        super(_ASPPModule, self).__init__()\n",
    "        self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,\n",
    "                                            stride=1, padding=padding, dilation=dilation, bias=False)\n",
    "        self.bn = BatchNorm(planes)\n",
    "        self.relu = nn.ReLU()\n",
    "\n",
    "        self._init_weight()\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.atrous_conv(x)\n",
    "        x = self.bn(x)\n",
    "\n",
    "        return self.relu(x)\n",
    "\n",
    "    def _init_weight(self):\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                torch.nn.init.kaiming_normal_(m.weight)\n",
    "            elif isinstance(m, SynchronizedBatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "\n",
    "class ASPP(nn.Module):\n",
    "    def __init__(self, backbone, output_stride, BatchNorm):\n",
    "        super(ASPP, self).__init__()\n",
    "        if backbone == 'drn':\n",
    "            inplanes = 512\n",
    "        elif backbone == 'mobilenet':\n",
    "            inplanes = 320\n",
    "        else:\n",
    "            inplanes = 2048\n",
    "        if output_stride == 16:\n",
    "            dilations = [1, 6, 12, 18]\n",
    "        elif output_stride == 8:\n",
    "            dilations = [1, 12, 24, 36]\n",
    "        else:\n",
    "            raise NotImplementedError\n",
    "\n",
    "        self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)\n",
    "        self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)\n",
    "        self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)\n",
    "        self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)\n",
    "\n",
    "        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n",
    "                                             nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),\n",
    "                                             BatchNorm(256),\n",
    "                                             nn.ReLU())\n",
    "        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)\n",
    "        self.bn1 = BatchNorm(256)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.dropout = nn.Dropout(0.5)\n",
    "        self._init_weight()\n",
    "\n",
    "    def forward(self, x):\n",
    "        x1 = self.aspp1(x)\n",
    "        x2 = self.aspp2(x)\n",
    "        x3 = self.aspp3(x)\n",
    "        x4 = self.aspp4(x)\n",
    "        x5 = self.global_avg_pool(x)\n",
    "        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n",
    "        x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n",
    "\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "\n",
    "        return self.dropout(x)\n",
    "\n",
    "    def _init_weight(self):\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
    "                # m.weight.data.normal_(0, math.sqrt(2. / n))\n",
    "                torch.nn.init.kaiming_normal_(m.weight)\n",
    "            elif isinstance(m, SynchronizedBatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "\n",
    "\n",
    "def build_aspp(backbone, output_stride, BatchNorm):\n",
    "    return ASPP(backbone, output_stride, BatchNorm)"
   ]
  },
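  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A rough shape sketch (illustrative, with assumed sizes): for a ResNet backbone at `output_stride=16`, ASPP keeps the spatial size and maps 2048 channels to 256; the five 256-channel branches are concatenated (5 × 256 = 1280) before the 1×1 projection."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: ASPP projects 2048 -> 256 channels at unchanged resolution.\n",
    "aspp = build_aspp(backbone='resnet101', output_stride=16, BatchNorm=nn.BatchNorm2d)\n",
    "aspp.eval()  # use running BN stats so a tiny dummy batch is fine\n",
    "feat = torch.randn(2, 2048, 14, 14)  # e.g. a 224x224 input at stride 16\n",
    "print(aspp(feat).shape)              # torch.Size([2, 256, 14, 14])"
   ]
  },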
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Decoder**\n",
    "Decoder from DeepLabV3+"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Decoder(nn.Module):\n",
    "    def __init__(self, num_classes, backbone, BatchNorm):\n",
    "        super(Decoder, self).__init__()\n",
    "        if backbone == 'resnet101' or backbone == 'resnet50' or backbone == 'drn':\n",
    "            low_level_inplanes = 256\n",
    "        elif backbone == 'xception':\n",
    "            low_level_inplanes = 128\n",
    "        elif backbone == 'mobilenet':\n",
    "            low_level_inplanes = 24\n",
    "        else:\n",
    "            raise NotImplementedError\n",
    "\n",
    "        self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)\n",
    "        self.bn1 = BatchNorm(48)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),\n",
    "                                       BatchNorm(256),\n",
    "                                       nn.ReLU(),\n",
    "                                       nn.Dropout(0.5),\n",
    "                                       nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),\n",
    "                                       BatchNorm(256),\n",
    "                                       nn.ReLU(),\n",
    "                                       nn.Dropout(0.1),\n",
    "                                       nn.Conv2d(256, num_classes, kernel_size=1, stride=1))\n",
    "        self._init_weight()\n",
    "\n",
    "    def forward(self, x, low_level_feat):\n",
    "        low_level_feat = self.conv1(low_level_feat)\n",
    "        low_level_feat = self.bn1(low_level_feat)\n",
    "        low_level_feat = self.relu(low_level_feat)\n",
    "\n",
    "        x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)\n",
    "        x = torch.cat((x, low_level_feat), dim=1)\n",
    "        x = self.last_conv(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "    def _init_weight(self):\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                torch.nn.init.kaiming_normal_(m.weight)\n",
    "            elif isinstance(m, SynchronizedBatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "\n",
    "def build_decoder(num_classes, backbone, BatchNorm):\n",
    "    return Decoder(num_classes, backbone, BatchNorm)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Float**\n",
    "Using DeepLabV3+'s modules."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Float(nn.Module):\n",
    "    def __init__(self, num_anim_classes, num_inanim_classes, backbone='resnet101', output_stride=16,\n",
    "                 sync_bn=True, freeze_bn=False):\n",
    "        super(Float, self).__init__()\n",
    "        if backbone == 'drn':\n",
    "            output_stride = 8\n",
    "\n",
    "        if sync_bn:\n",
    "            BatchNorm = SynchronizedBatchNorm2d\n",
    "        else:\n",
    "            BatchNorm = nn.BatchNorm2d\n",
    "\n",
    "        self.backbone = build_backbone(backbone, output_stride, BatchNorm)\n",
    "\n",
    "        self.anim_aspp = build_aspp(backbone, output_stride, BatchNorm)\n",
    "        self.anim_decoder = build_decoder(num_anim_classes, backbone, BatchNorm)\n",
    "\n",
    "        self.inanim_aspp = build_aspp(backbone, output_stride, BatchNorm)\n",
    "        self.inanim_decoder = build_decoder(num_inanim_classes, backbone, BatchNorm)\n",
    "\n",
    "        self.lrfb_aspp = build_aspp(backbone, output_stride, BatchNorm)\n",
    "        self.lr_decoder = build_decoder(3, backbone, BatchNorm)\n",
    "        self.fb_decoder = build_decoder(3, backbone, BatchNorm)\n",
    "\n",
    "        self.freeze_bn = freeze_bn\n",
    "\n",
    "    def forward(self, input):\n",
    "        x, low_level_feat = self.backbone(input)\n",
    "        anim_x = self.anim_aspp(x)\n",
    "        anim_x = self.anim_decoder(anim_x, low_level_feat)\n",
    "        anim_x = F.interpolate(anim_x, size=input.size()[2:], mode='bilinear', align_corners=True)\n",
    "\n",
    "        inanim_x = self.inanim_aspp(x)\n",
    "        inanim_x = self.inanim_decoder(inanim_x, low_level_feat)\n",
    "        inanim_x = F.interpolate(inanim_x, size=input.size()[2:], mode='bilinear', align_corners=True)\n",
    "\n",
    "        lrfb_x = self.lrfb_aspp(x)\n",
    "        lr = self.lr_decoder(lrfb_x, low_level_feat)\n",
    "        lr = F.interpolate(lr, size=input.size()[2:], mode='bilinear', align_corners=True)\n",
    "\n",
    "        fb = self.fb_decoder(lrfb_x, low_level_feat)\n",
    "        fb = F.interpolate(fb, size=input.size()[2:], mode='bilinear', align_corners=True)\n",
    "        return anim_x, inanim_x, lr, fb\n",
    "\n",
    "    def freeze_bn_layers(self):\n",
    "        # Put all batch-norm layers in eval mode. Named distinctly from the boolean\n",
    "        # attribute self.freeze_bn set in __init__, which would otherwise shadow\n",
    "        # a method called freeze_bn on instances.\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, SynchronizedBatchNorm2d):\n",
    "                m.eval()\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.eval()\n",
    "\n",
    "    def get_1x_lr_params(self):\n",
    "        modules = [self.backbone]\n",
    "        for i in range(len(modules)):\n",
    "            for m in modules[i].named_modules():\n",
    "                if self.freeze_bn:\n",
    "                    if isinstance(m[1], nn.Conv2d):\n",
    "                        for p in m[1].parameters():\n",
    "                            if p.requires_grad:\n",
    "                                yield p\n",
    "                else:\n",
    "                    if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \\\n",
    "                            or isinstance(m[1], nn.BatchNorm2d):\n",
    "                        for p in m[1].parameters():\n",
    "                            if p.requires_grad:\n",
    "                                yield p\n",
    "\n",
    "    def get_10x_lr_params(self):\n",
    "        modules = [self.anim_aspp, self.inanim_aspp, self.lrfb_aspp, self.anim_decoder,\n",
    "                   self.inanim_decoder, self.lr_decoder, self.fb_decoder]\n",
    "        for i in range(len(modules)):\n",
    "            for m in modules[i].named_modules():\n",
    "                if self.freeze_bn:\n",
    "                    if isinstance(m[1], nn.Conv2d):\n",
    "                        for p in m[1].parameters():\n",
    "                            if p.requires_grad:\n",
    "                                yield p\n",
    "                else:\n",
    "                    if isinstance(m[1], nn.Conv2d) or isinstance(m[1], SynchronizedBatchNorm2d) \\\n",
    "                            or isinstance(m[1], nn.BatchNorm2d):\n",
    "                        for p in m[1].parameters():\n",
    "                            if p.requires_grad:\n",
    "                                yield p"
   ]
  },
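  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A hedged construction sketch (the class counts and learning rates below are illustrative placeholders, not values from this notebook): the two parameter generators support the usual DeepLab recipe of a 1x learning rate for the pretrained backbone and 10x for the randomly initialized ASPP/decoder heads."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: build the four-head model and a two-group SGD optimizer.\n",
    "# num_anim_classes/num_inanim_classes and base_lr are placeholder values.\n",
    "model = Float(num_anim_classes=24, num_inanim_classes=12,\n",
    "              backbone='resnet101', output_stride=16, sync_bn=False)\n",
    "base_lr = 0.007\n",
    "optimizer = optim.SGD(\n",
    "    [{'params': model.get_1x_lr_params(), 'lr': base_lr},         # backbone\n",
    "     {'params': model.get_10x_lr_params(), 'lr': base_lr * 10}],  # heads\n",
    "    momentum=0.9, weight_decay=5e-4)"
   ]
  },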
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Metric calculator functions**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class AverageMeter(object):\n",
    "    \"\"\"Used for updatable average loss computation.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.reset()\n",
    "\n",
    "    def reset(self):\n",
    "        self.val = 0\n",
    "        self.avg = 0\n",
    "        self.sum = 0\n",
    "        self.count = 0\n",
    "\n",
    "    def update(self, val, n=1):\n",
    "        self.val = val\n",
    "        self.sum += val * n\n",
    "        self.count += n\n",
    "        self.avg = self.sum / self.count"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "class sqIOUMeter(object):\n",
    "    \"\"\"Used for updatable sqIOU-style average (per class) calculation.\"\"\"\n",
    "    def __init__(self, n_classes):\n",
    "        self.n_classes = n_classes\n",
    "        self.avg = 0\n",
    "        self.vals = {}\n",
    "        self.counts = {}\n",
    "        for i in range(self.n_classes):\n",
    "            self.vals[i] = 0\n",
    "            self.counts[i] = 0\n",
    "\n",
    "    def update(self, val_d, count_d):\n",
    "        sqiou = []\n",
    "        for i in range(self.n_classes):\n",
    "            self.vals[i] += val_d[i]\n",
    "            self.counts[i] += count_d[i]\n",
    "            if self.counts[i] > 0:\n",
    "                sqiou.append(self.vals[i] / self.counts[i])\n",
    "\n",
    "        self.avg = np.mean(sqiou)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Used for updatable mIoU calculation, among other metrics.\"\"\"\n",
    "class Evaluator(object):\n",
    "    def __init__(self, num_class):\n",
    "        self.num_class = num_class\n",
    "        self.confusion_matrix = np.zeros((self.num_class,)*2)\n",
    "\n",
    "    def set_confusion_matrix(self, conf_mat):\n",
    "        self.confusion_matrix = np.copy(conf_mat)\n",
    "\n",
    "    def Pixel_Accuracy(self):\n",
    "        Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()\n",
    "        return Acc\n",
    "\n",
    "    def Pixel_Accuracy_Class(self):\n",
    "        Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n",
    "        Acc = np.nanmean(Acc)\n",
    "        return Acc\n",
    "\n",
    "    def Mean_Intersection_over_Union(self):\n",
    "        MIoU = np.diag(self.confusion_matrix) / (\n",
    "                    np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n",
    "                    np.diag(self.confusion_matrix))\n",
    "        MIoU = np.nanmean(MIoU)\n",
    "        return MIoU\n",
    "\n",
    "    def Mean_Intersection_over_Union_PerClass(self):\n",
    "        MIoU = np.diag(self.confusion_matrix) / (\n",
    "                    np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n",
    "                    np.diag(self.confusion_matrix))\n",
    "        return MIoU\n",
    "\n",
    "    def Frequency_Weighted_Intersection_over_Union(self):\n",
    "        freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n",
    "        iu = np.diag(self.confusion_matrix) / (\n",
    "                    np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n",
    "                    np.diag(self.confusion_matrix))\n",
    "\n",
    "        FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n",
    "        return FWIoU\n",
    "\n",
    "    def _generate_matrix(self, gt_image, pre_image):\n",
    "        mask = (gt_image >= 0) & (gt_image < self.num_class)\n",
    "        label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]\n",
    "        count = np.bincount(label, minlength=self.num_class**2)\n",
    "        confusion_matrix = count.reshape(self.num_class, self.num_class)\n",
    "        return confusion_matrix\n",
    "\n",
    "    def add_batch(self, gt_image, pre_image):\n",
    "        assert gt_image.shape == pre_image.shape\n",
    "        self.confusion_matrix += self._generate_matrix(gt_image, pre_image)\n",
    "\n",
    "    def reset(self):\n",
    "        self.confusion_matrix = np.zeros((self.num_class,) * 2)"
   ]
  },
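  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A small usage sketch with dummy labels (values are illustrative assumptions):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: accumulate a confusion matrix over batches, then read metrics.\n",
    "evaluator = Evaluator(num_class=3)\n",
    "gt = np.array([[0, 1, 2, 1]])\n",
    "pred = np.array([[0, 1, 1, 1]])\n",
    "evaluator.add_batch(gt, pred)\n",
    "print(evaluator.Pixel_Accuracy())                # 3 of 4 pixels correct -> 0.75\n",
    "print(evaluator.Mean_Intersection_over_Union())  # mean IoU over the 3 classes"
   ]
  },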
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Returns the per-class IoU sum and per-class count in a batch.\n",
    "\"\"\"\n",
    "def jaccard_perpart(y_pred, y_true, num_classes):\n",
    "    num_parts = y_pred.shape[1]\n",
    "    y_pred = torch.argmax(y_pred, 1)\n",
    "    y_pred = y_pred.type(torch.LongTensor)\n",
    "    y_true = y_true.type(torch.LongTensor)\n",
    "    y_pred = F.one_hot(y_pred, num_classes=num_classes)\n",
    "    y_true = F.one_hot(y_true, num_classes=num_classes)\n",
    "    ious = {}\n",
    "    counts = {}\n",
    "    for i in range(num_parts):\n",
    "        pred = y_pred[:,:,:,i]\n",
    "        gt = y_true[:,:,:,i]\n",
    "        inter = torch.logical_and(pred, gt)\n",
    "        union = torch.logical_or(pred, gt)\n",
    "        iou = torch.sum(inter, [1,2]) / torch.sum(union, [1,2])\n",
    "        legal = torch.sum(gt, [1,2]) > 0\n",
    "        ious[i] = torch.sum(iou[legal])\n",
    "        counts[i] = torch.sum(legal)\n",
    "\n",
    "    return ious, counts\n",
    "\n",
    "\"\"\"\n",
    "Returns the per-class 'intersection over ground truth' sum and per-class count in a batch.\n",
    "\"\"\"\n",
    "def iogt_perpart(y_pred, y_true, num_classes):\n",
    "    num_parts = y_pred.shape[1]\n",
    "    y_pred = torch.argmax(y_pred, 1)\n",
    "    y_pred = y_pred.type(torch.LongTensor)\n",
    "    y_true = y_true.type(torch.LongTensor)\n",
    "    y_pred = F.one_hot(y_pred, num_classes=num_classes)\n",
    "    y_true = F.one_hot(y_true, num_classes=num_classes)\n",
    "    iogts = {}\n",
    "    counts = {}\n",
    "    # Channel 0 (background) is skipped; result keys are shifted down by one.\n",
    "    for i in range(1, num_parts):\n",
    "        pred = y_pred[:,:,:,i]\n",
    "        gt = y_true[:,:,:,i]\n",
    "        inter = torch.logical_and(pred, gt)\n",
    "        iogt = torch.sum(inter, [1,2]) / torch.sum(gt, [1,2])\n",
    "        legal = torch.sum(gt, [1,2]) > 0\n",
    "        iogts[i-1] = torch.sum(iogt[legal])\n",
    "        counts[i-1] = torch.sum(legal)\n",
    "\n",
    "    return iogts, counts"
   ]
  },
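  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "How these pair with `sqIOUMeter` (a sketch with dummy predictions; shapes and class count are assumptions):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: feed per-batch IoU sums/counts into the running sqIOU meter.\n",
    "num_classes = 5\n",
    "logits = torch.randn(2, num_classes, 32, 32)         # (N, C, H, W) scores\n",
    "labels = torch.randint(0, num_classes, (2, 32, 32))  # (N, H, W) int labels\n",
    "ious, counts = jaccard_perpart(logits, labels, num_classes)\n",
    "meter = sqIOUMeter(num_classes)\n",
    "meter.update({k: v.item() for k, v in ious.items()},\n",
    "             {k: v.item() for k, v in counts.items()})\n",
    "print(meter.avg)"
   ]
  },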
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Losses**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Dilated mask computation for the adjacent graph loss.\n",
    "\"\"\"\n",
    "def compute_dilated_mask(image, class_index, dilation_kernel, ignore_label=None):\n",
    "    # Mask of the specific class\n",
    "    mask = torch.eq(image, class_index).float()\n",
    "\n",
    "    if ignore_label is not None:\n",
    "        mask_ignore = 1.0 - torch.eq(image, ignore_label).float()\n",
    "        mask = torch.mul(mask_ignore, mask)\n",
    "\n",
    "    # padding of dilation_kernel // 2 keeps the spatial size unchanged\n",
    "    dilated_mask = nn.MaxPool2d(kernel_size=dilation_kernel, stride=1,\n",
    "                                padding=dilation_kernel // 2)(mask)\n",
    "\n",
    "    return dilated_mask"
   ]
  },
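  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For intuition (a sketch with dummy values): dilating each class mask by one pixel makes masks of adjacent parts overlap, which is exactly what `compute_adj_mat` counts below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: two vertically adjacent classes overlap after 3x3 dilation.\n",
    "img = torch.zeros(1, 1, 4, 4)\n",
    "img[0, 0, :2, :] = 1.0  # top half: class 1, bottom half: class 0\n",
    "m0 = compute_dilated_mask(img, 0, dilation_kernel=3)\n",
    "m1 = compute_dilated_mask(img, 1, dilation_kernel=3)\n",
    "print(torch.sum(m0 * m1))  # > 0: classes 0 and 1 touch"
   ]
  },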
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Returns the adjacency matrix for graph loss computation.\n",
    "The adjacency matrix records how many pixels overlap after dilation for each pair of parts.\n",
    "\"\"\"\n",
    "def compute_adj_mat(image, adj_mat, num_classes, present_classes, ignore_label, dilation_kernel, weighted):\n",
    "\n",
    "    num_present_classes = present_classes.shape[0]\n",
    "    i = 1  # skip the first present class (assumed background)\n",
    "\n",
    "    while (i < num_present_classes):\n",
    "        j = i + 1\n",
    "\n",
    "        first_dilated_mask = compute_dilated_mask(image, present_classes[i], dilation_kernel)\n",
    "\n",
    "        while (j < num_present_classes):\n",
    "            second_dilated_mask = compute_dilated_mask(image, present_classes[j], dilation_kernel)\n",
    "\n",
    "            intersection = torch.mul(first_dilated_mask, second_dilated_mask)\n",
    "\n",
    "            adjacent_pixels = torch.sum(intersection).type(torch.int)\n",
    "\n",
    "            # WeightedAdjMat - The class1-class2 value contains the number of adjacent pixels if the 2 classes\n",
    "            # are adjacent, 0 otherwise\n",
    "            if weighted:\n",
    "                indices = torch.Tensor([[present_classes[i]], [present_classes[j]], [0]])\n",
    "                values = torch.reshape(adjacent_pixels, [1]).cpu()\n",
    "                shape = [num_classes, num_classes, 1]\n",
    "                delta = torch.sparse_coo_tensor(indices, values, shape)\n",
    "                adj_mat = adj_mat + delta.to_dense()\n",
    "\n",
    "            # SimpleAdjMat - The class1-class2 value contains 1 if the 2 classes are adjacent, 0 otherwise\n",
    "            else:\n",
    "                value = adjacent_pixels > 0\n",
    "                value = value.float()\n",
    "                # indices must have shape (ndim, nnz), matching the weighted branch\n",
    "                indices = torch.Tensor([[present_classes[i]], [present_classes[j]], [0]])\n",
    "                values = torch.reshape(value, [1]).cpu()\n",
    "                shape = [num_classes, num_classes, 1]\n",
    "                delta = torch.sparse_coo_tensor(indices, values, shape)\n",
    "                adj_mat = adj_mat + delta.to_dense()\n",
    "\n",
    "            j = j + 1\n",
    "\n",
    "        i = i + 1\n",
    "\n",
    "    return adj_mat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Computes and returns the graph loss: an MSE loss between the adjacency graph of the prediction and that of the ground truth.\n",
    "\"\"\"\n",
    "def adjacent_graph_loss(pred, gt, num_classes, weighted=True,\n",
    "                        ignore_label=None, lambda_loss=0.1,\n",
    "                        dilation_kernel=3):\n",
    "    pred = F.interpolate(pred, size=gt.shape[1:], mode='bilinear', align_corners=False)\n",
    "    pred = torch.argmax(pred, dim=1)\n",
    "\n",
    "    concat = torch.cat([torch.reshape(pred, [-1]), torch.reshape(gt, [-1])], 0)\n",
    "    unique = torch.unique(concat, sorted=True)\n",
    "\n",
    "    logits_adj_mat = torch.zeros([num_classes, num_classes, 1], dtype=torch.int32)\n",
    "    labels_adj_mat = torch.zeros([num_classes, num_classes, 1], dtype=torch.int32)\n",
    "\n",
    "    logits_adj_mat = compute_adj_mat(image=pred,\n",
    "                                     adj_mat=logits_adj_mat,\n",
    "                                     num_classes=num_classes,\n",
    "                                     present_classes=unique,\n",
    "                                     ignore_label=ignore_label,\n",
    "                                     dilation_kernel=dilation_kernel,\n",
    "                                     weighted=weighted)\n",
    "\n",
    "    labels_adj_mat = compute_adj_mat(image=gt,\n",
    "                                     adj_mat=labels_adj_mat,\n",
    "                                     num_classes=num_classes,\n",
    "                                     present_classes=unique,\n",
    "                                     ignore_label=ignore_label,\n",
    "                                     dilation_kernel=dilation_kernel,\n",
    "                                     weighted=weighted)\n",
    "\n",
    "    logits_adj_mat = logits_adj_mat.type(torch.DoubleTensor)\n",
    "    labels_adj_mat = labels_adj_mat.type(torch.DoubleTensor)\n",
    "    if weighted:\n",
    "        logits_adj_mat = F.normalize(logits_adj_mat, dim=0)\n",
    "        labels_adj_mat = F.normalize(labels_adj_mat, dim=0)\n",
    "\n",
    "    loss = nn.MSELoss()(logits_adj_mat, labels_adj_mat)\n",
    "    return loss * lambda_loss"
   ]
  },
1218
  {
1219
   "cell_type": "code",
1220
   "execution_count": 19,
1221
   "metadata": {},
1222
   "outputs": [],
1223
   "source": [
1224
    "\"\"\"\n",
1225
    "Loss forcing all parts to belong to an object.\n",
1226
    "\"\"\"\n",
    "def objmask_loss(pred, macro_gt, num_classes, weighted=True,\n",
    "                 ignore_label=None, lambda_loss=0.001,\n",
    "                 dilation_kernel=3, label_weights=None):\n",
    "    pred = F.interpolate(pred, size=macro_gt.shape[1:], mode='bilinear', align_corners=False)\n",
    "\n",
    "    macro_class_logits = torch.split(pred, [1, num_classes-1], dim=1)\n",
    "    macro_logits_sum = []\n",
    "    for i in range(len(macro_class_logits)):\n",
    "        macro_logits_sum.append(torch.sum(macro_class_logits[i], axis=1))\n",
    "    \n",
    "    macro_pred = torch.stack(macro_logits_sum, axis=1)\n",
    "    loss = nn.CrossEntropyLoss(weight=label_weights)(macro_pred, macro_gt)\n",
    "    return loss * lambda_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Standard cross-entropy loss between prediction and ground truth.\n",
    "\"\"\"\n",
    "def crossentropy_loss(pred, gt, lambda_loss=1.0, label_weights=None):\n",
    "    pred = F.interpolate(pred, size=gt.shape[1:], mode='bilinear', align_corners=False)\n",
    "\n",
    "    loss = nn.CrossEntropyLoss(weight=label_weights)(pred, gt)\n",
    "    return loss * lambda_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Maps each original dataset part label to its factored part label, per object category.\n",
    "\"\"\"\n",
    "def aggregate_parts_to_classes(num_classes=201, animate=True):\n",
    "\n",
    "    # Animate parts : Head(1), Torso(2), Leg(3), Tail(4), Wing(5), Lower Arm(6), Neck(7),\n",
    "    #                 Eye(8), Ear(9), Nose(10), Muzzle(11), Horn(12), Mouth(13),\n",
    "    #                 Hair(14), Foot(15), Hand(16), Paw(17), Hoof(18), Beak(19), Eyebrow(20),\n",
    "    #                 Lower Leg (21), Upper Leg (22), Upper Arm (23)\n",
    "\n",
    "    map_pc = {}\n",
    "    for i in range(num_classes):\n",
    "        map_pc[i] = 0\n",
    "\n",
    "    if animate:\n",
    "        map_pc[15] = 19  # Bird\n",
    "        map_pc[16] = 1\n",
    "        map_pc[17] = 8\n",
    "        map_pc[18] = 15\n",
    "        map_pc[19] = 3\n",
    "        map_pc[20] = 5\n",
    "        map_pc[21] = 7\n",
    "        map_pc[22] = 8\n",
    "        map_pc[23] = 15\n",
    "        map_pc[24] = 3\n",
    "        map_pc[25] = 5\n",
    "        map_pc[26] = 4\n",
    "        map_pc[27] = 2\n",
    "\n",
    "        map_pc[57] = 1  # Cat\n",
    "        map_pc[58] = 3\n",
    "        map_pc[59] = 17\n",
    "        map_pc[60] = 9\n",
    "        map_pc[61] = 8\n",
    "        map_pc[62] = 3\n",
    "        map_pc[63] = 17\n",
    "        map_pc[64] = 7\n",
    "        map_pc[65] = 10\n",
    "        map_pc[66] = 3\n",
    "        map_pc[67] = 17\n",
    "        map_pc[68] = 9\n",
    "        map_pc[69] = 8\n",
    "        map_pc[70] = 3\n",
    "        map_pc[71] = 17\n",
    "        map_pc[72] = 4\n",
    "        map_pc[73] = 2\n",
    "\n",
    "        map_pc[75] = 1  # Cow\n",
    "        map_pc[76] = 21\n",
    "        map_pc[77] = 22\n",
    "        map_pc[78] = 9\n",
    "        map_pc[79] = 8\n",
    "        map_pc[80] = 21\n",
    "        map_pc[81] = 22\n",
    "        map_pc[82] = 12\n",
    "        map_pc[83] = 11\n",
    "        map_pc[84] = 7\n",
    "        map_pc[85] = 21\n",
    "        map_pc[86] = 22\n",
    "        map_pc[87] = 9\n",
    "        map_pc[88] = 8\n",
    "        map_pc[89] = 21\n",
    "        map_pc[90] = 22\n",
    "        map_pc[91] = 12\n",
    "        map_pc[92] = 4\n",
    "        map_pc[93] = 2\n",
    "\n",
    "        map_pc[95] = 1  # Dog\n",
    "        map_pc[96] = 3\n",
    "        map_pc[97] = 17\n",
    "        map_pc[98] = 9\n",
    "        map_pc[99] = 8\n",
    "        map_pc[100] = 3\n",
    "        map_pc[101] = 17\n",
    "        map_pc[102] = 11\n",
    "        map_pc[103] = 7\n",
    "        map_pc[104] = 10\n",
    "        map_pc[105] = 3\n",
    "        map_pc[106] = 17\n",
    "        map_pc[107] = 9\n",
    "        map_pc[108] = 8\n",
    "        map_pc[109] = 3\n",
    "        map_pc[110] = 17\n",
    "        map_pc[111] = 4\n",
    "        map_pc[112] = 2\n",
    "\n",
    "        map_pc[113] = 1  # Horse\n",
    "        map_pc[114] = 18\n",
    "        map_pc[115] = 21\n",
    "        map_pc[116] = 22\n",
    "        map_pc[117] = 9\n",
    "        map_pc[118] = 8\n",
    "        map_pc[119] = 18\n",
    "        map_pc[120] = 21\n",
    "        map_pc[121] = 22\n",
    "        map_pc[122] = 11\n",
    "        map_pc[123] = 7\n",
    "        map_pc[124] = 18\n",
    "        map_pc[125] = 21\n",
    "        map_pc[126] = 22\n",
    "        map_pc[127] = 9\n",
    "        map_pc[128] = 8\n",
    "        map_pc[129] = 18\n",
    "        map_pc[130] = 21\n",
    "        map_pc[131] = 22\n",
    "        map_pc[132] = 4\n",
    "        map_pc[133] = 2\n",
    "\n",
    "        map_pc[140] = 14  # Person\n",
    "        map_pc[141] = 1\n",
    "        map_pc[142] = 9\n",
    "        map_pc[143] = 8\n",
    "        map_pc[144] = 20\n",
    "        map_pc[145] = 15\n",
    "        map_pc[146] = 16\n",
    "        map_pc[147] = 6\n",
    "        map_pc[148] = 21\n",
    "        map_pc[149] = 23\n",
    "        map_pc[150] = 22\n",
    "        map_pc[151] = 13\n",
    "        map_pc[152] = 7\n",
    "        map_pc[153] = 10\n",
    "        map_pc[154] = 9\n",
    "        map_pc[155] = 8\n",
    "        map_pc[156] = 20\n",
    "        map_pc[157] = 15\n",
    "        map_pc[158] = 16\n",
    "        map_pc[159] = 6\n",
    "        map_pc[160] = 21\n",
    "        map_pc[161] = 23\n",
    "        map_pc[162] = 22\n",
    "        map_pc[163] = 2\n",
    "\n",
    "        map_pc[166] = 1  # Sheep\n",
    "        map_pc[167] = 21\n",
    "        map_pc[168] = 22\n",
    "        map_pc[169] = 9\n",
    "        map_pc[170] = 8\n",
    "        map_pc[171] = 21\n",
    "        map_pc[172] = 22\n",
    "        map_pc[173] = 12\n",
    "        map_pc[174] = 11\n",
    "        map_pc[175] = 7\n",
    "        map_pc[176] = 21\n",
    "        map_pc[177] = 22\n",
    "        map_pc[178] = 9\n",
    "        map_pc[179] = 8\n",
    "        map_pc[180] = 21\n",
    "        map_pc[181] = 22\n",
    "        map_pc[182] = 12\n",
    "        map_pc[183] = 4\n",
    "        map_pc[184] = 2\n",
    "        \n",
    "    # Inanimate parts : Body (1), Wheel (2), Wing (3), Stern (4), Engine(5), Light (6),\n",
    "    #                   Plate (7), Screen (8), Pot (9), Plant (10), Window (11), Bottle Body (12),\n",
    "    #                   Bottle Cap (13), Saddle (14), Handlebar(15), Chainwheel (16), Side (17),\n",
    "    #                   Roof (18), Mirror (19), Door (20), Head (21), Head Side (22), Head Roof (23),\n",
    "    #                   Coach(24), Coach Side (25), Coach Roof (26), TV Frame (27), Tail (28)\n",
    "        \n",
    "    else:\n",
    "        map_pc[1] = 1  # Aeroplane\n",
    "        map_pc[2] = 5\n",
    "        map_pc[3] = 3\n",
    "        map_pc[4] = 3\n",
    "        map_pc[5] = 4\n",
    "        map_pc[6] = 28\n",
    "        map_pc[7] = 2\n",
    "\n",
    "        map_pc[8] = 2 # Bicycle\n",
    "        map_pc[9] = 16\n",
    "        map_pc[10] = 1\n",
    "        map_pc[11] = 2\n",
    "        map_pc[12] = 15\n",
    "        map_pc[13] = 6\n",
    "        map_pc[14] = 14\n",
    "\n",
    "        map_pc[29] = 12 # Bottle\n",
    "        map_pc[30] = 13\n",
    "\n",
    "        map_pc[31] = 7 # Bus\n",
    "        map_pc[32] = 17\n",
    "        map_pc[33] = 20\n",
    "        map_pc[34] = 7\n",
    "        map_pc[35] = 17\n",
    "        map_pc[36] = 6\n",
    "        map_pc[37] = 19\n",
    "        map_pc[38] = 17\n",
    "        map_pc[39] = 19\n",
    "        map_pc[40] = 17\n",
    "        map_pc[41] = 18\n",
    "        map_pc[42] = 2\n",
    "        map_pc[43] = 11\n",
    "\n",
    "        map_pc[44] = 7 # Car\n",
    "        map_pc[45] = 17\n",
    "        map_pc[46] = 20\n",
    "        map_pc[47] = 7\n",
    "        map_pc[48] = 17\n",
    "        map_pc[49] = 6\n",
    "        map_pc[50] = 19\n",
    "        map_pc[51] = 17\n",
    "        map_pc[52] = 19\n",
    "        map_pc[53] = 17\n",
    "        map_pc[54] = 18\n",
    "        map_pc[55] = 2\n",
    "        map_pc[56] = 11\n",
    "\n",
    "        map_pc[134] = 2 # Motorbike\n",
    "        map_pc[135] = 1\n",
    "        map_pc[136] = 2\n",
    "        map_pc[137] = 15\n",
    "        map_pc[138] = 6\n",
    "        map_pc[139] = 14\n",
    "\n",
    "        map_pc[164] = 10 # Potted plant\n",
    "        map_pc[165] = 9\n",
    "        \n",
    "        map_pc[186] = 25 # Train\n",
    "        map_pc[187] = 25\n",
    "        map_pc[188] = 25\n",
    "        map_pc[189] = 25\n",
    "        map_pc[190] = 26\n",
    "        map_pc[191] = 24\n",
    "        map_pc[192] = 21\n",
    "        map_pc[193] = 22\n",
    "        map_pc[194] = 22\n",
    "        map_pc[195] = 22\n",
    "        map_pc[196] = 22\n",
    "        map_pc[197] = 23\n",
    "        map_pc[198] = 6\n",
    "\n",
    "        map_pc[199] = 27 # Tv monitor\n",
    "        map_pc[200] = 8\n",
    "\n",
    "    return map_pc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Basic image and label transforms for the dataset.\n",
    "\"\"\"\n",
    "from PIL import Image, ImageOps, ImageFilter\n",
    "from torchvision import transforms\n",
    "\n",
    "class Normalize(object):\n",
    "    \"\"\"Normalize a tensor image with mean and standard deviation.\n",
    "    Args:\n",
    "        mean (tuple): means for each channel.\n",
    "        std (tuple): standard deviations for each channel.\n",
    "    \"\"\"\n",
    "    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):\n",
    "        self.mean = mean\n",
    "        self.std = std\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        img = np.array(img).astype(np.float32)\n",
    "        anim_obj = np.array(anim_obj).astype(np.float32)\n",
    "        inanim_obj = np.array(inanim_obj).astype(np.float32)\n",
    "        anim = np.array(anim).astype(np.float32)\n",
    "        inanim = np.array(inanim).astype(np.float32)\n",
    "        adj1 = np.array(adj1).astype(np.float32)\n",
    "        adj2 = np.array(adj2).astype(np.float32)\n",
    "        adj1_o = np.array(adj1_o).astype(np.float32)\n",
    "        adj2_o = np.array(adj2_o).astype(np.float32)\n",
    "        img /= 255.0\n",
    "        img -= self.mean\n",
    "        img /= self.std\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "\n",
    "class ToTensor(object):\n",
    "    \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        # swap color axis because\n",
    "        # numpy image: H x W x C\n",
    "        # torch image: C X H X W\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        img = np.array(img).astype(np.float32).transpose((2, 0, 1))\n",
    "        anim_obj = np.array(anim_obj).astype(np.float32)\n",
    "        inanim_obj = np.array(inanim_obj).astype(np.float32)\n",
    "        anim = np.array(anim).astype(np.float32)\n",
    "        inanim = np.array(inanim).astype(np.float32)\n",
    "        adj1 = np.array(adj1).astype(np.float32)\n",
    "        adj2 = np.array(adj2).astype(np.float32)\n",
    "        adj1_o = np.array(adj1_o).astype(np.float32)\n",
    "        adj2_o = np.array(adj2_o).astype(np.float32)\n",
    "\n",
    "        img = torch.from_numpy(img).float()\n",
    "        anim_obj = torch.from_numpy(anim_obj).float()\n",
    "        inanim_obj = torch.from_numpy(inanim_obj).float()\n",
    "        anim = torch.from_numpy(anim).float()\n",
    "        inanim = torch.from_numpy(inanim).float()\n",
    "        adj1 = torch.from_numpy(adj1).float()\n",
    "        adj2 = torch.from_numpy(adj2).float()\n",
    "        adj1_o = torch.from_numpy(adj1_o).float()\n",
    "        adj2_o = torch.from_numpy(adj2_o).float()\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "\n",
    "class RandomHorizontalFlip(object):\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        if random.random() < 0.5:\n",
    "            img = img.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            anim_obj = anim_obj.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            inanim_obj = inanim_obj.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            anim = anim.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            inanim = inanim.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            adj2 = adj2.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            adj2_o = adj2_o.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "\n",
    "            adj1 = adj1.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            adj1 = np.array(adj1).astype(np.float32)\n",
    "            adj1_flip = np.copy(adj1)\n",
    "            adj1_flip = adj1_flip + ((adj1 == 1) * 1) # 1 => 2\n",
    "            adj1_flip = adj1_flip - ((adj1 == 2) * 1) # 2 => 1\n",
    "            adj1 = Image.fromarray(adj1_flip)\n",
    "\n",
    "            adj1_o = adj1_o.transpose(Image.FLIP_LEFT_RIGHT)\n",
    "            adj1_o = np.array(adj1_o).astype(np.float32)\n",
    "            adj1_o_flip = np.copy(adj1_o)\n",
    "            adj1_o_flip = adj1_o_flip + ((adj1_o == 1) * 1) # 1 => 2\n",
    "            adj1_o_flip = adj1_o_flip - ((adj1_o == 2) * 1) # 2 => 1\n",
    "            adj1_o = Image.fromarray(adj1_o_flip)\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "\n",
    "class RandomRotate(object):\n",
    "    def __init__(self, degree):\n",
    "        self.degree = degree\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        rotate_degree = random.uniform(-1*self.degree, self.degree)\n",
    "        img = img.rotate(rotate_degree, Image.BILINEAR)\n",
    "        anim_obj = anim_obj.rotate(rotate_degree, Image.NEAREST)\n",
    "        inanim_obj = inanim_obj.rotate(rotate_degree, Image.NEAREST)\n",
    "        anim = anim.rotate(rotate_degree, Image.NEAREST)\n",
    "        inanim = inanim.rotate(rotate_degree, Image.NEAREST)\n",
    "        adj1 = adj1.rotate(rotate_degree, Image.NEAREST)\n",
    "        adj2 = adj2.rotate(rotate_degree, Image.NEAREST)\n",
    "        adj1_o = adj1_o.rotate(rotate_degree, Image.NEAREST)\n",
    "        adj2_o = adj2_o.rotate(rotate_degree, Image.NEAREST)\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "\n",
    "class RandomGaussianBlur(object):\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        if random.random() < 0.5:\n",
    "            img = img.filter(ImageFilter.GaussianBlur(\n",
    "                radius=random.random()))\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "\n",
    "class RandomScaleCrop(object):\n",
    "    def __init__(self, base_size, crop_size, fill=0):\n",
    "        self.base_size = base_size\n",
    "        self.crop_size = crop_size\n",
    "        self.fill = fill\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        # random scale (short edge)\n",
    "        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))\n",
    "        w, h = img.size\n",
    "        if h > w:\n",
    "            ow = short_size\n",
    "            oh = int(1.0 * h * ow / w)\n",
    "        else:\n",
    "            oh = short_size\n",
    "            ow = int(1.0 * w * oh / h)\n",
    "        img = img.resize((ow, oh), Image.BILINEAR)\n",
    "        anim_obj = anim_obj.resize((ow, oh), Image.NEAREST)\n",
    "        inanim_obj = inanim_obj.resize((ow, oh), Image.NEAREST)\n",
    "        anim = anim.resize((ow, oh), Image.NEAREST)\n",
    "        inanim = inanim.resize((ow, oh), Image.NEAREST)\n",
    "        adj1 = adj1.resize((ow, oh), Image.NEAREST)\n",
    "        adj2 = adj2.resize((ow, oh), Image.NEAREST)\n",
    "        adj1_o = adj1_o.resize((ow, oh), Image.NEAREST)\n",
    "        adj2_o = adj2_o.resize((ow, oh), Image.NEAREST)\n",
    "\n",
    "        # pad crop\n",
    "        if short_size < self.crop_size:\n",
    "            padh = self.crop_size - oh if oh < self.crop_size else 0\n",
    "            padw = self.crop_size - ow if ow < self.crop_size else 0\n",
    "            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n",
    "            anim_obj = ImageOps.expand(anim_obj, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            inanim_obj = ImageOps.expand(inanim_obj, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            anim = ImageOps.expand(anim, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            inanim = ImageOps.expand(inanim, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            adj1 = ImageOps.expand(adj1, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            adj2 = ImageOps.expand(adj2, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            adj1_o = ImageOps.expand(adj1_o, border=(0, 0, padw, padh), fill=self.fill)\n",
    "            adj2_o = ImageOps.expand(adj2_o, border=(0, 0, padw, padh), fill=self.fill)\n",
    "\n",
    "        # random crop crop_size\n",
    "        w, h = img.size\n",
    "        x1 = random.randint(0, w - self.crop_size)\n",
    "        y1 = random.randint(0, h - self.crop_size)\n",
    "        img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        anim_obj = anim_obj.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        inanim_obj = inanim_obj.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        anim = anim.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        inanim = inanim.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj1 = adj1.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj2 = adj2.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj1_o = adj1_o.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj2_o = adj2_o.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "\n",
    "class FixScaleCrop(object):\n",
    "    def __init__(self, crop_size):\n",
    "        self.crop_size = crop_size\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        w, h = img.size\n",
    "        if w > h:\n",
    "            oh = self.crop_size\n",
    "            ow = int(1.0 * w * oh / h)\n",
    "        else:\n",
    "            ow = self.crop_size\n",
    "            oh = int(1.0 * h * ow / w)\n",
    "        img = img.resize((ow, oh), Image.BILINEAR)\n",
    "        anim_obj = anim_obj.resize((ow, oh), Image.NEAREST)\n",
    "        inanim_obj = inanim_obj.resize((ow, oh), Image.NEAREST)\n",
    "        anim = anim.resize((ow, oh), Image.NEAREST)\n",
    "        inanim = inanim.resize((ow, oh), Image.NEAREST)\n",
    "        adj1 = adj1.resize((ow, oh), Image.NEAREST)\n",
    "        adj2 = adj2.resize((ow, oh), Image.NEAREST)\n",
    "        adj1_o = adj1_o.resize((ow, oh), Image.NEAREST)\n",
    "        adj2_o = adj2_o.resize((ow, oh), Image.NEAREST)\n",
    "\n",
    "        # center crop\n",
    "        w, h = img.size\n",
    "        x1 = int(round((w - self.crop_size) / 2.))\n",
    "        y1 = int(round((h - self.crop_size) / 2.))\n",
    "        img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        anim_obj = anim_obj.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        inanim_obj = inanim_obj.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        anim = anim.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        inanim = inanim.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj1 = adj1.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj2 = adj2.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj1_o = adj1_o.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "        adj2_o = adj2_o.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "\n",
    "class FixedResize(object):\n",
    "    def __init__(self, size):\n",
    "        self.size = (size, size)  # size: (h, w)\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        # assert img.size == mask.size\n",
    "\n",
    "        img = img.resize(self.size, Image.BILINEAR)\n",
    "        anim_obj = anim_obj.resize(self.size, Image.NEAREST)\n",
    "        inanim_obj = inanim_obj.resize(self.size, Image.NEAREST)\n",
    "        anim = anim.resize(self.size, Image.NEAREST)\n",
    "        inanim = inanim.resize(self.size, Image.NEAREST)\n",
    "        adj1 = adj1.resize(self.size, Image.NEAREST)\n",
    "        adj2 = adj2.resize(self.size, Image.NEAREST)\n",
    "        adj1_o = adj1_o.resize(self.size, Image.NEAREST)\n",
    "        adj2_o = adj2_o.resize(self.size, Image.NEAREST)\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}\n",
    "    \n",
    "class ResizeMasks(object):\n",
    "    def __init__(self, crop_size):\n",
    "        self.crop_size = crop_size\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        img = sample['image']\n",
    "        anim_obj = sample['anim_obj']\n",
    "        inanim_obj = sample['inanim_obj']\n",
    "        anim = sample['anim']\n",
    "        inanim = sample['inanim']\n",
    "        adj1 = sample['adj1']\n",
    "        adj2 = sample['adj2']\n",
    "        adj1_o = sample['adj1_o']\n",
    "        adj2_o = sample['adj2_o']\n",
    "\n",
    "        w, h = img.size\n",
    "        short_size = 0\n",
    "        if w > h:\n",
    "            ow = self.crop_size\n",
    "            oh = int(1.0 * h * ow / w)\n",
    "            short_size = oh\n",
    "        else:\n",
    "            oh = self.crop_size\n",
    "            ow = int(1.0 * w * oh / h)\n",
    "            short_size = ow\n",
    "\n",
    "        img = img.resize((ow, oh), Image.BILINEAR)\n",
    "        anim_obj = anim_obj.resize((ow, oh), Image.NEAREST)\n",
    "        inanim_obj = inanim_obj.resize((ow, oh), Image.NEAREST)\n",
    "        anim = anim.resize((ow, oh), Image.NEAREST)\n",
    "        inanim = inanim.resize((ow, oh), Image.NEAREST)\n",
    "        adj1 = adj1.resize((ow, oh), Image.NEAREST)\n",
    "        adj2 = adj2.resize((ow, oh), Image.NEAREST)\n",
    "        adj1_o = adj1_o.resize((ow, oh), Image.NEAREST)\n",
    "        adj2_o = adj2_o.resize((ow, oh), Image.NEAREST)\n",
    "\n",
    "        if short_size < self.crop_size:\n",
    "            padh = self.crop_size - oh if oh < self.crop_size else 0\n",
    "            padw = self.crop_size - ow if ow < self.crop_size else 0\n",
    "            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n",
    "            anim_obj = ImageOps.expand(anim_obj, border=(0, 0, padw, padh), fill=0)\n",
    "            inanim_obj = ImageOps.expand(inanim_obj, border=(0, 0, padw, padh), fill=0)\n",
    "            anim = ImageOps.expand(anim, border=(0, 0, padw, padh), fill=0)\n",
    "            inanim = ImageOps.expand(inanim, border=(0, 0, padw, padh), fill=0)\n",
    "            adj1 = ImageOps.expand(adj1, border=(0, 0, padw, padh), fill=0)\n",
    "            adj2 = ImageOps.expand(adj2, border=(0, 0, padw, padh), fill=0)\n",
    "            adj1_o = ImageOps.expand(adj1_o, border=(0, 0, padw, padh), fill=0)\n",
    "            adj2_o = ImageOps.expand(adj2_o, border=(0, 0, padw, padh), fill=0)\n",
    "\n",
    "        return {'image': img,\n",
    "                'anim_obj': anim_obj,\n",
    "                'inanim_obj': inanim_obj,\n",
    "                'anim': anim,\n",
    "                'inanim': inanim,\n",
    "                'adj1':adj1,\n",
    "                'adj2':adj2,\n",
    "                'adj1_o':adj1_o,\n",
    "                'adj2_o':adj2_o}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Dataset class: manages animate/inanimate separation for images and labels.\n",
    "\"\"\"\n",
    "class SegmentationDataset(Dataset):\n",
    "    def __init__(self, folder, mode='train',\n",
    "                 input_shape=(513, 513), num_classes=201):\n",
    "\n",
    "        with open(folder + mode + '.txt') as f:\n",
    "            self.image_path_list = f.read().splitlines()\n",
    "\n",
    "        self.input_shape = input_shape\n",
    "        self.mode = mode\n",
    "        self.folder = folder\n",
    "        self.num_classes = num_classes\n",
    "        self.anim_aggregation_map = aggregate_parts_to_classes(num_classes, animate=True)\n",
    "        self.inanim_aggregation_map = aggregate_parts_to_classes(num_classes, animate=False)\n",
    "        self.anim_classes = [3, 8, 10, 12, 13, 15, 17]\n",
    "        self.inanim_classes = [1, 2, 4, 5, 6, 7, 9, 11, 14, 16, 18, 19, 20]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_path_list)\n",
    "\n",
    "    def __getitem__(self, i):\n",
    "        \n",
    "        image_path = self.folder + 'images/' + self.image_path_list[i] + '.png'\n",
    "        part_label_path = self.folder + 'parts201/' + self.image_path_list[i] + '.png'\n",
    "        obj_label_path = self.folder + 'objs21/' + self.image_path_list[i] + '.png'\n",
    "\n",
    "        sample = {}\n",
    "        sample['image'] = Image.open(image_path)\n",
    "        org_size = sample['image'].size\n",
    "\n",
    "        part_label = np.array(Image.open(part_label_path))\n",
    "        anim_label = self.aggregate_anim_labels(part_label)\n",
    "        sample['anim'] = Image.fromarray(anim_label)\n",
    "        inanim_label = self.aggregate_inanim_labels(part_label)\n",
    "        sample['inanim'] = Image.fromarray(inanim_label)\n",
    "\n",
    "        obj_label = np.array(Image.open(obj_label_path))\n",
    "        anim_obj_label = self.anim_remove_objs(obj_label)\n",
    "        sample['anim_obj'] = Image.fromarray(anim_obj_label)\n",
    "        inanim_obj_label = self.inanim_remove_objs(obj_label)\n",
    "        sample['inanim_obj'] = Image.fromarray(inanim_obj_label)\n",
    "        \n",
    "        # Full adj\n",
    "        anim_lr_path = self.folder + 'full_adj201/animate/left_right/' + self.image_path_list[i] + '.png'\n",
    "        inanim_lr_path = self.folder + 'full_adj201/inanimate/left_right/' + self.image_path_list[i] + '.png'\n",
    "        anim_lr = np.array(Image.open(anim_lr_path))\n",
    "        inanim_lr = np.array(Image.open(inanim_lr_path))\n",
    "        overlap = (anim_lr > 0) * (inanim_lr > 0)\n",
    "        anim_lr -= anim_lr * overlap\n",
    "        sample['adj1'] = Image.fromarray(anim_lr + inanim_lr)\n",
    "        \n",
    "        anim_fb_path = self.folder + 'full_adj201/animate/front_back/' + self.image_path_list[i] + '.png'\n",
    "        inanim_fb_path = self.folder + 'full_adj201/inanimate/front_back/' + self.image_path_list[i] + '.png'\n",
    "        anim_fb = np.array(Image.open(anim_fb_path))\n",
    "        inanim_fb = np.array(Image.open(inanim_fb_path))\n",
    "        overlap = (anim_fb > 0) * (inanim_fb > 0)\n",
    "        anim_fb -= anim_fb * overlap\n",
    "        sample['adj2'] = Image.fromarray(anim_fb + inanim_fb)\n",
    "\n",
    "        # Adj\n",
    "        anim_lr_path = self.folder + 'adj201/animate/left_right/' + self.image_path_list[i] + '.png'\n",
    "        inanim_lr_path = self.folder + 'adj201/inanimate/left_right/' + self.image_path_list[i] + '.png'\n",
    "        anim_lr = np.array(Image.open(anim_lr_path))\n",
    "        inanim_lr = np.array(Image.open(inanim_lr_path))\n",
    "        overlap = (anim_lr > 0) * (inanim_lr > 0)\n",
    "        anim_lr -= anim_lr * overlap\n",
    "        sample['adj1_o'] = Image.fromarray(anim_lr + inanim_lr)\n",
    "\n",
    "        anim_fb_path = self.folder + 'adj201/animate/front_back/' + self.image_path_list[i] + '.png'\n",
    "        inanim_fb_path = self.folder + 'adj201/inanimate/front_back/' + self.image_path_list[i] + '.png'\n",
    "        anim_fb = np.array(Image.open(anim_fb_path))\n",
    "        inanim_fb = np.array(Image.open(inanim_fb_path))\n",
    "        overlap = (anim_fb > 0) * (inanim_fb > 0)\n",
    "        anim_fb -= anim_fb * overlap\n",
    "        sample['adj2_o'] = Image.fromarray(anim_fb + inanim_fb)\n",
    "\n",
    "        # Use the train transforms when training, the val transforms otherwise.\n",
    "        if self.mode == 'train':\n",
    "            sample = self.transform_tr(sample)\n",
    "        else:\n",
    "            sample = self.transform_val(sample)\n",
    "            \n",
    "        sample['path'] = self.image_path_list[i]\n",
    "        sample['orgsize'] = org_size\n",
    "\n",
    "        return sample\n",
    "\n",
    "    def anim_remove_objs(self, obj_label):\n",
    "        final_label = np.zeros(obj_label.shape)\n",
    "        for i in self.anim_classes:\n",
    "            obj = (obj_label == i)\n",
    "            obj = obj.astype(float)\n",
    "            final_label += (obj * i)\n",
    "\n",
    "        return final_label\n",
    "    \n",
    "    def inanim_remove_objs(self, obj_label):\n",
    "        final_label = np.zeros(obj_label.shape)\n",
    "        for i in self.inanim_classes:\n",
    "            obj = (obj_label == i)\n",
    "            obj = obj.astype(float)\n",
    "            final_label += (obj * i)\n",
    "\n",
    "        return final_label\n",
    "\n",
    "    def aggregate_anim_labels(self, part_label):\n",
    "        final_label = np.zeros(part_label.shape)\n",
    "        for i in range(self.num_classes):\n",
    "            part = (part_label == i)\n",
    "            part = part.astype(float)\n",
    "            final_label += self.anim_aggregation_map[i] * part\n",
    "\n",
    "        return final_label\n",
    "    \n",
    "    def aggregate_inanim_labels(self, part_label):\n",
    "        final_label = np.zeros(part_label.shape)\n",
    "        for i in range(self.num_classes):\n",
    "            part = (part_label == i)\n",
    "            part = part.astype(float)\n",
    "            final_label += self.inanim_aggregation_map[i] * part\n",
    "\n",
    "        return final_label\n",
    "\n",
    "    def transform_tr(self, sample):\n",
    "        composed_transforms = transforms.Compose([\n",
    "            RandomHorizontalFlip(),\n",
    "            RandomScaleCrop(base_size=513, crop_size=513),\n",
    "            RandomGaussianBlur(),\n",
    "            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n",
    "            ToTensor()])\n",
    "\n",
    "        return composed_transforms(sample)\n",
    "\n",
    "    def transform_val(self, sample):\n",
    "        composed_transforms = transforms.Compose([\n",
    "            ResizeMasks(crop_size=770),\n",
    "            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n",
    "            ToTensor()])\n",
    "\n",
    "        return composed_transforms(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LR_Scheduler(object):\n",
    "    \"\"\"Learning Rate Scheduler\n",
    "    Step mode: ``lr = baselr * 0.1 ^ floor(epoch / lr_step)``\n",
    "    Cosine mode: ``lr = baselr * 0.5 * (1 + cos(pi * iter / maxiter))``\n",
    "    Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9``\n",
    "    Args:\n",
    "        args:\n",
    "          :attr:`args.lr_scheduler` lr scheduler mode (`cos`, `poly`, `step`),\n",
    "          :attr:`args.lr` base learning rate, :attr:`args.epochs` number of epochs,\n",
    "          :attr:`args.lr_step`\n",
    "        iters_per_epoch: number of iterations per epoch\n",
    "    \"\"\"\n",
    "    def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,\n",
    "                 lr_step=0, warmup_epochs=0):\n",
    "        self.mode = mode\n",
    "        print('Using {} LR Scheduler!'.format(self.mode))\n",
    "        self.lr = base_lr\n",
    "        if mode == 'step':\n",
    "            assert lr_step\n",
    "        self.lr_step = lr_step\n",
    "        self.iters_per_epoch = iters_per_epoch\n",
    "        self.N = num_epochs * iters_per_epoch\n",
    "        self.epoch = -1\n",
    "        self.warmup_iters = warmup_epochs * iters_per_epoch\n",
    "\n",
    "    def __call__(self, optimizer, i, epoch):\n",
    "        T = epoch * self.iters_per_epoch + i\n",
    "        if self.mode == 'cos':\n",
    "            lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))\n",
    "        elif self.mode == 'poly':\n",
    "            lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9)\n",
    "        elif self.mode == 'step':\n",
    "            lr = self.lr * (0.1 ** (epoch // self.lr_step))\n",
    "        else:\n",
    "            raise NotImplementedError\n",
    "        # warm up lr schedule\n",
    "        if self.warmup_iters > 0 and T < self.warmup_iters:\n",
    "            lr = lr * 1.0 * T / self.warmup_iters\n",
    "        if epoch > self.epoch:\n",
    "            print('\\n=>Epoch %i, learning rate = %.4f' % (epoch, lr))\n",
    "            self.epoch = epoch\n",
    "        assert lr >= 0\n",
    "        self._adjust_learning_rate(optimizer, lr)\n",
    "\n",
    "    def _adjust_learning_rate(self, optimizer, lr):\n",
    "        if len(optimizer.param_groups) == 1:\n",
    "            optimizer.param_groups[0]['lr'] = lr\n",
    "        else:\n",
    "            # enlarge the lr at the head\n",
    "            optimizer.param_groups[0]['lr'] = lr\n",
    "            for i in range(1, len(optimizer.param_groups)):\n",
    "                optimizer.param_groups[i]['lr'] = lr * 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Number of output classes for the animate and inanimate decoder heads.\n",
    "\"\"\"\n",
    "num_anim_classes = 24\n",
    "num_inanim_classes = 29"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Setting up training parameters: data path, batch size, and the training and validation datasets.\n",
    "\"\"\"\n",
    "\n",
    "PATH = '/path/to/dataset/'\n",
    "batch_size = 16\n",
    "\n",
    "train_dataset = SegmentationDataset(PATH)\n",
    "train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=4)\n",
    "valid_dataset = SegmentationDataset(PATH, mode='val')\n",
    "valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Float(backbone='resnet101', num_anim_classes=num_anim_classes, num_inanim_classes=num_inanim_classes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "gpu_ids = [0,1,2,3,4,5,6,7]\n",
    "lr = 0.007\n",
    "\n",
    "train_params = [{'params': model.get_1x_lr_params(), 'lr': lr},\n",
    "                {'params': model.get_10x_lr_params(), 'lr': lr * 10}]\n",
    "\n",
    "optimizer = optim.SGD(train_params, momentum=0.9, weight_decay=1e-4)\n",
    "lr_scheduler = LR_Scheduler('poly', lr, 60, len(train_dataloader))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Parallelizing the model, deploying simultaneous training on multiple GPUs.\n",
    "\"\"\"\n",
    "if torch.cuda.device_count() > 1:\n",
    "    model = torch.nn.DataParallel(model, device_ids=gpu_ids)\n",
    "    patch_replication_callback(model)\n",
    "    model.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Model training.\n",
    "\"\"\"\n",
    "\n",
    "model.train()\n",
    "label_weights = torch.Tensor([0.3, 1.0, 1.0]).cuda()\n",
    "\n",
    "for epoch in range(60):\n",
    "    epoch_start = time.time()\n",
    "    train_loss_avg = AverageMeter()\n",
    "    anim_sqiou_avg = sqIOUMeter(num_anim_classes)\n",
    "    anim_miou_avg = Evaluator(num_anim_classes)\n",
    "    inanim_sqiou_avg = sqIOUMeter(num_inanim_classes)\n",
    "    inanim_miou_avg = Evaluator(num_inanim_classes)\n",
    "    # sqiogt --> sq Intersection over Ground Truth (not penalizing predicting extra)\n",
    "    adj1_sqiogt_avg = sqIOUMeter(2)\n",
    "    adj2_sqiogt_avg = sqIOUMeter(2)\n",
    "\n",
    "    tbar = tqdm(train_dataloader)\n",
    "    for i, sample in enumerate(tbar):\n",
    "        images = sample['image'].float()\n",
    "\n",
    "        anim_objs = sample['anim_obj'].type(torch.LongTensor)\n",
    "        anim_objs = anim_objs > 0\n",
    "        anim_objs = anim_objs.type(torch.LongTensor)\n",
    "\n",
    "        inanim_objs = sample['inanim_obj'].type(torch.LongTensor)\n",
    "        inanim_objs = inanim_objs > 0\n",
    "        inanim_objs = inanim_objs.type(torch.LongTensor)\n",
    "\n",
    "        anims = sample['anim'].type(torch.LongTensor)\n",
    "        inanims = sample['inanim'].type(torch.LongTensor)\n",
    "\n",
    "        adj1 = sample['adj1'].type(torch.LongTensor)\n",
    "        adj2 = sample['adj2'].type(torch.LongTensor)\n",
    "        adj1_o = sample['adj1_o'].type(torch.LongTensor)\n",
    "        adj2_o = sample['adj2_o'].type(torch.LongTensor)\n",
    "        nb = images.shape[0]\n",
    "\n",
    "        images = images.cuda()\n",
    "        anim_objs = anim_objs.cuda()\n",
    "        inanim_objs = inanim_objs.cuda()\n",
    "        anims = anims.cuda()\n",
    "        inanims = inanims.cuda()\n",
    "        adj1 = adj1.cuda()\n",
    "        adj2 = adj2.cuda()\n",
    "        adj1_o = adj1_o.cuda()\n",
    "        adj2_o = adj2_o.cuda()\n",
    "\n",
    "        lr_scheduler(optimizer, i, epoch)\n",
    "        optimizer.zero_grad()\n",
    "        anim_pred, inanim_pred, pred_adj1, pred_adj2 = model(images)\n",
    "\n",
    "        anim_ce_loss = crossentropy_loss(anim_pred, anims)\n",
    "        anim_graph_loss = adjacent_graph_loss(anim_pred, anims, num_anim_classes)\n",
    "        anim_obj_loss = objmask_loss(anim_pred, anim_objs, num_anim_classes)\n",
    "        \n",
    "        inanim_ce_loss = crossentropy_loss(inanim_pred, inanims)\n",
    "        inanim_graph_loss = adjacent_graph_loss(inanim_pred, inanims, num_inanim_classes)\n",
    "        inanim_obj_loss = objmask_loss(inanim_pred, inanim_objs, num_inanim_classes)\n",
    "\n",
    "        adj1_loss = crossentropy_loss(pred_adj1, adj1, lambda_loss=0.5, label_weights=label_weights)\n",
    "        adj2_loss = crossentropy_loss(pred_adj2, adj2, lambda_loss=0.5, label_weights=label_weights)\n",
    "\n",
    "        anim_loss = anim_ce_loss + anim_graph_loss + anim_obj_loss\n",
    "        inanim_loss = inanim_ce_loss + inanim_graph_loss + inanim_obj_loss\n",
    "        loss = anim_loss + inanim_loss + adj1_loss + adj2_loss\n",
    "\n",
    "        ious, counts = jaccard_perpart(anim_pred, anims, num_anim_classes)\n",
    "        for cl in range(num_anim_classes):\n",
    "            ious[cl] = ious[cl].item()\n",
    "            counts[cl] = counts[cl].item()\n",
    "        anim_sqiou_avg.update(ious, counts)\n",
    "\n",
    "        ious, counts = jaccard_perpart(inanim_pred, inanims, num_inanim_classes)\n",
    "        for cl in range(num_inanim_classes):\n",
    "            ious[cl] = ious[cl].item()\n",
    "            counts[cl] = counts[cl].item()\n",
    "        inanim_sqiou_avg.update(ious, counts)\n",
    "\n",
    "        ious, counts = iogt_perpart(pred_adj1, adj1_o, 3)\n",
    "        for cl in range(2):\n",
    "            ious[cl] = ious[cl].item()\n",
    "            counts[cl] = counts[cl].item()\n",
    "        adj1_sqiogt_avg.update(ious, counts)\n",
    "\n",
    "        ious, counts = iogt_perpart(pred_adj2, adj2_o, 3)\n",
    "        for cl in range(2):\n",
    "            ious[cl] = ious[cl].item()\n",
    "            counts[cl] = counts[cl].item()\n",
    "        adj2_sqiogt_avg.update(ious, counts)\n",
    "\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        train_loss = loss.item()\n",
    "        train_loss_avg.update(train_loss, nb)\n",
    "\n",
    "        anims_ = anims.cpu().detach().numpy()\n",
    "        anim_pred_ = anim_pred.cpu().detach().numpy()\n",
    "        anim_pred_ = np.argmax(anim_pred_, 1)\n",
    "        anim_miou_avg.add_batch(anims_, anim_pred_)\n",
    "\n",
    "        inanims_ = inanims.cpu().detach().numpy()\n",
    "        inanim_pred_ = inanim_pred.cpu().detach().numpy()\n",
    "        inanim_pred_ = np.argmax(inanim_pred_, 1)\n",
    "        inanim_miou_avg.add_batch(inanims_, inanim_pred_)\n",
    "\n",
    "        tbar.set_description('a:%.4f, i:%.4f, lr:%.3f, fb:%.3f' % (anim_sqiou_avg.Mean_Intersection_over_Union(),\n",
    "                                                                   inanim_sqiou_avg.Mean_Intersection_over_Union(),\n",
    "                                                                   adj1_sqiogt_avg.avg,\n",
    "                                                                   adj2_sqiogt_avg.avg))\n",
    "\n",
    "    epoch_end = time.time()\n",
    "\n",
    "    print('Epoch:', epoch)\n",
    "    print('Loss    : {:.5f}'.format(loss.item()))\n",
    "    print('Loss Avg: {:.5f}'.format(train_loss_avg.avg))\n",
    "\n",
    "    print('Anim pqIOU   : {:.5f}'.format(anim_sqiou_avg.avg))\n",
    "    print('Anim mIOU    : {:.5f}'.format(anim_miou_avg.Mean_Intersection_over_Union()))\n",
    "\n",
    "    print('Inanim pqIOU   : {:.5f}'.format(inanim_sqiou_avg.avg))\n",
    "    print('Inanim mIOU    : {:.5f}'.format(inanim_miou_avg.Mean_Intersection_over_Union()))\n",
    "\n",
    "    print('Left    : {:.5f}'.format(adj1_sqiogt_avg.vals[0]/adj1_sqiogt_avg.counts[0]))\n",
    "    print('Right   : {:.5f}'.format(adj1_sqiogt_avg.vals[1]/adj1_sqiogt_avg.counts[1]))\n",
    "    print('LR      : {:.5f}'.format(adj1_sqiogt_avg.avg))\n",
    "\n",
    "    print('Front   : {:.5f}'.format(adj2_sqiogt_avg.vals[0]/adj2_sqiogt_avg.counts[0]))\n",
    "    print('Back    : {:.5f}'.format(adj2_sqiogt_avg.vals[1]/adj2_sqiogt_avg.counts[1]))\n",
    "    print('FB      : {:.5f}'.format(adj2_sqiogt_avg.avg))\n",
    "    print(\"Epoch time taken:\", str(epoch_end-epoch_start))\n",
    "    print('----------------------------------------')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save trained model.\n",
    "torch.save(model.module.state_dict(), '/path/to/saved/model/destination')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}