// caffe2 MPSCNN operator tests (iOS / Metal Performance Shaders, mobile-only build).
#include "caffe2/core/common.h"

#if defined(C10_MOBILE) && defined(CAFFE2_USE_MPSCNN_TEST)

#include "mpscnn_context.h"
#include "mpscnn_graph_mask.h"

#include "caffe2/core/logging.h"
#include "caffe2/core/operator_schema.h"
#include "caffe2/core/workspace.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/proto_utils.h"

#import <UIKit/UIDevice.h>

// True iff the running iOS system version is >= v, using a numeric
// (not lexicographic) comparison of the dotted version strings, so
// e.g. "10.2" >= "9.9" compares correctly.
#define SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(v) \
  ([[[UIDevice currentDevice] systemVersion]       \
       compare:v                                   \
       options:NSNumericSearch] != NSOrderedAscending)

namespace caffe2 {
/* Utility functions for operator definition */
24
void add_arg_int(OperatorDef& op, string name, int value) {
25
  auto& arg = *(op.add_arg());
26
  arg.set_name(name);
27
  arg.set_i(value);
28
}
29

30
void add_arg_str(OperatorDef& op, string name, string value) {
31
  auto& arg = *(op.add_arg());
32
  arg.set_name(name);
33
  arg.set_s(value);
34
}
35

36
void add_arg_float(OperatorDef& op, string name, float value) {
37
  auto& arg = *(op.add_arg());
38
  arg.set_name(name);
39
  arg.set_f(value);
40
}
41

42
void add_arg_int_list(
43
    OperatorDef& op,
44
    std::vector<string> names,
45
    std::vector<int> values) {
46
  CAFFE_ENFORCE_EQ(names.size(), values.size());
47
  for (auto i = 0; i < names.size(); i++) {
48
    add_arg_int(op, names[i], values[i]);
49
  }
50
}
51

52
void add_arg_str_list(
53
    OperatorDef& op,
54
    std::vector<string> names,
55
    std::vector<string> values) {
56
  CAFFE_ENFORCE_EQ(names.size(), values.size());
57
  for (auto i = 0; i < names.size(); i++) {
58
    add_arg_str(op, names[i], values[i]);
59
  }
60
}
61

62
void add_inputs(OperatorDef& op, std::vector<string> inputs) {
63
  for (auto i = 0; i < inputs.size(); i++) {
64
    op.add_input(inputs[i]);
65
  }
66
}
67

68
void add_outputs(OperatorDef& op, std::vector<string> outputs) {
69
  for (auto i = 0; i < outputs.size(); i++) {
70
    op.add_output(outputs[i]);
71
  }
72
}
73

74
void testMPSCNN() {
75
  // initialize.
76
  getMPSCNNContext();
77

78
  {
79
    for (const auto C : std::vector<size_t>{1, 2, 3, 4, 8, 11, 12}) {
80
      for (const auto H : std::vector<size_t>{1, 7, 15, 39}) {
81
        for (const auto W : std::vector<size_t>{1, 7, 15, 39}) {
82
          for (const auto N : std::vector<size_t>{1, 2}) {
83
            for (const auto BS : std::vector<size_t>{1, 2}) {
84
              LOG(INFO) << "MPSCNNCopyFrom/To Test";
85
              auto mtl = [&](size_t i) {
86
                return std::string("X_mtl_") + std::to_string(i);
87
              };
88
              auto cpu = [&](size_t i) {
89
                return std::string("X_cpu_") + std::to_string(i);
90
              };
91
              auto y_cpu = [&](size_t i) {
92
                return std::string("Y_cpu_") + std::to_string(i);
93
              };
94

95
              Workspace ws;
96
              for (auto i = 0; i < N; ++i) {
97
                auto* t = BlobGetMutableTensor(ws.CreateBlob(cpu(i)), CPU);
98
                t->Resize(BS, C, H, W);
99
                CPUContext ctx;
100
                math::RandGaussian<float, CPUContext>(
101
                    t->size(), 0, 1, t->mutable_data<float>(), &ctx);
102
              }
103

104
              NetDef netdef;
105
              {
106
                auto& op = *(netdef.add_op());
107
                op.set_type("CopyToMPSCNN");
108
                for (auto i = 0; i < N; ++i) {
109
                  op.add_input(cpu(i));
110
                  op.add_output(mtl(i));
111
                }
112
              }
113
              {
114
                auto& op = *(netdef.add_op());
115
                op.set_type("CopyFromMPSCNN");
116
                for (auto i = 0; i < N; ++i) {
117
                  op.add_input(mtl(i));
118
                  op.add_output(y_cpu(i));
119
                }
120
              }
121

122
              ws.RunNetOnce(netdef);
123
              for (auto i = 0; i < N; ++i) {
124
                const auto& t1 = ws.GetBlob(cpu(i))->Get<TensorCPU>();
125
                const auto& t2 = ws.GetBlob(y_cpu(i))->Get<TensorCPU>();
126
                CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
127
                for (auto i = 0; i < t1.size(); ++i) {
128
                  // FP16 <-> FP32 round trip.
129
                  TORCH_CHECK_NEAR(t1.data<float>()[i], t2.data<float>()[i], 1e-2);
130
                }
131
              }
132
            }
133
          }
134
        }
135
      }
136
    }
137
  }
138

139
  {
140
    for (const auto ndim : std::vector<size_t>{1, 2, 3, 4}) {
141
      for (const auto N : std::vector<size_t>{1, 2}) {
142
        LOG(INFO) << "MPSCNNCopyFrom/To ndim Test";
143
        auto mtl = [&](size_t i) {
144
          return std::string("X_mtl_") + std::to_string(i);
145
        };
146
        auto cpu = [&](size_t i) {
147
          return std::string("X_cpu_") + std::to_string(i);
148
        };
149
        auto y_cpu = [&](size_t i) {
150
          return std::string("Y_cpu_") + std::to_string(i);
151
        };
152

153
        Workspace ws;
154
        for (auto i = 0; i < N; ++i) {
155
          auto* t = BlobGetMutableTensor(ws.CreateBlob(cpu(i)), CPU);
156
          switch (ndim) {
157
            case 1:
158
              t->Resize(5);
159
              break;
160
            case 2:
161
              t->Resize(5, 3);
162
              break;
163
            case 3:
164
              t->Resize(5, 3, 4);
165
              break;
166
            case 4:
167
              t->Resize(5, 3, 4, 2);
168
              break;
169
          }
170
          CPUContext ctx;
171
          math::RandGaussian<float, CPUContext>(
172
              t->size(), 0, 1, t->mutable_data<float>(), &ctx);
173
        }
174

175
        NetDef netdef;
176
        {
177
          auto& op = *(netdef.add_op());
178
          op.set_type("CopyToMPSCNN");
179
          for (auto i = 0; i < N; ++i) {
180
            op.add_input(cpu(i));
181
            op.add_output(mtl(i));
182
          }
183
        }
184
        {
185
          auto& op = *(netdef.add_op());
186
          op.set_type("CopyFromMPSCNN");
187
          for (auto i = 0; i < N; ++i) {
188
            op.add_input(mtl(i));
189
            op.add_output(y_cpu(i));
190
          }
191
        }
192

193
        ws.RunNetOnce(netdef);
194
        for (auto i = 0; i < N; ++i) {
195
          const auto& t1 = ws.GetBlob(cpu(i))->Get<TensorCPU>();
196
          const auto& t2 = ws.GetBlob(y_cpu(i))->Get<TensorCPU>();
197
          CAFFE_ENFORCE_EQ(t1.size(), t2.size());
198
          for (auto i = 0; i < t1.size(); ++i) {
199
            // FP16 <-> FP32 round trip.
200
            TORCH_CHECK_NEAR(t1.data<float>()[i], t2.data<float>()[i], 1e-2);
201
          }
202
        }
203
      }
204
    }
205
  }
206

207
  {
208
    for (const auto& batch_size : std::vector<int>{{1, 2}}) {
209
      for (const auto& channels : std::vector<int>{{3, 8}}) {
210
        LOG(INFO) << "MPSCNNNormalizePlanarYUV Test: ";
211
        Workspace ws;
212
        {
213
          auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
214
          t->Resize(batch_size, channels, 8, 13);
215
          CPUContext ctx;
216
          math::RandGaussian<float, CPUContext>(
217
              t->size(), 0, 1, t->mutable_data<float>(), &ctx);
218
        }
219

220
        {
221
          auto* t = BlobGetMutableTensor(ws.CreateBlob("mean"), CPU);
222
          t->Resize(1, channels);
223
          CPUContext ctx;
224
          math::RandGaussian<float, CPUContext>(
225
              t->size(), 0, 1, t->mutable_data<float>(), &ctx);
226
        }
227
        {
228
          auto* t = BlobGetMutableTensor(ws.CreateBlob("stddev"), CPU);
229
          t->Resize(1, channels);
230
          CPUContext ctx;
231
          math::RandUniform<float, CPUContext>(
232
              t->size(), 0.5, 1.5, t->mutable_data<float>(), &ctx);
233
        }
234

235
        NetDef netdef;
236
        {
237
          auto& op = *(netdef.add_op());
238
          op.set_type("CopyToMPSCNN");
239
          op.add_input("X_cpu");
240
          op.add_output("X_mtl");
241
        }
242

243
        {
244
          auto& op = *(netdef.add_op());
245
          op.set_type("MPSCNNNormalizePlanarYUV");
246
          op.add_input("X_mtl");
247
          op.add_input("mean");
248
          op.add_input("stddev");
249
          op.add_output("Y_mtl");
250
        }
251

252
        {
253
          auto& op = *(netdef.add_op());
254
          op.set_type("CopyFromMPSCNN");
255
          op.add_input("Y_mtl");
256
          op.add_output("Y_cpu");
257
        }
258

259
        {
260
          auto& op = *(netdef.add_op());
261
          op.set_type("NormalizePlanarYUV");
262
          op.add_input("X_cpu");
263
          op.add_input("mean");
264
          op.add_input("stddev");
265
          op.add_output("Y_ref");
266
        }
267

268
        ws.RunNetOnce(netdef);
269
        const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
270
        const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
271

272
        CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
273
        for (auto i = 0; i < t1.size(); ++i) {
274
          // FP16 <-> FP32 round trip, accumulation, etc.
275
          const float t1_i = t1.data<float>()[i];
276
          const float t2_i = t2.data<float>()[i];
277
          TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
278
        }
279
      }
280
    }
281
  }
282

283
  {
284
    LOG(INFO) << "MPSCNNInstanceNorm Test";
285
    enum class PreluTy { NONE, CHANNEL, SHARED };
286
    for (const auto batchSize : {1, 2}) {
287
      for (const auto channels : {3, 8}) {
288
        for (const auto prelu :
289
             {PreluTy::NONE, PreluTy::CHANNEL, PreluTy::SHARED}) {
290
          for (const auto dim : {10, 40}) {
291
            Workspace ws;
292
            {
293
              auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
294
              t->Resize(batchSize, channels, dim, dim);
295
              CPUContext ctx;
296
              // Too noisy.
297
              math::RandGaussian<float, CPUContext>(
298
                  t->size(), 0, 3, t->mutable_data<float>(), &ctx);
299
            }
300

301
            {
302
              auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
303
              t->Resize(channels);
304
              CPUContext ctx;
305
              for (auto i = 0; i < t->size(); ++i) {
306
                t->mutable_data<float>()[i] = i;
307
              }
308
              // Too noisy.
309
              // math::RandGaussian<float, CPUContext>(t->size(), 0, 1,
310
              // t->mutable_data<float>(), &ctx);
311
            }
312
            {
313
              auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
314
              t->Resize(channels);
315
              CPUContext ctx;
316
              for (auto i = 0; i < t->size(); ++i) {
317
                t->mutable_data<float>()[i] = 8 - 2 * i;
318
              }
319
              // Too noisy.
320
              // math::RandGaussian<float, CPUContext>(t->size(), 0, 1,
321
              // t->mutable_data<float>(), &ctx);
322
            }
323
            {
324
              auto* t = BlobGetMutableTensor(ws.CreateBlob("pw"), CPU);
325
              t->Resize(prelu == PreluTy::SHARED ? 1 : channels);
326
              CPUContext ctx;
327
              // Too noisy.
328
              math::RandGaussian<float, CPUContext>(
329
                  t->size(), 0, 1, t->mutable_data<float>(), &ctx);
330
            }
331

332
            NetDef netdef;
333
            {
334
              auto& op = *(netdef.add_op());
335
              op.set_type("CopyToMPSCNN");
336
              op.add_input("X_cpu");
337
              op.add_output("X_mtl");
338
            }
339

340
            {
341
              auto& op = *(netdef.add_op());
342
              op.set_type(
343
                  prelu == PreluTy::NONE ? "MPSCNNInstanceNorm"
344
                                         : "MPSCNNInstanceNormPRelu");
345
              op.add_input("X_mtl");
346
              op.add_input("W");
347
              op.add_input("b");
348
              if (prelu != PreluTy::NONE) {
349
                op.add_input("pw");
350
              }
351
              op.add_output("Y_mtl");
352
            }
353

354
            {
355
              auto& op = *(netdef.add_op());
356
              op.set_type("CopyFromMPSCNN");
357
              op.add_input("Y_mtl");
358
              op.add_output("Y_cpu");
359
            }
360

361
            {
362
              auto& op = *(netdef.add_op());
363
              op.set_type("InstanceNorm");
364
              op.add_input("X_cpu");
365
              op.add_input("W");
366
              op.add_input("b");
367
              auto& arg = *(op.add_arg());
368
              arg.set_name("order");
369
              arg.set_s("NCHW");
370
              op.add_output("Y_ref");
371
            }
372

373
            if (prelu != PreluTy::NONE) {
374
              auto& op = *(netdef.add_op());
375
              op.set_type("PRelu");
376
              op.add_input("Y_ref");
377
              op.add_input("pw");
378
              auto& arg = *(op.add_arg());
379
              arg.set_name("order");
380
              arg.set_s("NCHW");
381
              op.add_output("Y_ref");
382
            }
383

384
            ws.RunNetOnce(netdef);
385
            const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
386
            const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
387

388
            CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
389
            for (auto i = 0; i < t1.size(); ++i) {
390
              // FP16 <-> FP32 round trip, accumulation, etc.
391
              const float t1_i = t1.data<float>()[i];
392
              const float t2_i = t2.data<float>()[i];
393
              // Can be larger due to FP errors.
394
              constexpr float tol = 5.0e-2;
395
              CHECK(std::abs(t1_i - t2_i) <= (tol + tol * std::abs(t1_i)))
396
                  << t1_i << ", " << t2_i;
397
            }
398
          }
399
        }
400
      }
401
    }
402
  }
403

404
  {
405
    for (const auto& shared : std::vector<bool>{{true, false}}) {
406
      for (const auto& array : std::vector<bool>{{true, false}}) {
407
        for (const auto& batch_size : std::vector<int>{{1, 2}}) {
408
          LOG(INFO) << "MPSCNNPRelu Test: " << shared << array << batch_size;
409
          Workspace ws;
410
          const auto channels = array ? 12 : 3;
411
          {
412
            auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
413
            t->Resize(batch_size, channels, 8, 13);
414
            CPUContext ctx;
415
            math::RandGaussian<float, CPUContext>(
416
                t->size(), 0, 1, t->mutable_data<float>(), &ctx);
417
          }
418

419
          {
420
            auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
421
            t->Resize(shared ? channels : 1);
422
            CPUContext ctx;
423
            math::RandGaussian<float, CPUContext>(
424
                t->size(), 0, 1, t->mutable_data<float>(), &ctx);
425
          }
426

427
          NetDef netdef;
428
          {
429
            auto& op = *(netdef.add_op());
430
            op.set_type("CopyToMPSCNN");
431
            op.add_input("X_cpu");
432
            op.add_output("X_mtl");
433
          }
434

435
          {
436
            auto& op = *(netdef.add_op());
437
            op.set_type("MPSCNNPRelu");
438
            op.add_input("X_mtl");
439
            op.add_input("b");
440
            op.add_output("Y_mtl");
441
          }
442

443
          {
444
            auto& op = *(netdef.add_op());
445
            op.set_type("CopyFromMPSCNN");
446
            op.add_input("Y_mtl");
447
            op.add_output("Y_cpu");
448
          }
449

450
          {
451
            auto& op = *(netdef.add_op());
452
            op.set_type("PRelu");
453
            op.add_input("X_cpu");
454
            op.add_input("b");
455
            auto& arg = *(op.add_arg());
456
            arg.set_name("order");
457
            arg.set_s("NCHW");
458
            op.add_output("Y_ref");
459
          }
460

461
          ws.RunNetOnce(netdef);
462
          const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
463
          const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
464

465
          CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
466
          for (auto i = 0; i < t1.size(); ++i) {
467
            // FP16 <-> FP32 round trip, accumulation, etc.
468
            const float t1_i = t1.data<float>()[i];
469
            const float t2_i = t2.data<float>()[i];
470
            TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
471
          }
472
        }
473
      }
474
    }
475
  }
476

477
  {
478
    for (const auto& channels : std::vector<size_t>{3, 12, 15}) {
479
      for (const auto& batch_size : std::vector<size_t>{1, 2}) {
480
        LOG(INFO) << "MPSCNNSpatialBN Test: " << channels;
481
        Workspace ws;
482
        {
483
          auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
484
          t->Resize(batch_size, channels, 8, 13);
485
          CPUContext ctx;
486
          math::RandGaussian<float, CPUContext>(
487
              t->size(), 0, 1, t->mutable_data<float>(), &ctx);
488
        }
489

490
        for (const std::string name : {"scale", "bias", "mean", "var"}) {
491
          auto* t = BlobGetMutableTensor(ws.CreateBlob(name), CPU);
492
          t->Resize(channels);
493
          CPUContext ctx;
494
          // High mean to avoid var division by zero.
495
          math::RandGaussian<float, CPUContext>(
496
              t->size(), 0, 1, t->mutable_data<float>(), &ctx);
497
          if (name == "var") {
498
            for (auto i = 0; i < t->size(); ++i) {
499
              t->mutable_data<float>()[i] =
500
                  std::abs(t->mutable_data<float>()[i]) + 0.5;
501
            }
502
          }
503
        }
504

505
        NetDef netdef;
506
        {
507
          auto& op = *(netdef.add_op());
508
          op.set_type("CopyToMPSCNN");
509
          op.add_input("X_cpu");
510
          op.add_output("X_mtl");
511
        }
512

513
        {
514
          auto& op = *(netdef.add_op());
515
          op.set_type("MPSCNNSpatialBN");
516
          op.add_input("X_mtl");
517
          op.add_input("scale");
518
          op.add_input("bias");
519
          op.add_input("mean");
520
          op.add_input("var");
521
          {
522
            auto& arg = *(op.add_arg());
523
            arg.set_name(OpSchema::Arg_IsTest);
524
            arg.set_i(1);
525
          }
526

527
          op.add_output("Y_mtl");
528
        }
529

530
        {
531
          auto& op = *(netdef.add_op());
532
          op.set_type("CopyFromMPSCNN");
533
          op.add_input("Y_mtl");
534
          op.add_output("Y_cpu");
535
        }
536

537
        {
538
          auto& op = *(netdef.add_op());
539
          op.set_type("SpatialBN");
540
          op.add_input("X_cpu");
541
          op.add_input("scale");
542
          op.add_input("bias");
543
          op.add_input("mean");
544
          op.add_input("var");
545
          {
546
            auto& arg = *(op.add_arg());
547
            arg.set_name(OpSchema::Arg_IsTest);
548
            arg.set_i(1);
549
          }
550

551
          op.add_output("Y_ref");
552
        }
553

554
        ws.RunNetOnce(netdef);
555
        const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
556
        const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
557

558
        CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
559
        for (auto i = 0; i < t1.size(); ++i) {
560
          // FP16 <-> FP32 round trip, accumulation, etc.
561
          const float t1_i = t1.data<float>()[i];
562
          const float t2_i = t2.data<float>()[i];
563
          TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
564
        }
565
      }
566
    }
567
  }
568

569
  {
570
    for (const auto& batchSize : std::vector<size_t>{2, 1}) {
571
      for (const auto& H : std::vector<size_t>{1, 8}) {
572
        for (const auto& W : std::vector<size_t>{1, 8}) {
573
          for (const auto& CIn : std::vector<size_t>{1, 12, 224}) {
574
            for (const auto& COut : std::vector<size_t>{1, 12, 224}) {
575
              LOG(INFO) << "MPSCNNFC Test";
576
              Workspace ws;
577
              {
578
                auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
579
                t->Resize(batchSize, CIn, H, W);
580
                CPUContext ctx;
581
                math::RandGaussian<float, CPUContext>(
582
                    t->size(), 0, 1, t->mutable_data<float>(), &ctx);
583
              }
584

585
              {
586
                auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
587
                t->Resize(COut, CIn * H * W);
588
                CPUContext ctx;
589
                math::RandGaussian<float, CPUContext>(
590
                    t->size(), 0, 1, t->mutable_data<float>(), &ctx);
591
              }
592

593
              {
594
                auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
595
                t->Resize(COut);
596
                CPUContext ctx;
597
                math::RandGaussian<float, CPUContext>(
598
                    t->size(), 0, 0.0001, t->mutable_data<float>(), &ctx);
599
              }
600

601
              NetDef netdef;
602
              {
603
                auto& op = *(netdef.add_op());
604
                op.set_type("CopyToMPSCNN");
605
                op.add_input("X_cpu");
606
                op.add_output("X_mtl");
607
              }
608

609
              {
610
                auto& op = *(netdef.add_op());
611
                op.set_type("MPSCNNFC");
612
                op.add_input("X_mtl");
613
                op.add_input("W");
614
                op.add_input("b");
615
                op.add_output("Y_mtl");
616
              }
617

618
              {
619
                auto& op = *(netdef.add_op());
620
                op.set_type("CopyFromMPSCNN");
621
                op.add_input("Y_mtl");
622
                op.add_output("Y_cpu");
623
              }
624
              {
625
                auto& op = *(netdef.add_op());
626
                op.set_type("FC");
627
                op.add_input("X_cpu");
628
                op.add_input("W");
629
                op.add_input("b");
630
                auto& arg = *(op.add_arg());
631
                arg.set_name("order");
632
                arg.set_s("NCHW");
633
                op.add_output("Y_ref");
634
              }
635

636
              ws.RunNetOnce(netdef);
637
              const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
638
              const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
639
              CAFFE_ENFORCE_EQ(t2.ndim(), 4);
640
              CAFFE_ENFORCE_EQ(t1.ndim(), 2);
641
              CAFFE_ENFORCE(t2.dim32(2) == 1 && t2.dim32(3) == 1);
642
              const_cast<TensorCPU&>(t2).Reshape(
643
                  std::vector<int64_t>{int64_t(batchSize), int64_t(COut)});
644
              // Note dims do not match, as Metal leaves a 1x1 spatial
645
              // dimension.
646
              CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
647

648
              for (auto i = 0; i < t1.size(); ++i) {
649
                // FP16 <-> FP32 round trip, accumulation, etc.
650
                const float t1_i = t1.data<float>()[i];
651
                const float t2_i = t2.data<float>()[i];
652
                // LOG(INFO) << "i: " << i << ", cpu: " << t1_i << ", mtl: " <<
653
                // t2_i;
654
                TORCH_CHECK_NEAR(t1_i, t2_i, 0.7);
655
              }
656
            }
657
          }
658
        }
659
      }
660
    }
661
  }
662

663
  {
664
    for (const auto& pool : {"MaxPool", "AveragePool"}) {
665
      for (const auto& global_pooling : {true, false}) {
666
        for (const auto& batchSize : std::vector<size_t>{1, 2}) {
667
          for (const auto& stride_h : std::vector<int>{1, 2, 3}) {
668
            for (const auto& stride_w : std::vector<int>{1, 2, 3}) {
669
              for (const auto& kernel_h : std::vector<int>{1, 3, 5}) {
670
                for (const auto& kernel_w : std::vector<int>{1, 3, 5}) {
671
                  for (const auto& pad_l : std::vector<int>{0, kernel_w / 2}) {
672
                    for (const auto& pad_r :
673
                         std::vector<int>{0, kernel_w / 2}) {
674
                      for (const auto& pad_t :
675
                           std::vector<int>{0, kernel_h / 2}) {
676
                        for (const auto& pad_b :
677
                             std::vector<int>{0, kernel_h / 2}) {
678
                          // Waiting response from Apple
679
                          if (kernel_h != kernel_w) {
680
                            continue;
681
                          }
682
                          LOG(INFO) << "MPSCNNPool Test: " << pool;
683
                          Workspace ws;
684
                          {
685
                            auto* t = BlobGetMutableTensor(
686
                                ws.CreateBlob("X_cpu"), CPU);
687
                            t->Resize(batchSize, 8, 8, 13);
688
                            CPUContext ctx;
689
                            math::RandGaussian<float, CPUContext>(
690
                                t->size(),
691
                                0,
692
                                1,
693
                                t->mutable_data<float>(),
694
                                &ctx);
695
                          }
696

697
                          NetDef netdef;
698
#define ADD_ARGS(op)                                   \
699
  do {                                                 \
700
    if (global_pooling) {                              \
701
      add_arg_int(op, "stride", 1);                    \
702
    } else {                                           \
703
      add_arg_int_list(                                \
704
          op,                                          \
705
          std::vector<string>{"pad_l",                 \
706
                              "pad_r",                 \
707
                              "pad_t",                 \
708
                              "pad_b",                 \
709
                              "kernel_w",              \
710
                              "kernel_h",              \
711
                              "stride_w",              \
712
                              "stride_h"},             \
713
          std::vector<int>{pad_l,                      \
714
                           pad_r,                      \
715
                           pad_t,                      \
716
                           pad_b,                      \
717
                           kernel_w,                   \
718
                           kernel_h,                   \
719
                           stride_w,                   \
720
                           stride_h});                 \
721
    }                                                  \
722
    add_arg_int(op, "global_pooling", global_pooling); \
723
  } while (false)
724
                          {
725
                            auto& op = *(netdef.add_op());
726
                            op.set_type("CopyToMPSCNN");
727
                            op.add_input("X_cpu");
728
                            op.add_output("X_mtl");
729
                          }
730

731
                          {
732
                            auto& op = *(netdef.add_op());
733
                            op.set_type(std::string("MPSCNN") + pool);
734
                            op.add_input("X_mtl");
735
                            ADD_ARGS(op);
736
                            op.add_output("Y_mtl");
737
                          }
738

739
                          {
740
                            auto& op = *(netdef.add_op());
741
                            op.set_type("CopyFromMPSCNN");
742
                            op.add_input("Y_mtl");
743
                            op.add_output("Y_cpu");
744
                          }
745

746
                          {
747
                            auto& op = *(netdef.add_op());
748
                            op.set_type(pool);
749
                            op.add_input("X_cpu");
750
                            ADD_ARGS(op);
751
                            op.add_output("Y_ref");
752
                          }
753
#undef ADD_ARGS
754

755
                          ws.RunNetOnce(netdef);
756
                          const auto& t2 =
757
                              ws.GetBlob("Y_cpu")->Get<TensorCPU>();
758
                          const auto& t1 =
759
                              ws.GetBlob("Y_ref")->Get<TensorCPU>();
760

761
                          CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
762
                          for (auto i = 0; i < t1.size(); ++i) {
763
                            // FP16 <-> FP32 round trip, accumulation, etc.
764
                            const float t1_i = t1.data<float>()[i];
765
                            const float t2_i = t2.data<float>()[i];
766
                            TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
767
                          }
768
                        }
769
                      }
770
                    }
771
                  }
772
                }
773
              }
774
            }
775
          }
776
        }
777
      }
778
    }
779
  }
780

781
  {
782
    LOG(INFO) << "MPSCNNPadImage Test";
783
    for (const auto dims :
784
         std::vector<std::vector<size_t>>{{1, 3, 50, 80}, {1, 12, 50, 80}}) {
785
      Workspace ws;
786
      {
787
        auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
788
        t->Resize(dims);
789
        CPUContext ctx;
790
        math::RandGaussian<float, CPUContext>(
791
            t->size(), 0, 1, t->mutable_data<float>(), &ctx);
792
      }
793

794
      NetDef netdef;
795
      {
796
        auto& op = *(netdef.add_op());
797
        op.set_type("CopyToMPSCNN");
798
        op.add_input("X_cpu");
799
        op.add_output("X_mtl");
800
      }
801

802
      {
803
        auto& op = *(netdef.add_op());
804
        op.set_type("MPSCNNPadImage");
805
        op.add_input("X_mtl");
806
        {
807
          auto& arg = *(op.add_arg());
808
          arg.set_name("pad");
809
          arg.set_i(10);
810
        }
811
        {
812
          auto& arg = *(op.add_arg());
813
          arg.set_name("mode");
814
          arg.set_s("reflect");
815
        }
816
        op.add_output("Y_mtl");
817
      }
818

819
      {
820
        auto& op = *(netdef.add_op());
821
        op.set_type("CopyFromMPSCNN");
822
        op.add_input("Y_mtl");
823
        op.add_output("Y_cpu");
824
      }
825

826
      {
827
        auto& op = *(netdef.add_op());
828
        op.set_type("PadImage");
829
        op.add_input("X_cpu");
830
        {
831
          auto& arg = *(op.add_arg());
832
          arg.set_name("pad");
833
          arg.set_i(10);
834
        }
835
        {
836
          auto& arg = *(op.add_arg());
837
          arg.set_name("mode");
838
          arg.set_s("reflect");
839
        }
840
        op.add_output("Y_ref");
841
      }
842

843
      ws.RunNetOnce(netdef);
844
      const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
845
      const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
846

847
      CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
848
      for (auto i = 0; i < t1.size(); ++i) {
849
        // FP16 <-> FP32 round trip, accumulation, etc.
850
        const float t1_i = t1.data<float>()[i];
851
        const float t2_i = t2.data<float>()[i];
852
        // LOG(INFO) << "i: " << i << ", " << "CPU: " << t1_i << ", MTL: " <<
853
        // t2_i;
854
        TORCH_CHECK_NEAR(t1_i, t2_i, 0.01);
855
      }
856
    }
857
  }
858

859
  {
860
    LOG(INFO) << "MPSCNNPreprocess Test";
861
    Workspace ws;
862
    {
863
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
864
      t->Resize(1, 8, 13, 4);
865
      CPUContext ctx;
866
      for (auto i = 0; i < t->size(); ++i) {
867
        t->mutable_data<uint8_t>()[i] = rand() % 255;
868
      }
869
    }
870

871
    {
872
      auto* t = BlobGetMutableTensor(ws.CreateBlob("mean"), CPU);
873
      t->Resize(3);
874
      CPUContext ctx;
875
      t->mutable_data<float>()[0] = 100;
876
      t->mutable_data<float>()[1] = 50;
877
      t->mutable_data<float>()[2] = 150;
878
    }
879

880
    NetDef netdef;
881

882
    {
883
      auto& op = *(netdef.add_op());
884
      op.set_type("MPSCNNPackedInt8BGRANHWCToNCHWCStylizerPreprocess");
885
      op.add_input("X_cpu");
886
      op.add_input("mean");
887
      {
888
        auto& arg = *(op.add_arg());
889
        arg.set_name("noise_std");
890
        arg.set_f(0.00001);
891
      }
892
      {
893
        auto& arg = *(op.add_arg());
894
        arg.set_name("noise_size");
895
        arg.set_i(512);
896
      }
897

898
      op.add_output("Y_mtl");
899
    }
900

901
    {
902
      auto& op = *(netdef.add_op());
903
      op.set_type("CopyFromMPSCNN");
904
      op.add_input("Y_mtl");
905
      op.add_output("Y_cpu");
906
    }
907

908
    {
909
      auto& op = *(netdef.add_op());
910
      op.set_type("PackedInt8BGRANHWCToNCHWCStylizerPreprocess");
911
      op.add_input("X_cpu");
912
      op.add_input("mean");
913
      {
914
        auto& arg = *(op.add_arg());
915
        arg.set_name("noise_std");
916
        arg.set_f(0.00001);
917
      }
918
      {
919
        auto& arg = *(op.add_arg());
920
        arg.set_name("noise_size");
921
        arg.set_i(512);
922
      }
923
      op.add_output("Y_ref");
924
    }
925

926
    ws.RunNetOnce(netdef);
927
    const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
928
    const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
929

930
    CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
931
    for (auto i = 0; i < t1.size(); ++i) {
932
      // FP16 <-> FP32 round trip, accumulation, etc.
933
      const float t1_i = t1.data<float>()[i];
934
      const float t2_i = t2.data<float>()[i];
935
      TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
936
    }
937
  }
938

939
  {
940
    LOG(INFO) << "MPSCNNDeprocess Test";
941
    Workspace ws;
942
    {
943
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
944
      t->Resize(1, 3, 8, 24);
945
      CPUContext ctx;
946
      for (auto i = 0; i < t->size(); ++i) {
947
        t->mutable_data<float>()[i] = rand() % 255;
948
      }
949
    }
950

951
    {
952
      auto* t = BlobGetMutableTensor(ws.CreateBlob("mean"), CPU);
953
      t->Resize(3);
954
      CPUContext ctx;
955
      t->mutable_data<float>()[0] = 100;
956
      t->mutable_data<float>()[1] = 50;
957
      t->mutable_data<float>()[2] = 150;
958
    }
959

960
    NetDef netdef;
961

962
    {
963
      auto& op = *(netdef.add_op());
964
      op.set_type("CopyToMPSCNN");
965
      op.add_input("X_cpu");
966
      op.add_output("X_mtl");
967
    }
968

969
    {
970
      auto& op = *(netdef.add_op());
971
      op.set_type("MPSCNNBRGNCHWCToPackedInt8BGRAStylizerDeprocess");
972
      op.add_input("X_mtl");
973
      op.add_input("mean");
974
      op.add_output("Y_cpu");
975
    }
976

977
    {
978
      auto& op = *(netdef.add_op());
979
      op.set_type("BRGNCHWCToPackedInt8BGRAStylizerDeprocess");
980
      op.add_input("X_cpu");
981
      op.add_input("mean");
982
      op.add_output("Y_ref");
983
    }
984

985
    ws.RunNetOnce(netdef);
986
    const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
987
    const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
988

989
    CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
990
    for (auto i = 0; i < t1.size(); ++i) {
991
      // FP16 <-> FP32 round trip, accumulation, etc.
992
      const float t1_i = t1.data<uint8_t>()[i];
993
      const float t2_i = t2.data<uint8_t>()[i];
994
      TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
995
    }
996
  }
997

998
  {
999
    LOG(INFO) << "MPSCNNDeprocess Test";
1000
    Workspace ws;
1001
    {
1002
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1003
      t->Resize(1, 3, 1280, 720);
1004
      CPUContext ctx;
1005
      for (auto i = 0; i < t->size(); ++i) {
1006
        t->mutable_data<float>()[i] = rand() % 1000 - 500;
1007
      }
1008
    }
1009

1010
    {
1011
      auto* t = BlobGetMutableTensor(ws.CreateBlob("mean"), CPU);
1012
      t->Resize(3);
1013
      CPUContext ctx;
1014
      t->mutable_data<float>()[0] = 30;
1015
      t->mutable_data<float>()[1] = 40;
1016
      t->mutable_data<float>()[2] = 50;
1017
    }
1018

1019
    NetDef netdef;
1020

1021
    {
1022
      auto& op = *(netdef.add_op());
1023
      op.set_type("CopyToMPSCNN");
1024
      op.add_input("X_cpu");
1025
      op.add_output("X_mtl");
1026
    }
1027

1028
    {
1029
      auto& op = *(netdef.add_op());
1030
      op.set_type("MPSCNNBRGNCHWCToPackedInt8BGRAStylizerDeprocess");
1031
      op.add_input("X_mtl");
1032
      op.add_input("mean");
1033
      op.add_output("Y_cpu");
1034
    }
1035

1036
    {
1037
      auto& op = *(netdef.add_op());
1038
      op.set_type("BRGNCHWCToPackedInt8BGRAStylizerDeprocess");
1039
      op.add_input("X_cpu");
1040
      op.add_input("mean");
1041
      op.add_output("Y_ref");
1042
    }
1043

1044
    ws.RunNetOnce(netdef);
1045
    const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1046
    const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1047

1048
    CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1049
    for (auto i = 0; i < t1.size(); ++i) {
1050
      // FP16 <-> FP32 round trip, accumulation, etc.
1051
      const float t1_i = t1.data<uint8_t>()[i];
1052
      const float t2_i = t2.data<uint8_t>()[i];
1053
      TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
1054
    }
1055
  }
1056

1057
  @autoreleasepool {
1058
    for (const auto& batchSize : std::vector<int>{1, 2}) {
1059
      for (const auto& stride_h : std::vector<int>{1, 2, 3}) {
1060
        for (const auto& stride_w : std::vector<int>{1, 2, 3}) {
1061
          for (const auto& kernel_h : std::vector<int>{1, 3, 8}) {
1062
            for (const auto& kernel_w : std::vector<int>{1, 3, 8}) {
1063
              for (const auto& pad_l : std::vector<int>{0, kernel_w / 2}) {
1064
                for (const auto& pad_r : std::vector<int>{0, kernel_w / 2}) {
1065
                  for (const auto& pad_t : std::vector<int>{0, kernel_h / 2}) {
1066
                    for (const auto& pad_b :
1067
                         std::vector<int>{0, kernel_h / 2}) {
1068
                      // Waiting response from Apple
1069
                      if (kernel_h != kernel_w) {
1070
                        continue;
1071
                      }
1072
                      LOG(INFO) << "MPSCNNConv Test";
1073
                      Workspace ws;
1074
                      {
1075
                        auto* t =
1076
                            BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1077
                        t->Resize(batchSize, 12, 57, 72);
1078
                        CPUContext ctx;
1079
                        math::RandGaussian<float, CPUContext>(
1080
                            t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1081
                      }
1082

1083
                      {
1084
                        auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
1085
                        t->Resize(8, 12, kernel_h, kernel_w);
1086
                        CPUContext ctx;
1087
                        math::RandGaussian<float, CPUContext>(
1088
                            8 * 12 * kernel_h * kernel_w,
1089
                            0,
1090
                            1,
1091
                            t->mutable_data<float>(),
1092
                            &ctx);
1093
                      }
1094

1095
                      {
1096
                        auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
1097
                        t->Resize(8);
1098
                        CPUContext ctx;
1099
                        math::RandGaussian<float, CPUContext>(
1100
                            8, 0, 1, t->mutable_data<float>(), &ctx);
1101
                      }
1102

1103
                      NetDef netdef;
1104
#define ADD_ARGS(op)                     \
1105
  do {                                   \
1106
    add_arg_str(op, "order", "NCHW");    \
1107
    add_arg_int_list(                    \
1108
        op,                              \
1109
        std::vector<string>{"stride_h",  \
1110
                            "stride_w",  \
1111
                            "pad_l",     \
1112
                            "pad_r",     \
1113
                            "pad_t",     \
1114
                            "pad_b",     \
1115
                            "kernel_w",  \
1116
                            "kernel_h"}, \
1117
        std::vector<int>{stride_h,       \
1118
                         stride_w,       \
1119
                         pad_l,          \
1120
                         pad_r,          \
1121
                         pad_t,          \
1122
                         pad_b,          \
1123
                         kernel_w,       \
1124
                         kernel_h});     \
1125
  } while (false)
1126
                      {
1127
                        auto& op = *(netdef.add_op());
1128
                        op.set_type("CopyToMPSCNN");
1129
                        op.add_input("X_cpu");
1130
                        op.add_output("X_mtl");
1131
                      }
1132

1133
                      {
1134
                        auto& op = *(netdef.add_op());
1135
                        op.set_type("MPSCNNConv");
1136
                        op.add_input("X_mtl");
1137
                        op.add_input("W");
1138
                        op.add_input("b");
1139
                        ADD_ARGS(op);
1140
                        op.add_output("Y_mtl");
1141
                      }
1142

1143
                      {
1144
                        auto& op = *(netdef.add_op());
1145
                        op.set_type("CopyFromMPSCNN");
1146
                        op.add_input("Y_mtl");
1147
                        op.add_output("Y_cpu");
1148
                      }
1149

1150
                      {
1151
                        auto& op = *(netdef.add_op());
1152
                        op.set_type("Conv");
1153
                        op.add_input("X_cpu");
1154
                        op.add_input("W");
1155
                        op.add_input("b");
1156
                        ADD_ARGS(op);
1157
                        op.add_output("Y_ref");
1158
                      }
1159
#undef ADD_ARGS
1160
                      ws.RunNetOnce(netdef);
1161
                      const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1162
                      const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1163

1164
                      CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1165
                      for (auto i = 0; i < t1.size(); ++i) {
1166
                        // FP16 <-> FP32 round trip, accumulation, etc.
1167
                        const float t1_i = t1.data<float>()[i];
1168
                        const float t2_i = t2.data<float>()[i];
1169
                        TORCH_CHECK_NEAR(t1_i, t2_i, 0.2);
1170
                      }
1171
                    }
1172
                  }
1173
                }
1174
              }
1175
            }
1176
          }
1177
        }
1178
      }
1179
    }
1180
  }
1181

1182
  @autoreleasepool {
1183
    bool runtimeAtLeastIOS11 = SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(@"11.0");
1184
    if (runtimeAtLeastIOS11) {
1185
      for (const auto& batchSize : std::vector<int>{1, 2}) {
1186
        for (const auto& input_channels : std::vector<int>{32, 64, 128, 256}) {
1187
          for (const auto& channel_multiplier : std::vector<int>{1}) {
1188
            LOG(INFO) << "MPSCNNDepthwiseConv Test";
1189
            Workspace ws;
1190
            int output_channels = input_channels * channel_multiplier;
1191
            {
1192
              auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1193
              t->Resize(batchSize, input_channels, 57, 72);
1194
              CPUContext ctx;
1195
              math::RandGaussian<float, CPUContext>(
1196
                  t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1197
            }
1198

1199
            {
1200
              auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
1201
              t->Resize(output_channels, 1, 3, 3);
1202
              CPUContext ctx;
1203
              math::RandGaussian<float, CPUContext>(
1204
                  t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1205
            }
1206

1207
            {
1208
              auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
1209
              t->Resize(output_channels);
1210
              CPUContext ctx;
1211
              math::RandGaussian<float, CPUContext>(
1212
                  t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1213
            }
1214

1215
            NetDef netdef;
1216
#define ADD_ARGS(op)                                      \
1217
  do {                                                    \
1218
    add_arg_str(op, "order", "NCHW");                     \
1219
    add_arg_int_list(                                     \
1220
        op,                                               \
1221
        std::vector<string>{"stride", "kernel", "group"}, \
1222
        std::vector<int>{1, 3, input_channels});          \
1223
  } while (false)
1224
            {
1225
              auto& op = *(netdef.add_op());
1226
              op.set_type("CopyToMPSCNN");
1227
              op.add_input("X_cpu");
1228
              op.add_output("X_mtl");
1229
            }
1230

1231
            {
1232
              auto& op = *(netdef.add_op());
1233
              op.set_type("MPSCNNConv");
1234
              op.add_input("X_mtl");
1235
              op.add_input("W");
1236
              op.add_input("b");
1237
              ADD_ARGS(op);
1238
              op.add_output("Y_mtl");
1239
            }
1240

1241
            {
1242
              auto& op = *(netdef.add_op());
1243
              op.set_type("CopyFromMPSCNN");
1244
              op.add_input("Y_mtl");
1245
              op.add_output("Y_cpu");
1246
            }
1247

1248
            {
1249
              auto& op = *(netdef.add_op());
1250
              op.set_type("Conv");
1251
              op.add_input("X_cpu");
1252
              op.add_input("W");
1253
              op.add_input("b");
1254
              ADD_ARGS(op);
1255
              op.add_output("Y_ref");
1256
            }
1257
#undef ADD_ARGS
1258
            ws.RunNetOnce(netdef);
1259
            const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1260
            const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1261

1262
            CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1263
            for (auto i = 0; i < t1.size(); ++i) {
1264
              // FP16 <-> FP32 round trip, accumulation, etc.
1265
              const float t1_i = t1.data<float>()[i];
1266
              const float t2_i = t2.data<float>()[i];
1267
              TORCH_CHECK_NEAR(t1_i, t2_i, 0.3);
1268
            }
1269
          }
1270
        }
1271
      }
1272
    }
1273
  }
1274

1275
  {
1276
    LOG(INFO) << "MPSCNNConvRelu Test";
1277
    Workspace ws;
1278
    {
1279
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1280
      t->Resize(1, 12, 57, 72);
1281
      CPUContext ctx;
1282
      math::RandGaussian<float, CPUContext>(
1283
          t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1284
    }
1285

1286
    {
1287
      auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
1288
      t->Resize(8, 12, 3, 3);
1289
      CPUContext ctx;
1290
      math::RandGaussian<float, CPUContext>(
1291
          8 * 12 * 3 * 3, 0, 1, t->mutable_data<float>(), &ctx);
1292
    }
1293

1294
    {
1295
      auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
1296
      t->Resize(8);
1297
      CPUContext ctx;
1298
      math::RandGaussian<float, CPUContext>(
1299
          8, 0, 1, t->mutable_data<float>(), &ctx);
1300
    }
1301

1302
    NetDef netdef;
1303
    {
1304
      auto& op = *(netdef.add_op());
1305
      op.set_type("CopyToMPSCNN");
1306
      op.add_input("X_cpu");
1307
      op.add_output("X_mtl");
1308
    }
1309

1310
    {
1311
      auto& op = *(netdef.add_op());
1312
      op.set_type("MPSCNNConvRelu");
1313
      op.add_input("X_mtl");
1314
      op.add_input("W");
1315
      op.add_input("b");
1316
      {
1317
        auto& arg = *(op.add_arg());
1318
        arg.set_name("order");
1319
        arg.set_s("NCHW");
1320
      }
1321
      {
1322
        auto& arg = *(op.add_arg());
1323
        arg.set_name("kernel");
1324
        arg.set_i(3);
1325
      }
1326
      {
1327
        auto& arg = *(op.add_arg());
1328
        arg.set_name("pad");
1329
        arg.set_i(1);
1330
      }
1331
      op.add_output("Y_mtl");
1332
    }
1333

1334
    {
1335
      auto& op = *(netdef.add_op());
1336
      op.set_type("CopyFromMPSCNN");
1337
      op.add_input("Y_mtl");
1338
      op.add_output("Y_cpu");
1339
    }
1340

1341
    {
1342
      auto& op = *(netdef.add_op());
1343
      op.set_type("Conv");
1344
      op.add_input("X_cpu");
1345
      op.add_input("W");
1346
      op.add_input("b");
1347
      {
1348
        auto& arg = *(op.add_arg());
1349
        arg.set_name("order");
1350
        arg.set_s("NCHW");
1351
      }
1352
      {
1353
        auto& arg = *(op.add_arg());
1354
        arg.set_name("kernel");
1355
        arg.set_i(3);
1356
      }
1357
      {
1358
        auto& arg = *(op.add_arg());
1359
        arg.set_name("pad");
1360
        arg.set_i(1);
1361
      }
1362
      op.add_output("Y_ref");
1363
    }
1364

1365
    {
1366
      auto& op = *(netdef.add_op());
1367
      op.set_type("Relu");
1368
      op.add_input("Y_ref");
1369
      op.add_output("Y_ref");
1370
    }
1371

1372
    ws.RunNetOnce(netdef);
1373
    const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1374
    const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1375

1376
    CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1377
    for (auto i = 0; i < t1.size(); ++i) {
1378
      // FP16 <-> FP32 round trip, accumulation, etc.
1379
      const float t1_i = t1.data<float>()[i];
1380
      const float t2_i = t2.data<float>()[i];
1381
      TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
1382
    }
1383
  }
1384

1385
  {
1386
    LOG(INFO) << "MPSConv Test";
1387
    Workspace ws;
1388
    {
1389
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1390
      t->Resize(1, 12, 57, 72);
1391
      CPUContext ctx;
1392
      math::RandGaussian<float, CPUContext>(
1393
          t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1394
    }
1395

1396
    {
1397
      auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
1398
      t->Resize(8, 12, 3, 3);
1399
      CPUContext ctx;
1400
      math::RandGaussian<float, CPUContext>(
1401
          8 * 12 * 3 * 3, 0, 1, t->mutable_data<float>(), &ctx);
1402
    }
1403

1404
    {
1405
      auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
1406
      t->Resize(8);
1407
      CPUContext ctx;
1408
      math::RandGaussian<float, CPUContext>(
1409
          8, 0, 1, t->mutable_data<float>(), &ctx);
1410
    }
1411

1412
    NetDef netdef;
1413
    {
1414
      auto& op = *(netdef.add_op());
1415
      op.set_type("CopyToMPSCNN");
1416
      op.add_input("X_cpu");
1417
      op.add_output("X_mtl");
1418
    }
1419

1420
    {
1421
      auto& op = *(netdef.add_op());
1422
      op.set_type("MPSCNNConv");
1423
      op.add_input("X_mtl");
1424
      op.add_input("W");
1425
      op.add_input("b");
1426
      {
1427
        auto& arg = *(op.add_arg());
1428
        arg.set_name("order");
1429
        arg.set_s("NCHW");
1430
      }
1431
      {
1432
        auto& arg = *(op.add_arg());
1433
        arg.set_name("kernel");
1434
        arg.set_i(3);
1435
      }
1436
      {
1437
        auto& arg = *(op.add_arg());
1438
        arg.set_name("pad");
1439
        arg.set_i(0);
1440
      }
1441
      op.add_output("Y_mtl");
1442
    }
1443

1444
    {
1445
      auto& op = *(netdef.add_op());
1446
      op.set_type("CopyFromMPSCNN");
1447
      op.add_input("Y_mtl");
1448
      op.add_output("Y_cpu");
1449
    }
1450

1451
    {
1452
      auto& op = *(netdef.add_op());
1453
      op.set_type("Conv");
1454
      op.add_input("X_cpu");
1455
      op.add_input("W");
1456
      op.add_input("b");
1457
      {
1458
        auto& arg = *(op.add_arg());
1459
        arg.set_name("order");
1460
        arg.set_s("NCHW");
1461
      }
1462
      {
1463
        auto& arg = *(op.add_arg());
1464
        arg.set_name("kernel");
1465
        arg.set_i(3);
1466
      }
1467
      {
1468
        auto& arg = *(op.add_arg());
1469
        arg.set_name("pad");
1470
        arg.set_i(0);
1471
      }
1472
      op.add_output("Y_ref");
1473
    }
1474

1475
    ws.RunNetOnce(netdef);
1476
    const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1477
    const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1478

1479
    CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1480
    for (auto i = 0; i < t1.size(); ++i) {
1481
      // FP16 <-> FP32 round trip, accumulation, etc.
1482
      const float t1_i = t1.data<float>()[i];
1483
      const float t2_i = t2.data<float>()[i];
1484
      TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
1485
    }
1486
  }
1487

1488
  {
1489
    for (const auto& batchSize : {1, 2}) {
1490
      for (const auto& C : {1, 2}) {
1491
        for (const auto& M : {1, 2}) {
1492
          for (const auto& K : {3, 4}) {
1493
            for (const auto& P : {1, 2}) {
1494
              LOG(INFO) << "MPSConv Test";
1495
              Workspace ws;
1496
              {
1497
                auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1498
                t->Resize(batchSize, C, 12, 16);
1499
                CPUContext ctx;
1500
                math::RandGaussian<float, CPUContext>(
1501
                    t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1502
              }
1503

1504
              {
1505
                auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
1506
                t->Resize(M, C, K, K);
1507
                CPUContext ctx;
1508
                math::RandGaussian<float, CPUContext>(
1509
                    t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1510
              }
1511

1512
              {
1513
                auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
1514
                t->Resize(M);
1515
                CPUContext ctx;
1516
                math::RandGaussian<float, CPUContext>(
1517
                    t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1518
              }
1519

1520
              NetDef netdef;
1521
              {
1522
                auto& op = *(netdef.add_op());
1523
                op.set_type("CopyToMPSCNN");
1524
                op.add_input("X_cpu");
1525
                op.add_output("X_mtl");
1526
              }
1527

1528
              {
1529
                auto& op = *(netdef.add_op());
1530
                op.set_type("MPSCNNConv");
1531
                op.add_input("X_mtl");
1532
                op.add_input("W");
1533
                op.add_input("b");
1534
                {
1535
                  auto& arg = *(op.add_arg());
1536
                  arg.set_name("order");
1537
                  arg.set_s("NCHW");
1538
                }
1539
                {
1540
                  auto& arg = *(op.add_arg());
1541
                  arg.set_name("kernel");
1542
                  arg.set_i(K);
1543
                }
1544
                {
1545
                  auto& arg = *(op.add_arg());
1546
                  arg.set_name("pad");
1547
                  arg.set_i(P);
1548
                }
1549
                op.add_output("Y_mtl");
1550
              }
1551

1552
              {
1553
                auto& op = *(netdef.add_op());
1554
                op.set_type("CopyFromMPSCNN");
1555
                op.add_input("Y_mtl");
1556
                op.add_output("Y_cpu");
1557
              }
1558

1559
              {
1560
                auto& op = *(netdef.add_op());
1561
                op.set_type("Conv");
1562
                op.add_input("X_cpu");
1563
                op.add_input("W");
1564
                op.add_input("b");
1565
                {
1566
                  auto& arg = *(op.add_arg());
1567
                  arg.set_name("order");
1568
                  arg.set_s("NCHW");
1569
                }
1570
                {
1571
                  auto& arg = *(op.add_arg());
1572
                  arg.set_name("kernel");
1573
                  arg.set_i(K);
1574
                }
1575
                {
1576
                  auto& arg = *(op.add_arg());
1577
                  arg.set_name("pad");
1578
                  arg.set_i(P);
1579
                }
1580
                op.add_output("Y_ref");
1581
              }
1582

1583
              ws.RunNetOnce(netdef);
1584
              const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1585
              const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1586

1587
              CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1588
              for (auto i = 0; i < t1.size(); ++i) {
1589
                // FP16 <-> FP32 round trip, accumulation, etc.
1590
                const float t1_i = t1.data<float>()[i];
1591
                const float t2_i = t2.data<float>()[i];
1592
                TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
1593
              }
1594
            }
1595
          }
1596
        }
1597
      }
1598
    }
1599
  }
1600

1601
  {
1602
    for (const auto& batchSize : {1, 2}) {
1603
      for (const auto& group : {1, 2}) {
1604
        for (const auto& C : {8, 16}) {
1605
          for (const auto& M : {8, 16}) {
1606
            for (const auto& K : {3, 4}) {
1607
              for (const auto& P : {1, 2}) {
1608
                LOG(INFO) << "MPSCNNConv Test - group";
1609
                Workspace ws;
1610
                {
1611
                  auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
1612
                  t->Resize(batchSize, C, 12, 16);
1613
                  CPUContext ctx;
1614
                  math::RandGaussian<float, CPUContext>(
1615
                      t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1616
                }
1617

1618
                {
1619
                  auto* t = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
1620
                  t->Resize(M, C / group, K, K);
1621
                  CPUContext ctx;
1622
                  math::RandGaussian<float, CPUContext>(
1623
                      t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1624
                }
1625

1626
                {
1627
                  auto* t = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
1628
                  t->Resize(M);
1629
                  CPUContext ctx;
1630
                  math::RandGaussian<float, CPUContext>(
1631
                      t->size(), 0, 1, t->mutable_data<float>(), &ctx);
1632
                }
1633

1634
                NetDef netdef;
1635
                {
1636
                  auto& op = *(netdef.add_op());
1637
                  op.set_type("CopyToMPSCNN");
1638
                  op.add_input("X_cpu");
1639
                  op.add_output("X_mtl");
1640
                }
1641

1642
                {
1643
                  auto& op = *(netdef.add_op());
1644
                  op.set_type("MPSCNNConv");
1645
                  op.add_input("X_mtl");
1646
                  op.add_input("W");
1647
                  op.add_input("b");
1648
                  {
1649
                    auto& arg = *(op.add_arg());
1650
                    arg.set_name("order");
1651
                    arg.set_s("NCHW");
1652
                  }
1653
                  {
1654
                    auto& arg = *(op.add_arg());
1655
                    arg.set_name("kernel");
1656
                    arg.set_i(K);
1657
                  }
1658
                  {
1659
                    auto& arg = *(op.add_arg());
1660
                    arg.set_name("pad");
1661
                    arg.set_i(P);
1662
                  }
1663
                  {
1664
                    auto& arg = *(op.add_arg());
1665
                    arg.set_name("group");
1666
                    arg.set_i(group);
1667
                  }
1668
                  op.add_output("Y_mtl");
1669
                }
1670

1671
                {
1672
                  auto& op = *(netdef.add_op());
1673
                  op.set_type("CopyFromMPSCNN");
1674
                  op.add_input("Y_mtl");
1675
                  op.add_output("Y_cpu");
1676
                }
1677

1678
                {
1679
                  auto& op = *(netdef.add_op());
1680
                  op.set_type("Conv");
1681
                  op.add_input("X_cpu");
1682
                  op.add_input("W");
1683
                  op.add_input("b");
1684
                  {
1685
                    auto& arg = *(op.add_arg());
1686
                    arg.set_name("order");
1687
                    arg.set_s("NCHW");
1688
                  }
1689
                  {
1690
                    auto& arg = *(op.add_arg());
1691
                    arg.set_name("kernel");
1692
                    arg.set_i(K);
1693
                  }
1694
                  {
1695
                    auto& arg = *(op.add_arg());
1696
                    arg.set_name("pad");
1697
                    arg.set_i(P);
1698
                  }
1699
                  {
1700
                    auto& arg = *(op.add_arg());
1701
                    arg.set_name("group");
1702
                    arg.set_i(group);
1703
                  }
1704
                  op.add_output("Y_ref");
1705
                }
1706

1707
                ws.RunNetOnce(netdef);
1708
                const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
1709
                const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
1710

1711
                CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
1712
                for (auto i = 0; i < t1.size(); ++i) {
1713
                  // FP16 <-> FP32 round trip, accumulation, etc.
1714
                  const float t1_i = t1.data<float>()[i];
1715
                  const float t2_i = t2.data<float>()[i];
1716
                  TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
1717
                }
1718
              }
1719
            }
1720
          }
1721
        }
1722
      }
1723
    }
1724
  }
1725

1726
  {
    // Verify MPSCNNMul against the CPU Mul reference with a broadcast
    // second operand (X1 is broadcast along the last dimension).
    LOG(INFO) << "MPSCNNMul Test";
    Workspace ws;
    {
      auto* input = BlobGetMutableTensor(ws.CreateBlob("X0_cpu"), CPU);
      input->Resize(1, 12, 57, 72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          input->size(), 0, 1, input->mutable_data<float>(), &ctx);
    }
    {
      auto* input = BlobGetMutableTensor(ws.CreateBlob("X1_cpu"), CPU);
      input->Resize(72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          input->size(), 0, 1, input->mutable_data<float>(), &ctx);
    }

    NetDef netdef;
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyToMPSCNN");
      add_inputs(op, {"X0_cpu"});
      add_outputs(op, {"X0_mtl"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNMul");
      add_inputs(op, {"X0_mtl", "X1_cpu"});
      add_outputs(op, {"Y_mtl"});
      add_arg_int(op, "broadcast", 1);
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyFromMPSCNN");
      add_inputs(op, {"Y_mtl"});
      add_outputs(op, {"Y_cpu"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("Mul");
      add_inputs(op, {"X0_cpu", "X1_cpu"});
      add_outputs(op, {"Y_ref"});
      add_arg_int(op, "broadcast", 1);
    }

    ws.RunNetOnce(netdef);
    const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
    const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

    CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
    for (auto i = 0; i < expected.size(); ++i) {
      // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
      TORCH_CHECK_NEAR(
          expected.data<float>()[i], observed.data<float>()[i], 0.02);
    }
  }
1790

1791
  {
    // Verify MPSCNNSub against the CPU Sub reference with a broadcast
    // second operand (X1 is broadcast along the last dimension).
    LOG(INFO) << "MPSCNNSub Test";
    Workspace ws;
    {
      auto* input = BlobGetMutableTensor(ws.CreateBlob("X0_cpu"), CPU);
      input->Resize(1, 12, 57, 72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          input->size(), 0, 1, input->mutable_data<float>(), &ctx);
    }
    {
      auto* input = BlobGetMutableTensor(ws.CreateBlob("X1_cpu"), CPU);
      input->Resize(72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          input->size(), 0, 1, input->mutable_data<float>(), &ctx);
    }

    NetDef netdef;
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyToMPSCNN");
      add_inputs(op, {"X0_cpu"});
      add_outputs(op, {"X0_mtl"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNSub");
      add_inputs(op, {"X0_mtl", "X1_cpu"});
      add_outputs(op, {"Y_mtl"});
      add_arg_int(op, "broadcast", 1);
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyFromMPSCNN");
      add_inputs(op, {"Y_mtl"});
      add_outputs(op, {"Y_cpu"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("Sub");
      add_inputs(op, {"X0_cpu", "X1_cpu"});
      add_outputs(op, {"Y_ref"});
      add_arg_int(op, "broadcast", 1);
    }

    ws.RunNetOnce(netdef);
    const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
    const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

    CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
    for (auto i = 0; i < expected.size(); ++i) {
      // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
      TORCH_CHECK_NEAR(
          expected.data<float>()[i], observed.data<float>()[i], 0.01);
    }
  }
1855

1856
  {
    // Verify elementwise MPSCNNAdd against the CPU Add reference.
    LOG(INFO) << "MPSAdd Test";
    Workspace ws;
    // Both inputs share the same shape; fill each with Gaussian noise.
    for (const auto& blobName : {"X0_cpu", "X1_cpu"}) {
      auto* input = BlobGetMutableTensor(ws.CreateBlob(blobName), CPU);
      input->Resize(1, 12, 57, 72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          input->size(), 0, 1, input->mutable_data<float>(), &ctx);
    }

    NetDef netdef;
    {
      // One copy op moves both inputs onto the Metal device.
      auto& op = *(netdef.add_op());
      op.set_type("CopyToMPSCNN");
      add_inputs(op, {"X0_cpu", "X1_cpu"});
      add_outputs(op, {"X0_mtl", "X1_mtl"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNAdd");
      add_inputs(op, {"X0_mtl", "X1_mtl"});
      add_outputs(op, {"Y_mtl"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyFromMPSCNN");
      add_inputs(op, {"Y_mtl"});
      add_outputs(op, {"Y_cpu"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("Add");
      add_inputs(op, {"X0_cpu", "X1_cpu"});
      add_outputs(op, {"Y_ref"});
    }

    ws.RunNetOnce(netdef);
    const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
    const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

    CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
    for (auto i = 0; i < expected.size(); ++i) {
      // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
      TORCH_CHECK_NEAR(
          expected.data<float>()[i], observed.data<float>()[i], 0.01);
    }
  }
1920

1921
  {
    // Regression test for read-count tracking: X0_mtl is consumed by TWO
    // MPSCNNAdd ops, so the CopyToMPSCNN op must declare per-output read
    // counts via the __mpscnn_read_count__ argument.
    // NOTE: the banner previously duplicated the plain "MPSAdd Test" above,
    // which made log triage ambiguous; give it a distinct name following the
    // "... Test 2" convention used elsewhere in this file.
    LOG(INFO) << "MPSAdd Test 2";
    Workspace ws;
    // Both inputs share the same shape; fill each with Gaussian noise.
    {
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X0_cpu"), CPU);
      t->Resize(1, 12, 57, 72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          t->size(), 0, 1, t->mutable_data<float>(), &ctx);
    }
    {
      auto* t = BlobGetMutableTensor(ws.CreateBlob("X1_cpu"), CPU);
      t->Resize(1, 12, 57, 72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          t->size(), 0, 1, t->mutable_data<float>(), &ctx);
    }

    NetDef netdef;
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyToMPSCNN");
      op.add_input("X0_cpu");
      op.add_output("X0_mtl");
      op.add_input("X1_cpu");
      op.add_output("X1_mtl");

      // First output is read twice (by both MPSCNNAdd ops below); the
      // second output only once. One entry per output, in order.
      {
        auto& arg = *(op.add_arg());
        arg.set_name("__mpscnn_read_count__");
        arg.add_ints(2);
        arg.add_ints(1);
      }
    }

    {
      // X2 = X0 + X1 (first read of X0_mtl).
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNAdd");
      op.add_input("X0_mtl");
      op.add_input("X1_mtl");
      op.add_output("X2_mtl");
    }

    {
      // Y = X0 + X2 (second read of X0_mtl).
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNAdd");
      op.add_input("X0_mtl");
      op.add_input("X2_mtl");
      op.add_output("Y_mtl");
    }

    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyFromMPSCNN");
      op.add_input("Y_mtl");
      op.add_output("Y_cpu");
    }

    // CPU reference: the same two chained adds.
    {
      auto& op = *(netdef.add_op());
      op.set_type("Add");
      op.add_input("X0_cpu");
      op.add_input("X1_cpu");
      op.add_output("X2_cpu");
    }

    {
      auto& op = *(netdef.add_op());
      op.set_type("Add");
      op.add_input("X0_cpu");
      op.add_input("X2_cpu");
      op.add_output("Y_ref");
    }

    ws.RunNetOnce(netdef);
    const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
    const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();

    CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
    for (auto i = 0; i < t1.size(); ++i) {
      // Loose tolerance: FP16 <-> FP32 round trip, two accumulations, etc.
      const float t1_i = t1.data<float>()[i];
      const float t2_i = t2.data<float>()[i];
      TORCH_CHECK_NEAR(t1_i, t2_i, 0.05);
    }
  }
2009

2010
  {
    // Check each pointwise neuron kernel against its CPU counterpart.
    for (const auto& n : {"Relu", "Tanh", "Sigmoid"}) {
      LOG(INFO) << "MPSCNNNeuron Test: " << n;
      Workspace ws;
      {
        auto* input = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
        input->Resize(1, 4, 12, 12);
        CPUContext ctx;
        math::RandGaussian<float, CPUContext>(
            input->size(), 0, 1, input->mutable_data<float>(), &ctx);
      }

      NetDef netdef;
      {
        auto& op = *(netdef.add_op());
        op.set_type("CopyToMPSCNN");
        add_inputs(op, {"X_cpu"});
        add_outputs(op, {"X_mtl"});
      }
      {
        // Metal op types are the CPU names prefixed with "MPSCNN".
        auto& op = *(netdef.add_op());
        op.set_type(std::string("MPSCNN") + n);
        add_inputs(op, {"X_mtl"});
        add_outputs(op, {"Y_mtl"});
      }
      {
        auto& op = *(netdef.add_op());
        op.set_type("CopyFromMPSCNN");
        add_inputs(op, {"Y_mtl"});
        add_outputs(op, {"Y_cpu"});
      }
      {
        auto& op = *(netdef.add_op());
        op.set_type(n);
        add_inputs(op, {"X_cpu"});
        add_outputs(op, {"Y_ref"});
      }

      ws.RunNetOnce(netdef);
      const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
      const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

      CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
      for (auto i = 0; i < expected.size(); ++i) {
        // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
        TORCH_CHECK_NEAR(
            expected.data<float>()[i], observed.data<float>()[i], 0.02);
      }
    }
  }
2064

2065
  {
    // Compare MPSCNNDropout with the CPU Dropout op, both run with
    // is_test = 1. Only the primary output is compared; the mask outputs
    // are produced but not checked.
    LOG(INFO) << "MPSCNNDropout Test";
    Workspace ws;
    {
      auto* input = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
      input->Resize(1, 12, 57, 72);
      CPUContext ctx;
      math::RandGaussian<float, CPUContext>(
          input->size(), 0, 1, input->mutable_data<float>(), &ctx);
    }

    NetDef netdef;
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyToMPSCNN");
      add_inputs(op, {"X_cpu"});
      add_outputs(op, {"X_mtl"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNDropout");
      add_inputs(op, {"X_mtl"});
      add_arg_int(op, OpSchema::Arg_IsTest, 1);
      add_outputs(op, {"Y_mtl", "Y_mask_mtl"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("CopyFromMPSCNN");
      add_inputs(op, {"Y_mtl"});
      add_outputs(op, {"Y_cpu"});
    }
    {
      auto& op = *(netdef.add_op());
      op.set_type("Dropout");
      add_inputs(op, {"X_cpu"});
      add_arg_int(op, OpSchema::Arg_IsTest, 1);
      add_outputs(op, {"Y_ref", "Y_mask"});
    }

    ws.RunNetOnce(netdef);
    const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
    const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
    CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
    LOG(INFO) << expected.sizes();
    for (auto i = 0; i < expected.size(); ++i) {
      // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
      TORCH_CHECK_NEAR(
          expected.data<float>()[i], observed.data<float>()[i], 0.1);
    }
  }
2129

2130
  {
    // Sweep spatial scale, channel count, pooled size, and sampling ratio,
    // comparing MPSCNNRoIWarp against the CPU RoIWarp reference.
    for (const auto scale : std::vector<float>{1.0, 2.0, 0.0625}) {
      for (const auto channels : std::vector<size_t>{1, 3, 5, 8}) {
        for (const auto pool : std::vector<size_t>{1, 3, 7}) {
          for (const auto sampling_ratio : std::vector<size_t>{0, 1, 2, 3}) {
            LOG(INFO) << "MPSCNNRoIWarp Test - sampling_ratio:"
                      << sampling_ratio << "- pool: " << pool
                      << " - scale: " << scale;
            Workspace ws;
            {
              auto* input = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
              input->Resize(1, channels, 40, 40);
              CPUContext ctx;
              math::RandGaussian<float, CPUContext>(
                  input->size(), 4, 2, input->mutable_data<float>(), &ctx);
            }
            {
              // Use the batch-first encoding (n, [bbox]); coordinates are
              // pre-divided by scale so the scaled boxes land on the image.
              auto* rois = BlobGetMutableTensor(ws.CreateBlob("R"), CPU);
              rois->Resize(6, 5);
              auto* roisData = rois->mutable_data<float>();
              for (auto i = 0; i < rois->dim32(0); ++i) {
                roisData[5 * i + 0] = 0; // batch
                roisData[5 * i + 1] = (i % 4 + 1) * 1.0 / scale;
                roisData[5 * i + 2] = (i % 5 + 1) * 1.0 / scale;
                roisData[5 * i + 3] = (i % 3 + 7) * 1.0 / scale;
                roisData[5 * i + 4] = (i % 4 + 7) * 1.0 / scale;
              }
            }

            NetDef netdef;
            {
              auto& op = *(netdef.add_op());
              op.set_type("CopyToMPSCNN");
              add_inputs(op, {"X_cpu"});
              add_outputs(op, {"X_mtl"});
            }
            {
              auto& op = *(netdef.add_op());
              op.set_type("MPSCNNRoIWarp");
              add_inputs(op, {"X_mtl", "R"});
              add_arg_int(op, "sampling_ratio", sampling_ratio);
              add_arg_int(op, "pooled_h", pool);
              add_arg_int(op, "pooled_w", pool);
              add_arg_float(op, "spatial_scale", scale);
              add_outputs(op, {"Y_mtl"});
            }
            {
              auto& op = *(netdef.add_op());
              op.set_type("CopyFromMPSCNN");
              add_inputs(op, {"Y_mtl"});
              add_outputs(op, {"Y_cpu"});
            }
            {
              // CPU reference with identical arguments.
              auto& op = *(netdef.add_op());
              op.set_type("RoIWarp");
              add_inputs(op, {"X_cpu", "R"});
              add_arg_int(op, "sampling_ratio", sampling_ratio);
              add_arg_int(op, "pooled_h", pool);
              add_arg_int(op, "pooled_w", pool);
              add_arg_float(op, "spatial_scale", scale);
              add_outputs(op, {"Y_ref"});
            }

            ws.RunNetOnce(netdef);
            const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
            const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

            CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
            LOG(INFO) << expected.sizes();
            for (auto i = 0; i < expected.size(); ++i) {
              // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
              TORCH_CHECK_NEAR(
                  expected.data<float>()[i], observed.data<float>()[i], 0.1);
            }
          }
        }
      }
    }
  }
2247

2248
  {
    // Same as the RoIWarp sweep above but with 4-column RoIs (no batch-index
    // column) and a fixed sampling_ratio of 1.
    for (const auto scale : std::vector<float>{1.0, 2.0, 0.0625}) {
      for (const auto pool : std::vector<size_t>{1, 3, 7}) {
        LOG(INFO) << "MPSCNNRoIWarp Test 2";
        Workspace ws;
        {
          auto* input = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
          input->Resize(1, 8, 40, 40);
          CPUContext ctx;
          math::RandGaussian<float, CPUContext>(
              input->size(), 4, 2, input->mutable_data<float>(), &ctx);
        }
        {
          // 4-column boxes: (x1, y1, x2, y2), pre-divided by scale.
          auto* rois = BlobGetMutableTensor(ws.CreateBlob("R"), CPU);
          rois->Resize(6, 4);
          auto* roisData = rois->mutable_data<float>();
          for (auto i = 0; i < rois->dim32(0); ++i) {
            roisData[4 * i + 0] = (i % 4 + 1) * 1.0 / scale;
            roisData[4 * i + 1] = (i % 5 + 1) * 1.0 / scale;
            roisData[4 * i + 2] = (i % 3 + 7) * 1.0 / scale;
            roisData[4 * i + 3] = (i % 4 + 7) * 1.0 / scale;
          }
        }

        NetDef netdef;
        {
          auto& op = *(netdef.add_op());
          op.set_type("CopyToMPSCNN");
          add_inputs(op, {"X_cpu"});
          add_outputs(op, {"X_mtl"});
        }
        {
          auto& op = *(netdef.add_op());
          op.set_type("MPSCNNRoIWarp");
          add_inputs(op, {"X_mtl", "R"});
          add_arg_int(op, "sampling_ratio", 1);
          add_arg_int(op, "pooled_h", pool);
          add_arg_int(op, "pooled_w", pool);
          add_arg_float(op, "spatial_scale", scale);
          add_outputs(op, {"Y_mtl"});
        }
        {
          auto& op = *(netdef.add_op());
          op.set_type("CopyFromMPSCNN");
          add_inputs(op, {"Y_mtl"});
          add_outputs(op, {"Y_cpu"});
        }
        {
          // CPU reference with identical arguments.
          auto& op = *(netdef.add_op());
          op.set_type("RoIWarp");
          add_inputs(op, {"X_cpu", "R"});
          add_arg_int(op, "sampling_ratio", 1);
          add_arg_int(op, "pooled_h", pool);
          add_arg_int(op, "pooled_w", pool);
          add_arg_float(op, "spatial_scale", scale);
          add_outputs(op, {"Y_ref"});
        }

        ws.RunNetOnce(netdef);
        const auto& expected = ws.GetBlob("Y_ref")->Get<TensorCPU>();
        const auto& observed = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

        CAFFE_ENFORCE_EQ(expected.sizes(), observed.sizes());
        LOG(INFO) << expected.sizes();
        for (auto i = 0; i < expected.size(); ++i) {
          // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
          TORCH_CHECK_NEAR(
              expected.data<float>()[i], observed.data<float>()[i], 0.1);
        }
      }
    }
  }
2357

2358
  {
    // Compare MPSCNNResizeNearest with the CPU ResizeNearest reference over
    // a grid of scale factors, channel counts C, and batch sizes N.
    for (const auto height_scale : std::vector<float>{1.0, 0.5, 1.7}) {
      for (const auto width_scale : std::vector<float>{1.0, 0.5, 2.3}) {
        // C and N are integral tensor dimensions; they were previously held
        // in std::vector<float> and silently converted at the Resize() call.
        // Use int so the values carry their actual type.
        for (const auto C : std::vector<int>{2, 7, 11}) {
          for (const auto N : std::vector<int>{1, 2}) {
            LOG(INFO) << "MPSCNNResizeNearestOp Test";
            Workspace ws;
            {
              auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
              t->Resize(N, C, 37, 89);
              CPUContext ctx;
              math::RandGaussian<float, CPUContext>(
                  t->size(), 4, 2, t->mutable_data<float>(), &ctx);
            }
            NetDef netdef;
            {
              auto& op = *(netdef.add_op());
              op.set_type("CopyToMPSCNN");
              op.add_input("X_cpu");
              op.add_output("X_mtl");
            }

            {
              auto& op = *(netdef.add_op());
              op.set_type("MPSCNNResizeNearest");
              op.add_input("X_mtl");
              add_arg_float(op, "height_scale", height_scale);
              add_arg_float(op, "width_scale", width_scale);
              op.add_output("Y_mtl");
            }

            {
              auto& op = *(netdef.add_op());
              op.set_type("CopyFromMPSCNN");
              op.add_input("Y_mtl");
              op.add_output("Y_cpu");
            }

            {
              // CPU reference with identical scale arguments.
              auto& op = *(netdef.add_op());
              op.set_type("ResizeNearest");
              op.add_input("X_cpu");
              add_arg_float(op, "height_scale", height_scale);
              add_arg_float(op, "width_scale", width_scale);
              op.add_output("Y_ref");
            }

            ws.RunNetOnce(netdef);
            const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
            const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();

            CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
            LOG(INFO) << t1.sizes();
            for (auto i = 0; i < t1.size(); ++i) {
              // Loose tolerance: FP16 <-> FP32 round trip, accumulation, etc.
              const float t1_i = t1.data<float>()[i];
              const float t2_i = t2.data<float>()[i];
              TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
            }
          }
        }
      }
    }
  }
2438

2439
  {
    // Compare MPSCNNGenerateProposalsCPP against GenerateProposalsCPP on a
    // fixed 2-anchor, 4x5 score map. RoI coordinates are only compared where
    // the reference probability is comfortably above the smallest normal
    // half-precision value, since near-zero scores are unstable in FP16.
    LOG(INFO) << "MPSCNNGenerateProposals Test: \n";
    Workspace ws;
    auto num_images = 1;
    auto A = 2; // # anchors
    auto H = 4; // height
    auto W = 5; // width
    vector<float> scores{
        5.44218998e-03, 1.19207997e-03, 1.12379994e-03, 1.17181998e-03,
        1.20544003e-03, 6.17993006e-04, 1.05261997e-05, 8.91025957e-06,
        9.29536981e-09, 6.09605013e-05, 4.72735002e-04, 1.13482002e-10,
        1.50015003e-05, 4.45032993e-06, 3.21612994e-08, 8.02662980e-04,
        1.40488002e-04, 3.12508007e-07, 3.02616991e-06, 1.97759000e-08,
        2.66913995e-02, 5.26766013e-03, 5.05053019e-03, 5.62100019e-03,
        5.37420018e-03, 5.26280981e-03, 2.48894998e-04, 1.06842002e-04,
        3.92931997e-06, 1.79388002e-03, 4.79440019e-03, 3.41609990e-07,
        5.20430971e-04, 3.34090000e-05, 2.19159006e-07, 2.28786003e-03,
        5.16703985e-05, 4.04523007e-06, 1.79227004e-06, 5.32449000e-08};
    vector<float> bbx{
        -1.65040009e-02, -1.84051003e-02, -1.85930002e-02, -2.08263006e-02,
        -1.83814000e-02, -2.89172009e-02, -3.89706008e-02, -7.52277970e-02,
        -1.54091999e-01, -2.55433004e-02, -1.77490003e-02, -1.10340998e-01,
        -4.20190990e-02, -2.71421000e-02, 6.89801015e-03,  5.71171008e-02,
        -1.75665006e-01, 2.30021998e-02,  3.08554992e-02,  -1.39333997e-02,
        3.40579003e-01,  3.91070992e-01,  3.91624004e-01,  3.92527014e-01,
        3.91445011e-01,  3.79328012e-01,  4.26631987e-01,  3.64892989e-01,
        2.76894987e-01,  5.13985991e-01,  3.79999995e-01,  1.80457994e-01,
        4.37402993e-01,  4.18545991e-01,  2.51549989e-01,  4.48318988e-01,
        1.68564007e-01,  4.65440989e-01,  4.21891987e-01,  4.45928007e-01,
        3.27155995e-03,  3.71480011e-03,  3.60032008e-03,  4.27092984e-03,
        3.74579988e-03,  5.95752988e-03,  -3.14473989e-03, 3.52022005e-03,
        -1.88564006e-02, 1.65188999e-03,  1.73791999e-03,  -3.56074013e-02,
        -1.66615995e-04, 3.14146001e-03,  -1.11830998e-02, -5.35363983e-03,
        6.49790000e-03,  -9.27671045e-03, -2.83346009e-02, -1.61233004e-02,
        -2.15505004e-01, -2.19910994e-01, -2.20872998e-01, -2.12831005e-01,
        -2.19145000e-01, -2.27687001e-01, -3.43973994e-01, -2.75869995e-01,
        -3.19516987e-01, -2.50418007e-01, -2.48537004e-01, -5.08224010e-01,
        -2.28724003e-01, -2.82402009e-01, -3.75815988e-01, -2.86352992e-01,
        -5.28333001e-02, -4.43836004e-01, -4.55134988e-01, -4.34897989e-01,
        -5.65053988e-03, -9.25739005e-04, -1.06790999e-03, -2.37016007e-03,
        -9.71166010e-04, -8.90910998e-03, -1.17592998e-02, -2.08992008e-02,
        -4.94231991e-02, 6.63906988e-03,  3.20469006e-03,  -6.44695014e-02,
        -3.11607006e-03, 2.02738005e-03,  1.48096997e-02,  4.39785011e-02,
        -8.28424022e-02, 3.62076014e-02,  2.71668993e-02,  1.38250999e-02,
        6.76669031e-02,  1.03252999e-01,  1.03255004e-01,  9.89722982e-02,
        1.03646003e-01,  4.79663983e-02,  1.11014001e-01,  9.31736007e-02,
        1.15768999e-01,  1.04014002e-01,  -8.90677981e-03, 1.13103002e-01,
        1.33085996e-01,  1.25405997e-01,  1.50051996e-01,  -1.13038003e-01,
        7.01059997e-02,  1.79651007e-01,  1.41055003e-01,  1.62841007e-01,
        -1.00247003e-02, -8.17587040e-03, -8.32176022e-03, -8.90108012e-03,
        -8.13035015e-03, -1.77263003e-02, -3.69572006e-02, -3.51580009e-02,
        -5.92143014e-02, -1.80795006e-02, -5.46086021e-03, -4.10550982e-02,
        -1.83081999e-02, -2.15411000e-02, -1.17953997e-02, 3.33894007e-02,
        -5.29635996e-02, -6.97528012e-03, -3.15250992e-03, -3.27355005e-02,
        1.29676998e-01,  1.16080999e-01,  1.15947001e-01,  1.21797003e-01,
        1.16089001e-01,  1.44875005e-01,  1.15617000e-01,  1.31586999e-01,
        1.74735002e-02,  1.21973999e-01,  1.31596997e-01,  2.48907991e-02,
        6.18605018e-02,  1.12855002e-01,  -6.99798986e-02, 9.58312973e-02,
        1.53593004e-01,  -8.75087008e-02, -4.92327996e-02, -3.32239009e-02};
    vector<float> im_info{60, 80, 0.166667};
    vector<float> anchors{-38, -16, 53, 31, -120, -120, 135, 135};

    // Load the fixture data into CPU blobs.
    {
      auto* blob = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
      blob->Resize(num_images, A, H, W);
      for (auto i = 0; i < blob->size(); ++i) {
        blob->mutable_data<float>()[i] = scores[i];
      }
    }
    {
      auto* blob = BlobGetMutableTensor(ws.CreateBlob("bbox_delta_cpu"), CPU);
      blob->Resize(num_images, 4 * A, H, W);
      for (auto i = 0; i < blob->size(); ++i) {
        blob->mutable_data<float>()[i] = bbx[i];
      }
    }
    {
      auto* blob = BlobGetMutableTensor(ws.CreateBlob("im_info"), CPU);
      blob->Resize(num_images, 3);
      for (auto i = 0; i < blob->size(); ++i) {
        blob->mutable_data<float>()[i] = im_info[i];
      }
    }
    {
      auto* blob = BlobGetMutableTensor(ws.CreateBlob("anchors"), CPU);
      blob->Resize(A, 4);
      for (auto i = 0; i < blob->size(); ++i) {
        blob->mutable_data<float>()[i] = anchors[i];
      }
    }

    NetDef netdef;

    {
      auto& op = *(netdef.add_op());
      op.set_type("MPSCNNGenerateProposalsCPP");
      add_inputs(op, {"X_cpu", "bbox_delta_cpu", "im_info", "anchors"});
      add_outputs(op, {"rois", "rois_probs"});
    }

    {
      auto& op = *(netdef.add_op());
      op.set_type("GenerateProposalsCPP");
      add_inputs(op, {"X_cpu", "bbox_delta_cpu", "im_info", "anchors"});
      add_outputs(op, {"rois_ref", "rois_probs_ref"});
    }

    ws.RunNetOnce(netdef);
    const auto& devRois = ws.GetBlob("rois")->Get<TensorCPU>();
    const auto& refRois = ws.GetBlob("rois_ref")->Get<TensorCPU>();

    const auto& devProbs = ws.GetBlob("rois_probs")->Get<TensorCPU>();
    const auto& refProbs = ws.GetBlob("rois_probs_ref")->Get<TensorCPU>();

    LOG(INFO) << "t1: " << refRois.size() << " t2: " << devRois.size();

    // Smallest positive normal FP16 value; used to skip near-zero scores.
    const float HALF_MIN_VAL = 6.103515625e-05;
    for (auto i = 0; i < fmin(refRois.size(), devRois.size()); ++i) {
      const float refVal = refRois.data<float>()[i];
      const float devVal = devRois.data<float>()[i];
      // Each RoI row has 5 entries, so i / 5 indexes its probability.
      const float refProb = refProbs.data<float>()[i / 5];
      if (refProb - HALF_MIN_VAL * 2 > 0) {
        LOG(INFO) << i << " " << refVal << " " << devVal << " " << refProb;
        TORCH_CHECK_NEAR(refVal, devVal, 0.1);
      }
    }

    for (auto i = 0; i < fmin(refProbs.size(), devProbs.size()); ++i) {
      const float refProb = refProbs.data<float>()[i];
      const float devProb = devProbs.data<float>()[i];
      LOG(INFO) << i << " " << refProb;
      TORCH_CHECK_NEAR(refProb, devProb, 0.1);
    }
  }
2585

2586
  {
2587
    for (const auto& batchSize : std::vector<size_t>{1, 2}) {
2588
      LOG(INFO) << "MPSCNNSoftmax Test";
2589
      Workspace ws;
2590
      {
2591
        auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
2592
        // Only works for spatial dimension of (1, 1) - weird.
2593
        t->Resize(batchSize, 12, 1, 1);
2594
        CPUContext ctx;
2595
        math::RandGaussian<float, CPUContext>(
2596
            t->size(), 0, 1, t->mutable_data<float>(), &ctx);
2597
      }
2598

2599
      NetDef netdef;
2600
      {
2601
        auto& op = *(netdef.add_op());
2602
        op.set_type("CopyToMPSCNN");
2603
        op.add_input("X_cpu");
2604
        op.add_output("X_mtl");
2605
      }
2606

2607
      {
2608
        auto& op = *(netdef.add_op());
2609
        op.set_type("MPSCNNSoftmax");
2610
        op.add_input("X_mtl");
2611
        op.add_output("Y_mtl");
2612
      }
2613

2614
      {
2615
        auto& op = *(netdef.add_op());
2616
        op.set_type("CopyFromMPSCNN");
2617
        op.add_input("Y_mtl");
2618
        op.add_output("Y_cpu");
2619
      }
2620

2621
      {
2622
        auto& op = *(netdef.add_op());
2623
        op.set_type("Softmax");
2624
        op.add_input("X_cpu");
2625
        op.add_output("Y_ref");
2626
      }
2627

2628
      ws.RunNetOnce(netdef);
2629
      const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
2630
      const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
2631
      CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
2632
      LOG(INFO) << t1.sizes();
2633
      for (auto i = 0; i < t1.size(); ++i) {
2634
        // FP16 <-> FP32 round trip, accumulation, etc.
2635
        const float t1_i = t1.data<float>()[i];
2636
        const float t2_i = t2.data<float>()[i];
2637
        TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
2638
      }
2639
    }
2640
  }
2641

2642
  @autoreleasepool {
2643
    for (const auto& inputChannels : std::vector<size_t>{3, 8}) {
2644
      for (const auto& outputChannels : std::vector<size_t>{3, 8}) {
2645
        for (const auto& batchSize : std::vector<size_t>{1, 2}) {
2646
          for (const auto& stride_h : std::vector<int>{1, 2, 3}) {
2647
            for (const auto& stride_w : std::vector<int>{1, 2, 3}) {
2648
              for (const auto& kernel_h : std::vector<int>{3}) {
2649
                for (const auto& kernel_w : std::vector<int>{3}) {
2650
                  for (const auto& pad_l : std::vector<int>{0, kernel_w / 2}) {
2651
                    for (const auto& pad_r :
2652
                         std::vector<int>{0, kernel_w / 2}) {
2653
                      for (const auto& pad_t :
2654
                           std::vector<int>{0, kernel_h / 2}) {
2655
                        for (const auto& pad_b :
2656
                             std::vector<int>{0, kernel_h / 2}) {
2657
                          for (const auto& adj : {0, 1, 2, 3}) {
2658
                            if (adj >= fmin(stride_h, stride_w)) {
2659
                              continue;
2660
                            }
2661

2662
                            LOG(INFO) << "MPSConvTranspose Test";
2663
                            Workspace ws;
2664
                            {
2665
                              auto* t = BlobGetMutableTensor(
2666
                                  ws.CreateBlob("X_cpu"), CPU);
2667
                              t->Resize(batchSize, inputChannels, 8, 12);
2668
                              CPUContext ctx;
2669
                              math::RandGaussian<float, CPUContext>(
2670
                                  t->size(),
2671
                                  0,
2672
                                  1,
2673
                                  t->mutable_data<float>(),
2674
                                  &ctx);
2675
                            }
2676

2677
                            {
2678
                              auto* t =
2679
                                  BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
2680
                              t->Resize(
2681
                                  inputChannels,
2682
                                  outputChannels,
2683
                                  kernel_h,
2684
                                  kernel_w);
2685
                              CPUContext ctx;
2686
                              math::RandGaussian<float, CPUContext>(
2687
                                  t->size(),
2688
                                  0,
2689
                                  1,
2690
                                  t->mutable_data<float>(),
2691
                                  &ctx);
2692
                            }
2693

2694
                            {
2695
                              auto* t =
2696
                                  BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
2697
                              t->Resize(outputChannels);
2698
                              CPUContext ctx;
2699
                              math::RandGaussian<float, CPUContext>(
2700
                                  t->size(),
2701
                                  0,
2702
                                  1,
2703
                                  t->mutable_data<float>(),
2704
                                  &ctx);
2705
                            }
2706

2707
                            NetDef netdef;
2708
                            {
2709
                              auto& op = *(netdef.add_op());
2710
                              op.set_type("CopyToMPSCNN");
2711
                              op.add_input("X_cpu");
2712
                              op.add_output("X_mtl");
2713
                            }
2714

2715
                            {
2716
                              auto& op = *(netdef.add_op());
2717
                              op.set_type("MPSCNNConvTranspose");
2718
                              op.add_input("X_mtl");
2719
                              op.add_input("W");
2720
                              op.add_input("b");
2721
#define ADD_ARGS(op)                    \
2722
  do {                                  \
2723
    add_arg_str(op, "order", "NCHW");   \
2724
    add_arg_int_list(                   \
2725
        op,                             \
2726
        std::vector<string>{"kernel_h", \
2727
                            "kernel_w", \
2728
                            "pad_t",    \
2729
                            "pad_b",    \
2730
                            "pad_l",    \
2731
                            "pad_r",    \
2732
                            "stride_w", \
2733
                            "stride_h", \
2734
                            "adj"},     \
2735
        std::vector<int>{kernel_h,      \
2736
                         kernel_w,      \
2737
                         pad_t,         \
2738
                         pad_b,         \
2739
                         pad_l,         \
2740
                         pad_r,         \
2741
                         stride_w,      \
2742
                         stride_h,      \
2743
                         adj});         \
2744
  } while (false)
2745
                              ADD_ARGS(op);
2746
                              op.add_output("Y_mtl");
2747
                            }
2748

2749
                            {
2750
                              auto& op = *(netdef.add_op());
2751
                              op.set_type("CopyFromMPSCNN");
2752
                              op.add_input("Y_mtl");
2753
                              op.add_output("Y_cpu");
2754
                            }
2755

2756
                            {
2757
                              auto& op = *(netdef.add_op());
2758
                              op.set_type("ConvTranspose");
2759
                              op.add_input("X_cpu");
2760
                              op.add_input("W");
2761
                              op.add_input("b");
2762
                              ADD_ARGS(op);
2763
                              op.add_output("Y_ref");
2764
                            }
2765
#undef ADD_ARGS
2766

2767
                            ws.RunNetOnce(netdef);
2768
                            const auto& t2 =
2769
                                ws.GetBlob("Y_cpu")->Get<TensorCPU>();
2770
                            const auto& t1 =
2771
                                ws.GetBlob("Y_ref")->Get<TensorCPU>();
2772
                            CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
2773
                            LOG(INFO) << t1.sizes();
2774
                            for (auto i = 0; i < t1.size(); ++i) {
2775
                              // FP16 <-> FP32 round trip, accumulation, etc.
2776
                              const float t1_i = t1.data<float>()[i];
2777
                              const float t2_i = t2.data<float>()[i];
2778
                              constexpr float tol = 2.0e-2;
2779
                              CHECK(
2780
                                  std::abs(t1_i - t2_i) <=
2781
                                  (tol + tol * std::abs(t1_i)))
2782
                                  << t1_i << ", " << t2_i;
2783
                            }
2784
                          }
2785
                        }
2786
                      }
2787
                    }
2788
                  }
2789
                }
2790
              }
2791
            }
2792
          }
2793
        }
2794
      }
2795
    }
2796
  }
2797

2798
  {
2799
    for (const auto array : std::vector<bool>{true, false}) {
2800
      for (auto numInputs = 2; numInputs <= 4; numInputs++) {
2801
        for (const auto batchSize : std::vector<size_t>{1, 2}) {
2802
          auto mtl = [&](size_t i) {
2803
            return std::string("X_mtl_") + std::to_string(i);
2804
          };
2805
          auto cpu = [&](size_t i) {
2806
            return std::string("X_cpu_") + std::to_string(i);
2807
          };
2808

2809
          LOG(INFO) << "MPSCNNConcat Test" << array << ", " << numInputs << ", "
2810
                    << batchSize;
2811
          Workspace ws;
2812
          for (auto i = 0; i < numInputs; ++i) {
2813
            auto* t = BlobGetMutableTensor(ws.CreateBlob(cpu(i)), CPU);
2814
            t->Resize(batchSize, array ? (i + 1) * 4 : 4, 10, 10);
2815
            CPUContext ctx;
2816
            math::RandGaussian<float, CPUContext>(
2817
                t->size(), 0, 1, t->mutable_data<float>(), &ctx);
2818
          }
2819

2820
          NetDef netdef;
2821
          {
2822
            auto& op = *(netdef.add_op());
2823
            op.set_type("CopyToMPSCNN");
2824
            for (auto i = 0; i < numInputs; ++i) {
2825
              op.add_input(cpu(i));
2826
              op.add_output(mtl(i));
2827
            }
2828
          }
2829

2830
          {
2831
            auto& op = *(netdef.add_op());
2832
            op.set_type("MPSCNNConcat");
2833
            for (auto i = 0; i < numInputs; ++i) {
2834
              op.add_input(mtl(i));
2835
            }
2836
            {
2837
              auto& arg = *(op.add_arg());
2838
              arg.set_name("order");
2839
              arg.set_s("NCHW");
2840
            }
2841
            op.add_output("Y_mtl");
2842
            op.add_output("Y_mtl_mask");
2843
          }
2844

2845
          {
2846
            auto& op = *(netdef.add_op());
2847
            op.set_type("CopyFromMPSCNN");
2848
            op.add_input("Y_mtl");
2849
            op.add_output("Y_cpu");
2850
          }
2851

2852
          {
2853
            auto& op = *(netdef.add_op());
2854
            op.set_type("Concat");
2855
            for (auto i = 0; i < numInputs; ++i) {
2856
              op.add_input(cpu(i));
2857
            }
2858
            {
2859
              auto& arg = *(op.add_arg());
2860
              arg.set_name("order");
2861
              arg.set_s("NCHW");
2862
            }
2863

2864
            op.add_output("Y_ref");
2865
            op.add_output("Y_ref_mask");
2866
          }
2867

2868
          ws.RunNetOnce(netdef);
2869
          const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
2870

2871
          const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
2872
          CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
2873
          LOG(INFO) << t1.sizes();
2874
          for (auto i = 0; i < t1.size(); ++i) {
2875
            // FP16 <-> FP32 round trip, accumulation, etc.
2876
            const float t1_i = t1.data<float>()[i];
2877
            const float t2_i = t2.data<float>()[i];
2878
            TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
2879
          }
2880
        }
2881
      }
2882
    }
2883
  }
2884

2885
  @autoreleasepool {
2886
    for (const auto& batchSize : std::vector<size_t>{1, 2, 3, 4}) {
2887
      for (const auto& inputChannels :
2888
           std::vector<size_t>{1, 2, 3, 4, 16, 24, 32, 48, 96, 128, 256}) {
2889
        for (const auto& groups : std::vector<int>{1, 4, 8, 16}) {
2890
          if (inputChannels % groups != 0) {
2891
            continue;
2892
          }
2893
          Workspace ws;
2894
          {
2895
            auto* t = BlobGetMutableTensor(ws.CreateBlob("X_cpu"), CPU);
2896
            t->Resize(batchSize, inputChannels, 53, 47);
2897
            CPUContext ctx;
2898
            math::RandGaussian<float, CPUContext>(
2899
                t->size(), 0, 1, t->mutable_data<float>(), &ctx);
2900
          }
2901
          NetDef netdef;
2902
#define ADD_ARGS(op)                                          \
2903
  do {                                                        \
2904
    add_arg_str(op, "order", "NCHW");                         \
2905
    add_arg_int_list(                                         \
2906
        op,                                                   \
2907
        std::vector<string>{"kernel_w", "kernel_h", "group"}, \
2908
        std::vector<int>{1, 1, groups});                      \
2909
  } while (false)
2910
          {
2911
            auto& op = *(netdef.add_op());
2912
            op.set_type("CopyToMPSCNN");
2913
            op.add_input("X_cpu");
2914
            op.add_output("X_mtl");
2915
          }
2916
          {
2917
            auto& op = *(netdef.add_op());
2918
            op.set_type("MPSCNNChannelShuffle");
2919
            op.add_input("X_mtl");
2920
            ADD_ARGS(op);
2921
            op.add_output("Y_mtl");
2922
          }
2923
          {
2924
            auto& op = *(netdef.add_op());
2925
            op.set_type("CopyFromMPSCNN");
2926
            op.add_input("Y_mtl");
2927
            op.add_output("Y_cpu");
2928
          }
2929
          {
2930
            auto& op = *(netdef.add_op());
2931
            op.set_type("ChannelShuffle");
2932
            op.add_input("X_cpu");
2933
            ADD_ARGS(op);
2934
            op.add_output("Y_ref");
2935
          }
2936
#undef ADD_ARGS
2937
          ws.RunNetOnce(netdef);
2938
          const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
2939
          const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
2940

2941
          CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
2942
          for (auto i = 0; i < t1.size(); ++i) {
2943
            // FP16 <-> FP32 round trip, accumulation, etc.
2944
            const float t1_i = t1.data<float>()[i];
2945
            const float t2_i = t2.data<float>()[i];
2946
            TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
2947
          }
2948
        }
2949
      }
2950
    }
2951
  }
2952

2953
  {
2954
    for (const auto channelCount : std::vector<size_t>{1, 2, 3, 4}) {
2955
      for (auto numInputs = 2; numInputs <= 4; numInputs++) {
2956
        for (const auto batchSize : std::vector<size_t>{1, 2}) {
2957
          auto mtl = [&](size_t i) {
2958
            return std::string("X_mtl_") + std::to_string(i);
2959
          };
2960
          auto cpu = [&](size_t i) {
2961
            return std::string("X_cpu_") + std::to_string(i);
2962
          };
2963

2964
          LOG(INFO) << "MPSCNNConcat(edge case) Test" << channelCount << ", "
2965
                    << numInputs << ", " << batchSize;
2966
          Workspace ws;
2967
          for (auto i = 0; i < numInputs; ++i) {
2968
            auto* t = BlobGetMutableTensor(ws.CreateBlob(cpu(i)), CPU);
2969
            t->Resize(batchSize, channelCount, 9, 17);
2970
            CPUContext ctx;
2971
            math::RandGaussian<float, CPUContext>(
2972
                t->size(), 0, 1, t->mutable_data<float>(), &ctx);
2973
          }
2974

2975
          NetDef netdef;
2976
          {
2977
            auto& op = *(netdef.add_op());
2978
            op.set_type("CopyToMPSCNN");
2979
            for (auto i = 0; i < numInputs; ++i) {
2980
              op.add_input(cpu(i));
2981
              op.add_output(mtl(i));
2982
            }
2983
          }
2984

2985
          {
2986
            auto& op = *(netdef.add_op());
2987
            op.set_type("MPSCNNConcat");
2988
            for (auto i = 0; i < numInputs; ++i) {
2989
              op.add_input(mtl(i));
2990
            }
2991
            {
2992
              auto& arg = *(op.add_arg());
2993
              arg.set_name("order");
2994
              arg.set_s("NCHW");
2995
            }
2996
            op.add_output("Y_mtl");
2997
            op.add_output("Y_mtl_mask");
2998
          }
2999

3000
          {
3001
            auto& op = *(netdef.add_op());
3002
            op.set_type("CopyFromMPSCNN");
3003
            op.add_input("Y_mtl");
3004
            op.add_output("Y_cpu");
3005
          }
3006

3007
          {
3008
            auto& op = *(netdef.add_op());
3009
            op.set_type("Concat");
3010
            for (auto i = 0; i < numInputs; ++i) {
3011
              op.add_input(cpu(i));
3012
            }
3013
            {
3014
              auto& arg = *(op.add_arg());
3015
              arg.set_name("order");
3016
              arg.set_s("NCHW");
3017
            }
3018

3019
            op.add_output("Y_ref");
3020
            op.add_output("Y_ref_mask");
3021
          }
3022

3023
          ws.RunNetOnce(netdef);
3024
          const auto& t1 = ws.GetBlob("Y_ref")->Get<TensorCPU>();
3025

3026
          const auto& t2 = ws.GetBlob("Y_cpu")->Get<TensorCPU>();
3027
          CAFFE_ENFORCE_EQ(t1.sizes(), t2.sizes());
3028
          LOG(INFO) << t1.sizes();
3029
          for (auto i = 0; i < t1.size(); ++i) {
3030
            // FP16 <-> FP32 round trip, accumulation, etc.
3031
            const float t1_i = t1.data<float>()[i];
3032
            const float t2_i = t2.data<float>()[i];
3033
            TORCH_CHECK_NEAR(t1_i, t2_i, 0.1);
3034
          }
3035
        }
3036
      }
3037
    }
3038
  }
3039

3040
  {
3041
    LOG(INFO) << "MPSCNNReadCount Test";
3042
    NetDef netdef;
3043
    {
3044
      auto& op = *(netdef.add_op());
3045
      op.add_input("X_cpu");
3046
      op.add_output("X_mtl");
3047
    }
3048

3049
    {
3050
      auto& op = *(netdef.add_op());
3051
      op.add_input("X_mtl");
3052
      op.add_output("X_mtl");
3053
    }
3054

3055
    {
3056
      auto& op = *(netdef.add_op());
3057
      op.add_input("X_mtl");
3058
      op.add_output("Y");
3059
    }
3060

3061
    {
3062
      auto& op = *(netdef.add_op());
3063
      op.add_input("X_mtl");
3064
      op.add_output("X_mtl");
3065
    }
3066
    netdef = annotateDefWithReadCounts(netdef);
3067
    auto rc = [&](size_t i) -> size_t {
3068
      auto* arg = GetMutableArgument(
3069
          "__mpscnn_read_count__", false, netdef.mutable_op(i));
3070
      if (!arg) {
3071
        return 1;
3072
      }
3073
      return arg->i();
3074
    };
3075
    TORCH_CHECK_EQ(rc(0), 1);
3076
    TORCH_CHECK_EQ(rc(1), 2);
3077
    TORCH_CHECK_EQ(rc(2), 1);
3078
    TORCH_CHECK_EQ(rc(3), 1);
3079
  }
3080

3081
  {
3082
    for (const auto& computeOp : std::vector<std::string>{"FC", "Conv"}) {
3083
      LOG(INFO) << "MPSCNNRewriteForMetal Fusion/Copy Test";
3084
      NetDef netdef;
3085
      netdef.add_external_input("X");
3086
      netdef.add_external_output("Y");
3087
      // These two ops can be fused.
3088
      {
3089
        auto& op = *(netdef.add_op());
3090
        op.set_type(computeOp);
3091
        op.add_input("X");
3092
        op.add_input("W");
3093
        op.add_input("b");
3094
        op.add_output("Y");
3095
      }
3096
      {
3097
        auto& op = *(netdef.add_op());
3098
        op.set_type("Relu");
3099
        op.add_input("Y");
3100
        op.add_output("Y");
3101
      }
3102
      {
3103
        auto& op = *(netdef.add_op());
3104
        op.set_type(computeOp);
3105
        op.add_input("X2");
3106
        op.add_input("W");
3107
        op.add_input("b");
3108
        op.add_output("Y2");
3109
      }
3110
      {
3111
        auto& op = *(netdef.add_op());
3112
        op.set_type("Relu");
3113
        op.add_input("Y2");
3114
        op.add_output("Y");
3115
      }
3116
      netdef = rewriteForMetal(netdef);
3117
      auto ty = [&](size_t i) { return netdef.op(i).type(); };
3118
      auto i0 = [&](size_t i) { return netdef.op(i).input(0); };
3119
      auto o0 = [&](size_t i) { return netdef.op(i).output(0); };
3120
      TORCH_CHECK_EQ(netdef.op_size(), 4);
3121
      TORCH_CHECK_EQ(ty(0), "CopyToMPSCNN");
3122
      TORCH_CHECK_EQ(ty(1), std::string("MPSCNN") + computeOp + std::string("Relu"));
3123
      TORCH_CHECK_EQ(ty(2), std::string("MPSCNN") + computeOp + std::string("Relu"));
3124
      TORCH_CHECK_EQ(ty(3), "CopyFromMPSCNN");
3125
      TORCH_CHECK_EQ(i0(0), "X");
3126
      TORCH_CHECK_EQ(i0(1), o0(0));
3127
      TORCH_CHECK_EQ(i0(2), "X2");
3128
      TORCH_CHECK_EQ(o0(2), i0(3));
3129
      TORCH_CHECK_EQ(o0(3), "Y");
3130
      TORCH_CHECK_EQ(netdef.external_input(0), "X");
3131
      TORCH_CHECK_EQ(netdef.external_output(0), "Y");
3132
    }
3133
  }
3134

3135
  {
3136
    LOG(INFO) << "MPSCNNRewriteForMetal Failure Test";
3137
    NetDef netdef;
3138
    netdef.add_external_input("X");
3139
    netdef.add_external_output("Y");
3140
    {
3141
      auto& op = *(netdef.add_op());
3142
      op.set_type("Conv");
3143
      op.add_input("X");
3144
      op.add_input("W");
3145
      op.add_input("b");
3146
      op.add_output("Y1");
3147
    }
3148
    {
3149
      auto& op = *(netdef.add_op());
3150
      op.set_type("Conv");
3151
      op.add_input("X");
3152
      op.add_input("W");
3153
      op.add_input("b");
3154
      op.add_output("Y2");
3155
    }
3156

3157
    {
3158
      auto& op = *(netdef.add_op());
3159
      op.set_type("Concat");
3160
      op.add_input("Y1");
3161
      op.add_input("Y2");
3162
      op.add_output("Y");
3163
    }
3164
    try {
3165
      netdef = rewriteForMetal(netdef);
3166
      CHECK(false) << "Shouldn't reach here, due to multiple usages of X";
3167
    } catch (const std::exception& e) {
3168
      // Nothing.
3169
    }
3170
  }
3171

3172
  {
3173
    LOG(INFO) << "MPSCNNRewriteForMetal out-of-place Fusion Test";
3174
    NetDef netdef;
3175
    netdef.add_external_input("X");
3176
    netdef.add_external_output("Z");
3177
    {
3178
      auto& op = *(netdef.add_op());
3179
      op.set_type("Conv");
3180
      op.add_input("X");
3181
      op.add_input("W");
3182
      op.add_input("b");
3183
      op.add_output("Y");
3184
    }
3185
    {
3186
      auto& op = *(netdef.add_op());
3187
      op.set_type("Relu");
3188
      op.add_input("Y");
3189
      op.add_output("Z");
3190
    }
3191
    {
3192
      auto& op = *(netdef.add_op());
3193
      op.set_type("Relu");
3194
      op.add_input("Z");
3195
      op.add_output("Z");
3196
    }
3197
    netdef = rewriteForMetal(netdef);
3198
    TORCH_CHECK_EQ(netdef.op_size(), 4);
3199
    auto ty = [&](size_t i) { return netdef.op(i).type(); };
3200
    auto i0 = [&](size_t i) { return netdef.op(i).input(0); };
3201
    auto o0 = [&](size_t i) { return netdef.op(i).output(0); };
3202
    TORCH_CHECK_EQ(ty(0), "CopyToMPSCNN");
3203
    TORCH_CHECK_EQ(ty(1), "MPSCNNConvRelu");
3204
    TORCH_CHECK_EQ(ty(2), "MPSCNNRelu");
3205
    TORCH_CHECK_EQ(ty(3), "CopyFromMPSCNN");
3206
    TORCH_CHECK_EQ(i0(1), o0(0));
3207
    TORCH_CHECK_EQ(o0(1), "Z");
3208
    TORCH_CHECK_EQ(i0(2), "Z");
3209
    TORCH_CHECK_EQ(o0(2), i0(3));
3210
  }
3211

3212
  {
3213
    LOG(INFO) << "MPSCNNRewriteForMetal out-of-place fusion failure test";
3214
    NetDef netdef;
3215
    netdef.add_external_input("X");
3216
    netdef.add_external_output("Z");
3217
    {
3218
      auto& op = *(netdef.add_op());
3219
      op.set_type("Conv");
3220
      op.add_input("X");
3221
      op.add_input("W");
3222
      op.add_input("b");
3223
      op.add_output("Y");
3224
    }
3225
    {
3226
      auto& op = *(netdef.add_op());
3227
      op.set_type("Relu");
3228
      op.add_input("Y");
3229
      op.add_output("Z");
3230
    }
3231
    {
3232
      auto& op = *(netdef.add_op());
3233
      op.set_type("Relu");
3234
      op.add_input("Y");
3235
      op.add_output("Z");
3236
    }
3237
    netdef = rewriteForMetal(netdef);
3238
    TORCH_CHECK_EQ(netdef.op_size(), 5);
3239
    auto ty = [&](size_t i) { return netdef.op(i).type(); };
3240
    auto i0 = [&](size_t i) { return netdef.op(i).input(0); };
3241
    auto o0 = [&](size_t i) { return netdef.op(i).output(0); };
3242
    TORCH_CHECK_EQ(ty(0), "CopyToMPSCNN");
3243
    TORCH_CHECK_EQ(ty(1), "MPSCNNConv");
3244
    TORCH_CHECK_EQ(ty(2), "MPSCNNRelu");
3245
    TORCH_CHECK_EQ(ty(3), "MPSCNNRelu");
3246
    TORCH_CHECK_EQ(ty(4), "CopyFromMPSCNN");
3247
    TORCH_CHECK_EQ(i0(1), o0(0));
3248
    TORCH_CHECK_EQ(o0(1), "Y");
3249
    TORCH_CHECK_EQ(i0(2), o0(1));
3250
    TORCH_CHECK_EQ(o0(2), "Z");
3251
    TORCH_CHECK_EQ(i0(3), o0(1));
3252
    TORCH_CHECK_EQ(o0(3), i0(4));
3253
  }
3254

3255
  {
3256
    LOG(INFO) << "MPSCNNRewriteForMetal PreProcess/Deprocess Test";
3257
    NetDef netdef;
3258
    {
3259
      auto& op = *(netdef.add_op());
3260
      op.set_type("PackedInt8BGRANHWCToNCHWCStylizerPreprocess");
3261
      op.add_input("X");
3262
      op.add_output("Y");
3263
    }
3264
    {
3265
      auto& op = *(netdef.add_op());
3266
      op.set_type("Relu");
3267
      op.add_input("Y");
3268
      op.add_output("Y");
3269
    }
3270
    {
3271
      auto& op = *(netdef.add_op());
3272
      op.set_type("BRGNCHWCToPackedInt8BGRAStylizerDeprocess");
3273
      op.add_input("Y");
3274
      op.add_output("Z");
3275
    }
3276
    netdef = rewriteForMetal(netdef);
3277
    auto ty = [&](size_t i) { return netdef.op(i).type(); };
3278
    auto i0 = [&](size_t i) { return netdef.op(i).input(0); };
3279
    auto o0 = [&](size_t i) { return netdef.op(i).output(0); };
3280
    TORCH_CHECK_EQ(netdef.op_size(), 3);
3281
    TORCH_CHECK_EQ(ty(0), "MPSCNNPackedInt8BGRANHWCToNCHWCStylizerPreprocess");
3282
    TORCH_CHECK_EQ(ty(1), "MPSCNNRelu");
3283
    TORCH_CHECK_EQ(ty(2), "MPSCNNBRGNCHWCToPackedInt8BGRAStylizerDeprocess");
3284
    TORCH_CHECK_EQ(i0(0), "X");
3285
    TORCH_CHECK_EQ(i0(1), o0(0));
3286
    TORCH_CHECK_EQ(i0(2), o0(1));
3287
    TORCH_CHECK_EQ(o0(2), "Z");
3288
  }
3289
  LOG(INFO) << "All MPSCNN tests passed.";
3290
}
3291

3292
// Returns a copy of `def` containing only ops [0, idx]; everything after
// op `idx` is dropped. `idx` must be a valid op index (< def.op_size()).
NetDef truncateAfter(NetDef def, size_t idx) {
  // e.g. idx = 0, net of 10 ops -> remove the trailing 9;
  //      idx = 0, net of 1 op   -> remove nothing beyond op 0.
  while (static_cast<size_t>(def.op_size()) > idx + 1) {
    def.mutable_op()->RemoveLast();
  }
  TORCH_CHECK_EQ(def.op_size(), idx + 1);
  return def;
}
3302

3303
// Appends a CopyFromMPSCNN op to `def` so the net's final (Metal-resident)
// output becomes readable on the CPU. The last op is redirected to write to
// a temporary blob ("METAL_COPIER"), and the new copy op forwards that blob
// to the original output name, keeping the net's external contract intact.
NetDef addMPSCNNCopyFinalizer(NetDef def) {
  TORCH_CHECK_GE(def.op_size(), 1);
  const auto lastIdx = def.op_size() - 1;
  // Remember the original output name before rerouting it.
  const auto finalOutputName = def.mutable_op(lastIdx)->output(0);
  def.mutable_op(lastIdx)->set_output(0, "METAL_COPIER");

  auto& copyOp = *(def.add_op());
  copyOp.set_type("CopyFromMPSCNN");
  copyOp.add_input("METAL_COPIER");
  copyOp.add_output(finalOutputName);
  return def;
}
3315

3316
void compareModels(const NetDef& initNet, NetDef predictNet) {
3317
  auto* arg = predictNet.mutable_op(0)->mutable_arg(0);
3318
  TORCH_CHECK_EQ(arg->name(), "noise_std");
3319
  arg->set_f(0.000001);
3320

3321
  NetDef metalPredictNet;
3322
  CAFFE_ENFORCE(tryConvertToMPSCNN(initNet, predictNet, &metalPredictNet));
3323

3324
  // TODO: consider last op as well.
3325
  for (auto i = 0; i < predictNet.op_size(); ++i) {
3326
    auto truncatedPredictNet = truncateAfter(predictNet, i);
3327
    auto truncatedMetalPredictNet = truncateAfter(metalPredictNet, i);
3328
    // For all but the last op, we need to add a copy op.
3329
    if (i != predictNet.op_size() - 1) {
3330
      truncatedMetalPredictNet =
3331
          addMPSCNNCopyFinalizer(truncatedMetalPredictNet);
3332
    }
3333

3334
    dumpDef(truncatedPredictNet);
3335
    dumpDef(truncatedMetalPredictNet);
3336

3337
    Workspace cws;
3338
    cws.RunNetOnce(initNet);
3339
    {
3340
      auto* t = BlobGetMutableTensor(
3341
          cws.CreateBlob(predictNet.external_input(0)), CPU);
3342
      t->Resize(1, 224, 224, 4);
3343
      for (auto i = 0; i < t->size(); ++i) {
3344
        t->mutable_data<uint8_t>()[i] = i % 225;
3345
      }
3346
    }
3347
    cws.RunNetOnce(truncatedPredictNet);
3348

3349
    Workspace mws;
3350
    mws.RunNetOnce(initNet);
3351
    {
3352
      auto* t = BlobGetMutableTensor(
3353
          mws.CreateBlob(predictNet.external_input(0)), CPU);
3354
      t->Resize(1, 224, 224, 4);
3355
      for (auto i = 0; i < t->size(); ++i) {
3356
        t->mutable_data<uint8_t>()[i] = i % 225;
3357
      }
3358
    }
3359
    mws.RunNetOnce(truncatedMetalPredictNet);
3360

3361
    const auto name =
3362
        truncatedPredictNet.op(truncatedPredictNet.op_size() - 1).output(0);
3363

3364
    LOG(INFO) << "Checking correspondence for name: " << name << ", idx: " << i;
3365
    {
3366
      const auto& mt = mws.GetBlob(name)->Get<TensorCPU>();
3367
      const auto& ct = cws.GetBlob(name)->Get<TensorCPU>();
3368
      TORCH_CHECK_EQ(mt.sizes(), ct.sizes());
3369
      for (auto j = 0; j < mt.size(); ++j) {
3370
        if (mt.IsType<float>()) {
3371
          if (j < 10) {
3372
            LOG(INFO) << "i: " << i << ", j: " << j
3373
                      << ", CPU: " << ct.data<float>()[j]
3374
                      << ", MTL: " << mt.data<float>()[j];
3375
          }
3376
          TORCH_CHECK_NEAR(mt.data<float>()[j], ct.data<float>()[j], 5);
3377
        } else {
3378
          CHECK(mt.IsType<uint8_t>());
3379
          if (j < 10) {
3380
            LOG(INFO) << "i: " << i << ", j: " << j
3381
                      << ", CPU: " << ct.data<uint8_t>()[j]
3382
                      << ", MTL: " << mt.data<uint8_t>()[j];
3383
          }
3384
          TORCH_CHECK_NEAR(mt.data<uint8_t>()[j], ct.data<uint8_t>()[j], 5);
3385
        }
3386
      }
3387
    }
3388
  }
3389
}
3390
void verifyRewrite(
3391
    const NetDef& initNet,
3392
    const NetDef& net,
3393
    std::vector<int> inputDims) {
3394
  NetDef metalPredictNet;
3395
  NetDef predictNet = setSpecialArgs(net);
3396
  CAFFE_ENFORCE(tryConvertToMPSCNNIntermediateCopies(
3397
      initNet, predictNet, &metalPredictNet));
3398
  dumpDef(predictNet);
3399
  dumpDef(metalPredictNet);
3400

3401
#define RUN_NET(ws, predictNet)                            \
3402
  ws.RunNetOnce(initNet);                                  \
3403
  {                                                        \
3404
    auto* t = BlobGetMutableTensor(                        \
3405
        ws.CreateBlob(predictNet.external_input(0)), CPU); \
3406
    t->Resize(inputDims);                                  \
3407
    CPUContext ctx;                                        \
3408
    math::RandGaussian<float, CPUContext>(                 \
3409
        t->size(), 0, 1, t->mutable_data<float>(), &ctx);  \
3410
  }                                                        \
3411
  ws.RunNetOnce(predictNet);
3412

3413
  // initialize
3414
  getMPSCNNContext();
3415

3416
  Workspace cws;
3417
  RUN_NET(cws, predictNet);
3418

3419
  Workspace mws;
3420
  RUN_NET(mws, metalPredictNet);
3421

3422
  for (auto i = 0; i < predictNet.external_output_size(); i++) {
3423
    auto blobName = predictNet.external_output(i);
3424
    LOG(INFO) << "Checking output blob:" << blobName;
3425
    const auto& mt = mws.GetBlob(blobName)->Get<Tensor>();
3426
    const auto& ct = cws.GetBlob(blobName)->Get<Tensor>();
3427
    if (mt.size() == 0 || ct.size() == 0) {
3428
      LOG(INFO) << "One of the operator failed.";
3429
      return;
3430
    }
3431
    // TORCH_CHECK_EQ(mt.sizes(), ct.sizes());
3432
    for (auto j = 0; j < fmin(mt.size(), ct.size()); ++j) {
3433
      if (mt.IsType<float>()) {
3434
        if (j < 10) {
3435
          LOG(INFO) << "i: " << i << ", j: " << j
3436
                    << ", CPU: " << ct.data<float>()[j]
3437
                    << ", MTL: " << mt.data<float>()[j];
3438
        }
3439
        // Disabling check for now because of precision issues
3440
        // TORCH_CHECK_NEAR(mt.data<float>()[j], ct.data<float>()[j], 5);
3441
      } else {
3442
        LOG(INFO) << "Type uint8_t";
3443
        CHECK(mt.IsType<uint8_t>());
3444
        if (j < 10) {
3445
          LOG(INFO) << "i: " << i << ", j: " << j
3446
                    << ", CPU: " << ct.data<uint8_t>()[j]
3447
                    << ", MTL: " << mt.data<uint8_t>()[j];
3448
        }
3449
        // Disabling check for now.
3450
        // TORCH_CHECK_NEAR(mt.data<uint8_t>()[j], ct.data<uint8_t>()[j], 5);
3451
      }
3452
    }
3453
  }
3454
  LOG(INFO) << "rewrite test passed.";
3455
}
3456
}
3457

3458
#endif
3459

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.