#include "caffe2/core/timer.h"
#include "caffe2/utils/string_utils.h"
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/serialization/import.h>
#include <torch/script.h>

#include <c10/mobile/CPUCachingAllocator.h>

#include <chrono>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

using namespace std::chrono;
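
// Benchmark flags; after parsing these are read as FLAGS_model, FLAGS_iter,
// and FLAGS_report_pep.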
C10_DEFINE_string(model, "", "The given torch script model to benchmark.");
C10_DEFINE_int(iter, 10, "The number of iterations to run.");
C10_DEFINE_bool(
    report_pep,
    true,
    "Whether to print performance stats for AI-PEP.");

int main(int argc, char** argv) {
  c10::SetUsageMessage(
      "Run model load time benchmark for pytorch model.\n"
      "Example usage:\n"
      "./load_benchmark_torch"
      " --model=<model_file>");

  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    std::cerr << "Failed to parse command line flags!" << std::endl;
    return 1;
  }

  std::cout << "Starting benchmark." << std::endl;
  CAFFE_ENFORCE(
      FLAGS_iter >= 0,
      "Number of main runs should be non negative, provided ",
      FLAGS_iter,
      ".");
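
  // Per-iteration model load times, in microseconds.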
  std::vector<long> times;

  for (int i = 0; i < FLAGS_iter; ++i) {
    auto start = high_resolution_clock::now();
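
    // Lite-interpreter builds load bytecode models with _load_for_mobile();
    // full-JIT builds load TorchScript models with torch::jit::load().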
#if BUILD_LITE_INTERPRETER
    auto module = torch::jit::_load_for_mobile(FLAGS_model);
#else
    auto module = torch::jit::load(FLAGS_model);
#endif

    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    times.push_back(duration.count());
  }
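
  // Total load time across all iterations, in microseconds.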
  const double micros = static_cast<double>(
      std::accumulate(times.begin(), times.end(), 0L));
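
  // Emit one PyTorchObserver JSON record per iteration for AI-PEP ingestion.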
  if (FLAGS_report_pep) {
    for (auto t : times) {
      std::cout << R"(PyTorchObserver {"type": "NET", "unit": "us", )"
                << R"("metric": "latency", "value": ")"
                << t << R"("})" << std::endl;
    }
  }
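
  // Summarize average load latency and overall throughput.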
  const double iters = static_cast<double>(FLAGS_iter);
  std::cout << "Main run finished. Microseconds per iter: "
            << micros / iters
            << ". Iters per second: " << 1000.0 * 1000 * iters / micros
            << std::endl;

  return 0;
}