1
#include <torch/csrc/jit/mobile/import.h>
2
#include <torch/csrc/jit/mobile/parse_bytecode.h>
3
#include <torch/csrc/jit/mobile/parse_operators.h>
5
#include <ATen/core/ivalue.h>
6
#include <ATen/core/qualified_name.h>
7
#include <c10/util/Exception.h>
8
#include <c10/util/Optional.h>
9
#include <c10/util/ScopeExit.h>
10
#include <c10/util/irange.h>
11
#include <caffe2/serialize/in_memory_adapter.h>
12
#include <caffe2/serialize/inline_container.h>
13
#include <caffe2/serialize/read_adapter_interface.h>
14
#include <caffe2/serialize/versions.h>
15
#include <torch/csrc/jit/api/compilation_unit.h>
16
#include <torch/csrc/jit/mobile/file_format.h>
17
#include <torch/csrc/jit/mobile/flatbuffer_loader.h>
18
#include <torch/csrc/jit/mobile/observer.h>
19
#include <torch/csrc/jit/mobile/type_parser.h>
20
#include <torch/csrc/jit/mobile/upgrader_mobile.h>
21
#include <torch/csrc/jit/runtime/instruction.h>
22
#include <torch/csrc/jit/serialization/import_export_constants.h>
23
#include <torch/csrc/jit/serialization/import_export_functions.h>
24
#include <torch/csrc/jit/serialization/import_read.h>
25
#include <torch/custom_class.h>
29
// The import process to serialize the bytecode package.
30
// An example for bytecode.pkl of a small mobile_module looks like:
31
// (4, # model version number (caffe2::serialize::kProducedBytecodeVersion)
35
// '__torch__.m.forward',
43
// ('operators', (('aten::Int', 'Tensor'),)),
46
// ('register_size', 2)),
47
// # schema -- optional (forward-compatible addition to version 4)
49
// ((('name', 'x'), ('type', 'Tensor'), ('default_value', 13)),
50
// ...)), # more args follow here
52
// ((('name', ''), ('type', 'Tensor'), ('default_value', None)),
53
// ...)), # more return values follow here
55
// # more methods follow here
58
// In addition, the module debugging information can be saved
59
// in mobile_debug_handles.pkl. An example for it looks like:
61
// ('__torch__.m.forward',
62
// (('module_debug_handles', 10))))
63
// Here 10 is the debug handle.
64
// We also store separately and optionally callstack_debug_map.
65
// This serializes inlined callstack (InlinedCallStack data structure)
66
// corresponding to the debug handles.
67
// Callstack_debug_map serializes tuples of
68
// (int64_t(debug_handle), int64_t(source_range_tag), InlinedCallStack)
69
// source_range_tag maps to .debug_pkl files where this tag maps it to
71
// InlinedCallStack is serialized as:
72
// IValue(InlinedCallStack) = {IValue(ModuleInstanceInfo),
73
// int64_t(source_range_tag), IValue(InlinedCallStack)} ModuleInstanceInfo is
74
// serialized as a tuple of (class_type_name, instance_name)
76
// Note that currently the backward compatibility is not supported by bytecode.
77
// This format and process need to be revisited and redesigned if we want to
78
// support backward compatibility in future.
80
// Note that the following function-schema fields are not supported:
81
// - Argument::{known_length_,kwarg_only_}
82
// - FunctionSchema::{overload_name_, is_vararg_, is_varret_}
86
using caffe2::serialize::MemoryReadAdapter;
87
using caffe2::serialize::PyTorchStreamReader;
88
using caffe2::serialize::ReadAdapterInterface;
90
OpCode parseOpCode(const char* str);
92
// Resolves a qualified type name against the JIT CompilationUnit used by the
// mobile deserializer.
//
// HACK: first we check whether the name starts with a special prefix that
// marks a supported pytorch class type. There are two such prefixes:
// "__torch__" for nn modules, and "torch.jit" from to_backend. For those, an
// unresolved ClassType is created on demand and registered in the compilation
// unit; every other name is handed to the mobile type parser.
//
// NOTE(review): this chunk had interleaved line-number artifacts and dropped
// closing braces; the brace structure below is the only reading consistent
// with the visible statements — confirm against upstream.
TypePtr resolveTypeNameMobile(
    const c10::QualifiedName& qn,
    std::shared_ptr<CompilationUnit> compilation_unit) {
  static const c10::QualifiedName torchPrefix = "__torch__";
  static const c10::QualifiedName jitPrefix = "torch.jit";
  if (torchPrefix.isPrefixOf(qn) || jitPrefix.isPrefixOf(qn)) {
    if (compilation_unit->get_class(qn) == nullptr) {
      auto typeptr = ClassType::create(qn, compilation_unit, true);
      compilation_unit->register_type(typeptr);
    }
    return compilation_unit->get_class(qn);
  }
  return c10::parseType(qn.qualifiedName());
}
115
// Wraps resolveTypeNameMobile's result together with the owning
// CompilationUnit so the unpickler's type resolver keeps the unit alive.
c10::StrongTypePtr typeResolverMobile(
    const c10::QualifiedName& qn,
    const std::shared_ptr<CompilationUnit>& compilation_unit) {
  return c10::StrongTypePtr(
      compilation_unit, resolveTypeNameMobile(qn, compilation_unit));
}
122
c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
123
const at::StrongTypePtr& type,
125
mobile::CompilationUnit& mobile_compilation_unit) {
126
auto cls = type.type_->expect<at::ClassType>();
127
auto qn = cls->name();
128
c10::QualifiedName method_name(qn.value(), "__setstate__");
129
auto setstate = mobile_compilation_unit.find_function(method_name);
130
auto find_custom_class_with_setstate = [&qn]() -> c10::ClassTypePtr {
131
auto custom_class_type = torch::jit::getCustomClass(qn->qualifiedName());
132
if (custom_class_type && custom_class_type->findMethod("__setstate__")) {
133
return custom_class_type;
138
auto obj = c10::ivalue::Object::create(type, 0);
139
Stack stack({obj, input});
140
setstate->run(stack);
142
} else if (auto custom_class_type = find_custom_class_with_setstate()) {
143
auto obj = c10::ivalue::Object::create(
144
c10::StrongTypePtr(nullptr, custom_class_type), 1);
145
Stack stack({obj, input});
146
custom_class_type->getMethod("__setstate__").run(stack);
149
auto dict = std::move(input).toGenericDict();
150
size_t ndict = dict.size();
151
auto obj = c10::ivalue::Object::create(type, ndict);
152
auto it = dict.begin();
153
for (const auto i : c10::irange(ndict)) {
154
cls->addOrCheckAttribute(it->key().toStringRef(), it->key().type());
155
obj->setSlot(i, it->value());
162
bool isTensorInBytecodeArchive(
163
caffe2::serialize::PyTorchStreamReader& stream_reader) {
164
auto records = stream_reader.getAllRecords();
165
for (const auto& record : records) {
166
if (record.find("bytecode/") != std::string::npos) {
175
// Registers `func` as a method on the class of its `self` argument.
// No-ops when the schema has no leading `self`, when `self` is not a
// ClassType, or when a method with the same name is already registered.
// NOTE(review): the guard `return;`s and closing braces were dropped from
// this chunk; restored as the only consistent reading.
void tryRegisterMethod(const std::vector<c10::Argument>& args, Function& func) {
  if (args.empty() || args[0].name() != "self") {
    return;
  }
  if (auto cls = args[0].type()->castRaw<ClassType>()) {
    if (C10_UNLIKELY(cls->findMethod(func.name()))) {
      return;
    }
    cls->addMethod(&func);
  }
}
188
// The deserializer class which loads the bytecode package from bc files.
189
class BytecodeDeserializer final {
191
explicit BytecodeDeserializer(
192
std::unique_ptr<PyTorchStreamReader> reader,
193
uint64_t module_load_options = 0);
194
mobile::Module deserialize(c10::optional<at::Device> device);
195
mobile::Module deserialize(
196
c10::optional<at::Device> device,
197
ExtraFilesMap& extra_files);
198
void deserialize_only_extra(
199
c10::optional<at::Device> device,
200
ExtraFilesMap& extra_files);
203
TypePtr resolveTypeName(const c10::QualifiedName& qn);
204
void init_upgrader(mobile::Function* function);
206
c10::ivalue::TupleElements&& vals,
207
c10::optional<c10::ivalue::TupleElements>&& debug_handles,
208
mobile::CompilationUnit& mcu);
209
c10::IValue readArchive(
210
const std::string& archive_name,
211
std::shared_ptr<mobile::CompilationUnit> mcu);
212
void parseFunctionSchema(
213
const std::string& function_name,
215
const int64_t& model_version,
216
mobile::Function* function);
217
std::shared_ptr<CompilationUnit> compilation_unit_;
218
std::unordered_set<std::string> imported_libs_;
219
std::unique_ptr<PyTorchStreamReader> reader_{};
220
c10::optional<at::Device> device_;
221
uint64_t module_load_options_;
222
// From `version` or `.data/version` in model.ptl and it's computed
223
// dynamically. It's used for finding the minimum required runtime to run all
224
// operators from the given model. If it's less than the current runtime,
225
// upgrader will be applied at loading stage.
226
uint64_t operator_version_;
227
uint64_t bytecode_version_;
230
// Takes ownership of the zip reader; creates a fresh CompilationUnit that is
// used for class-type resolution while unpickling.
BytecodeDeserializer::BytecodeDeserializer(
    std::unique_ptr<PyTorchStreamReader> reader,
    uint64_t module_load_options)
    : compilation_unit_(std::make_shared<CompilationUnit>()),
      reader_(std::move(reader)),
      module_load_options_(module_load_options) {}
237
// Member shim over resolveTypeNameMobile using this deserializer's unit.
TypePtr BytecodeDeserializer::resolveTypeName(const c10::QualifiedName& qn) {
  return resolveTypeNameMobile(qn, compilation_unit_);
}
241
// It requires compilation_unit_ when parsing function schema. Keep it in
242
// BytecodeDeserializer. It may be refactored later to make it independent
243
// of the specific BytecodeDeserializer, like parsing other tables
244
void BytecodeDeserializer::parseFunctionSchema(
245
const std::string& function_name,
247
const int64_t& model_version,
248
mobile::Function* function) {
250
if (schemaTable) { // (schema is optional for back compat)
251
auto parseArgList = [this,
252
function](c10::ivalue::TupleElements&& argTables) {
253
std::vector<c10::Argument> args;
254
for (auto& argTable : argTables) {
255
auto argTableElements = std::move(argTable.toTupleRef()).elements();
257
expect_field(argTableElements, "name", BYTECODE_INDEX_ARGUMENT_NAME)
259
c10::TypePtr type = resolveTypeName(
261
argTableElements, "type", BYTECODE_INDEX_ARGUMENT_TYPE))
263
IValue default_value = expect_field(
266
BYTECODE_INDEX_ARGUMENT_DEFAULT_VALUE);
271
std::move(default_value));
273
tryRegisterMethod(args, *function);
276
auto schemaTableElements = std::move(schemaTable->toTupleRef()).elements();
277
auto arg_list = std::move(expect_field(
280
BYTECODE_INDEX_SCHEMA_ARGUMENTS)
286
schemaTableElements, "returns", BYTECODE_INDEX_SCHEMA_RETURNS)
289
c10::FunctionSchema schema(
291
"" /*overload_name*/,
292
parseArgList(std::move(arg_list)),
293
parseArgList(std::move(ret_list)),
294
false /*is_varargs*/,
295
false /*is_varret*/);
296
function->setSchema(std::move(schema));
300
// Appends every upgrader bytecode function to `function` so that, when the
// model's operator version is older than the runtime's, rewritten OP
// instructions can CALL into these upgraders.
void BytecodeDeserializer::init_upgrader(mobile::Function* function) {
  for (auto& byteCodeFunctionWithOperator : getUpgraderBytecodeList()) {
    function->append_function(byteCodeFunctionWithOperator.function);
  }
}
306
void BytecodeDeserializer::parseMethods(
307
c10::ivalue::TupleElements&& vals,
308
c10::optional<c10::ivalue::TupleElements>&& debug_handles,
309
mobile::CompilationUnit& mcu) {
310
TORCH_CHECK(!vals.empty(), "Bytecode has no elements. ");
311
// Initialized with the version number when kProducedBytecodeVersion was
312
// introduced. The old models (some of them already in production) without
313
// version number are seen as version 3 (deprecated).
314
constexpr uint64_t default_version = 0x3L;
315
bytecode_version_ = default_version;
316
size_t method_i_start = 0;
317
if (vals[0].isInt()) {
318
bytecode_version_ = vals[0].toInt();
322
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
323
caffe2::serialize::kMinSupportedBytecodeVersion <= bytecode_version_ &&
324
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
325
bytecode_version_ <= caffe2::serialize::kMaxSupportedBytecodeVersion,
326
"Lite Interpreter version number does not match. ",
327
"The model version must be between ",
328
caffe2::serialize::kMinSupportedBytecodeVersion,
330
caffe2::serialize::kMaxSupportedBytecodeVersion,
331
" but the model version is ",
336
debug_handles->size() == vals.size(),
337
"The numbers of bytecode values and debug info values do not match.");
340
// Process all methods in this mobile module.
341
for (const auto i : c10::irange(method_i_start, vals.size())) {
342
auto element = std::move(vals[i]);
343
auto m_tuple = std::move(element.toTupleRef()).elements();
344
const std::string& function_name = m_tuple[0].toStringRef();
345
auto codeTableElements =
346
std::move(std::move(m_tuple[1]).toTupleRef()).elements();
347
IValue* schemaTable = // older files do not store function schema
348
(bytecode_version_ > 0x4L ||
349
(bytecode_version_ == 0x4L && m_tuple.size() >= 3))
353
std::make_unique<mobile::Function>(c10::QualifiedName(function_name));
358
codeTableElements, "instructions", BYTECODE_INDEX_INSTRUCTION)
362
std::move(expect_field(
363
codeTableElements, "operators", BYTECODE_INDEX_OPERATOR)
367
std::move(expect_field(
368
codeTableElements, "constants", BYTECODE_INDEX_CONSTANT)
372
std::move(expect_field(codeTableElements, "types", BYTECODE_INDEX_TYPE)
375
int64_t register_size =
377
codeTableElements, "register_size", BYTECODE_INDEX_REGISTER_SIZE)
380
c10::ivalue::TupleElements debug_handles_m_tuple;
382
debug_handles_m_tuple =
383
std::move(std::move((*debug_handles)[i]).toTupleRef()).elements();
385
init_upgrader(function.get());
386
// 1. First pass all operators from models
387
parseOperators(std::move(ops_list), module_load_options_, function.get());
389
// 2. Decides if upgrader is needed
391
(operator_version_ < caffe2::serialize::kProducedFileFormatVersion);
396
debug_handles_m_tuple,
399
// 3. If upgrader is needed, change change the OP instrunction to CALL
400
// instruction (In next PR, use_upgrader will be parsed to parseInstruction
401
// function and do the actual change)
403
applyUpgrader(function.get(), operator_version_);
406
parseConstants(consts_list, function.get());
408
parseTypes(types_list, function.get());
410
function->set_register_size(register_size);
413
function_name, schemaTable, bytecode_version_, function.get());
415
mcu.register_function(std::move(function));
419
void BytecodeDeserializer::deserialize_only_extra(
420
c10::optional<at::Device> device,
421
ExtraFilesMap& extra_files) {
423
for (const auto& kv : extra_files) {
424
const std::string& key = "extra/" + kv.first;
425
if (reader_->hasRecord(key)) {
426
auto [meta_ptr, meta_size] = reader_->getRecord(key);
427
extra_files[kv.first] =
428
std::string(static_cast<char*>(meta_ptr.get()), meta_size);
433
// Reads the requested extra files, then deserializes the module itself via
// the single-argument overload.
mobile::Module BytecodeDeserializer::deserialize(
    c10::optional<at::Device> device,
    ExtraFilesMap& extra_files) {
  deserialize_only_extra(device, extra_files);
  return deserialize(device);
}
440
mobile::Module BytecodeDeserializer::deserialize(
441
c10::optional<at::Device> device) {
443
auto mcu = std::make_shared<mobile::CompilationUnit>();
445
// bvals can have 2 possible formats:
447
// 1. Old format: bvals is an array (Tuple) of N elements, each element being
448
// itself a Tuple(method_name, method_table).
450
// 2. New format: bvals is an array (Tuple) of 1+N elements. The first element
451
// being a Tuple (int, table), and the integer stands for the bytecode version
452
// number. The rest of the elements are the same as before.
454
auto bvals = std::move(readArchive("bytecode", mcu).toTupleRef()).elements();
456
c10::optional<c10::ivalue::TupleElements> debug_handles;
457
bool has_debug_handles{false};
458
if (reader_->hasRecord("mobile_debug_handles.pkl")) {
460
std::move(readArchive("mobile_debug_handles", mcu).toTupleRef())
462
has_debug_handles = true;
464
operator_version_ = reader_->version();
465
parseMethods(std::move(bvals), std::move(debug_handles), *mcu);
466
auto m = mobile::Module(readArchive("data", mcu).toObject(), mcu);
467
m.set_min_operator_version(operator_version_);
468
m.set_bytecode_version(bytecode_version_);
469
m.setHasDebugHandles(has_debug_handles);
470
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
471
MobileDebugTable debug_table = MobileDebugTable(reader_, compilation_unit_);
472
m.setDebugTable(std::move(debug_table));
477
c10::IValue BytecodeDeserializer::readArchive(
478
const std::string& archive_name,
479
std::shared_ptr<mobile::CompilationUnit> mcu) {
480
auto type_resolver = [this](const c10::QualifiedName& qn) {
481
return typeResolverMobile(qn, compilation_unit_);
484
auto obj_loader = [&](const at::StrongTypePtr& type, const IValue& input) {
485
return objLoaderMobile(type, input, *mcu);
488
bool bytecode_tensor_in_constants_archive =
489
(archive_name == "bytecode" &&
490
!isTensorInBytecodeArchive(*reader_.get()));
492
auto ivalues = torch::jit::readArchiveAndTensors(
494
/*pickle_prefix=*/"",
496
bytecode_tensor_in_constants_archive ? "constants/" : "",
505
mobile::Module _load_for_mobile_impl(
506
std::unique_ptr<ReadAdapterInterface> rai,
507
c10::optional<c10::Device> device,
508
ExtraFilesMap& extra_files,
509
uint64_t module_load_options) {
510
auto observer = torch::observerConfig().getModuleObserver();
511
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
512
auto instance_key = std::rand();
514
std::unordered_map<std::string, std::string> metadata_map;
516
observer->onEnterLoadModel(instance_key);
517
auto defaultExtraFileList = observer->getDefaultExtraFiles();
518
// Add files in defaultExtraFileList to fail_extra_files and extra_files
519
for (const auto& fileName : defaultExtraFileList) {
520
extra_files.insert(std::make_pair(fileName, ""));
524
const size_t model_size = rai != nullptr ? rai->size() : 0;
525
auto reader = std::make_unique<PyTorchStreamReader>(std::move(rai));
526
if (module_load_options &
527
MobileModuleLoadOptions::PARSE_ALL_EXTRA_FILE_MAPS) {
528
// ExtraFilesMap is serialized with a "extra/", hence it is necessary to
529
// account for that when we de-serialize: the de-serialized filemap keys contain the
530
// prefix and we need to remove prior to construct the map. "extra/" string
531
// has a length of 6 characters, hence we need only sub-string 6th position
532
// of a string. Please refer to following link for a detail:
533
// https://www.internalfb.com/code/fbsource/[9996fcb7a6fb]/fbcode/caffe2/torch/csrc/jit/mobile/import.cpp?lines=427-434
534
std::vector<std::string> all_files = reader->getAllRecords();
535
for (auto& file_name : all_files) {
536
if (file_name.find("extra/") == 0) {
537
extra_files[file_name.substr(6)] = "";
541
BytecodeDeserializer deserializer(std::move(reader), module_load_options);
543
std::string error_message;
544
auto guard = c10::make_scope_exit([&]() {
548
deserializer.deserialize_only_extra(device, extra_files);
550
metadata_map = observer->processMetadataFromExtra(extra_files);
552
observer->onFailLoadModel(
554
error_message.empty() ? "Unknown exception" : error_message.c_str(),
559
mobile::Module result = deserializer.deserialize(device, extra_files);
561
// Add model_name and model_size to metadata_map
562
extra_files.insert(std::make_pair("model_name", result.name()));
564
std::make_pair("model_size", std::to_string(model_size)));
565
metadata_map = observer->processMetadataFromExtra(extra_files);
566
observer->onExitLoadModel(instance_key, metadata_map);
568
result.setMetadata(metadata_map);
571
} catch (c10::Error& error) {
572
error_message = error.what();
573
TORCH_RETHROW(error);
577
// Dispatches an in-memory model blob to the right loader based on its
// detected file format (zip/pickle vs flatbuffer).
// NOTE(review): the `size` parameter line and the switch scaffolding were
// dropped from this chunk; both are forced by the `size` uses and the
// visible `case` labels — confirm against upstream.
mobile::Module _load_mobile_from_bytes(
    const std::shared_ptr<char>& data,
    size_t size,
    c10::optional<c10::Device> device,
    ExtraFilesMap& extra_files,
    uint64_t module_load_options) {
  TORCH_CHECK(size >= kFileFormatHeaderSize, "Format error");
  auto format = getFileFormat(data.get());
  switch (format) {
    case FileFormat::ZipFileFormat: {
      std::unique_ptr<ReadAdapterInterface> rai =
          std::make_unique<MemoryReadAdapter>(data.get(), size);
      return _load_for_mobile_impl(
          std::move(rai), device, extra_files, module_load_options);
    }
    case FileFormat::FlatbufferFileFormat: {
      return parse_and_initialize_mobile_module(
          data, size, device, &extra_files);
    }
    default: {
      TORCH_CHECK(false, "Format error");
    }
  }
}
604
// Convenience overload: stream + device, no extra files requested.
// NOTE(review): the dropped parameter line is restored as `std::istream& in`,
// forced by the `in` argument in the forwarding call — confirm upstream.
mobile::Module _load_for_mobile(
    std::istream& in,
    c10::optional<at::Device> device) {
  ExtraFilesMap extra_files;
  return _load_for_mobile(in, device, extra_files);
}
611
// Convenience overload: file path + device, no extra files requested.
mobile::Module _load_for_mobile(
    const std::string& filename,
    c10::optional<at::Device> device) {
  ExtraFilesMap extra_files;
  return _load_for_mobile(filename, device, extra_files);
}
618
// Convenience overload: read adapter + device, no extra files requested.
mobile::Module _load_for_mobile(
    std::unique_ptr<ReadAdapterInterface> rai,
    c10::optional<c10::Device> device) {
  ExtraFilesMap extra_files;
  return _load_for_mobile(std::move(rai), device, extra_files);
}
625
// Loads a module from a stream. Flatbuffer payloads are slurped and parsed
// directly; anything else goes through the zip/pickle deserializer via an
// IStreamAdapter.
// NOTE(review): the dropped parameter line is restored as `std::istream& in`
// (forced by `getFileFormat(in)` / `IStreamAdapter(&in)`), and the trailing
// `return module;` + closers were dropped — confirm against upstream.
mobile::Module _load_for_mobile(
    std::istream& in,
    c10::optional<at::Device> device,
    ExtraFilesMap& extra_files,
    uint64_t module_load_options) {
  if (getFileFormat(in) == FileFormat::FlatbufferFileFormat) {
    auto [data, size] = get_stream_content(in);
    return _load_mobile_from_bytes(
        data, size, device, extra_files, module_load_options);
  }
  std::unique_ptr<IStreamAdapter> rai = std::make_unique<IStreamAdapter>(&in);
  auto module = _load_for_mobile_impl(
      std::move(rai), device, extra_files, module_load_options);
  return module;
}
641
// File-path overload that applies the default module-load options.
mobile::Module _load_for_mobile(
    const std::string& filename,
    c10::optional<at::Device> device,
    ExtraFilesMap& extra_files) {
  return _load_for_mobile(
      filename, device, extra_files, kDefaultMobileLoadOptions);
}
649
// Loads a module from a file path. Flatbuffer files are read and parsed in
// one shot; zip/pickle files stream through a FileAdapter into the
// deserializer.
mobile::Module _load_for_mobile(
    const std::string& filename,
    c10::optional<at::Device> device,
    ExtraFilesMap& extra_files,
    uint64_t module_load_options) {
  auto format = getFileFormat(filename);

  if (format == FileFormat::FlatbufferFileFormat) {
    auto [data, size] = get_file_content(filename.c_str());
    return _load_mobile_from_bytes(
        data, size, device, extra_files, module_load_options);
  }

  std::unique_ptr<FileAdapter> rai = std::make_unique<FileAdapter>(filename);
  return _load_for_mobile_impl(
      std::move(rai), device, extra_files, module_load_options);
}
667
// Read-adapter overload: materializes the full content first, then
// dispatches by detected format.
TORCH_API mobile::Module _load_for_mobile(
    std::unique_ptr<ReadAdapterInterface> rai,
    c10::optional<c10::Device> device,
    ExtraFilesMap& extra_files,
    uint64_t module_load_options) {
  // TODO optimize file read for non-flatbuffer models
  auto [data, size] = get_rai_content(rai.get());
  return _load_mobile_from_bytes(
      data, size, device, extra_files, module_load_options);
}
678
void _load_extra_only_for_mobile(
679
const std::string& filename,
680
c10::optional<at::Device> device,
681
ExtraFilesMap& extra_files) {
682
auto observer = torch::observerConfig().getModuleObserver();
683
// NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
684
auto instance_key = std::rand();
686
observer->onEnterLoadModel(instance_key);
689
auto format = getFileFormat(filename);
691
case FileFormat::ZipFileFormat: {
692
std::unique_ptr<FileAdapter> rai =
693
std::make_unique<FileAdapter>(filename);
694
auto reader = std::make_unique<PyTorchStreamReader>(std::move(rai));
695
BytecodeDeserializer deserializer(std::move(reader));
696
deserializer.deserialize_only_extra(device, extra_files);
699
case FileFormat::FlatbufferFileFormat: {
700
// TODO: the current flatbuffers implementation will always load the
701
// whole module including the extra files. Ideally it should be
702
// possible to just get the extra files given data
703
load_mobile_module_from_file(filename, c10::nullopt, &extra_files);
707
TORCH_CHECK(false, "Format error");
714
std::set<std::string> _export_operator_list(
715
torch::jit::mobile::Module& module) {
716
std::set<std::string> operator_list;
717
for (Method func : module.get_methods()) {
718
const Function& function = func.function();
719
const auto& code = function.get_code();
720
// op_names below isn't a list of unique operator names. In fact
721
// it can contain the same operator name many many times, so we need
722
// to de-dup the list by adding all the operator names into
723
// an std::set<std::string>.
724
std::vector<c10::OperatorName> const& op_names = code.op_names_;
725
for (auto& op_name : op_names) {
726
operator_list.insert(toString(op_name));
729
return operator_list;