pytorch

Форк
0
/
module.cpp 
355 строк · 11.2 Кб
1
#include <torch/csrc/jit/mobile/module.h>
2

3
#include <torch/csrc/jit/backends/backend_exception.h>
4
#include <torch/csrc/jit/mobile/interpreter.h>
5
#include <torch/csrc/jit/mobile/observer.h>
6
#include <torch/csrc/jit/mobile/type_parser.h>
7
#include <torch/csrc/jit/runtime/jit_exception.h>
8
#include <exception>
9

10
#include <ATen/record_function.h>
11
#include <c10/util/ScopeExit.h>
12
#include <c10/util/irange.h>
13

14
namespace torch {
15
namespace jit {
16
std::ostream& operator<<(std::ostream& out, Instruction inst);
17
namespace mobile {
18

19
// Takes ownership of `fn` and appends it to this unit's method list.
void CompilationUnit::register_function(std::unique_ptr<Function> fn) {
  methods_.emplace_back(std::move(fn));
}
22

23
// Linear scan over the registered methods; returns the first function whose
// qualified name matches `qn`, or nullptr when there is no match.
const Function* CompilationUnit::find_function(
    const c10::QualifiedName& qn) const {
  for (const auto& method : methods_) {
    if (method->qualname() == qn) {
      return method.get();
    }
  }
  return nullptr;
}
32

33
// Non-const overload: delegates to the const lookup and casts away
// constness on the result.
Function* CompilationUnit::find_function(const c10::QualifiedName& qn) {
  const auto* const_this = static_cast<const CompilationUnit*>(this);
  // NOLINTNEXTLINE
  return const_cast<Function*>(const_this->find_function(qn));
}
38

39
// Like find_method, but throws instead of returning an empty optional when
// no method named `name` exists.
Method Module::get_method(const std::string& name) const {
  auto method = find_method(name);
  if (!method) {
    AT_ERROR("Method '", name, "' is not defined.");
  }
  return *method;
}
45

46
bool Module::compareMethodSchemas(
47
    const std::string& name_1,
48
    const std::string& name_2) {
49
  c10::optional<c10::FunctionSchema> schema_1, schema_2;
50
  for (const auto& fn : cu_->methods()) {
51
    if (fn->name() == name_1) {
52
      schema_1 = fn->getSchema();
53
    }
54
    if (fn->name() == name_2) {
55
      schema_2 = fn->getSchema();
56
    }
57
  }
58
  if (schema_1.has_value() && schema_2.has_value()) {
59
    return (schema_1 == schema_2);
60
  }
61
  return false;
62
}
63

64
// Removes the method named `basename` from both the object's type and the
// compilation unit. "Unsafe": no validation is performed — NOTE(review): if
// no method with that name exists, `i` ends up equal to methods().size()
// and is still passed to unsafeRemoveFunction; callers must guarantee the
// method exists.
void Module::unsafeRemoveMethod(const std::string& basename) {
  // Index of the first method whose (unqualified) name matches.
  int64_t i = 0;
  for (; i < static_cast<int64_t>(cu_->methods().size()); ++i) {
    if ((cu_->methods()[i])->name() == basename) {
      break;
    }
  }
  object_->type()->unsafeRemoveMethod(basename);
  cu_->unsafeRemoveFunction(i);
}
74

75
void Module::unsafeCopyMethod(
76
    const std::string& new_method_name,
77
    const Function& to_be_copied) {
78
  TORCH_CHECK(
79
      !find_method(new_method_name).has_value(),
80
      "Trying to replace existing method.");
81
  const c10::QualifiedName& tobe_copied_name = to_be_copied.qualname();
82
  c10::QualifiedName qualified_method_name(
83
      tobe_copied_name.prefix(), new_method_name);
84
  std::unique_ptr<Function> new_fn = std::make_unique<Function>(
85
      qualified_method_name, to_be_copied.get_code(), to_be_copied.getSchema());
86
  object_->type()->addMethod(new_fn.get());
87
  cu_->register_function(std::move(new_fn));
88
}
89

90
// Finds the first method whose unqualified name is `basename`, wrapping it
// in a Method bound to this module; nullopt when no such method exists.
c10::optional<Method> Module::find_method(const std::string& basename) const {
  const auto& fns = cu_->methods();
  for (size_t i = 0; i < fns.size(); ++i) {
    if (fns[i]->name() == basename) {
      return c10::make_optional<Method>(Method(this, fns[i].get()));
    }
  }
  return c10::nullopt;
}
98

99
namespace {
100
// For JIT, there is a private function to get all modules by iteration in
101
// struct slot_iterator_impl (jit/api/module.h). The following function use
102
// recursion to mimic the logic without allocating extra memory to get module
103
// list and set training attribute directly.
104
void set_train_recurse(
105
    const c10::intrusive_ptr<c10::ivalue::Object>& obj,
106
    bool on) {
107
  if (auto slot = obj->type()->findAttributeSlot("training")) {
108
    obj->setSlot(*slot, on);
109
  } else {
110
    TORCH_INTERNAL_ASSERT(
111
        false,
112
        "'training' attribute not found. Did you accidentally "
113
        "call .eval() before saving your model?");
114
  }
115
  for (const auto& slot : obj->slots()) {
116
    // slots is a list of IValue. Continue setting training attribute only
117
    // if the slot is an object and a module.
118
    if (slot.isObject() && slot.toObjectRef().type()->is_module()) {
119
      set_train_recurse(slot.toObject(), on);
120
    }
121
  }
122
}
123

124
void slot_params_recurse(
125
    const c10::intrusive_ptr<c10::ivalue::Object>& obj,
126
    std::vector<at::Tensor>* params) {
127
  for (const auto& slot : obj->slots()) {
128
    if (slot.isTensor()) {
129
      params->emplace_back(slot.toTensor());
130
    } else if (slot.isObject()) {
131
      slot_params_recurse(slot.toObject(), params);
132
    }
133
  }
134
}
135

136
void slot_named_params_recurse(
137
    const c10::intrusive_ptr<c10::ivalue::Object>& obj,
138
    std::map<std::string, at::Tensor>* params,
139
    const std::string& parent_name) {
140
  auto slots = obj->slots();
141
  size_t nslots = slots.size();
142
  for (const auto i : c10::irange(nslots)) {
143
    auto slot = slots[i];
144
    std::string name = parent_name.empty() ? parent_name : parent_name + ".";
145
    name += obj->type()->getAttributeName(i);
146
    // TODO: Fix this filter. Requires_grad is not the appropriate
147
    // filter of a parameter, but is a temporary hack to help probable
148
    // users of this api. The correct behavior is to filter by the
149
    // obj->type->is_parameter() but this currently always returns
150
    // false on mobile.
151
    if (slot.isTensor() && slot.toTensor().requires_grad()) {
152
      (*params)[name] = slot.toTensor();
153
    } else if (slot.isObject()) {
154
      slot_named_params_recurse(slot.toObject(), params, name);
155
    }
156
  }
157
}
158

159
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
160
std::string getTopModuleTypeName(const Module& m) {
161
  std::string name;
162
  if (m._ivalue()->type() && m._ivalue()->type()->name()) {
163
    name = m._ivalue()->type()->name().value().name();
164
  }
165
  return name;
166
}
167
#endif
168

169
} // namespace
170

171
const std::vector<at::Tensor> Module::parameters() const {
172
  std::vector<at::Tensor> params;
173
  slot_params_recurse(object_, &params);
174
  return params;
175
}
176

177
// Returns a mapping for all attributes that requires_grad=True in a module.
178
// This behavior differs from full torch script modules. This is a bug,
179
// but currently there is no way to correctly label parameters in the
180
// loading of a mobile module. TODO
181
const std::map<std::string, at::Tensor> Module::named_parameters() const {
182
  std::map<std::string, at::Tensor> params;
183
  const std::string name = "";
184
  slot_named_params_recurse(object_, &params, name);
185
  return params;
186
}
187

188
// Symbolicates `debug_handle` into a module-hierarchy string via the debug
// table; returns "" when symbolication support is compiled out.
std::string Module::getModuleHierarchy(const int64_t debug_handle) const {
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
  return getDebugTable().getModuleHierarchyInfo(
      debug_handle, getTopModuleTypeName(*this));
#else
  return "";
#endif
}
196

197
// Symbolicates `debug_handle` into a source-level call-stack string via the
// debug table; returns "" when symbolication support is compiled out.
std::string Module::getCallStack(const int64_t debug_handle) const {
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
  return getDebugTable().getSourceDebugString(
      debug_handle, getTopModuleTypeName(*this));
#else
  return "";
#endif
}
205

206
// We will continue to support this API for now as this is being relied upon
// for profiling.
// We really need to change this part, so in the next step for profiling support
// for delegates, the first thing will be to rewrite how profiling is done
// for lite interpreter.
// Returns the module-hierarchy info for `debug_handle` (same source as
// getModuleHierarchy); "" when symbolication support is compiled out.
std::string Module::get_forward_method_debug_info(int64_t debug_handle) const {
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
  return getDebugTable().getModuleHierarchyInfo(
      debug_handle, getTopModuleTypeName(*this));
#else
  return "";
#endif
}
219

220
// Sets the "training" attribute to `on` on this module and, recursively, on
// every submodule; asserts if the attribute is missing.
void Module::train(bool on) {
  set_train_recurse(object_, on);
}
223

224
bool Module::is_training() const {
225
  if (auto slot = object_->type()->findAttributeSlot("training")) {
226
    return object_->getSlot(*slot).toBool();
227
  }
228
  return true;
229
}
230

231
const std::vector<Method> Module::get_methods() const {
232
  std::vector<Method> methods;
233
  for (std::unique_ptr<Function>& fn : cu_->methods()) {
234
    methods.emplace_back(this, fn.get());
235
  }
236
  return methods;
237
}
238

239
// A Method stores non-owning raw pointers to its module and function;
// `owner` and `function` must outlive the Method.
Method::Method(const Module* owner, Function* function)
    : owner_(owner), function_(function) {}
241

242
// Executes this method with `stack` as the arguments (self is prepended
// internally). Reports enter/exit/failure to the module observer, installs
// mobile debug info for the duration of the run, and — when debug-handle
// symbolication is compiled in — augments exceptions with source-level
// context before rethrowing them.
void Method::run(Stack& stack) const {
  auto observer = torch::observerConfig().getModuleObserver();
  // Random key correlates the enter/exit/fail observer callbacks of one run.
  // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand)
  auto instance_key = std::rand();
  /* if the metadata dict doesn't contain "model_name", copy the metadata and
  set the value of "model_name" as name() */
  std::unordered_map<std::string, std::string> copied_metadata =
      owner_->getMetadata();

  if (observer) {
    observer->onEnterRunMethod(instance_key);
  }

  auto debug_info = std::make_shared<MobileDebugInfo>();
  std::string name = copied_metadata["model_name"];
  debug_info->setModelName(name);
  debug_info->setMethodName(function_->name());
  at::DebugInfoGuard guard(at::DebugInfoKind::MOBILE_RUNTIME_INFO, debug_info);

  std::string error_message;
  // Runs on every exit path unless released after a successful run below;
  // notifies the observer of failure with the best message available.
  auto failure_guard = c10::make_scope_exit([&]() {
    if (!observer) {
      return;
    }

#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
    // If the catch blocks below did not populate a message (e.g. a non-c10
    // exception), symbolicate the last-known debug handles instead.
    if (error_message.empty()) {
      error_message = owner_->getDebugTable().getSourceDebugString(
          function_->getExceptionDebugHandles(), getTopModuleTypeName(*owner_));
    }
#endif

    observer->onFailRunMethod(
        copied_metadata,
        function_->name(),
        instance_key,
        error_message.empty() ? "Unknown exception" : error_message.c_str());
  });

  try {
    stack.insert(stack.begin(), owner_->_ivalue()); // self
    function_->run(stack);
    if (observer) {
      observer->onExitRunMethod(
          copied_metadata, function_->name(), instance_key);
    }
    failure_guard.release();
    // This exception must be caught first as it derived from c10::Error
  } catch (c10::BackendRuntimeException& e) {
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
    for (auto handle : function_->getExceptionDebugHandles()) {
      e.pushDebugHandle(handle);
    }
    // symbolicate all handles
    auto debug_string = owner_->getDebugTable().getSourceDebugString(
        e.getDebugHandles(), getTopModuleTypeName(*owner_));
    e.add_context(debug_string);
#endif
    error_message = e.what();
    TORCH_RETHROW(e);
  } catch (c10::Error& error) {
#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
    auto debug_string = owner_->getDebugTable().getSourceDebugString(
        function_->getExceptionDebugHandles(), getTopModuleTypeName(*owner_));
    error.add_context(debug_string);
#endif
    error_message = error.what();
    TORCH_RETHROW(error);
  }
}
312

313
// Convenience call operator: runs the method on `stack` and returns the
// front stack entry as the result.
c10::IValue Method::operator()(std::vector<c10::IValue> stack) const {
  run(stack);
  TORCH_INTERNAL_ASSERT(!stack.empty());
  return stack.front();
}
318

319
// Type-printer callback used by annotation_str below: yields the qualified
// name for named types and the fallback annotation for dynamic types;
// nullopt lets the caller fall back to default printing.
static c10::optional<std::string> print_type(const c10::Type& t) {
  if (auto named = t.cast<c10::NamedType>()) {
    if (named->name()) {
      return named->name().value().qualifiedName();
    }
  }
  const auto* dyn = t.castRaw<c10::DynamicType>();
  if (dyn != nullptr) {
    return dyn->fallback()->annotation_str();
  }
  return c10::nullopt;
}
329

330
// Aggregates static metadata about `module`: operator/bytecode versions,
// operator-name -> argument-count mapping, all referenced type names, and
// the qualified names of its functions.
TORCH_API ModuleInfo get_module_info(const mobile::Module& module) {
  ModuleInfo minfo;
  minfo.operator_version = module.min_operator_version();
  minfo.bytecode_version = module.bytecode_version();
  std::vector<std::string> type_name_list;
  for (const auto& func_ptr : module.compilation_unit().methods()) {
    const auto& function = *func_ptr;
    // Hoisted: get_code() was queried several times per loop iteration.
    const auto& code = function.get_code();
    for (const auto i : c10::irange(code.op_names_.size())) {
      const auto& op = code.op_names_[i];
      // op_names_ and operator_input_sizes_ are parallel arrays.
      minfo.opname_to_num_args[mobile::operator_str(op)] =
          code.operator_input_sizes_[i];
    }
    for (const c10::TypePtr& tp : code.types_) {
      type_name_list.push_back(tp->annotation_str(print_type));
    }
    minfo.function_names.insert(function.qualname().qualifiedName());
  }
  c10::TypeParser parser(type_name_list);
  parser.parseList();
  minfo.type_names = parser.getContainedTypes();
  return minfo;
}
352

353
} // namespace mobile
354
} // namespace jit
355
} // namespace torch
356

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.