// pytorch
#include <torch/csrc/jit/backends/backend_debug_handler.h>
#include <stack>
namespace torch {
namespace jit {
8std::atomic<DebugHandleType> BackendDebugInfoRecorder::unique_debug_handle_{0};9
10int64_t BackendDebugInfoRecorder::getNextDebugHandle(const Node* node) {11InlinedCallStackPtr cs_ptr;12if (node->callstack().has_value()) {13cs_ptr = node->callstack().value();14} else {15cs_ptr = c10::intrusive_ptr<InlinedCallStack>();16}17DebugHandleType debug_handle = unique_debug_handle_;18const SourceRange& range = node->sourceRange();19handles_to_inlined_callstack_ptrs_[debug_handle] =20std::make_tuple(range, node->kind().toQualString(), cs_ptr);21// This increment is with seq memory order.22// Not trying to perf optimizing this for now.23unique_debug_handle_++;24return debug_handle;25}
27BackendDebugInfoMapType BackendDebugInfoRecorder::stopRecording() {28// Note that this is return by copy and since29// InlinedCallStackPtrs are intrusive ptr it will result in30// bump of refcount. Not performant, but this is not intented31// to be used in perf critical path.32// Alternate might be do move but that will be destructive33return handles_to_inlined_callstack_ptrs_;34}
} // namespace jit
} // namespace torch