1
#include <torch/csrc/utils/tensor_list.h>
3
#include <c10/util/irange.h>
4
#include <pybind11/pybind11.h>
5
#include <torch/csrc/Exceptions.h>
6
#include <torch/csrc/autograd/python_variable.h>
7
#include <torch/csrc/utils/pybind.h>
8
#include <torch/csrc/utils/python_scalars.h>
15
// Recursively converts a strided CPU buffer into nested Python lists.
//
// `data` points at the element for the current (partial) index; `sizes` /
// `strides` describe the full tensor; `dim` is the dimension currently being
// materialized. At `dim == ndim` a single scalar is boxed via load_scalar.
// Strides are measured in elements, so byte advancement multiplies by
// `elementSize`.
//
// Returns a new reference to a PyList (or a scalar PyObject at the base
// case). Throws python_error if any CPython allocation fails.
//
// NOTE(review): this body was reconstructed from a corrupted extraction of
// the block (guard branches and the parameter list were dropped by the
// extraction) — confirm against the canonical upstream file.
static PyObject* recursive_to_list(
    const char* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    int64_t dim,
    ScalarType scalarType,
    size_t elementSize) {
  int64_t ndim = static_cast<int64_t>(sizes.size());
  // Base case: we have a full index — box the single element as a Python
  // scalar.
  if (dim == ndim) {
    return torch::utils::load_scalar(data, scalarType);
  }
  auto n = sizes[dim];
  auto list = THPObjectPtr(PyList_New(n));
  if (!list)
    throw python_error();
  for (const auto i : c10::irange(n)) {
    PyObject* obj = recursive_to_list(
        data, sizes, strides, dim + 1, scalarType, elementSize);
    if (!obj)
      throw python_error();
    // PyList_SET_ITEM steals the reference to obj, so no decref is needed.
    PyList_SET_ITEM(list.get(), i, obj);
    // A zero stride (e.g. expanded tensors, or a size-0/scalar edge case)
    // legitimately leaves `data` in place; a null `data` is only acceptable
    // when we never advance it.
    auto advance_data_ptr = strides[dim] * elementSize;
    TORCH_INTERNAL_ASSERT(data || (advance_data_ptr == 0));
    data += advance_data_ptr;
  }
  return list.release();
}
43
// Implements Tensor.tolist(): converts `tensor` to (possibly nested) Python
// lists of Python scalars.
//
// Rejects tensor subclasses that use Python dispatch, resolves conjugate /
// negative views so the raw memory read below is correct, moves non-CPU data
// to CPU (GIL released during the potentially blocking copy), then walks the
// buffer with recursive_to_list.
//
// Returns a new reference. Throws (c10::Error / python_error) on failure.
//
// NOTE(review): this body was reconstructed from a corrupted extraction of
// the block (several lines, including the closing brace, were dropped by the
// extraction) — confirm against the canonical upstream file.
PyObject* tensor_to_list(const Tensor& tensor) {
  {
    // Scoped so the temporary Python wrapper is released before the
    // conversion work below.
    py::object pytensor =
        py::reinterpret_steal<py::object>(THPVariable_Wrap(tensor));
    TORCH_CHECK(
        !tensor.unsafeGetTensorImpl()->is_python_dispatch(),
        ".tolist() is not supported for tensor subclasses, got ",
        Py_TYPE(pytensor.ptr())->tp_name);
  }
  Tensor data = tensor.resolve_conj().resolve_neg();
  if (!data.device().is_cpu()) {
    // Device-to-CPU copy can block on device synchronization; release the
    // GIL so other Python threads can run meanwhile.
    pybind11::gil_scoped_release no_gil;
    data = data.toBackend(Backend::CPU);
  }
  TORCH_CHECK(
      tensor.numel() == 0 || data.const_data_ptr(),
      "tolist() shouldn't be called on a tensor with unallocated storage");
  return recursive_to_list(
      (const char*)data.const_data_ptr(),
      data.sizes(),
      data.strides(),
      0,
      data.scalar_type(),
      // An elementSize of 0 keeps the (possibly null) data pointer fixed for
      // empty tensors, satisfying the assert in recursive_to_list.
      tensor.numel() == 0 ? 0 : data.dtype().itemsize());
}