// Note(jiayq): the import_array function is done inside
// caffe2_python.cc. Read
// http://docs.scipy.org/doc/numpy-1.10.1/reference/c-api.array.html#miscellaneous
#include "pybind_state.h"

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <caffe2/ideep/ideep_utils.h>
#include "caffe2/ideep/operators/operator_fallback_ideep.h"
18
USE_IDEEP_DEF_ALIASES();
23
REGISTER_IDEEP_OPERATOR(Python, IDEEPFallbackOp<PythonOp<CPUContext, false>>);
25
REGISTER_BLOB_FETCHER((TypeMeta::Id<itensor>()), IDeepFetcher);
26
REGISTER_BLOB_FEEDER(IDEEP, IDeepFeeder);
28
class IDeepFetcher : public BlobFetcherBase {
29
TypeMeta type_transform(const itensor& atensor) {
30
switch (atensor.get_data_type()) {
31
case itensor::data_type::f32:
32
return TypeMeta::Make<float>();
33
case itensor::data_type::s32:
34
return TypeMeta::Make<int>();
35
case itensor::data_type::s8:
36
return TypeMeta::Make<int8_t>();
37
case itensor::data_type::u8:
38
return TypeMeta::Make<uint8_t>();
40
// Should we throw exception?
46
pybind11::object Fetch(const Blob& blob) override {
48
return FetchTensor(blob.Get<itensor>(), true).obj;
49
} catch (ideep::error& e) {
50
LOG(ERROR) << "IDEEP error: " << e.message;
55
FetchedBlob FetchTensor(const itensor& atensor, bool force_copy) {
59
(atensor.ndims() != 0) &&
60
(atensor.get_nelems() == 0 || atensor.get_data_handle() != nullptr),
61
"Trying to fetch uninitialized tensor");
62
// NOTE: Only support float so far.
63
const int numpy_type = NPY_FLOAT;
66
"Unsupported ideep memory data type? This usually should not happen "
67
"since ideep memory usually only do float and double.");
69
bool need_reorder = atensor.need_reorder();
70
if (atensor.get_data_type() == idtype::f32 && !atensor.has_scale()) {
71
// For FP32 path, only support NCHW format input, so if atensor
72
// has NHWC format, we need reorder it to NCHW format.
73
dims = atensor.get_dims();
74
need_reorder = need_reorder || atensor.get_desc().is_nhwc();
76
dims = atensor.get_public_format_dims();
78
std::vector<npy_intp> npy_dims(dims.begin(), dims.end());
80
result.copied = force_copy || need_reorder;
81
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
84
result.obj = py::reinterpret_steal<py::object>(
85
PyArray_SimpleNew(atensor.ndims(), npy_dims.data(), numpy_type));
86
outPtr = static_cast<void*>(
87
PyArray_DATA(reinterpret_cast<PyArrayObject*>(result.obj.ptr())));
89
outPtr = atensor.get_data_handle();
90
result.obj = py::reinterpret_steal<py::object>(PyArray_SimpleNewFromData(
91
atensor.ndims(), npy_dims.data(), numpy_type, outPtr));
94
if (numpy_type == NPY_OBJECT) {
95
CAFFE_THROW("We don't support strings.");
99
if (atensor.get_data_type() == idtype::f32 && !atensor.has_scale()) {
100
itensor temp_ten(atensor.get_desc().to_default_format(), outPtr);
101
atensor.reorder_to(temp_ten);
103
atensor.to_public(outPtr);
109
CAFFE_THROW("Caffe2 was compiled without NumPy support.");
114
class IDeepFeeder : public BlobFeederBase {
115
itensor::data_type type_transform(const TypeMeta meta) {
116
if (meta == TypeMeta::Make<float>())
117
return itensor::data_type::f32;
118
else if (meta == TypeMeta::Make<int>())
119
return itensor::data_type::s32;
120
else if (meta == TypeMeta::Make<int8_t>())
121
return itensor::data_type::s8;
122
else if (meta == TypeMeta::Make<uint8_t>())
123
return itensor::data_type::u8;
125
return itensor::data_type::undef;
130
const DeviceOption& option,
131
PyArrayObject* original_array,
134
PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
135
auto g = MakeGuard([&]() { Py_XDECREF(array); });
136
const auto npy_type = PyArray_TYPE(array);
137
const TypeMeta meta = NumpyTypeToCaffe(npy_type);
140
ScalarType::Undefined,
141
"This numpy data type is not supported: ",
145
int ndim = PyArray_NDIM(array);
146
npy_intp* npy_dims = PyArray_DIMS(array);
149
for (int i = 0; i < ndim; i++) {
150
adims.push_back(static_cast<itensor::dims::value_type>(npy_dims[i]));
156
CAFFE_THROW("IDeep doesn't support string");
159
auto type = type_transform(meta);
160
if (tensor->get_dims() != adims || type != tensor->get_data_type()) {
161
tensor->resize(adims, type);
163
tensor->feed_from(adims, type, static_cast<void*>(PyArray_DATA(array)));
166
CAFFE_THROW("Caffe2 was compiled without NumPy support.");
170
bool ZeroDim(PyArrayObject* array) {
172
int ndim = PyArray_NDIM(array);
175
CAFFE_THROW("Caffe2 was compiled without NumPy support.");
180
const DeviceOption& option,
181
PyArrayObject* original_array,
183
bool in_place) override {
186
PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
187
auto g = MakeGuard([&]() { Py_XDECREF(array); });
189
const auto npy_type = PyArray_TYPE(array);
190
const TypeMeta meta = NumpyTypeToCaffe(npy_type);
192
// TODO: if necessary, use dispatcher.
193
if ((in_place && blob->IsType<itensor>()) ||
194
(meta.Match<float>() && !ZeroDim(original_array))) {
195
FeedTensor(option, original_array, blob->GetMutable<itensor>());
197
DeviceOption cpu_option(option);
198
cpu_option.set_device_type(DeviceTypeProto::PROTO_CPU);
199
TensorFeeder<CPUContext> cpu_tensor_feeder;
201
cpu_tensor_feeder.FeedTensor(
204
BlobGetMutableTensor(blob, OptionToDevice(cpu_option).type()),
207
blob->Reset<Tensor>(new Tensor(
208
cpu_tensor_feeder.FeedTensor(cpu_option, original_array)));
211
} catch (ideep::error& e) {
212
LOG(ERROR) << "IDEEP error: " << e.message;
216
CAFFE_THROW("Caffe2 was compiled without NumPy support.");