// pytorch
#include "caffe2/contrib/fakelowp/quant_lut_fp16_fake_op.h"

3namespace caffe2 {
4
5REGISTER_CPU_OPERATOR(TanhQuantFakeFp16NNPI, TanhInt8QuantizeNNPIOp);
6
7OPERATOR_SCHEMA(TanhQuantFakeFp16NNPI)
8.Arg("Y_scale", "Output tensor quantization scale")
9.Arg("Y_zero_point", "Output tensor quantization offset")
10.NumInputs(1)
11.NumOutputs(1)
12.SetDoc(R"DOC(
13Apply TanH and convert the result to Int8.
14<details>
15</details>
16)DOC")
17.Input(0, "X", "Float Tensor X.")
18.Output(0, "Y", "Int8 Tensor Y.");
19
20} // namespace caffe2
21