# Owner(s): ["module: onnx"]

"""Test the support on onnxscript in PyTorch-ONNX converter with onnxruntime."""

from typing import List

import onnxscript
from onnxscript.onnx_types import FLOAT

import onnx_test_common
import torch
from torch.onnx._internal import jit_utils
from torch.testing._internal import common_utils
class TestONNXScriptRuntime(onnx_test_common._TestONNXRuntime):
    """Exercise onnxscript-authored custom symbolic functions end-to-end
    through the TorchScript ONNX exporter and onnxruntime."""

    # 1. local function is supported after opset 15
    # 2. onnx-script requires users to determine opset in local function
    opset_version = 15

    def test_selu_from_onnxscript_example(self):
        # SELU expressed as an onnxscript local function, registered as the
        # symbolic for aten::selu and round-tripped through onnxruntime.
        x = torch.randn(1, 2, 3, 4, requires_grad=True)
        model = torch.nn.SELU()

        from onnxscript.onnx_opset import opset15 as op

        custom_opset = onnxscript.values.Opset(domain="onnx-script", version=1)

        @onnxscript.script(custom_opset)
        def Selu(X):
            # default value is not supported by onnxscript
            alpha = 1.67326  # auto wrapped as Constants
            gamma = 1.0507
            alphaX = op.CastLike(alpha, X)
            gammaX = op.CastLike(gamma, X)
            neg = gammaX * (alphaX * op.Exp(X) - alphaX)
            pos = gammaX * X
            zero = op.CastLike(0, X)
            return op.Where(X <= zero, neg, pos)

        def custom_selu(g: jit_utils.GraphContext, X):
            # setType keeps the exporter's shape/type inference intact.
            return g.onnxscript_op(Selu, X).setType(X.type())

        torch.onnx.register_custom_op_symbolic(
            symbolic_name="aten::selu",
            symbolic_fn=custom_selu,
            opset_version=self.opset_version,
        )
        self.run_test(model, x)

    def test_layer_norm(self):
        # LayerNorm expressed as an onnxscript local function; the model mixes
        # several LayerNorm instances with CELU and Dropout to make sure the
        # custom symbolic composes with ordinary exporter paths.
        x = torch.randn(2, 3)
        y = torch.randn(2, 3)
        z = torch.randn(2, 3)

        class N(torch.nn.Module):
            def __init__(self, prob):
                super().__init__()
                self.dropout = torch.nn.Dropout(prob)

            def forward(self, x):
                return self.dropout(x)

        class M(torch.nn.Module):
            def __init__(self, num_layers):
                super().__init__()
                self.num_layers = num_layers
                self.lns = torch.nn.ModuleList(
                    [torch.nn.LayerNorm(3, eps=i) for i in range(num_layers)]
                )
                self.celu1 = torch.nn.CELU(1.0)
                self.celu2 = torch.nn.CELU(2.0)
                self.dropout = N(0.5)

            def forward(self, x, y, z):
                res1 = self.celu1(x)
                res2 = self.celu2(y)
                for ln in self.lns:
                    z = ln(z)
                return res1 + res2, self.dropout(z)

        model = M(3)

        from onnxscript.onnx_opset import opset15 as op

        custom_opset = onnxscript.values.Opset(domain="onnxscript", version=1)

        @onnxscript.script(custom_opset)
        def layer_norm(
            X, axes: List[int], weight: FLOAT[...], bias: FLOAT[...], eps: float
        ):
            mean = op.ReduceMean(X, axes=axes)
            D = X - mean  # op.Sub(X, mean)
            DD = D * D  # op.Mul(D, D)
            var = op.ReduceMean(DD, axes=axes)
            vareps = var + eps  # op.Add(var, eps)
            stddev = op.Sqrt(vareps)
            invstddev = op.Reciprocal(stddev)
            normalized = D * invstddev  # op.Mul(D, invstddev)
            normalizedw = op.CastLike(
                normalized, weight
            )  # Type issue if missing this Op
            normalizedscaled = normalizedw * weight  # op.Mul(normalized, weight)
            return normalizedscaled + bias

        @torch.onnx.symbolic_helper.parse_args("v", "is", "v", "v", "f", "none")
        def custom_layer_norm(
            g, input, normalized_shape, weight, bias, eps, cudnn_enable
        ):
            # comprehension is not supported by onnxscript
            axes = [-i for i in range(len(normalized_shape), 0, -1)]
            return g.onnxscript_op(
                layer_norm, input, weight, bias, axes_i=axes, eps_f=eps
            ).setType(input.type())

        torch.onnx.register_custom_op_symbolic(
            symbolic_name="aten::layer_norm",
            symbolic_fn=custom_layer_norm,
            opset_version=self.opset_version,
        )
        self.run_test(model, (x, y, z))
# Standard PyTorch test entry point: dispatch to the shared test runner when
# this file is executed directly.
if __name__ == "__main__":
    common_utils.run_tests()