dnl Copyright (c) 2019, 2020, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl Process this file with m4 aarch64_ad.m4 to generate instructions used in
dnl the architecture description (.ad) file.
// BEGIN This section of the file is automatically generated. Do not edit --------------
// This section is generated from aarch64_ad.m4
31
dnl upcase(s) -- expand to s with every a-z character mapped to A-Z.
define(`upcase', `translit(`$*', `a-z', `A-Z')')dnl
32
dnl downcase(s) -- expand to s with every A-Z character mapped to a-z.
define(`downcase', `translit(`$*', `A-Z', `a-z')')dnl
33
dnl ORL2I(mode) -- expand to `orL2I' when mode is I, to nothing otherwise.
dnl Appended to iRegI to select the iRegIorL2I operand class for int patterns.
define(`ORL2I', `ifelse($1,I,orL2I)')dnl
35
define(`BASE_SHIFT_INSN',
36
`// This pattern is automatically generated from aarch64_ad.m4
37
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
38
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
39
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
41
match(Set dst ($2$1 src1 (ifelse($4, RotateRight, $4, $4$1) src2 src3)));
43
ins_cost(1.9 * INSN_COST);
44
format %{ "$3 $dst, $src1, $src2, $5 $src3" %}
47
__ $3(as_Register($dst$$reg),
48
as_Register($src1$$reg),
49
as_Register($src2$$reg),
51
$src3$$constant & ifelse($1,I,0x1f,0x3f));
54
ins_pipe(ialu_reg_reg_shift);
57
define(`NEG_SHIFT_INSN',
58
`// This pattern is automatically generated from aarch64_ad.m4
59
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
60
instruct Neg$1_reg_$2_reg(iReg$1NoSp dst,
61
imm$1`0' zero, iReg$1`'ORL2I($1) src1, immI src2) %{
62
match(Set dst (Sub$1 zero ($2$1 src1 src2)));
64
ins_cost(1.9 * INSN_COST);
65
format %{ "ifelse($1, I, negw, neg) $dst, $src1, $3 $src2" %}
68
__ ifelse($1, I, negw, neg)(as_Register($dst$$reg), as_Register($src1$$reg),
69
Assembler::$3, $src2$$constant & ifelse($1,I,0x1f,0x3f));
72
ins_pipe(ialu_reg_shift);
75
define(`BASE_INVERTED_INSN',
76
`// This pattern is automatically generated from aarch64_ad.m4
77
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
78
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
79
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1) %{
80
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
81
dnl into this canonical form.
83
match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
84
match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
86
format %{ "$3 $dst, $src1, $src2" %}
89
__ $3(as_Register($dst$$reg),
90
as_Register($src1$$reg),
91
as_Register($src2$$reg),
95
ins_pipe(ialu_reg_reg);
98
define(`INVERTED_SHIFT_INSN',
99
`// This pattern is automatically generated from aarch64_ad.m4
100
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
101
// val ifelse($2, Xor, ^, $2, And, &, |) (-1 ^ (val ifelse($4, RShift, >>, $4, LShift, <<, $4, URShift, >>>, ror) shift)) ==> $3
102
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
103
iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
104
immI src3, imm$1_M1 src4) %{
105
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
106
dnl into this canonical form.
108
match(Set dst ($2$1 src4 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src1)));,
109
match(Set dst ($2$1 src1 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src4)));)
110
ins_cost(1.9 * INSN_COST);
111
format %{ "$3 $dst, $src1, $src2, $5 $src3" %}
114
__ $3(as_Register($dst$$reg),
115
as_Register($src1$$reg),
116
as_Register($src2$$reg),
118
$src3$$constant & ifelse($1,I,0x1f,0x3f));
121
ins_pipe(ialu_reg_reg_shift);
125
`// This pattern is automatically generated from aarch64_ad.m4
126
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
127
instruct reg$1_not_reg(iReg$1NoSp dst,
128
iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
130
match(Set dst (Xor$1 src1 m1));
132
format %{ "$2 $dst, $src1, zr" %}
135
__ $2(as_Register($dst$$reg),
136
as_Register($src1$$reg),
145
dnl BOTH_SHIFT_INSNS(op node, insn, shift node, shift type) -- emit both the
dnl int (w-suffixed mnemonic) and long variants of a shifted-operand pattern.
dnl `andr' is special-cased because its 32-bit mnemonic is `andw', not `andrw'.
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
149
dnl BOTH_NEG_SHIFT_INSNS(mode) -- emit negate-of-shifted-value patterns for
dnl all three shift kinds (LSR, ASR, LSL) in the given mode (I or L).
define(`BOTH_NEG_SHIFT_INSNS',
`NEG_SHIFT_INSN($1, URShift, LSR)
NEG_SHIFT_INSN($1, RShift, ASR)
NEG_SHIFT_INSN($1, LShift, LSL)')dnl
154
dnl BOTH_INVERTED_INSNS(op node, insn, ...) -- emit the int (w-suffixed) and
dnl long variants of an inverted-second-operand pattern (bic/orn/eon).
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
158
dnl BOTH_INVERTED_SHIFT_INSNS(op node, insn, shift node, shift type) -- emit
dnl the int (w-suffixed) and long variants of an inverted-shifted-operand pattern.
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
162
dnl ALL_SHIFT_KINDS_WITHOUT_ROR(op node, insn) -- emit shifted-operand
dnl patterns for LSR, ASR and LSL (used by ops with no rotate form: add/sub).
define(`ALL_SHIFT_KINDS_WITHOUT_ROR',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
167
dnl ALL_SHIFT_KINDS(op node, insn) -- emit shifted-operand patterns for all
dnl four shift kinds: LSR, ASR, LSL plus ROR (logical ops support rotate).
define(`ALL_SHIFT_KINDS',
`ALL_SHIFT_KINDS_WITHOUT_ROR($1, $2)
BOTH_SHIFT_INSNS($1, $2, RotateRight, ROR)')dnl
171
dnl ALL_INVERTED_SHIFT_KINDS(op node, insn) -- emit inverted-shifted-operand
dnl patterns (bic/orn/eon with a shifted register) for all four shift kinds.
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RotateRight, ROR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
179
dnl Expand all negate-of-shift, inverted-operand, and shifted-operand
dnl patterns for the logical and arithmetic ops.
BOTH_NEG_SHIFT_INSNS(I)
BOTH_NEG_SHIFT_INSNS(L)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS_WITHOUT_ROR(Add, add)
ALL_SHIFT_KINDS_WITHOUT_ROR(Sub, sub)
193
dnl EXTEND(mode, rshift_op, src, lshift_count, rshift_count) -- match tree
dnl for a sign/zero extension written as shift-left-then-shift-right.
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')dnl
195
define(`BFM_INSN',`// This pattern is automatically generated from aarch64_ad.m4
196
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
198
// Shift Left followed by Shift Right.
199
// This idiom is used by the compiler for the i2b bytecode etc.
200
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
202
match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
203
ins_cost(INSN_COST * 2);
204
format %{ "$4 $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
206
int lshift = $lshift_count$$constant & $2;
207
int rshift = $rshift_count$$constant & $2;
209
int r = (rshift - lshift) & $2;
210
__ $4(as_Register($dst$$reg),
211
as_Register($src$$reg),
215
ins_pipe(ialu_reg_shift);
218
dnl Expand the shift-left-then-shift-right (bitfield move) patterns:
dnl signed (sbfm) and unsigned (ubfm), each for long and int modes.
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
223
// Bitfield extract with shift & mask
225
`// This pattern is automatically generated from aarch64_ad.m4
226
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
227
instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
229
match(Set dst (And$1 ($2$1 src rshift) mask));
230
// Make sure we are not going to exceed what $3 can do.
231
predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));
234
format %{ "$3 $dst, $src, $rshift, $mask" %}
236
int rshift = $rshift$$constant & $4;
237
intptr_t mask = $mask$$constant;
238
int width = exact_log2$6(mask+1);
239
__ $3(as_Register($dst$$reg),
240
as_Register($src$$reg), rshift, width);
242
ins_pipe(ialu_reg_shift);
245
dnl Expand the shift-and-mask bitfield-extract patterns for int and long.
BFX_INSN(I, URShift, ubfxw, 31, int)
BFX_INSN(L, URShift, ubfx, 63, long, _long)
248
// This pattern is automatically generated from aarch64_ad.m4
249
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
251
// We can use ubfx when extending an And with a mask when we know mask
252
// is positive. We know that because immI_bitmask guarantees it.
253
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
255
match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
256
// Make sure we are not going to exceed what ubfxw can do.
257
predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
259
ins_cost(INSN_COST * 2);
260
format %{ "ubfx $dst, $src, $rshift, $mask" %}
262
int rshift = $rshift$$constant & 31;
263
intptr_t mask = $mask$$constant;
264
int width = exact_log2(mask+1);
265
__ ubfx(as_Register($dst$$reg),
266
as_Register($src$$reg), rshift, width);
268
ins_pipe(ialu_reg_shift);
271
define(`UBFIZ_INSN', `// This pattern is automatically generated from aarch64_ad.m4
272
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
274
// We can use ubfiz when masking by a positive number and then left shifting the result.
275
// We know that the mask is positive because imm$1_bitmask guarantees it.
276
instruct $3$1$8(iReg$2NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, $7 mask)
279
match(Set dst (LShift$1 (And$1 src mask) lshift));,
280
match(Set dst ($8 (LShift$1 (And$1 src mask) lshift)));)
282
predicate(($6(n->in(1)->in(2)->get_$5() + 1) + (n->in(2)->get_int() & $4)) <= ($4 + 1));,
283
predicate(($6(n->in(1)->in(1)->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= 31);)
286
format %{ "$3 $dst, $src, $lshift, $mask" %}
288
int lshift = $lshift$$constant & $4;
289
intptr_t mask = $mask$$constant;
290
int width = $6(mask+1);
291
__ $3(as_Register($dst$$reg),
292
as_Register($src$$reg), lshift, width);
294
ins_pipe(ialu_reg_shift);
297
dnl Expand the mask-then-left-shift (ubfiz) patterns, including the
dnl variants fused with an int<->long conversion of the result.
UBFIZ_INSN(I, I, ubfizw, 31, int, exact_log2, immI_bitmask)
UBFIZ_INSN(L, L, ubfiz, 63, long, exact_log2_long, immL_bitmask)
UBFIZ_INSN(I, L, ubfizw, 31, int, exact_log2, immI_bitmask, ConvI2L)
UBFIZ_INSN(L, I, ubfiz, 63, long, exact_log2_long, immL_positive_bitmaskI, ConvL2I)
302
define(`BFX1_INSN', `// This pattern is automatically generated from aarch64_ad.m4
303
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
305
// If there is a convert $1 to $2 block between and And$1 and a LShift$2, we can also match ubfiz
306
instruct ubfiz$1Conv$3$9(iReg$2NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, $8 mask)
308
match(Set dst (LShift$2 (Conv$3 (And$1 src mask)) lshift));
309
predicate(($4(n->in(1)->in(1)->in(2)->$5() + 1) + (n->in(2)->get_int() & $6)) <= $7);
312
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
314
int lshift = $lshift$$constant & $6;
315
intptr_t mask = $mask$$constant;
316
int width = exact_log2(mask+1);
317
__ ubfiz(as_Register($dst$$reg),
318
as_Register($src$$reg), lshift, width);
320
ins_pipe(ialu_reg_shift);
323
dnl Expand the ubfiz-through-conversion patterns (And then Conv then LShift).
BFX1_INSN(I, L, I2L, exact_log2, get_int, 63, (63 + 1), immI_bitmask)
BFX1_INSN(L, I, L2I, exact_log2_long, get_long, 31, 31, immL_positive_bitmaskI, x)
325
// This pattern is automatically generated from aarch64_ad.m4
326
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
328
// Can skip int2long conversions after AND with small bitmask
329
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
331
match(Set dst (ConvI2L (AndI src msk)));
333
format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
335
__ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
337
ins_pipe(ialu_reg_shift);
342
define(`EXTRACT_INSN',`
343
// This pattern is automatically generated from aarch64_ad.m4
344
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
345
instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
347
match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
348
predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));
351
format %{ "extr $dst, $src1, $src2, #$rshift" %}
354
__ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
355
$rshift$$constant & $2);
357
ins_pipe(ialu_reg_reg_extr);
360
dnl Expand the extr (double-register extract) patterns; both Or and Add
dnl of complementary shifts are recognized as an extract/rotate idiom.
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
364
define(ROTATE_INSN, `// This pattern is automatically generated from aarch64_ad.m4
365
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
366
instruct $2$1_$3(iReg$1NoSp dst, iReg$1 src, ifelse($3, reg, iReg, imm)I shift)
368
match(Set dst (ifelse($2, ror, RotateRight, RotateLeft) src shift));
371
format %{ "ifelse($2, ror, ror, rol) $dst, $src, $shift" %}
373
ifelse($2, rol, ins_encode %{
374
__ subw(rscratch1, zr, as_Register($shift$$reg));, ins_encode %{)
376
ifelse($1, I, extrw, extr)(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
377
$shift$$constant & ifelse($1, I, 0x1f, 0x3f)),
378
ifelse($1, I, rorvw, rorv)(as_Register($dst$$reg), as_Register($src$$reg), ifelse($2, rol, rscratch1, as_Register($shift$$reg))));
380
ins_pipe(ialu_reg_reg_vshift);
383
dnl Expand the rotate patterns: ror by immediate and by register, and rol
dnl by register (synthesized via a negated shift count).
ROTATE_INSN(I, ror, imm)
ROTATE_INSN(L, ror, imm)
ROTATE_INSN(I, ror, reg)
ROTATE_INSN(L, ror, reg)
ROTATE_INSN(I, rol, reg)
ROTATE_INSN(L, rol, reg)
dnl rol_imm has been transformed to ror_imm during GVN.
391
// Add/subtract (extended)
392
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize
393
define(`ADD_SUB_CONV', `
394
// This pattern is automatically generated from aarch64_ad.m4
395
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
396
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
398
match(Set dst ($3$2 src1 (ConvI2L src2)));
400
format %{ "$4 $dst, $src1, $src2, $5" %}
403
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
404
as_Register($src2$$reg), ext::$5);
406
ins_pipe(ialu_reg_reg);
408
dnl Expand add/sub with a sign-extended (ConvI2L) second operand.
ADD_SUB_CONV(I,L,Add,add,sxtw)
ADD_SUB_CONV(I,L,Sub,sub,sxtw)
411
define(`ADD_SUB_EXTENDED', `
412
// This pattern is automatically generated from aarch64_ad.m4
413
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
414
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
416
match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
418
format %{ "$5 $dst, $src1, $src2, $6" %}
421
__ $5(as_Register($dst$$reg), as_Register($src1$$reg),
422
as_Register($src2$$reg), ext::$6);
424
ins_pipe(ialu_reg_reg);
426
dnl Expand add with an extended (sxtb/sxth/sxtw/uxtb) second operand,
dnl for both 32-bit and 64-bit modes.
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
434
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
435
define(`ADD_SUB_ZERO_EXTEND', `// This pattern is automatically generated from aarch64_ad.m4
436
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
437
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
439
match(Set dst ($3$1 src1 (And$1 src2 mask)));
441
format %{ "$4 $dst, $src1, $src2, $5" %}
444
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
445
as_Register($src2$$reg), ext::$5);
447
ins_pipe(ialu_reg_reg);
451
dnl Expand add/sub where the second operand is zero-extended via an
dnl And with a byte/half/word mask (uxtb/uxth/uxtw).
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)

ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
463
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
464
define(`ADD_SUB_EXTENDED_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
465
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
466
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
468
match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
469
ins_cost(1.9 * INSN_COST);
470
format %{ "$5 $dst, $src1, $src2, $6 #lshift2" %}
473
__ $5(as_Register($dst$$reg), as_Register($src1$$reg),
474
as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
476
ins_pipe(ialu_reg_reg_shift);
479
dnl Expand add/sub with a sign-extended-then-shifted second operand.
dnl                       $1 $2 $3  $4     $5  $6   $7
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)

ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)

ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)

ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
494
dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
495
define(`ADD_SUB_CONV_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
496
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
497
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
499
match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
500
ins_cost(1.9 * INSN_COST);
501
format %{ "$3 $dst, $src1, $src2, $4 #lshift" %}
504
__ $3(as_Register($dst$$reg), as_Register($src1$$reg),
505
as_Register($src2$$reg), ext::$4, ($lshift$$constant));
507
ins_pipe(ialu_reg_reg_shift);
510
dnl Expand add/sub with a sign-extended-then-shifted ConvI2L operand.
ADD_SUB_CONV_SHIFT(L,Add,add,sxtw)
ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw)
513
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, ext type)
514
define(`ADD_SUB_ZERO_EXTEND_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
515
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
516
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
518
match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
519
ins_cost(1.9 * INSN_COST);
520
format %{ "$4 $dst, $src1, $src2, $5 #lshift" %}
523
__ $4(as_Register($dst$$reg), as_Register($src1$$reg),
524
as_Register($src2$$reg), ext::$5, ($lshift$$constant));
526
ins_pipe(ialu_reg_reg_shift);
530
dnl Expand add/sub with a zero-extended-then-shifted (And + LShift) operand.
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)

ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)

ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)

ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
544
define(`CMOV_INSN', `// This pattern is automatically generated from aarch64_ad.m4
545
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
546
instruct cmov$1_reg_reg_$3(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, rFlagsReg cr)
548
effect(DEF dst, USE src1, USE src2, USE cr);
549
ins_cost(INSN_COST * 2);
550
format %{ "$2 $dst, $src1, $src2 $3\t" %}
553
__ $2($dst$$Register,
556
Assembler::upcase($3));
558
ins_pipe(icond_reg_reg);
561
dnl Expand the register-register conditional-select patterns.
CMOV_INSN(I, cselw, lt)
CMOV_INSN(I, cselw, gt)
564
define(`CMOV_DRAW_INSN', `// This pattern is automatically generated from aarch64_ad.m4
565
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
566
instruct cmov$1_reg_imm$2_$4(iReg$1NoSp dst, iReg$1 src1, rFlagsReg cr)
568
effect(DEF dst, USE src1, USE cr);
569
ins_cost(INSN_COST * 2);
570
format %{ "$3 $dst, $src1, zr $4\t" %}
573
__ $3($dst$$Register,
576
Assembler::upcase($4));
581
dnl Expand conditional selects against the constants 0, 1, and -1
dnl (csel with zr, csinc, csinv).
CMOV_DRAW_INSN(I, 0, cselw, lt)
CMOV_DRAW_INSN(I, 0, cselw, gt)
CMOV_DRAW_INSN(I, 1, csincw, le)
CMOV_DRAW_INSN(I, 1, csincw, gt)
CMOV_DRAW_INSN(I, M1, csinvw, lt)
CMOV_DRAW_INSN(I, M1, csinvw, ge)
588
define(`MINMAX_DRAW_INSN', `// This pattern is automatically generated from aarch64_ad.m4
589
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
591
instruct downcase($1)$2_reg_imm$4(iReg$2NoSp dst, iReg$2`'ORL2I($2) src, imm$2$3$4 imm),
592
instruct downcase($1)$2_imm$4_reg(iReg$2NoSp dst, imm$2$3$4 imm, iReg$2`'ORL2I($2) src))
595
match(Set dst ($1$2 src imm));,
596
match(Set dst ($1$2 imm src));)
597
ins_cost(INSN_COST * 3);
600
comp$2_reg_imm0(cr, src);
601
cmov$2_reg_imm$4_$5(dst, src, cr);
605
dnl Expand min/max against the immediates 0, 1 and -1, with the `rev'
dnl variants matching the commuted operand order.
MINMAX_DRAW_INSN(Min, I, , 0, lt)
MINMAX_DRAW_INSN(Min, I, , 0, lt, rev)
MINMAX_DRAW_INSN(Min, I, _, 1, le)
MINMAX_DRAW_INSN(Min, I, _, 1, le, rev)
MINMAX_DRAW_INSN(Min, I, _, M1, lt)
MINMAX_DRAW_INSN(Min, I, _, M1, lt, rev)

MINMAX_DRAW_INSN(Max, I, , 0, gt)
MINMAX_DRAW_INSN(Max, I, , 0, gt, rev)
MINMAX_DRAW_INSN(Max, I, _, 1, gt)
MINMAX_DRAW_INSN(Max, I, _, 1, gt, rev)
MINMAX_DRAW_INSN(Max, I, _, M1, ge)
MINMAX_DRAW_INSN(Max, I, _, M1, ge, rev)
619
define(`BITS_REVERSE', `// This pattern is automatically generated from aarch64_ad.m4
620
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
621
instruct bits_reverse_$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src)
623
match(Set dst (Reverse$1 src));
625
format %{ "$2 $dst, $src" %}
627
__ $2($dst$$Register, $src$$Register);
632
dnl Expand the bit-reverse pattern for int mode (rbitw).
BITS_REVERSE(I, rbitw)