dnl aarch64_ad.m4
dnl Copyright (c) 2019, 2020, Red Hat Inc. All rights reserved.
dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
dnl
dnl This code is free software; you can redistribute it and/or modify it
dnl under the terms of the GNU General Public License version 2 only, as
dnl published by the Free Software Foundation.
dnl
dnl This code is distributed in the hope that it will be useful, but WITHOUT
dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl version 2 for more details (a copy is included in the LICENSE file that
dnl accompanied this code).
dnl
dnl You should have received a copy of the GNU General Public License version
dnl 2 along with this work; if not, write to the Free Software Foundation,
dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
dnl
dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
dnl
dnl Process this file with m4 aarch64_ad.m4 to generate instructions used in
dnl aarch64.ad:
dnl 1. the arithmetic
dnl 2. shift patterns
dnl
// BEGIN This section of the file is automatically generated. Do not edit --------------
// This section is generated from aarch64_ad.m4

dnl upcase/downcase: case-convert their argument via translit.
dnl ORL2I($1) expands to `orL2I' only for the I (int) mode, so int
dnl operand classes also accept L2I conversion results.
define(`upcase', `translit(`$*', `a-z', `A-Z')')dnl
define(`downcase', `translit(`$*', `A-Z', `a-z')')dnl
define(`ORL2I', `ifelse($1,I,orL2I)')dnl
dnl
dnl BASE_SHIFT_INSN(mode, op node, insn, shift node, shift type)
dnl Emits a rule matching `op dst, src1, (src2 shifted by imm)',
dnl e.g. BASE_SHIFT_INSN(L, And, andr, URShift, LSR).
define(`BASE_SHIFT_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3) %{
  match(Set dst ($2$1 src1 (ifelse($4, RotateRight, $4, $4$1) src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
')dnl
dnl NEG_SHIFT_INSN(mode, shift node, shift type)
dnl Emits a rule folding `0 - (src shifted by imm)' into a single
dnl neg/negw with a shifted-register operand.
define(`NEG_SHIFT_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct Neg$1_reg_$2_reg(iReg$1NoSp dst,
                              imm$1`0' zero, iReg$1`'ORL2I($1) src1, immI src2) %{
  match(Set dst (Sub$1 zero ($2$1 src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "ifelse($1, I, negw, neg)  $dst, $src1, $3 $src2" %}

  ins_encode %{
    __ ifelse($1, I, negw, neg)(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::$3, $src2$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_shift);
%}
')dnl
dnl BASE_INVERTED_INSN(mode, op node, insn)
dnl Emits a rule matching `op dst, src1, NOT src2' using the inverted
dnl register forms bic/orn/eon (second operand xor'ed with -1).
define(`BASE_INVERTED_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
    match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  ins_cost(INSN_COST);
  format %{ "$3  $dst, $src1, $src2" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
')dnl
dnl INVERTED_SHIFT_INSN(mode, op node, insn, shift node, shift type)
dnl Same as BASE_INVERTED_INSN but with the inverted operand also
dnl shifted/rotated by an immediate.
define(`INVERTED_SHIFT_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ifelse($2, Xor, ^, $2, And, &, |) (-1 ^ (val ifelse($4, RShift, >>, $4, LShift, <<, $4, URShift, >>>, ror) shift)) ==> $3
instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                         immI src3, imm$1_M1 src4) %{
dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
dnl into this canonical form.
  ifelse($2,Xor,
    match(Set dst ($2$1 src4 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src1)));,
    match(Set dst ($2$1 src1 (Xor$1(ifelse($4, RotateRight, $4, $4$1) src2 src3) src4)));)
  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $5 $src3" %}

  ins_encode %{
    __ $3(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::$5,
              $src3$$constant & ifelse($1,I,0x1f,0x3f));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
')dnl
dnl NOT_INSN(mode, insn)
dnl Emits a rule for bitwise NOT (xor with -1), encoded as eon/eonw
dnl of the source with the zero register.
define(`NOT_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct reg$1_not_reg(iReg$1NoSp dst,
                         iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (Xor$1 src1 m1));
  ins_cost(INSN_COST);
  format %{ "$2  $dst, $src1, zr" %}

  ins_encode %{
    __ $2(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
')dnl
dnl Convenience wrappers that instantiate the templates above for both
dnl the I (32-bit) and L (64-bit) modes and for each shift kind, then
dnl the concrete instantiations that generate the actual patterns.
dnl
define(`BOTH_SHIFT_INSNS',
`BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_NEG_SHIFT_INSNS',
`NEG_SHIFT_INSN($1, URShift, LSR)
NEG_SHIFT_INSN($1, RShift, ASR)
NEG_SHIFT_INSN($1, LShift, LSL)')dnl
dnl
define(`BOTH_INVERTED_INSNS',
`BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`BOTH_INVERTED_SHIFT_INSNS',
`INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4)
INVERTED_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
dnl
define(`ALL_SHIFT_KINDS_WITHOUT_ROR',
`BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
define(`ALL_SHIFT_KINDS',
`ALL_SHIFT_KINDS_WITHOUT_ROR($1, $2)
BOTH_SHIFT_INSNS($1, $2, RotateRight, ROR)')dnl
dnl
define(`ALL_INVERTED_SHIFT_KINDS',
`BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, RotateRight, ROR)
BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
dnl
NOT_INSN(L, eon)
NOT_INSN(I, eonw)
BOTH_NEG_SHIFT_INSNS(I)
BOTH_NEG_SHIFT_INSNS(L)
BOTH_INVERTED_INSNS(And, bic)
BOTH_INVERTED_INSNS(Or, orn)
BOTH_INVERTED_INSNS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(And, bic)
ALL_INVERTED_SHIFT_KINDS(Xor, eon)
ALL_INVERTED_SHIFT_KINDS(Or, orn)
ALL_SHIFT_KINDS(And, andr)
ALL_SHIFT_KINDS(Xor, eor)
ALL_SHIFT_KINDS(Or, orr)
ALL_SHIFT_KINDS_WITHOUT_ROR(Add, add)
ALL_SHIFT_KINDS_WITHOUT_ROR(Sub, sub)
dnl
dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')dnl
dnl BFM_INSN(mode, msb, shift node, insn)
dnl e.g. BFM_INSN(L, 63, RShift, sbfm).
define(`BFM_INSN',`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
  match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "$4  $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & $2;
    int rshift = $rshift_count$$constant & $2;
    int s = $2 - lshift;
    int r = (rshift - lshift) & $2;
    __ $4(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
')dnl
BFM_INSN(L, 63, RShift, sbfm)
BFM_INSN(I, 31, RShift, sbfmw)
BFM_INSN(L, 63, URShift, ubfm)
BFM_INSN(I, 31, URShift, ubfmw)
dnl
// Bitfield extract with shift & mask
dnl BFX_INSN(mode, shift node, insn, word mask, type, exact_log2 suffix)
dnl e.g. BFX_INSN(L, URShift, ubfx, 63, long, _long).
define(`BFX_INSN',
`// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
%{
  match(Set dst (And$1 ($2$1 src rshift) mask));
  // Make sure we are not going to exceed what $3 can do.
  predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & $4;
    intptr_t mask = $mask$$constant;
    int width = exact_log2$6(mask+1);
    __ $3(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
')
BFX_INSN(I, URShift, ubfxw, 31, int)
BFX_INSN(L, URShift, ubfx,  63, long, _long)

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfx when extending an And with a mask when we know mask
// is positive.  We know that because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}

dnl UBFIZ_INSN(src mode, dst mode, insn, word mask, type, exact_log2 fn,
dnl            mask operand class, optional conversion node)
define(`UBFIZ_INSN', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because imm$1_bitmask guarantees it.
instruct $3$1$8(iReg$2NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, $7 mask)
%{
  ifelse($8,,
    match(Set dst (LShift$1 (And$1 src mask) lshift));,
    match(Set dst ($8 (LShift$1 (And$1 src mask) lshift)));)
  ifelse($8,,
    predicate(($6(n->in(1)->in(2)->get_$5() + 1) + (n->in(2)->get_int() & $4)) <= ($4 + 1));,
    predicate(($6(n->in(1)->in(1)->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= 31);)

  ins_cost(INSN_COST);
  format %{ "$3 $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & $4;
    intptr_t mask = $mask$$constant;
    int width = $6(mask+1);
    __ $3(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
')
UBFIZ_INSN(I, I, ubfizw, 31, int,  exact_log2,      immI_bitmask)
UBFIZ_INSN(L, L, ubfiz,  63, long, exact_log2_long, immL_bitmask)
UBFIZ_INSN(I, L, ubfizw, 31, int,  exact_log2,      immI_bitmask,           ConvI2L)
UBFIZ_INSN(L, I, ubfiz,  63, long, exact_log2_long, immL_positive_bitmaskI, ConvL2I)

dnl BFX1_INSN(src mode, dst mode, conv, exact_log2 fn, getter, shift mask,
dnl           width limit, mask operand class, optional name suffix)
define(`BFX1_INSN', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// If there is a convert $1 to $2 block between and And$1 and a LShift$2, we can also match ubfiz
instruct ubfiz$1Conv$3$9(iReg$2NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, $8 mask)
%{
  match(Set dst (LShift$2 (Conv$3 (And$1 src mask)) lshift));
  predicate(($4(n->in(1)->in(1)->in(2)->$5() + 1) + (n->in(2)->get_int() & $6)) <= $7);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & $6;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
')dnl
BFX1_INSN(I, L, I2L, exact_log2,      get_int,  63, (63 + 1), immI_bitmask)
BFX1_INSN(L, I, L2I, exact_log2_long, get_long, 31, 31,       immL_positive_bitmaskI, x)
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE

// Can skip int2long conversions after AND with small bitmask
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}

// Rotations
dnl EXTRACT_INSN(mode, word mask, op node, insn)
dnl Matches (src1 << lshift) op (src2 >>> rshift) where the shift
dnl amounts sum to the word size, i.e. a funnel shift, via extr.
define(`EXTRACT_INSN',`
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & $2);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
')dnl
EXTRACT_INSN(L, 63, Or, extr)
EXTRACT_INSN(I, 31, Or, extrw)
EXTRACT_INSN(L, 63, Add, extr)
EXTRACT_INSN(I, 31, Add, extrw)
dnl ROTATE_INSN(mode, ror|rol, imm|reg)
dnl Rotate by immediate is encoded as extr with src repeated; rotate by
dnl register uses rorv/rorvw (rol negates the shift count first).
define(ROTATE_INSN, `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2$1_$3(iReg$1NoSp dst, iReg$1 src, ifelse($3, reg, iReg, imm)I shift)
%{
  match(Set dst (ifelse($2, ror, RotateRight, RotateLeft) src shift));

  ins_cost(INSN_COST);
  format %{ "ifelse($2, ror, ror, rol)    $dst, $src, $shift" %}

  ifelse($2, rol, ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));, ins_encode %{)
     __ ifelse($3, imm,
        ifelse($1, I, extrw, extr)(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & ifelse($1, I, 0x1f, 0x3f)),
        ifelse($1, I, rorvw, rorv)(as_Register($dst$$reg), as_Register($src$$reg), ifelse($2, rol, rscratch1, as_Register($shift$$reg))));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
')dnl
ROTATE_INSN(I, ror, imm)
ROTATE_INSN(L, ror, imm)
ROTATE_INSN(I, ror, reg)
ROTATE_INSN(L, ror, reg)
ROTATE_INSN(I, rol, reg)
ROTATE_INSN(L, rol, reg)
dnl rol_imm has been transformed to ror_imm during GVN.

// Add/subtract (extended)
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize
dnl ADD_SUB_CONV(src mode, dst mode, node, insn, ext type)
dnl Folds an I2L conversion of the second operand into add/sub sxtw.
define(`ADD_SUB_CONV', `
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
%{
  match(Set dst ($3$2 src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}')dnl
ADD_SUB_CONV(I,L,Add,add,sxtw)
ADD_SUB_CONV(I,L,Sub,sub,sxtw)
dnl
dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, ext type, wordsize)
dnl Folds the shift-left/shift-right sign- or zero-extension idiom on the
dnl second operand into an extended-register add/sub.
define(`ADD_SUB_EXTENDED', `
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
  ins_cost(INSN_COST);
  format %{ "$5  $dst, $src1, $src2, $6" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6);
   %}
  ins_pipe(ialu_reg_reg);
%}')dnl
ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
dnl
dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
define(`ADD_SUB_ZERO_EXTEND', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (And$1 src2 mask)));
  ins_cost(INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5);
   %}
  ins_pipe(ialu_reg_reg);
%}
')
dnl
ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
dnl
dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, wordsize)
define(`ADD_SUB_EXTENDED_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$5  $dst, $src1, $src2, $6 #lshift2" %}

   ins_encode %{
     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
')
dnl                   $1 $2 $3   $4   $5   $6  $7
ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
dnl
ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
dnl
dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
define(`ADD_SUB_CONV_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$3  $dst, $src1, $src2, $4 #lshift" %}

   ins_encode %{
     __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$4, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
')dnl
ADD_SUB_CONV_SHIFT(L,Add,add,sxtw)
ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw)
dnl
dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
define(`ADD_SUB_ZERO_EXTEND_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
%{
  match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "$4  $dst, $src1, $src2, $5 #lshift" %}

   ins_encode %{
     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::$5, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
')dnl
dnl                       $1 $2  $3  $4  $5
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
dnl
ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
dnl
dnl CMOV_INSN(mode, insn, cond)
dnl Conditional select between two registers; expand-only (no match rule).
define(`CMOV_INSN', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmov$1_reg_reg_$3(iReg$1NoSp dst, iReg$1 src1, iReg$1 src2, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "$2 $dst, $src1, $src2 $3\t"  %}

  ins_encode %{
    __ $2($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::upcase($3));
  %}
  ins_pipe(icond_reg_reg);
%}
')dnl
CMOV_INSN(I, cselw, lt)
CMOV_INSN(I, cselw, gt)
dnl
dnl CMOV_DRAW_INSN(mode, imm, insn, cond)
dnl Conditional select against an implicit constant (0, 1 or -1) formed
dnl from zr by csel/csinc/csinv; expand-only (no match rule).
define(`CMOV_DRAW_INSN', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmov$1_reg_imm$2_$4(iReg$1NoSp dst, iReg$1 src1, rFlagsReg cr)
%{
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "$3 $dst, $src1, zr $4\t"  %}

  ins_encode %{
    __ $3($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::upcase($4));
  %}
  ins_pipe(icond_reg);
%}
')dnl
CMOV_DRAW_INSN(I, 0, cselw, lt)
CMOV_DRAW_INSN(I, 0, cselw, gt)
CMOV_DRAW_INSN(I, 1, csincw, le)
CMOV_DRAW_INSN(I, 1, csincw, gt)
CMOV_DRAW_INSN(I, M1, csinvw, lt)
CMOV_DRAW_INSN(I, M1, csinvw, ge)
dnl
dnl MINMAX_DRAW_INSN(Min|Max, mode, imm prefix, imm, cond, [rev])
dnl Min/Max against the constants 0, 1 or -1, expanded into a compare
dnl with zero plus the matching CMOV_DRAW_INSN rule; the optional `rev'
dnl argument generates the operand-swapped match.
define(`MINMAX_DRAW_INSN', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
ifelse($6,,
instruct downcase($1)$2_reg_imm$4(iReg$2NoSp dst, iReg$2`'ORL2I($2) src, imm$2$3$4 imm),
instruct downcase($1)$2_imm$4_reg(iReg$2NoSp dst, imm$2$3$4 imm, iReg$2`'ORL2I($2) src))
%{
  ifelse($6,,
  match(Set dst ($1$2 src imm));,
  match(Set dst ($1$2 imm src));)
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    comp$2_reg_imm0(cr, src);
    cmov$2_reg_imm$4_$5(dst, src, cr);
  %}
%}
')dnl
MINMAX_DRAW_INSN(Min, I,  , 0, lt)
MINMAX_DRAW_INSN(Min, I,  , 0, lt, rev)
MINMAX_DRAW_INSN(Min, I, _, 1, le)
MINMAX_DRAW_INSN(Min, I, _, 1, le, rev)
MINMAX_DRAW_INSN(Min, I, _, M1, lt)
MINMAX_DRAW_INSN(Min, I, _, M1, lt, rev)
dnl
MINMAX_DRAW_INSN(Max, I,  , 0, gt)
MINMAX_DRAW_INSN(Max, I,  , 0, gt, rev)
MINMAX_DRAW_INSN(Max, I, _, 1, gt)
MINMAX_DRAW_INSN(Max, I, _, 1, gt, rev)
MINMAX_DRAW_INSN(Max, I, _, M1, ge)
MINMAX_DRAW_INSN(Max, I, _, M1, ge, rev)
dnl
dnl BITS_REVERSE(mode, insn)
dnl Bit reversal via rbit/rbitw.
define(`BITS_REVERSE', `// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct bits_reverse_$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src)
%{
  match(Set dst (Reverse$1 src));
  ins_cost(INSN_COST);
  format %{ "$2  $dst, $src" %}
  ins_encode %{
    __ $2($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
')dnl
BITS_REVERSE(I, rbitw)
BITS_REVERSE(L, rbit)
