/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#define TCG_CT_CONST_S16      (1 << 8)
#define TCG_CT_CONST_S32      (1 << 9)
#define TCG_CT_CONST_U32      (1 << 10)
#define TCG_CT_CONST_ZERO     (1 << 11)
#define TCG_CT_CONST_P32      (1 << 12)
#define TCG_CT_CONST_INV      (1 << 13)
#define TCG_CT_CONST_INVRISBG (1 << 14)
#define TCG_CT_CONST_CMP      (1 << 15)

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS   MAKE_64BIT_MASK(32, 32)
/* In several places within the instruction set, 0 means "no register"
   rather than TCG_REG_R0. */
#define TCG_REG_NONE 0

/* A scratch register that may be used throughout the backend. */
#define TCG_TMP0 TCG_REG_R1

#define TCG_GUEST_BASE_REG TCG_REG_R13
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B. */
typedef enum S390Opcode {
    RIEg_LOCGHI = 0xec46,
    RRFa_MSGRKC = 0xb9ed,
    RRFam_SELGR = 0xb9e3,
    RRFc_POPCNT = 0xb9e1,
    VRRc_VCEQ   = 0xe7f8,   /* we leave the m5 cs field 0 */
    VRRc_VCH    = 0xe7fb,   /* " */
    VRRc_VCHL   = 0xe7f9,   /* " */
    VRRc_VERLLV = 0xe773,
    VRRc_VESRAV = 0xe77a,
    VRRc_VESRLV = 0xe778,
    VRRc_VPKS   = 0xe797,   /* we leave the m5 cs field 0 */
} S390Opcode;
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
    "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
};
#endif
/* Since R6 is a potential argument register, choose it last of the
   call-saved registers. Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments. */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers. */
    /* Call clobbered registers. */
    /* Argument registers, in reverse order of allocation. */
    /* V8-V15 are call saved, and omitted. */
};
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot == 0);
    return TCG_REG_R2;
}
#define S390_CC_NE     (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE     (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE     (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER  0
#define S390_CC_ALWAYS 15

#define S390_TM_EQ     8  /* CC == 0 */
#define S390_TM_NE     7  /* CC in {1,2,3} */
/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
static const uint8_t tcg_cond_to_s390_cond[16] = {
    [TCG_COND_EQ]     = S390_CC_EQ,
    [TCG_COND_NE]     = S390_CC_NE,
    [TCG_COND_TSTEQ]  = S390_CC_EQ,
    [TCG_COND_TSTNE]  = S390_CC_NE,
    [TCG_COND_LT]     = S390_CC_LT,
    [TCG_COND_LE]     = S390_CC_LE,
    [TCG_COND_GT]     = S390_CC_GT,
    [TCG_COND_GE]     = S390_CC_GE,
    [TCG_COND_LTU]    = S390_CC_LT,
    [TCG_COND_LEU]    = S390_CC_LE,
    [TCG_COND_GTU]    = S390_CC_GT,
    [TCG_COND_GEU]    = S390_CC_GE,
};
/* Condition codes that result from a LOAD AND TEST. Here, we have no
   unsigned instruction variation; however, since the test is vs zero we
   can re-map the outcomes appropriately. */
static const uint8_t tcg_cond_to_ltr_cond[16] = {
    [TCG_COND_EQ]     = S390_CC_EQ,
    [TCG_COND_NE]     = S390_CC_NE,
    [TCG_COND_TSTEQ]  = S390_CC_ALWAYS,
    [TCG_COND_TSTNE]  = S390_CC_NEVER,
    [TCG_COND_LT]     = S390_CC_LT,
    [TCG_COND_LE]     = S390_CC_LE,
    [TCG_COND_GT]     = S390_CC_GT,
    [TCG_COND_GE]     = S390_CC_GE,
    [TCG_COND_LTU]    = S390_CC_NEVER,
    [TCG_COND_LEU]    = S390_CC_EQ,
    [TCG_COND_GTU]    = S390_CC_NE,
    [TCG_COND_GEU]    = S390_CC_ALWAYS,
};
static const tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities[3];

static inline bool is_general_reg(TCGReg r)
{
    return r <= TCG_REG_R15;
}

static inline bool is_vector_reg(TCGReg r)
{
    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
}
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    pcrel2 = (tcg_insn_unit *)value - src_rx;

    switch (type) {
    case R_390_PC16DBL:
        if (pcrel2 == (int16_t)pcrel2) {
            tcg_patch16(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_PC32DBL:
        if (pcrel2 == (int32_t)pcrel2) {
            tcg_patch32(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_20:
        if (value == sextract64(value, 0, 20)) {
            old = *(uint32_t *)src_rw & 0xf00000ff;
            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
            tcg_patch32(src_rw, old);
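            /*
             * Illustrative note (added, not in the original source):
             * the 20-bit displacement of an RXY/RSY insn is split into
             * DL (12 bits) and DH (8 bits); the two ORs above place
             * value's low 12 bits into DL and bits 12-19 into DH.
             */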
            return true;
        }
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static int is_const_p16(uint64_t val)
{
    for (int i = 0; i < 4; ++i) {
        uint64_t mask = 0xffffull << (i * 16);
        if ((val & ~mask) == 0) {
            return i;
        }
    }
    return -1;
}

static int is_const_p32(uint64_t val)
{
    if ((val & 0xffffffff00000000ull) == 0) {
        return 0;
    }
    if ((val & 0x00000000ffffffffull) == 0) {
        return 1;
    }
    return -1;
}
/*
 * Accept bit patterns like these:
 *  0....01....1
 *  1....10....0
 *  1..10..01..1
 * Copied from gcc sources.
 */
static bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero. */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones. */
    if (c == 0) {
        return false;
    }
    /* Find the first transition. */
    lsb = c & -c;
    /* Invert to look for a second transition. */
    c = ~c;
    /* Erase the first transition. */
    c &= -lsb;
    /* Find the second transition, if any. */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero. */
    return c == -lsb;
}
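/*
 * Illustrative note (added, not in the original source): risbg_mask
 * accepts any mask that is a single contiguous run of 1s, possibly
 * wrapping around bit 0. E.g. 0x00fff000 is one run and is accepted,
 * 0xff000000000000ff wraps around and is also accepted, while
 * 0x00ff00ff contains two separate runs and is rejected.
 */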
/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    uint64_t uval = val;

    if (ct & TCG_CT_CONST) {
        return true;
    }
    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)val;
        val = (int32_t)val;
    }

    if (ct & TCG_CT_CONST_CMP) {
        switch (cond) {
        case TCG_COND_EQ:
        case TCG_COND_NE:
            ct |= TCG_CT_CONST_S32 | TCG_CT_CONST_U32; /* CGFI or CLGFI */
            break;
        case TCG_COND_LT:
        case TCG_COND_GE:
        case TCG_COND_LE:
        case TCG_COND_GT:
            ct |= TCG_CT_CONST_S32; /* CGFI */
            break;
        case TCG_COND_LTU:
        case TCG_COND_GEU:
        case TCG_COND_LEU:
        case TCG_COND_GTU:
            ct |= TCG_CT_CONST_U32; /* CLGFI */
            break;
        case TCG_COND_TSTNE:
        case TCG_COND_TSTEQ:
            if (is_const_p16(uval) >= 0) {
                return true; /* TMxx */
            }
            if (risbg_mask(uval)) {
                return true; /* RISBG */
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }

    if (ct & TCG_CT_CONST_INV) {
        val = ~val;
    }
    if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
        return true;
    }
    return false;
}
/* Emit instructions according to the given instruction format. */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

/* RRF-a without the m4 field */
static void tcg_out_insn_RRFa(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (r1 << 4) | r2);
}

/* RRF-a with the m4 field */
static void tcg_out_insn_RRFam(TCGContext *s, S390Opcode op,
                               TCGReg r1, TCGReg r2, TCGReg r3, int m4)
{
    tcg_out32(s, (op << 16) | (r3 << 12) | (m4 << 8) | (r1 << 4) | r2);
}
static void tcg_out_insn_RRFc(TCGContext *s, S390Opcode op,
                              TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
                              int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY
static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    /*
     * Shift bit 4 of each regno to its corresponding bit of RXB.
     * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
     * is the left-shift of the 4th operand.
     */
    return ((v1 & 0x10) << (4 + 3))
         | ((v2 & 0x10) << (4 + 2))
         | ((v3 & 0x10) << (4 + 1))
         | ((v4 & 0x10) << (4 + 0));
}
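/*
 * Illustrative note (added, not in the original source): for
 * v1 = %v17 (regno 0x11), bit 4 is set and contributes
 * 0x10 << 7 = 0x0800, the high-order RXB bit within the trailing
 * halfword that also carries the low opcode byte and the m field.
 */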
static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint8_t i2, uint8_t i3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, (i2 << 8) | (i3 & 0xff));
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v3, 0, 0) | (m4 << 12));
}
static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_debug_assert(is_vector_reg(v4));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
}

static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg r2, TCGReg r3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_general_reg(r2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
    tcg_out16(s, r3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
}
static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_general_reg(r1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(0, v3, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(x2));
    tcg_debug_assert(is_general_reg(b2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
    tcg_out16(s, (b2 << 12) | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}
/* Emit an opcode with "type-checking" of the format. */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)

/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext *s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext *s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(dst) && is_general_reg(src))) {
            tcg_out_insn(s, RR, LR, dst, src);
            break;
        }
        /* fallthru */
    case TCG_TYPE_I64:
        if (likely(is_general_reg(dst))) {
            if (likely(is_general_reg(src))) {
                tcg_out_insn(s, RRE, LGR, dst, src);
            } else {
                tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
            }
            break;
        } else if (is_general_reg(src)) {
            tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_insn(s, VRRa, VLR, dst, src, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static const S390Opcode li_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};
static const S390Opcode oi_insns[4] = {
    RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
};
static const S390Opcode lif_insns[2] = {
    RIL_LLILF, RIL_LLIHF,
};
static const S390Opcode tm_insns[4] = {
    RI_TMLL, RI_TMLH, RI_TMHL, RI_TMHH
};
/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    ptrdiff_t pc_off;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go. */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return;
    }

    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
        return;
    }

    /* Try all 48-bit insns that can load it in one go. */
    if (sval == (int32_t)sval) {
        tcg_out_insn(s, RIL, LGFI, ret, sval);
        return;
    }

    i = is_const_p32(uval);
    if (i >= 0) {
        tcg_out_insn_RIL(s, lif_insns[i], ret, uval >> (i * 32));
        return;
    }

    /* Try for PC-relative address load. For odd addresses, add one. */
    pc_off = tcg_pcrel_diff(s, (void *)sval) >> 1;
    if (pc_off == (int32_t)pc_off) {
        tcg_out_insn(s, RIL, LARL, ret, pc_off);
        if (sval & 1) {
            tcg_out_insn(s, RI, AGHI, ret, 1);
        }
        return;
    }
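    /*
     * Illustrative note (added, not in the original source): LARL can
     * only form halfword-aligned (even) addresses, and pc_off floors
     * the byte difference; for an odd constant this loads sval - 1 and
     * the AGHI above adds the 1 back.
     */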
    /* Otherwise, load it by parts. */
    i = is_const_p16((uint32_t)uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, LLILF, ret, uval);
    }
    uval >>= 32;
    i = is_const_p16(uval);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
    } else {
        tcg_out_insn(s, RIL, OIHF, ret, uval);
    }
}
/* Emit a load/store type instruction. Inputs are:
   DATA: The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX: The RX format opcode, if the operation has one (e.g. STC), else 0.
   OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load. */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
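        /*
         * Illustrative note (added, not in the original source): the
         * XOR/subtract pair sign-extends the low 20 bits, e.g.
         * ofs = 0xfffff yields low = -1, so ofs - low is exactly what
         * the immediate load below must supply.
         */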
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in. */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
                            TCGReg data, TCGReg base, TCGReg index,
                            tcg_target_long ofs, int m3)
{
    if (ofs < 0 || ofs >= 0x1000) {
        if (ofs >= -0x80000 && ofs < 0x80000) {
            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
            base = TCG_TMP0;
            index = TCG_REG_NONE;
            ofs = 0;
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
            if (index != TCG_REG_NONE) {
                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
            }
            index = TCG_TMP0;
            ofs = 0;
        }
    }
    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
}
/* load data without address translation or endianness conversion */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
        } else {
            tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
        }
        break;
    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
        break;
    case TCG_TYPE_V128:
        /* Hint quadword aligned. */
        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
        } else {
            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
        }
        break;
    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
        break;
    case TCG_TYPE_V128:
        /* Hint quadword aligned. */
        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
        break;
    default:
        g_assert_not_reached();
    }
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    tcg_out_mem(s, RX_LA, RXY_LAY, rd, rs, TCG_REG_NONE, imm);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    tcg_out16(s, (RIEf_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIEf_RISBG & 0xff));
}
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGBR, dest, src);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGCR, dest, src);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGHR, dest, src);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGHR, dest, src);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32s(s, dest, src);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32u(s, dest, src);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_mov(s, TCG_TYPE_I32, dest, src);
}
static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;

    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb. */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}
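/*
 * Illustrative check of the wraparound case (added, not in the
 * original source): val = 0xc000000000000003 gives
 * ~val = 0x3ffffffffffffffc, so msb = 64 - ctz64(~val) = 62 and
 * lsb = clz64(~val) - 1 = 1; RISBG with start 62 and end 1 selects
 * bits 62,63,0,1 in big-endian bit numbering, exactly the set bits
 * of val.
 */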
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions. */
    if ((val & valid) == 0xffffffff) {
        tcg_out_ext32u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xff) {
        tcg_out_ext8u(s, dest, dest);
        return;
    }
    if ((val & valid) == 0xffff) {
        tcg_out_ext16u(s, dest, dest);
        return;
    }

    i = is_const_p16(~val & valid);
    if (i >= 0) {
        tcg_out_insn_RI(s, ni_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(~val & valid);
    tcg_debug_assert(i == 0 || type != TCG_TYPE_I32);
    if (i >= 0) {
        tcg_out_insn_RIL(s, nif_insns[i], dest, val >> (i * 32));
        return;
    }

    if (risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    g_assert_not_reached();
}
static void tgen_ori(TCGContext *s, TCGReg dest, uint64_t val)
{
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };
    int i;

    i = is_const_p16(val);
    if (i >= 0) {
        tcg_out_insn_RI(s, oi_insns[i], dest, val >> (i * 16));
        return;
    }

    i = is_const_p32(val);
    if (i >= 0) {
        tcg_out_insn_RIL(s, oif_insns[i], dest, val >> (i * 32));
        return;
    }

    g_assert_not_reached();
}

static void tgen_xori(TCGContext *s, TCGReg dest, uint64_t val)
{
    switch (is_const_p32(val)) {
    case 0:
        tcg_out_insn(s, RIL, XILF, dest, val);
        break;
    case 1:
        tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        break;
    default:
        g_assert_not_reached();
    }
}
static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                     TCGArg c2, bool c2const, bool need_carry, int *inv_cc)
{
    bool is_unsigned = is_unsigned_cond(c);
    TCGCond inv_c = tcg_invert_cond(c);
    S390Opcode op;

    if (is_tst_cond(c)) {
        tcg_debug_assert(!need_carry);

        if (!c2const) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RRFa, NRK, TCG_REG_R0, r1, c2);
            } else {
                tcg_out_insn(s, RRFa, NGRK, TCG_REG_R0, r1, c2);
            }
            goto exit;
        }

        if (type == TCG_TYPE_I32) {
            c2 = (uint32_t)c2;
        }

        int i = is_const_p16(c2);
        if (i >= 0) {
            tcg_out_insn_RI(s, tm_insns[i], r1, c2 >> (i * 16));
            *inv_cc = (c == TCG_COND_TSTEQ ? S390_TM_NE : S390_TM_EQ);
            return *inv_cc ^ 15;
        }

        if (risbg_mask(c2)) {
            tgen_andi_risbg(s, TCG_REG_R0, r1, c2);
            goto exit;
        }
        g_assert_not_reached();
    }

    if (c2const) {
        if (c2 == 0) {
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                *inv_cc = tcg_cond_to_ltr_cond[inv_c];
                return tcg_cond_to_ltr_cond[c];
            }
        }

        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        if (type == TCG_TYPE_I32) {
            op = (is_unsigned ? RIL_CLFI : RIL_CFI);
            tcg_out_insn_RIL(s, op, r1, c2);
            goto exit;
        }

        /* Should match TCG_CT_CONST_CMP. */
        switch (c) {
        case TCG_COND_LT:
        case TCG_COND_GE:
        case TCG_COND_LE:
        case TCG_COND_GT:
            tcg_debug_assert(c2 == (int32_t)c2);
            op = RIL_CGFI;
            break;
        case TCG_COND_EQ:
        case TCG_COND_NE:
            if (c2 == (int32_t)c2) {
                op = RIL_CGFI;
                break;
            }
            /* fall through */
        case TCG_COND_LTU:
        case TCG_COND_GEU:
        case TCG_COND_LEU:
        case TCG_COND_GTU:
            tcg_debug_assert(c2 == (uint32_t)c2);
            op = RIL_CLGFI;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_out_insn_RIL(s, op, r1, c2);
    } else if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    *inv_cc = tcg_cond_to_s390_cond[inv_c];
    return tcg_cond_to_s390_cond[c];
}

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    int inv_cc;
    return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
}
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2,
                         bool c2const, bool neg)
{
    int cc;

    /* With LOC2, we can always emit the minimum 3 insns. */
    if (HAVE_FACILITY(LOAD_ON_COND2)) {
        /* Emit: d = 0, d = (cc ? 1 : d). */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIEg, LOCGHI, dest, neg ? -1 : 1, cc);
        return;
    }

 restart:
    switch (cond) {
    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE. */
        if (c2const) {
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c1 = TCG_TMP0;
            c2const = 0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        cond = tcg_swap_cond(cond);
        goto restart;

    case TCG_COND_NE:
        /* X != 0 is X > 0. */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /*
         * The result of a compare has CC=2 for GT and CC=3 unused.
         * ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.
         */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
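        /*
         * Illustrative note (added, not in the original source): with
         * dest zeroed, ALCGR dest,dest computes 0 + 0 + carry, i.e.
         * (CC & 2) >> 1, so dest becomes 1 exactly when the compare
         * yielded GT/GTU and 0 otherwise.
         */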
        if (neg) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RR, LCR, dest, dest);
            } else {
                tcg_out_insn(s, RRE, LCGR, dest, dest);
            }
        }
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0. */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /*
         * As above, but we're looking for borrow, or !carry.
         * The second insn computes d - d - borrow, or -1 for true
         * and 0 for false. So we must mask to 1 bit afterward.
         */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        if (!neg) {
            tgen_andi(s, type, dest, 1);
        }
        return;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        break;

    default:
        g_assert_not_reached();
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    /* Emit: d = 0, t = 1, d = (cc ? t : d). */
    tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
    tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, neg ? -1 : 1);
    tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
}
static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
                             TCGArg v3, int v3const, TCGReg v4,
                             int cc, int inv_cc)
{
    TCGReg src;

    if (v3const) {
        if (dest == v4) {
            if (HAVE_FACILITY(LOAD_ON_COND2)) {
                /* Emit: if (cc) dest = v3. */
                tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
                return;
            }
            tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
            src = TCG_TMP0;
        } else {
            /* LGR+LOCGHI is larger than LGHI+LOCGR. */
            tcg_out_insn(s, RI, LGHI, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    } else {
        if (HAVE_FACILITY(MISC_INSN_EXT3)) {
            /* Emit: dest = cc ? v3 : v4. */
            tcg_out_insn(s, RRFam, SELGR, dest, v3, v4, cc);
            return;
        }
        if (dest == v4) {
            src = v3;
        } else {
            tcg_out_mov(s, type, dest, v3);
            cc = inv_cc;
            src = v4;
        }
    }

    /* Emit: if (cc) dest = src. */
    tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
}

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const, TCGReg v4)
{
    int cc, inv_cc;

    cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
    tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}
static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
        return;
    }

    /*
     * Conditions from FLOGR are:
     *   2 -> one bit found
     *   8 -> no one bit found
     */
    tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
}
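/*
 * Illustrative note (added, not in the original source): the 8 and 2
 * above are condition-code masks, 8 selecting CC0 (no bit found,
 * R0 = 64) and 2 selecting CC2 (bit found, R0 = count of leading
 * zeros), so dest receives the default a2 exactly when the input
 * was zero.
 */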
static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    /* With MIE3, and bit 0 of m4 set, we get the complete result. */
    if (HAVE_FACILITY(MISC_INSN_EXT3)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, dest, src);
            src = dest;
        }
        tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
        return;
    }

    /* Without MIE3, each byte gets the count of bits for the byte. */
    tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);

    /* Multiply to sum each byte at the top of the word. */
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
        tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
        tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
        tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
    }
}
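/*
 * Illustrative note (added, not in the original source): after the
 * byte-wise POPCNT, multiplying by 0x0101...01 accumulates the
 * per-byte counts into the most significant byte (each count is at
 * most 8, so no carries cross byte lanes), and the final right shift
 * extracts that byte.
 */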
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len, int z)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}
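/*
 * Illustrative note (added, not in the original source): for ofs = 8,
 * len = 16 this gives lsb = 55 and msb = 40, i.e. RISBG rotates src
 * left by 8 and inserts big-endian bits 40..55, which are
 * little-endian bits 8..23 of the destination.
 */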
static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}

static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
        s->code_ptr += 1;
    }
}
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, 0);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, 0);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (!is_tst_cond(c)) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
                   : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /*
         * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
         * If the immediate we've been given does not fit that range, we'll
         * fall back to separate compare and branch instructions using the
         * larger comparison range afforded by COMPARE IMMEDIATE.
         */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIEc_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIEc_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIEc_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIEc_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
    tgen_branch(s, cc, l);
}
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, dest);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    int disp;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    TCGAtomAlign aa;

    if ((memop & MO_SIZE) <= MO_64) {
        return true;
    }

    /*
     * Reject 16-byte memop with 16-byte atomicity,
     * but do allow a pair of 64-bit operations.
     */
    aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
    return aa.atom <= MO_64;
}
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
                                   HostAddress h)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
        tcg_out_ext16u(s, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
        tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
        tcg_out_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
        tcg_out_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
        break;

    case MO_UQ | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
        break;
    case MO_UQ:
        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
        break;

    default:
        g_assert_not_reached();
    }
}
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
                                   HostAddress h)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (h.disp >= 0 && h.disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
        break;
    case MO_UW:
        if (h.disp >= 0 && h.disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
        break;
    case MO_UL:
        if (h.disp >= 0 && h.disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
        }
        break;

    case MO_UQ | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
        break;
    case MO_UQ:
        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
        break;

    default:
        g_assert_not_reached();
    }
}
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]);

    tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
    return true;
}
/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 19)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        unsigned s_mask = (1 << s_bits) - 1;
        int mem_index = get_mmuidx(oi);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
        int table_off = fast_off + offsetof(CPUTLBDescFast, table);
        int ofs, a_off;
        uint64_t tlb_mask;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
                     s->page_bits - CPU_TLB_ENTRY_BITS);

        tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
        tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address. For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
        tlb_mask = (uint64_t)s->page_mask | a_mask;
        if (a_off == 0) {
            tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
        } else {
            tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
            tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
        }
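        /*
         * Illustrative note (added, not in the original source): for an
         * unaligned 8-byte access a_off = 7, so the LA adds 7 and the
         * masked compare effectively checks the page of the access's
         * last byte; for aligned accesses a_off = 0 and the alignment
         * bits folded into tlb_mask make a misaligned address miss the
         * TLB tag.
         */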
        if (is_ld) {
            ofs = offsetof(CPUTLBEntry, addr_read);
        } else {
            ofs = offsetof(CPUTLBEntry, addr_write);
        }
        if (addr_type == TCG_TYPE_I32) {
            ofs += HOST_BIG_ENDIAN * 4;
            tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
        } else {
            tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
        }

        tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
        ldst->label_ptr[0] = s->code_ptr++;

        h->index = TCG_TMP0;
        tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
                     offsetof(CPUTLBEntry, addend));

        if (addr_type == TCG_TYPE_I32) {
            tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
            h->base = TCG_REG_NONE;
        } else {
            h->base = addr_reg;
        }
        h->disp = 0;
    } else {
        if (a_mask) {
            ldst = new_ldst_label(s);
            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addrlo_reg = addr_reg;

            tcg_debug_assert(a_mask <= 0xffff);
            tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);

            tcg_out16(s, RI_BRC | (S390_TM_NE << 4));
            ldst->label_ptr[0] = s->code_ptr++;
        }

        h->base = addr_reg;
        if (addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_TMP0, addr_reg);
            h->base = TCG_TMP0;
        }
        if (guest_base < 0x80000) {
            h->index = TCG_REG_NONE;
            h->disp = guest_base;
        } else {
            h->index = TCG_GUEST_BASE_REG;
            h->disp = 0;
        }
    }

    return ldst;
}
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
    TCGLabel *l1 = NULL, *l2 = NULL;
    TCGLabelQemuLdst *ldst;
    HostAddress h;
    bool need_bswap;
    bool use_pair;
    S390Opcode insn;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

    use_pair = h.aa.atom < MO_128;
    need_bswap = get_memop(oi) & MO_BSWAP;

    if (!use_pair) {
        /*
         * Atomicity requires we use LPQ. If we've already checked for
         * 16-byte alignment, that's all we need. If we arrive with
         * lesser alignment, we have determined that less than 16-byte
         * alignment can be satisfied with two 8-byte loads.
         */
        if (h.aa.align < MO_128) {
            use_pair = true;
            l1 = gen_new_label();
            l2 = gen_new_label();

            tcg_out_insn(s, RI, TMLL, addr_reg, 15);
            tgen_branch(s, S390_TM_NE, l1);
        }

        tcg_debug_assert(!need_bswap);
        tcg_debug_assert(datalo & 1);
        tcg_debug_assert(datahi == datalo - 1);
        insn = is_ld ? RXY_LPQ : RXY_STPQ;
        tcg_out_insn_RXY(s, insn, datahi, h.base, h.index, h.disp);

        if (use_pair) {
            tgen_branch(s, S390_CC_ALWAYS, l2);
            tcg_out_label(s, l1);
        }
    }
    if (use_pair) {
        TCGReg d1, d2;

        if (need_bswap) {
            d1 = datalo, d2 = datahi;
            insn = is_ld ? RXY_LRVG : RXY_STRVG;
        } else {
            d1 = datahi, d2 = datalo;
            insn = is_ld ? RXY_LG : RXY_STG;
        }

        if (h.base == d1 || h.index == d1) {
            tcg_out_insn(s, RXY, LAY, TCG_TMP0, h.base, h.index, h.disp);
            h.base = TCG_TMP0;
            h.index = TCG_REG_NONE;
            h.disp = 0;
        }
        tcg_out_insn_RXY(s, insn, d1, h.base, h.index, h.disp);
        tcg_out_insn_RXY(s, insn, d2, h.base, h.index, h.disp + 8);

        if (l2) {
            tcg_out_label(s, l2);
        }
    }
    if (ldst) {
        ldst->type = TCG_TYPE_I128;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Branch displacement must be aligned for atomic patching;
     * see if we need to add an extra nop before the branch.
     */
    if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
        tcg_out16(s, NOP);
    }
    tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
    set_jmp_insn_offset(s, which);
    s->code_ptr += 2;
    set_jmp_reset_offset(s, which);
}
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    if (!HAVE_FACILITY(GEN_INST_EXT)) {
        return;
    }
    /* patch the branch destination */
    uintptr_t addr = tb->jmp_target_addr[n];
    intptr_t disp = addr - (jmp_rx - 2);
    qatomic_set((int32_t *)jmp_rw, disp / 2);
    /* no need to flush icache explicitly */
}
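/*
 * Illustrative note (added, not in the original source): BRCL encodes
 * its target as a halfword displacement relative to the start of the
 * instruction; jmp_rx points at the 4-byte displacement field, so
 * jmp_rx - 2 backs up to the opcode and disp / 2 converts bytes to
 * halfwords.
 */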
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg args[TCG_MAX_OP_ARGS],
                              const int const_args[TCG_MAX_OP_ARGS])
{
    S390Opcode op, op2;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_goto_ptr:
        a0 = args[0];
        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present. */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it. */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present. */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                tcg_out_insn(s, RIL, AFI, a0, a2);
                break;
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, SR, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_andi(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, NR, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_ori(s, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, OR, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tcg_out_insn(s, RIL, XILF, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, XR, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
        } else {
            tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
        }
        break;
    case INDEX_op_orc_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_ori(s, a0, (uint32_t)~a2);
        } else {
            tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
        }
        break;
    case INDEX_op_eqv_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tcg_out_insn(s, RIL, XILF, a0, ~a2);
        } else {
            tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
        }
        break;
    case INDEX_op_nand_i32:
        tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
        break;
    case INDEX_op_nor_i32:
        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_not_i32:
        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
        break;
    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            if (a2 == (int16_t)a2) {
                tcg_out_insn(s, RI, MHI, a0, a2);
            } else {
                tcg_out_insn(s, RIL, MSFI, a0, a2);
            }
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, MSR, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_debug_assert(args[0] == args[2]);
        tcg_debug_assert(args[1] == args[3]);
        tcg_debug_assert((args[1] & 1) == 0);
        tcg_debug_assert(args[0] == args[1] + 1);
        tcg_out_insn(s, RR, DR, args[1], args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_debug_assert(args[0] == args[2]);
        tcg_debug_assert(args[1] == args[3]);
        tcg_debug_assert((args[1] & 1) == 0);
        tcg_debug_assert(args[0] == args[1] + 1);
        tcg_out_insn(s, RRE, DLR, args[1], args[4]);
        break;
    case INDEX_op_shl_i32:
        op = RS_SLL;
        op2 = RSY_SLLK;
    do_shift32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (a0 == a1) {
            if (const_args[2]) {
                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
            } else {
                tcg_out_sh32(s, op, a0, a2, 0);
            }
        } else {
            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
            if (const_args[2]) {
                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
            } else {
                tcg_out_sh64(s, op2, a0, a1, a2, 0);
            }
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        op2 = RSY_SRLK;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        op2 = RSY_SRAK;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;
    case INDEX_op_bswap16_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
        }
        break;
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVGR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
        } else {
            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
        }
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;
    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, ALR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, SLR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2], false);
        break;
    case INDEX_op_negsetcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2], true);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3], args[4]);
        break;
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
        break;
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
        break;
    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (a2 == (int32_t)a2) {
                    tcg_out_insn(s, RIL, AGFI, a0, a2);
                    break;
                }
                if (a2 == (uint32_t)a2) {
                    tcg_out_insn(s, RIL, ALGFI, a0, a2);
                    break;
                }
                if (-a2 == (uint32_t)-a2) {
                    tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                    break;
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
        } else {
            tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_ori(s, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_xori(s, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
        } else {
            tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_orc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_ori(s, a0, ~a2);
        } else {
            tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_eqv_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_xori(s, a0, ~a2);
        } else {
            tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_nand_i64:
        tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
        break;
    case INDEX_op_nor_i64:
        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_not_i64:
        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            if (a2 == (int16_t)a2) {
                tcg_out_insn(s, RI, MGHI, a0, a2);
            } else {
                tcg_out_insn(s, RIL, MSGFI, a0, a2);
            }
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, MSGR, a0, a2);
        } else {
            tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
        }
        break;
    case INDEX_op_div2_i64:
        /*
         * ??? We get an unnecessary sign-extension of the dividend
         * into op0 with this definition, but as we do in fact always
         * produce both quotient and remainder, using INDEX_op_div_i64
         * instead requires jumping through even more hoops.
         */
        tcg_debug_assert(args[0] == args[2]);
        tcg_debug_assert(args[1] == args[3]);
        tcg_debug_assert((args[1] & 1) == 0);
        tcg_debug_assert(args[0] == args[1] + 1);
        tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_debug_assert(args[0] == args[2]);
        tcg_debug_assert(args[1] == args[3]);
        tcg_debug_assert((args[1] & 1) == 0);
        tcg_debug_assert(args[0] == args[1] + 1);
        tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
        break;
    case INDEX_op_mulu2_i64:
        tcg_debug_assert(args[0] == args[2]);
        tcg_debug_assert((args[1] & 1) == 0);
        tcg_debug_assert(args[0] == args[1] + 1);
        tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
        break;
    case INDEX_op_muls2_i64:
        tcg_debug_assert((args[1] & 1) == 0);
        tcg_debug_assert(args[0] == args[1] + 1);
        tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
        break;
    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate. */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;
    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2], false);
        break;
    case INDEX_op_negsetcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2], true);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3], args[4]);
        break;

    OP_32_64(deposit):
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            tgen_deposit(s, a0, a2, args[3], args[4], 1);
        } else {
            /* Since we can't support "0Z" as a constraint, we allow a1 in
               any register.  Fix things up as if a matching constraint. */
            if (a0 != a1) {
                /* TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1, so the
                   comparison result is itself the type. */
                TCGType type = (opc == INDEX_op_deposit_i64);
                if (a0 == a2) {
                    tcg_out_mov(s, type, TCG_TMP0, a2);
                    a2 = TCG_TMP0;
                }
                tcg_out_mov(s, type, a0, a1);
            }
            tgen_deposit(s, a0, a2, args[3], args[4], 0);
        }
        break;

    OP_32_64(extract):
        tgen_extract(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_clz_i64:
        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
        break;

    case INDEX_op_ctpop_i32:
        tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ctpop_i64:
        tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
        break;

    case INDEX_op_mb:
        /* The host memory model is quite strong, we simply need to
           serialize the instruction stream. */
        if (args[0] & TCG_MO_ST_LD) {
            /* fast-bcr-serialization facility (45) is present */
            tcg_out_insn(s, RR, BCR, 14, 0);
        }
        break;
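
    /*
     * On the BCR above: a branch-on-condition-register with R2 = 0
     * never branches and instead acts as a serialization point.  Mask
     * 15 is the classic full serialization; mask 14 selects the cheaper
     * fast-BCR-serialization form, available whenever facility 45 is
     * (which query_s390_facilities below requires).  Only
     * store-followed-by-load reordering is observable on this host, so
     * the remaining barrier types compile to nothing.
     */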

    case INDEX_op_mov_i32:    /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:       /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:    /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:    /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    if (is_general_reg(src)) {
        /* Replicate general register into two MO_64. */
        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
        if (vece == MO_64) {
            return true;
        }
        src = dst;
    }

    /*
     * Recall that the "standard" integer, within a vector, is the
     * rightmost element of the leftmost doubleword, a-la VLLEZ.
     */
    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset)
{
    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
    return true;
}
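
/*
 * A worked example for the VREP in tcg_out_dup_vec above: element
 * indices count from the left, so a 128-bit register holds (16 >> vece)
 * elements and (8 >> vece) - 1 names the rightmost element of the
 * leftmost doubleword.  For vece == MO_32 that is (8 >> 2) - 1 == 1,
 * i.e. the second of four word elements.
 */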

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t val)
{
    int i, mask, msb, lsb;

    /* Look for int16_t elements. */
    if (vece <= MO_16 ||
        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
        return;
    }

    /* Look for bit masks. */
    if (vece == MO_32) {
        if (risbg_mask((int32_t)val)) {
            /* Handle wraparound by swapping msb and lsb. */
            if ((val & 0x80000001u) == 0x80000001u) {
                msb = 32 - ctz32(~val);
                lsb = clz32(~val) - 1;
            } else {
                msb = clz32(val);
                lsb = 31 - ctz32(val);
            }
            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
            return;
        }
    } else {
        if (risbg_mask(val)) {
            /* Handle wraparound by swapping msb and lsb. */
            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
                msb = 64 - ctz64(~val);
                lsb = clz64(~val) - 1;
            } else {
                msb = clz64(val);
                lsb = 63 - ctz64(val);
            }
            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
            return;
        }
    }

    /* Look for all bytes 0x00 or 0xff. */
    for (i = mask = 0; i < 8; i++) {
        uint8_t byte = val >> (i * 8);
        if (byte == 0xff) {
            mask |= 1 << i;
        } else if (byte != 0) {
            break;
        }
    }
    if (i == 8) {
        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
        return;
    }

    /* Otherwise, stuff it in the constant pool. */
    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
}
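
/*
 * A worked example for the VGBM case above: the 16-bit immediate holds
 * one bit per byte of the vector.  The loop builds an 8-bit mask over
 * the doubleword value, and multiplying by 0x0101 replicates it into
 * both halves.  For val == 0x00ff00ff00ff00ff the loop yields
 * mask == 0x55, so the immediate is 0x5555 and both doublewords of the
 * vector match val.
 */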

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;

    case INDEX_op_abs_vec:
        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
        break;
    case INDEX_op_neg_vec:
        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
        break;
    case INDEX_op_not_vec:
        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
        break;

    case INDEX_op_add_vec:
        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
        break;
    case INDEX_op_sub_vec:
        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
        break;
    case INDEX_op_and_vec:
        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
        break;
    case INDEX_op_andc_vec:
        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
        break;
    case INDEX_op_mul_vec:
        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
        break;
    case INDEX_op_or_vec:
        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
        break;
    case INDEX_op_orc_vec:
        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
        break;
    case INDEX_op_xor_vec:
        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
        break;
    case INDEX_op_nand_vec:
        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
        break;
    case INDEX_op_nor_vec:
        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
        break;
    case INDEX_op_eqv_vec:
        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
        break;

    case INDEX_op_shli_vec:
        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_shri_vec:
        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_sari_vec:
        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_rotli_vec:
        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_shls_vec:
        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_shrs_vec:
        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_sars_vec:
        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_rotls_vec:
        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_shlv_vec:
        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
        break;
    case INDEX_op_shrv_vec:
        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
        break;
    case INDEX_op_sarv_vec:
        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
        break;
    case INDEX_op_rotlv_vec:
        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
        break;

    case INDEX_op_smin_vec:
        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
        break;
    case INDEX_op_smax_vec:
        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
        break;
    case INDEX_op_umin_vec:
        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
        break;
    case INDEX_op_umax_vec:
        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
        break;

    case INDEX_op_bitsel_vec:
        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
        break;

    case INDEX_op_cmp_vec:
        switch ((TCGCond)args[3]) {
        case TCG_COND_EQ:
            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
            break;
        case TCG_COND_GT:
            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
            break;
        case TCG_COND_GTU:
            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_s390_vuph_vec:
        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
        break;
    case INDEX_op_s390_vupl_vec:
        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
        break;
    case INDEX_op_s390_vpks_vec:
        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
        break;

    case INDEX_op_mov_vec:   /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:   /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}
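
/*
 * Note that only EQ, GT and GTU ever reach the cmp_vec case above:
 * tcg_can_emit_vec_op below returns -1 for cmp_vec, so the middle-end
 * routes it through tcg_expand_vec_op, where expand_vec_cmp_noinv
 * rewrites every other condition into one of these three plus an
 * operand swap and/or a final complement.
 */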

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_abs_vec:
    case INDEX_op_add_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_bitsel_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotls_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_sars_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_xor_vec:
        return 1;
    case INDEX_op_cmp_vec:
    case INDEX_op_cmpsel_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    case INDEX_op_mul_vec:
        return vece < MO_64;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
        return vece < MO_64 ? -1 : 0;
    default:
        return 0;
    }
}
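
/*
 * The return values above follow the tcg_can_emit_vec_op convention:
 * 1 means the opcode is emitted directly by tcg_out_vec_op, -1 means
 * it is supported via expansion in tcg_expand_vec_op, and 0 means
 * unsupported, leaving the middle-end to fall back to scalar code.
 */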

static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    return need_inv;
}

static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
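
/*
 * A worked example of the lowering above: LE sets need_inv, so
 * "v1 <= v2" becomes VCH (v1 > v2) followed by a complement, while GE
 * also sets need_swap and is emitted as VCH (v2 > v1) followed by a
 * complement.
 */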

static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
                              TCGv_vec c1, TCGv_vec c2,
                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
{
    TCGv_vec t = tcg_temp_new_vec(type);

    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
        /* Invert the sense of the compare by swapping arguments. */
        tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
    } else {
        tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
    }
    tcg_temp_free_vec(t);
}

static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
{
    TCGv_vec h1 = tcg_temp_new_vec(type);
    TCGv_vec h2 = tcg_temp_new_vec(type);
    TCGv_vec l1 = tcg_temp_new_vec(type);
    TCGv_vec l2 = tcg_temp_new_vec(type);

    tcg_debug_assert(vece < MO_64);

    /* Unpack with sign-extension. */
    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(h2), tcgv_vec_arg(v2));
    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
              tcgv_vec_arg(l2), tcgv_vec_arg(v2));

    /* Arithmetic on a wider element size. */
    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
              tcgv_vec_arg(l1), tcgv_vec_arg(l2));

    /* Pack with saturation. */
    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));

    tcg_temp_free_vec(h1);
    tcg_temp_free_vec(h2);
    tcg_temp_free_vec(l1);
    tcg_temp_free_vec(l2);
}
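
/*
 * The expansion above is exact because the arithmetic is done at twice
 * the element size: VUPH/VUPL sign-extend the high and low halves of
 * each input into wider elements, the wider add or subtract cannot
 * overflow, and VPKS packs the results back while clamping each one to
 * the signed range of the original element size.
 */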

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, v3, v4, t0;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));

    switch (opc) {
    case INDEX_op_cmp_vec:
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;

    case INDEX_op_cmpsel_vec:
        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
        break;

    case INDEX_op_rotrv_vec:
        /* Rotate right by N is rotate left by -N modulo the element size. */
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;

    case INDEX_op_ssadd_vec:
        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
        break;
    case INDEX_op_sssub_vec:
        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
        break;

    default:
        g_assert_not_reached();
    }
    va_end(va);
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_negsetcond_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rC);

    case INDEX_op_clz_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_and_i64:
        return C_O1_I2(r, r, rNKR);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rK);

    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_andc_i64:
        return C_O1_I2(r, r, rKR);
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rNK);

    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_mul_i32:
        return (HAVE_FACILITY(MISC_INSN_EXT2)
                ? C_O1_I2(r, r, ri)
                : C_O1_I2(r, 0, ri));
    case INDEX_op_mul_i64:
        return (HAVE_FACILITY(MISC_INSN_EXT2)
                ? C_O1_I2(r, r, rJ)
                : C_O1_I2(r, 0, rJ));

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
        return C_O0_I2(r, ri);
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, rC);

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
        return C_O2_I1(o, m, r);
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        return C_O0_I3(o, m, r);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, rZ, r);

    case INDEX_op_movcond_i32:
        return C_O1_I4(r, r, ri, rI, r);
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, rC, rI, r);

    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        return C_O2_I3(o, m, 0, 1, r);

    case INDEX_op_mulu2_i64:
        return C_O2_I2(o, m, 0, r);
    case INDEX_op_muls2_i64:
        return C_O2_I2(o, m, r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return C_N1_O1_I4(r, r, 0, 1, ri, r);

    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return C_N1_O1_I4(r, r, 0, 1, rJU, r);
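
    /*
     * In the pair constraint sets above, 'o' is a backend-local
     * register set (see tcg-target-con-str.h) holding the odd-numbered
     * general registers, and 'm' is TCG's generic pair constraint that
     * allocates the register directly below its partner, so the two
     * outputs form the even/odd register pair that the pair
     * instructions and the assertions in tcg_out_op require.
     */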

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(v, vr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_s390_vuph_vec:
    case INDEX_op_s390_vupl_vec:
        return C_O1_I1(v, v);
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_s390_vpks_vec:
        return C_O1_I2(v, v, v);
    case INDEX_op_rotls_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return C_O1_I2(v, v, r);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}

/*
 * Mainline glibc added HWCAP_S390_VX before it was kernel ABI.
 * Some distros have fixed this up locally, others have not.
 */
#ifndef HWCAP_S390_VXRS
#define HWCAP_S390_VXRS 2048
#endif

static void query_s390_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    const char *which;

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway. */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
        register void *r1 __asm__("1") = s390_facilities;

        /* stfle 0(%r1) */
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
    }

    /*
     * Use of vector registers requires os support beyond the facility bit.
     * If the kernel does not advertise support, disable the facility bits.
     * There is nothing else we currently care about in the 3rd word, so
     * disable VECTOR with one store.
     */
    if (!(hwcap & HWCAP_S390_VXRS)) {
        s390_facilities[2] = 0;
    }

    /*
     * Minimum supported cpu revision is z196.
     * Check for all required facilities.
     * ZARCH_ACTIVE is done via preprocessor check for 64-bit.
     */
    if (!HAVE_FACILITY(LONG_DISP)) {
        which = "long-displacement";
        goto fail;
    }
    if (!HAVE_FACILITY(EXT_IMM)) {
        which = "extended-immediate";
        goto fail;
    }
    if (!HAVE_FACILITY(GEN_INST_EXT)) {
        which = "general-instructions-extension";
        goto fail;
    }
    /*
     * Facility 45 is a big bin that contains: distinct-operands,
     * fast-BCR-serialization, high-word, population-count,
     * interlocked-access-1, and load/store-on-condition-1
     */
    if (!HAVE_FACILITY(45)) {
        which = "45";
        goto fail;
    }
    return;

 fail:
    error_report("%s: missing required facility %s", __func__, which);
    exit(EXIT_FAILURE);
}
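
/*
 * On the .word encoding above: 0xb2b0 is the opcode of STFLE and
 * 0x1000 encodes base register %r1 with displacement 0, i.e.
 * "stfle 0(%r1)".  STFLE takes the number of doublewords minus one
 * implicitly in %r0 and stores the facility list at the given address,
 * hence the two pinned register variables; using the raw encoding
 * avoids depending on assembler support for the mnemonic.
 */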

static void tcg_target_init(TCGContext *s)
{
    query_s390_facilities();

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
    if (HAVE_FACILITY(VECTOR)) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
}

#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                          + TCG_STATIC_CALL_ARGS_SIZE           \
                          + CPU_TEMP_BUF_NLONGS * sizeof(long)))

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    if (!tcg_use_softmmu && guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
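
/*
 * On the guest_base threshold in the prologue: a "long displacement"
 * memory operand is a 20-bit signed field, reaching at most 0x7ffff.
 * A guest_base below 0x80000 can therefore be folded into the
 * displacement of each guest access for free; only larger values are
 * worth dedicating (and reserving) TCG_GUEST_BASE_REG for the whole
 * translation session.
 */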

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    /* 0x0707 is "bcr 0,%r7", the canonical s390x no-op. */
    memset(p, 0x07, count * sizeof(tcg_insn_unit));
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE EM_S390

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
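
/*
 * On the encodings in debug_frame above: DW_CFA_def_cfa is opcode 12
 * followed by a register and a uleb128 offset; (FRAME_SIZE & 0x7f) |
 * 0x80 emits the low seven bits with the continuation flag set and
 * FRAME_SIZE >> 7 the remaining bits, which is why QEMU_BUILD_BUG_ON
 * limits FRAME_SIZE to a two-byte uleb128.  Each 0x80 | reg byte is
 * DW_CFA_offset with a one-byte factored offset: with data_align 8,
 * operand 6 places %r6 at CFA + 48, matching the STMG save area in the
 * prologue.
 */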