; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes='sroa<preserve-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-PRESERVE-CFG
; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG
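;
; The two RUN lines exercise SROA in both its CFG-preserving and CFG-modifying
; modes. None of the tests below requires CFG changes, so both modes are
; expected to produce identical output; all assertions therefore use the
; common CHECK prefix, and the mode-specific prefixes are unused (see the
; autogenerated note at the end of the file).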

; This test checks that SROA runs mem2reg on scalable vectors.

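; Each alloca below is written and read exactly once, non-volatile and with
; its exact declared scalable type, so mem2reg can forward the stored value
; straight to the load and delete the alloca, leaving just the return.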
define <vscale x 16 x i1> @alloca_nxv16i1(<vscale x 16 x i1> %pg) {
; CHECK-LABEL: @alloca_nxv16i1(
; CHECK-NEXT:    ret <vscale x 16 x i1> [[PG:%.*]]
;
  %pg.addr = alloca <vscale x 16 x i1>
  store <vscale x 16 x i1> %pg, ptr %pg.addr
  %1 = load <vscale x 16 x i1>, ptr %pg.addr
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i8> @alloca_nxv16i8(<vscale x 16 x i8> %vec) {
; CHECK-LABEL: @alloca_nxv16i8(
; CHECK-NEXT:    ret <vscale x 16 x i8> [[VEC:%.*]]
;
  %vec.addr = alloca <vscale x 16 x i8>
  store <vscale x 16 x i8> %vec, ptr %vec.addr
  %1 = load <vscale x 16 x i8>, ptr %vec.addr
  ret <vscale x 16 x i8> %1
}

; Test a scalable alloca that can't be promoted. Mem2Reg only considers
; non-volatile loads and stores for promotion.
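; The volatile store/load pair below must be preserved, so the alloca stays in
; memory untouched; the checks match the input modulo explicit alignment.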
define <vscale x 16 x i8> @unpromotable_alloca(<vscale x 16 x i8> %vec) {
; CHECK-LABEL: @unpromotable_alloca(
; CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i8> [[VEC:%.*]], ptr [[VEC_ADDR]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load volatile <vscale x 16 x i8>, ptr [[VEC_ADDR]], align 16
; CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
;
  %vec.addr = alloca <vscale x 16 x i8>
  store volatile <vscale x 16 x i8> %vec, ptr %vec.addr
  %1 = load volatile <vscale x 16 x i8>, ptr %vec.addr
  ret <vscale x 16 x i8> %1
}

; Test that we bail out when using an alloca of a fixed-length vector (VLS)
; that was bitcasted to a scalable vector.
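; The alloca is written with a scalable type but read with a fixed-length one,
; and since vscale is unknown at compile time SROA cannot prove how the two
; access patterns overlap, so it leaves everything in memory.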
define <vscale x 4 x i32> @cast_alloca_to_svint32_t(<vscale x 4 x i32> %type.coerce) {
; CHECK-LABEL: @cast_alloca_to_svint32_t(
; CHECK-NEXT:    [[TYPE:%.*]] = alloca <16 x i32>, align 64
; CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 64
; CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], ptr [[TYPE]], align 16
; CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, ptr [[TYPE]], align 64
; CHECK-NEXT:    store <16 x i32> [[TYPE1]], ptr [[TYPE_ADDR]], align 64
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[TYPE_ADDR]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, ptr [[TYPE_ADDR]], align 16
; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
;
  %type = alloca <16 x i32>
  %type.addr = alloca <16 x i32>
  store <vscale x 4 x i32> %type.coerce, ptr %type
  %type1 = load <16 x i32>, ptr %type
  store <16 x i32> %type1, ptr %type.addr
  %1 = load <16 x i32>, ptr %type.addr
  %2 = load <vscale x 4 x i32>, ptr %type.addr
  ret <vscale x 4 x i32> %2
}

; When casting from VLA to VLS via memory, check that we bail out when
; producing a GEP where the element type is a scalable vector.
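; %retval is never initialised, so its promotion turns the memcpy into a store
; of undef; %retval.coerce itself must stay in memory since rewriting its
; scalable-typed load would require such a GEP.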
define <vscale x 4 x i32> @cast_alloca_from_svint32_t() {
; CHECK-LABEL: @cast_alloca_from_svint32_t(
; CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
; CHECK-NEXT:    store <16 x i32> undef, ptr [[RETVAL_COERCE]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[RETVAL_COERCE]], align 16
; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
;
  %retval = alloca <16 x i32>
  %retval.coerce = alloca <vscale x 4 x i32>
  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %retval.coerce, ptr align 16 %retval, i64 64, i1 false)
  %1 = load <vscale x 4 x i32>, ptr %retval.coerce
  ret <vscale x 4 x i32> %1
}

; Test that we bail out when an alloca of a fixed-length vector (VLS) that was
; bitcasted to a scalable vector is accessed through a select.
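; The scalable load/store goes through a select between the fixed-length
; alloca and null; SROA cannot relate the scalable access size to the alloca's
; fixed size, so neither the select nor the alloca is rewritten.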
define void @select_load_alloca_to_svdouble_t() {
; CHECK-LABEL: @select_load_alloca_to_svdouble_t(
; CHECK-NEXT:    [[Z:%.*]] = alloca <16 x half>, align 32
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 0, 0
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], ptr [[Z]], ptr null
; CHECK-NEXT:    [[VAL:%.*]] = load <vscale x 2 x double>, ptr [[COND]], align 16
; CHECK-NEXT:    ret void
;
  %z = alloca <16 x half>
  %cmp = icmp eq i32 0, 0
  %cond = select i1 %cmp, ptr %z, ptr null
  %val = load <vscale x 2 x double>, ptr %cond, align 16
  ret void
}

define void @select_store_alloca_to_svdouble_t(<vscale x 2 x double> %val) {
; CHECK-LABEL: @select_store_alloca_to_svdouble_t(
; CHECK-NEXT:    [[Z:%.*]] = alloca <16 x half>, align 32
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 0, 0
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], ptr [[Z]], ptr null
; CHECK-NEXT:    store <vscale x 2 x double> [[VAL:%.*]], ptr [[COND]], align 16
; CHECK-NEXT:    ret void
;
  %z = alloca <16 x half>
  %cmp = icmp eq i32 0, 0
  %cond = select i1 %cmp, ptr %z, ptr null
  store <vscale x 2 x double> %val, ptr %cond, align 16
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}