; XFAIL: *
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -structurizecfg %s | FileCheck %s

; StructurizeCFG::orderNodes used an arbitrary and nonsensical sorting
; function which broke the basic backedge identification algorithm. It
; would use RPO order, but then do a weird partial sort by loop depth,
; assuming blocks are sorted by loop. However, a block that is not part
; of a loop can appear in between the blocks of a loop, breaking that
; assumption.
;
; collectInfos must be done in RPO order. The actual structurization
; order is probably less important, but unless the loop headers are
; identified in RPO order, the wrong set of backedges is found.

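; A rough sketch of the CFG below, reconstructed from the IR (block names
; match the IR):
;
;   entry -> LOOP.HEADER
;   LOOP.HEADER -> bb18, bb62
;   bb18 -> INNER_LOOP
;   INNER_LOOP -> INNER_LOOP (inner backedge), INNER_LOOP_BREAK
;   INNER_LOOP_BREAK -> END_ELSE_BLOCK
;   bb62 -> bb64, INCREMENT_I
;   bb64 -> RETURN
;   INCREMENT_I -> END_ELSE_BLOCK
;   END_ELSE_BLOCK -> RETURN, LOOP.HEADER (outer backedge)
;
; bb64 reaches only RETURN, so it is not part of the outer loop, yet it can
; be ordered in between the outer loop's blocks; this is presumably the
; layout that broke the old sort's assumption.
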
define amdgpu_kernel void @loop_backedge_misidentified(ptr addrspace(1) %arg0) #0 {
; CHECK-LABEL: @loop_backedge_misidentified(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = load volatile <2 x i32>, ptr addrspace(1) undef, align 16
; CHECK-NEXT: [[LOAD1:%.*]] = load volatile <2 x float>, ptr addrspace(1) undef
; CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[ARG0:%.*]], i32 [[TID]]
; CHECK-NEXT: [[I_INITIAL:%.*]] = load volatile i32, ptr addrspace(1) [[GEP]], align 4
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: LOOP.HEADER:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_INITIAL]], [[ENTRY:%.*]] ], [ [[TMP10:%.*]], [[FLOW4:%.*]] ]
; CHECK-NEXT: call void asm sideeffect "s_nop 0x100b
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) null, i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr addrspace(1) [[TMP13]], align 16
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP14]], i64 0
; CHECK-NEXT: [[TMP16:%.*]] = and i32 [[TMP15]], 65535
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 1
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[TMP17]], true
; CHECK-NEXT: br i1 [[TMP0]], label [[BB62:%.*]], label [[FLOW:%.*]]
; CHECK: Flow2:
; CHECK-NEXT: br label [[FLOW]]
; CHECK: bb18:
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x i32> [[TMP]], i64 0
; CHECK-NEXT: [[TMP22:%.*]] = lshr i32 [[TMP19]], 16
; CHECK-NEXT: [[TMP24:%.*]] = urem i32 [[TMP22]], 52
; CHECK-NEXT: [[TMP25:%.*]] = mul nuw nsw i32 [[TMP24]], 52
; CHECK-NEXT: br label [[INNER_LOOP:%.*]]
; CHECK: Flow3:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[TMP59:%.*]], [[INNER_LOOP_BREAK:%.*]] ], [ [[TMP7:%.*]], [[FLOW]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i1 [ true, [[INNER_LOOP_BREAK]] ], [ [[TMP8:%.*]], [[FLOW]] ]
; CHECK-NEXT: br i1 [[TMP2]], label [[END_ELSE_BLOCK:%.*]], label [[FLOW4]]
; CHECK: INNER_LOOP:
; CHECK-NEXT: [[INNER_LOOP_J:%.*]] = phi i32 [ [[INNER_LOOP_J_INC:%.*]], [[INNER_LOOP]] ], [ [[TMP25]], [[BB18:%.*]] ]
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: [[INNER_LOOP_J_INC]] = add nsw i32 [[INNER_LOOP_J]], 1
; CHECK-NEXT: [[INNER_LOOP_CMP:%.*]] = icmp eq i32 [[INNER_LOOP_J]], 0
; CHECK-NEXT: br i1 [[INNER_LOOP_CMP]], label [[INNER_LOOP_BREAK]], label [[INNER_LOOP]]
; CHECK: INNER_LOOP_BREAK:
; CHECK-NEXT: [[TMP59]] = extractelement <4 x i32> [[TMP14]], i64 2
; CHECK-NEXT: call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
; CHECK-NEXT: br label [[FLOW3:%.*]]
; CHECK: bb62:
; CHECK-NEXT: [[LOAD13:%.*]] = icmp ult i32 [[TMP16]], 271
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[LOAD13]], true
; CHECK-NEXT: br i1 [[TMP3]], label [[INCREMENT_I:%.*]], label [[FLOW1:%.*]]
; CHECK: Flow1:
; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[INC_I:%.*]], [[INCREMENT_I]] ], [ undef, [[BB62]] ]
; CHECK-NEXT: [[TMP5:%.*]] = phi i1 [ true, [[INCREMENT_I]] ], [ false, [[BB62]] ]
; CHECK-NEXT: [[TMP6:%.*]] = phi i1 [ false, [[INCREMENT_I]] ], [ true, [[BB62]] ]
; CHECK-NEXT: br i1 [[TMP6]], label [[BB64:%.*]], label [[FLOW2:%.*]]
; CHECK: bb64:
; CHECK-NEXT: call void asm sideeffect "s_nop 42", "~{memory}"() #0
; CHECK-NEXT: br label [[FLOW2]]
; CHECK: Flow:
; CHECK-NEXT: [[TMP7]] = phi i32 [ [[TMP4]], [[FLOW2]] ], [ undef, [[LOOP_HEADER]] ]
; CHECK-NEXT: [[TMP8]] = phi i1 [ [[TMP5]], [[FLOW2]] ], [ false, [[LOOP_HEADER]] ]
; CHECK-NEXT: [[TMP9:%.*]] = phi i1 [ false, [[FLOW2]] ], [ true, [[LOOP_HEADER]] ]
; CHECK-NEXT: br i1 [[TMP9]], label [[BB18]], label [[FLOW3]]
; CHECK: INCREMENT_I:
; CHECK-NEXT: [[INC_I]] = add i32 [[I]], 1
; CHECK-NEXT: call void asm sideeffect "s_nop 0x1336
; CHECK-NEXT: br label [[FLOW1]]
; CHECK: END_ELSE_BLOCK:
; CHECK-NEXT: [[I_FINAL:%.*]] = phi i32 [ [[TMP1]], [[FLOW3]] ]
; CHECK-NEXT: call void asm sideeffect "s_nop 0x1337
; CHECK-NEXT: [[CMP_END_ELSE_BLOCK:%.*]] = icmp eq i32 [[I_FINAL]], -1
; CHECK-NEXT: br label [[FLOW4]]
; CHECK: Flow4:
; CHECK-NEXT: [[TMP10]] = phi i32 [ [[I_FINAL]], [[END_ELSE_BLOCK]] ], [ undef, [[FLOW3]] ]
; CHECK-NEXT: [[TMP11:%.*]] = phi i1 [ [[CMP_END_ELSE_BLOCK]], [[END_ELSE_BLOCK]] ], [ true, [[FLOW3]] ]
; CHECK-NEXT: br i1 [[TMP11]], label [[RETURN:%.*]], label [[LOOP_HEADER]]
; CHECK: RETURN:
; CHECK-NEXT: call void asm sideeffect "s_nop 0x99
; CHECK-NEXT: store volatile <2 x float> [[LOAD1]], ptr addrspace(1) undef, align 8
; CHECK-NEXT: ret void
;
entry:
  %tmp = load volatile <2 x i32>, ptr addrspace(1) undef, align 16
  %load1 = load volatile <2 x float>, ptr addrspace(1) undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, ptr addrspace(1) %arg0, i32 %tid
  %i.initial = load volatile i32, ptr addrspace(1) %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, ptr addrspace(1) null, i64 %tmp12
  %tmp14 = load <4 x i32>, ptr addrspace(1) %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:
  %load13 = icmp ult i32 %tmp16, 271
  br i1 %load13, label %bb64, label %INCREMENT_I

bb64:
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, ptr addrspace(1) undef, align 8
  ret void
}

; The same function, except the break to the return block goes directly to
; the return, which managed to hide the bug.
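; The only CFG difference is in bb62: it branches straight to RETURN instead
; of to bb64 (the replaced branch is kept below as a comment), leaving bb64
; with no predecessors.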
define amdgpu_kernel void @loop_backedge_misidentified_alt(ptr addrspace(1) %arg0) #0 {
entry:
  %tmp = load volatile <2 x i32>, ptr addrspace(1) undef, align 16
  %load1 = load volatile <2 x float>, ptr addrspace(1) undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, ptr addrspace(1) %arg0, i32 %tid
  %i.initial = load volatile i32, ptr addrspace(1) %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, ptr addrspace(1) null, i64 %tmp12
  %tmp14 = load <4 x i32>, ptr addrspace(1) %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:
  %load13 = icmp ult i32 %tmp16, 271
  ;br i1 %load13, label %bb64, label %INCREMENT_I
  ; branching directly to the return avoids the bug
  br i1 %load13, label %RETURN, label %INCREMENT_I

bb64:
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, ptr addrspace(1) undef, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { convergent nounwind }
attributes #1 = { convergent nounwind readnone }