//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
                                     "Zero is autoselect."),
                            cl::location(
                                VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::HoistRuntimeChecks;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const DenseMap<Value *, const SCEV *> &PtrToStride,
                                            Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Note: This assert is both overly strong and overly weak. The actual
  // invariant here is that StrideSCEV should be loop invariant. The only
  // such invariant strides we happen to speculate right now are unknowns
  // and thus this is a reasonable proxy of the actual invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const auto *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
  auto *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
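///
/// Illustrative example (not derived from any particular IR): for an i32
/// access whose pointer SCEV is {%A,+,4} and a symbolic maximum
/// backedge-taken count of 99, the computed interval is
///   Start = %A ; End = %A + 4*99 + 4 = %A + 400,
/// i.e. exactly the bytes the access may touch over all iterations.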
static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
    PredicatedScalarEvolution &PSE,
    DenseMap<const SCEV *, std::pair<const SCEV *, const SCEV *>>
        &PointerBounds) {
  ScalarEvolution *SE = PSE.getSE();

  auto [Iter, Ins] = PointerBounds.insert(
      {PtrExpr, {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
  if (!Ins)
    return Iter->second;

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  } else
    return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};

  assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
  assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");

  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getDataLayout();
  Type *IdxTy = DL.getIndexType(PtrExpr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Iter->second = {ScStart, ScEnd};
  return Iter->second;
}

/// Calculate Start and End points of memory access using
/// getStartAndEndForAccess.
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}

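/// Try to create a cheaper, difference-based runtime check for the pair of
/// single-pointer groups \p CGI and \p CGJ instead of a full bounds-overlap
/// check. Summarizing the conditions enforced below: both pointers must be
/// affine AddRecs in the innermost loop with the same constant step whose
/// magnitude equals the access size; when that holds, it is enough to later
/// compare the distance between the two start addresses against the accessed
/// footprint. Returns false whenever these conditions do not hold, which also
/// disables difference checks for the whole loop (see generateChecks).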
bool RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  // If either group contains multiple different pointers, bail out.
  // TODO: Support multiple pointers by using the minimum or maximum pointer,
  // depending on src & sink.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
    return false;

  PointerInfo *Src = &Pointers[CGI.Members[0]];
  PointerInfo *Sink = &Pointers[CGJ.Members[0]];

  // If either pointer is read and written, multiple checks may be needed. Bail
  // out.
  if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
      !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
    return false;

  ArrayRef<unsigned> AccSrc =
      DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
  ArrayRef<unsigned> AccSink =
      DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
  // If either pointer is accessed multiple times, there may not be a clear
  // src/sink relation. Bail out for now.
  if (AccSrc.size() != 1 || AccSink.size() != 1)
    return false;

  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
  auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
  if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
      SinkAR->getLoop() != DC.getInnermostLoop())
    return false;

  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return false;

  const DataLayout &DL =
      SinkAR->getLoop()->getHeader()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  // Only matching constant steps matching the AllocSize are supported at the
  // moment. This simplifies the difference computation. Can be extended in the
  // future.
  auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
  if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
      Step->getAPInt().abs() != AllocSize)
    return false;

  IntegerType *IntTy =
      IntegerType::get(Src->PointerValue->getContext(),
                       DL.getPointerSizeInBits(CGI.AddressSpace));

  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkAR, SrcAR);

  const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
  const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt))
    return false;

  const Loop *InnerLoop = SrcAR->getLoop();
  // If the start values for both Src and Sink also vary according to an outer
  // loop, then it's probably better to avoid creating diff checks because
  // they may not be hoisted. We should instead let llvm::addRuntimeChecks
  // do the expanded full range overlap checks, which can be hoisted.
  if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnerLoop->getParentLoop() &&
        // If the diff check would already be loop invariant (due to the
        // recurrences being the same), then we prefer to keep the diff checks
        // because they are cheaper.
        SrcStartAR->getStepRecurrence(*SE) !=
            SinkStartAR->getStepRecurrence(*SE)) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
  return true;
}

SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.push_back(std::make_pair(&CGI, &CGJ));
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  return C->getValue()->isNegative() ? J : I;
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
    auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
    It->second.push_back(Index);
  }

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Mark this pointer as seen.
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          // This should limit the cost of grouping the pointers to something
          // reasonable. If we do end up hitting this threshold, the algorithm
          // will create separate groups for all remaining pointers.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // for the number of comparisons has been reached. Create a new group
          // to hold the current pointer.
          Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
      }
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (const auto &CG : CheckingGroups) {
    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
        LoopAliasScopes(LoopAliasScopes) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop,
                       const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

  const DenseMap<Value *, SmallVector<const Value *, 16>> &
  getUnderlyingObjects() {
    return UnderlyingObjects;
  }

private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

  /// Adjust the MemoryLocation so that it represents accesses to this
  /// location across all iterations, rather than a single one.
  MemoryLocation adjustLoc(MemoryLocation Loc) const {
    // The accessed location varies within the loop, but remains within the
    // underlying object.
    Loc.Size = LocationSize::beforeOrAfterPointer();
    Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
    Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
    return Loc;
  }

  /// Drop alias scopes that are only valid within a single loop iteration.
  MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
    if (!ScopeList)
      return nullptr;

    // For the sake of simplicity, drop the whole scope list if any scope is
    // iteration-local.
    if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
          return LoopAliasScopes.contains(cast<MDNode>(Scope));
        }))
      return nullptr;

    return ScopeList;
  }

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Map of all accesses. Values are the types used to access memory pointed
  /// to by the pointer.
  PtrAccessMap Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// Batched alias analysis results.
  BatchAAResults BAA;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that, this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded = false;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;

  DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;

  /// Alias scopes that are declared inside the loop, and as such not valid
  /// across iterations.
  SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides,
                     Value *Ptr, Type *AccessTy, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}

// Walk back through the IR for a pointer, looking for a select like the
// following:
//
//  %offset = select i1 %cmp, i64 %a, i64 %b
//  %addr = getelementptr double, double* %base, i64 %offset
//  %ld = load double, double* %addr, align 8
//
// We won't be able to form a single SCEVAddRecExpr from this since the
// address for each loop iteration depends on %cmp. We could potentially
// produce multiple valid SCEVAddRecExprs, though, and check all of them for
// memory safety/aliasing if needed.
//
// If we encounter some IR we don't yet handle, or something obviously fine
// like a constant, then we just add the SCEV for that term to the list passed
// in by the caller. If we have a node that may potentially yield a valid
// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
// ourselves before adding to the list.
static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
  // we've exceeded our limit on recursion, just return whatever we have
  // regardless of whether it can be used for a forked pointer or not, along
  // with an indication of whether it might be a poison or undef value.
  const SCEV *Scev = SE->getSCEV(Ptr);
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
      !isa<Instruction>(Ptr) || Depth == 0) {
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    return;
  }

  Depth--;

  auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
    return get<1>(S);
  };

  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
    switch (Opcode) {
    case Instruction::Add:
      return SE->getAddExpr(L, R);
    case Instruction::Sub:
      return SE->getMinusSCEV(L, R);
    default:
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
    }
  };

  Instruction *I = cast<Instruction>(Ptr);
  unsigned Opcode = I->getOpcode();
  switch (Opcode) {
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    Type *SourceTy = GEP->getSourceElementType();
    // We only handle base + single offset GEPs here for now.
    // Not dealing with preexisting gathers yet, so no vectors.
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
      break;
    }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
                       any_of(OffsetScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the base or the offset.
    // Copy the SCEV across for the one without a fork in order to generate
    // the full SCEV for both sides of the GEP.
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
      BaseScevs.push_back(BaseScevs[0]);
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
      OffsetScevs.push_back(OffsetScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    // Find the pointer type we need to extend to.
    Type *IntPtrTy = SE->getEffectiveSCEVType(
        SE->getSCEV(GEP->getPointerOperand())->getType());

    // Find the size of the type being pointed to. We only have a single
    // index term (guarded above) so we don't need to index into arrays or
    // structures, just get the size of the scalar value.
    const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);

    // Scale up the offsets by the size of the type, then add to the bases.
    const SCEV *Scaled1 = SE->getMulExpr(
        Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
    const SCEV *Scaled2 = SE->getMulExpr(
        Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
                          NeedsFreeze);
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
                          NeedsFreeze);
    break;
  }
  case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer so if there's another behind this
    // then we just bail out and return the generic SCEV.
    findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    } else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
    // support a single phi per pointer so if there's another behind this
    // then we just bail out and return the generic SCEV.
    if (I->getNumOperands() == 2) {
      findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
      findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
    }
    if (ChildScevs.size() == 2) {
      ScevList.push_back(ChildScevs[0]);
      ScevList.push_back(ChildScevs[1]);
    } else
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
    findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);

    // See if we need to freeze our fork...
    bool NeedsFreeze =
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);

    // Check that we only have a single fork, on either the left or right side.
    // Copy the SCEV across for the one without a fork in order to generate
    // the full SCEV for both sides of the BinOp.
    if (LScevs.size() == 2 && RScevs.size() == 1)
      RScevs.push_back(RScevs[0]);
    else if (RScevs.size() == 2 && LScevs.size() == 1)
      LScevs.push_back(LScevs[0]);
    else {
      ScevList.emplace_back(Scev, NeedsFreeze);
      break;
    }

    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
        NeedsFreeze);
    ScevList.emplace_back(
        GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
        NeedsFreeze);
    break;
  }
  default:
    // Just return the current SCEV if we haven't handled the instruction yet.
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
    break;
  }
}

static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap,
                  Value *Ptr, const Loop *L) {
  ScalarEvolution *SE = PSE.getSE();
  assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);

  // For now, we will only accept a forked pointer with two possible SCEVs
  // that are either SCEVAddRecExprs or loop invariant.
  if (Scevs.size() == 2 &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
    LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
    LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
    return Scevs;
  }

  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access, Type *AccessTy,
                                          const DenseMap<Value *, const SCEV *> &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);

  for (auto &P : TranslatedPtrs) {
    const SCEV *PtrExpr = get<0>(P);
    if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
      return false;

    // When we run after a failing dependency check we have to make sure
    // we don't have wrapping pointers.
    if (ShouldCheckWrap) {
      // Skip wrap checking when translating pointers.
      if (TranslatedPtrs.size() > 1)
        return false;

      if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
        auto *Expr = PSE.getSCEV(Ptr);
        if (!Assume || !isa<SCEVAddRecExpr>(Expr))
          return false;
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      }
    }
    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (TranslatedPtrs.size() == 1)
      TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
                           false};
  }

  for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   NeedsFreeze);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const DenseMap<Value *, const SCEV *> &StridesMap,
                                     Value *&UncomputablePtr, bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;
    auto ASPointers = AS.getPointers();

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const Value *ConstPtr : ASPointers) {
      Value *Ptr = const_cast<Value *>(ConstPtr);
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((ASPointers.size() <= 1 ||
              all_of(ASPointers,
                     [this](const Value *Ptr) {
                       MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
                                                 true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (const auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.push_back({Access, AccessTy});
          CanDoAliasSetRT = false;
        }
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
    // we have a pointer for which we couldn't find the bounds but we don't
    // actually need to emit any checks so it does not matter.
    //
    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
    // any bound checks (because in that case the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (const auto &[Access, AccessTy] : Retries) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          UncomputablePtr = Access.getPointer();
          break;
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (const auto &[A, _] : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // ordered collections internally and so the iteration order here is
    // deterministic.
    auto ASPointers = AS.getPointers();

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const Value *ConstPtr : ASPointers) {
        Value *Ptr = const_cast<Value *>(ConstPtr);

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &[AC, _] : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            // We only use the pointer keys, the types vector values don't
            // matter.
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write - check other reads and writes for conflicts. If
          // this is a read only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          UnderlyingObjects[Ptr] = {};
          SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
          ::getUnderlyingObjects(Ptr, UOs, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : UOs) {
            // nullptr never aliases, don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {

  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
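/// For illustration: with an i32 access type and a pointer SCEV of
/// {%p,+,8}<%loop>, the byte step is 8 and the allocation size is 4, so the
/// returned stride is 2; a byte step that is not a multiple of the access
/// size yields std::nullopt.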
1461std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1462Type *AccessTy, Value *Ptr,
1463const Loop *Lp,
1464const DenseMap<Value *, const SCEV *> &StridesMap,
1465bool Assume, bool ShouldCheckWrap) {
1466Type *Ty = Ptr->getType();
1467assert(Ty->isPointerTy() && "Unexpected non-ptr");
1468
1469if (isa<ScalableVectorType>(AccessTy)) {
1470LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1471<< "\n");
1472return std::nullopt;
1473}
1474
1475const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1476
1477const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1478if (Assume && !AR)
1479AR = PSE.getAsAddRec(Ptr);
1480
1481if (!AR) {
1482LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1483<< " SCEV: " << *PtrScev << "\n");
1484return std::nullopt;
1485}
1486
1487// The access function must stride over the innermost loop.
1488if (Lp != AR->getLoop()) {
1489LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1490<< *Ptr << " SCEV: " << *AR << "\n");
1491return std::nullopt;
1492}
1493
1494// Check the step is constant.
1495const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1496
1497// Calculate the pointer stride and check if it is constant.
1498const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1499if (!C) {
1500LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1501<< " SCEV: " << *AR << "\n");
1502return std::nullopt;
1503}
1504
1505auto &DL = Lp->getHeader()->getDataLayout();
1506TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1507int64_t Size = AllocSize.getFixedValue();
1508const APInt &APStepVal = C->getAPInt();
1509
1510// Huge step value - give up.
1511if (APStepVal.getBitWidth() > 64)
1512return std::nullopt;
1513
1514int64_t StepVal = APStepVal.getSExtValue();
1515
1516// Strided access.
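// For example, an access with a 4-byte type through the AddRec {%A,+,8} has
// StepVal = 8 and Stride = 2, while a step of 6 leaves a nonzero remainder
// and the access is not treated as strided.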
1517int64_t Stride = StepVal / Size;
1518int64_t Rem = StepVal % Size;
1519if (Rem)
1520return std::nullopt;
1521
1522if (!ShouldCheckWrap)
1523return Stride;
1524
1525// The address calculation must not wrap. Otherwise, a dependence could be
1526// inverted.
1527if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1528return Stride;
1529
// An inbounds getelementptr that is an AddRec with a unit stride
// cannot wrap by definition. If it did, the result would be poison
// and any memory access dependent on it would be immediate UB
// when executed.
1534if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1535GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1536return Stride;
1537
// If the null pointer is undefined, then an access sequence which would
// otherwise access it can be assumed not to wrap (unsigned). Note that this
// assumes the object in memory is aligned to the natural alignment.
1541unsigned AddrSpace = Ty->getPointerAddressSpace();
1542if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1543(Stride == 1 || Stride == -1))
1544return Stride;
1545
1546if (Assume) {
1547PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1548LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1549<< "LAA: Pointer: " << *Ptr << "\n"
1550<< "LAA: SCEV: " << *AR << "\n"
1551<< "LAA: Added an overflow assumption\n");
1552return Stride;
1553}
1554LLVM_DEBUG(
1555dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1556<< *Ptr << " SCEV: " << *AR << "\n");
1557return std::nullopt;
1558}
1559
1560std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1561Type *ElemTyB, Value *PtrB,
1562const DataLayout &DL,
1563ScalarEvolution &SE, bool StrictCheck,
1564bool CheckType) {
1565assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1566
1567// Make sure that A and B are different pointers.
1568if (PtrA == PtrB)
1569return 0;
1570
1571// Make sure that the element types are the same if required.
1572if (CheckType && ElemTyA != ElemTyB)
1573return std::nullopt;
1574
1575unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1576unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1577
1578// Check that the address spaces match.
1579if (ASA != ASB)
1580return std::nullopt;
1581unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1582
1583APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1584Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1585Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1586
1587int Val;
1588if (PtrA1 == PtrB1) {
1589// Retrieve the address space again as pointer stripping now tracks through
1590// `addrspacecast`.
1591ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1592ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1593// Check that the address spaces match and that the pointers are valid.
1594if (ASA != ASB)
1595return std::nullopt;
1596
1597IdxWidth = DL.getIndexSizeInBits(ASA);
1598OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1599OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1600
1601OffsetB -= OffsetA;
1602Val = OffsetB.getSExtValue();
1603} else {
1604// Otherwise compute the distance with SCEV between the base pointers.
1605const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1606const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1607const auto *Diff =
1608dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1609if (!Diff)
1610return std::nullopt;
1611Val = Diff->getAPInt().getSExtValue();
1612}
1613int Size = DL.getTypeStoreSize(ElemTyA);
1614int Dist = Val / Size;
1615
// Ensure that the calculated distance matches the type-based one after all
// bitcasts have been stripped from the provided pointers.
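// For example, if PtrB is 6 bytes past PtrA and the element type is a 4-byte
// integer, then Val = 6 and Dist = 1; with StrictCheck, 1 * 4 != 6, so
// std::nullopt is returned, while a non-strict query returns 1.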
1618if (!StrictCheck || Dist * Size == Val)
1619return Dist;
1620return std::nullopt;
1621}
1622
1623bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1624const DataLayout &DL, ScalarEvolution &SE,
1625SmallVectorImpl<unsigned> &SortedIndices) {
1626assert(llvm::all_of(
1627VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1628"Expected list of pointer operands.");
// Walk over the pointers, and map each of them to an offset relative to
// the first pointer in the array.
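// For example (illustrative), pointers at byte offsets {0, 8, 4} from Ptr0
// with a 4-byte element type map to element offsets {0, 2, 1}; the order is
// not consecutive and SortedIndices becomes {0, 2, 1}.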
1631Value *Ptr0 = VL[0];
1632
1633using DistOrdPair = std::pair<int64_t, int>;
1634auto Compare = llvm::less_first();
1635std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1636Offsets.emplace(0, 0);
1637bool IsConsecutive = true;
1638for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1639std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1640/*StrictCheck=*/true);
1641if (!Diff)
1642return false;
1643
// Check whether a pointer with the same offset has already been seen.
1645int64_t Offset = *Diff;
1646auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1647if (!IsInserted)
1648return false;
1649// Consecutive order if the inserted element is the last one.
1650IsConsecutive &= std::next(It) == Offsets.end();
1651}
1652SortedIndices.clear();
1653if (!IsConsecutive) {
// Fill the SortedIndices array only if the order is non-consecutive.
1655SortedIndices.resize(VL.size());
1656for (auto [Idx, Off] : enumerate(Offsets))
1657SortedIndices[Idx] = Off.second;
1658}
1659return true;
1660}
1661
1662/// Returns true if the memory operations \p A and \p B are consecutive.
1663bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1664ScalarEvolution &SE, bool CheckType) {
1665Value *PtrA = getLoadStorePointerOperand(A);
1666Value *PtrB = getLoadStorePointerOperand(B);
1667if (!PtrA || !PtrB)
1668return false;
1669Type *ElemTyA = getLoadStoreType(A);
1670Type *ElemTyB = getLoadStoreType(B);
1671std::optional<int> Diff =
1672getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1673/*StrictCheck=*/true, CheckType);
1674return Diff && *Diff == 1;
1675}
1676
1677void MemoryDepChecker::addAccess(StoreInst *SI) {
1678visitPointers(SI->getPointerOperand(), *InnermostLoop,
1679[this, SI](Value *Ptr) {
1680Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1681InstMap.push_back(SI);
1682++AccessIdx;
1683});
1684}
1685
1686void MemoryDepChecker::addAccess(LoadInst *LI) {
1687visitPointers(LI->getPointerOperand(), *InnermostLoop,
1688[this, LI](Value *Ptr) {
1689Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1690InstMap.push_back(LI);
1691++AccessIdx;
1692});
1693}
1694
1695MemoryDepChecker::VectorizationSafetyStatus
1696MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1697switch (Type) {
1698case NoDep:
1699case Forward:
1700case BackwardVectorizable:
1701return VectorizationSafetyStatus::Safe;
1702
1703case Unknown:
1704return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1705case ForwardButPreventsForwarding:
1706case Backward:
1707case BackwardVectorizableButPreventsForwarding:
1708case IndirectUnsafe:
1709return VectorizationSafetyStatus::Unsafe;
1710}
1711llvm_unreachable("unexpected DepType!");
1712}
1713
1714bool MemoryDepChecker::Dependence::isBackward() const {
1715switch (Type) {
1716case NoDep:
1717case Forward:
1718case ForwardButPreventsForwarding:
1719case Unknown:
1720case IndirectUnsafe:
1721return false;
1722
1723case BackwardVectorizable:
1724case Backward:
1725case BackwardVectorizableButPreventsForwarding:
1726return true;
1727}
1728llvm_unreachable("unexpected DepType!");
1729}
1730
1731bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1732return isBackward() || Type == Unknown;
1733}
1734
1735bool MemoryDepChecker::Dependence::isForward() const {
1736switch (Type) {
1737case Forward:
1738case ForwardButPreventsForwarding:
1739return true;
1740
1741case NoDep:
1742case Unknown:
1743case BackwardVectorizable:
1744case Backward:
1745case BackwardVectorizableButPreventsForwarding:
1746case IndirectUnsafe:
1747return false;
1748}
1749llvm_unreachable("unexpected DepType!");
1750}
1751
1752bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1753uint64_t TypeByteSize) {
// If loads occur at a distance that is not a multiple of a feasible vector
// factor, store-load forwarding does not take place.
// Positive dependences might cause trouble because vectorizing them might
// prevent store-load forwarding, making vectorized code run a lot slower.
//   a[i] = a[i-3] ^ a[i-8];
// The stores to a[i:i+1] don't align with the loads from a[i-3:i-2], and
// hence on a typical architecture store-load forwarding does not take
// place. Vectorizing such cases does not make sense.
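// For example, with TypeByteSize = 4 and Distance = 12, the first candidate
// VF of 8 bytes does not divide 12 and 12/8 is well below the threshold of
// 8 * TypeByteSize iterations, so the maximum safe VF drops below
// 2 * TypeByteSize and a forwarding conflict is reported.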
1763
1764// After this many iterations store-to-load forwarding conflicts should not
1765// cause any slowdowns.
1766const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1767// Maximum vector factor.
1768uint64_t MaxVFWithoutSLForwardIssues = std::min(
1769VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1770
1771// Compute the smallest VF at which the store and load would be misaligned.
1772for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1773VF *= 2) {
// If the number of vector iterations between the store and the load is
// small, we could incur conflicts.
1776if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1777MaxVFWithoutSLForwardIssues = (VF >> 1);
1778break;
1779}
1780}
1781
1782if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1783LLVM_DEBUG(
1784dbgs() << "LAA: Distance " << Distance
1785<< " that could cause a store-load forwarding conflict\n");
1786return true;
1787}
1788
1789if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1790MaxVFWithoutSLForwardIssues !=
1791VectorizerParams::MaxVectorWidth * TypeByteSize)
1792MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1793return false;
1794}
1795
1796void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1797if (Status < S)
1798Status = S;
1799}
1800
/// Given a dependence-distance \p Dist between two memory accesses that have
/// strides in the same direction, whose maximum absolute stride is given in
/// \p MaxStride, and that have the same type size \p TypeByteSize, in a loop
/// whose maximum backedge taken count is \p MaxBTC, check whether it is
/// possible to prove statically that the dependence distance is larger than
/// the range that the accesses will travel through the execution of the loop.
/// If so, return true; false otherwise. This is useful for example in loops
/// such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
1813static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1814const SCEV &MaxBTC, const SCEV &Dist,
1815uint64_t MaxStride,
1816uint64_t TypeByteSize) {
1817
1818// If we can prove that
1819// (**) |Dist| > MaxBTC * Step
1820// where Step is the absolute stride of the memory accesses in bytes,
1821// then there is no dependence.
1822//
1823// Rationale:
1824// We basically want to check if the absolute distance (|Dist/Step|)
1825// is >= the loop iteration count (or > MaxBTC).
1826// This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1827// Section 4.2.1); Note, that for vectorization it is sufficient to prove
1828// that the dependence distance is >= VF; This is checked elsewhere.
1829// But in some cases we can prune dependence distances early, and
1830// even before selecting the VF, and without a runtime test, by comparing
1831// the distance against the loop iteration count. Since the vectorized code
1832// will be executed only if LoopCount >= VF, proving distance >= LoopCount
1833// also guarantees that distance >= VF.
1834//
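// For example, with a maximum stride of one 4-byte element (Step = 4) and
// MaxBTC = 100, the accesses sweep at most 400 bytes, so a dependence
// distance of 4000 bytes satisfies (**) and proves independence.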
1835const uint64_t ByteStride = MaxStride * TypeByteSize;
1836const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
1837const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1838
1839const SCEV *CastedDist = &Dist;
1840const SCEV *CastedProduct = Product;
1841uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1842uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1843
1844// The dependence distance can be positive/negative, so we sign extend Dist;
1845// The multiplication of the absolute stride in bytes and the
1846// backedgeTakenCount is non-negative, so we zero extend Product.
1847if (DistTypeSizeBits > ProductTypeSizeBits)
1848CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1849else
1850CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1851
1852// Is Dist - (MaxBTC * Step) > 0 ?
1853// (If so, then we have proven (**) because |Dist| >= Dist)
1854const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1855if (SE.isKnownPositive(Minus))
1856return true;
1857
1858// Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1859// (If so, then we have proven (**) because |Dist| >= -1*Dist)
1860const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1861Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1862return SE.isKnownPositive(Minus);
1863}
1864
1865/// Check the dependence for two accesses with the same stride \p Stride.
1866/// \p Distance is the positive distance and \p TypeByteSize is type size in
1867/// bytes.
1868///
1869/// \returns true if they are independent.
1870static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1871uint64_t TypeByteSize) {
1872assert(Stride > 1 && "The stride must be greater than 1");
1873assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1874assert(Distance > 0 && "The distance must be non-zero");
1875
// Skip if the distance is not a multiple of the type byte size.
1877if (Distance % TypeByteSize)
1878return false;
1879
1880uint64_t ScaledDist = Distance / TypeByteSize;
1881
// No dependence if the scaled distance is not a multiple of the stride.
1883// E.g.
1884// for (i = 0; i < 1024 ; i += 4)
1885// A[i+2] = A[i] + 1;
1886//
1887// Two accesses in memory (scaled distance is 2, stride is 4):
1888// | A[0] | | | | A[4] | | | |
1889// | | | A[2] | | | | A[6] | |
1890//
1891// E.g.
1892// for (i = 0; i < 1024 ; i += 3)
1893// A[i+4] = A[i] + 1;
1894//
1895// Two accesses in memory (scaled distance is 4, stride is 3):
1896// | A[0] | | | A[3] | | | A[6] | | |
1897// | | | | | A[4] | | | A[7] | |
1898return ScaledDist % Stride;
1899}
1900
1901/// Returns true if any of the underlying objects has a loop varying address,
1902/// i.e. may change in \p L.
1903static bool
1904isLoopVariantIndirectAddress(ArrayRef<const Value *> UnderlyingObjects,
1905ScalarEvolution &SE, const Loop *L) {
1906return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1907return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1908});
1909}
1910
1911std::variant<MemoryDepChecker::Dependence::DepType,
1912MemoryDepChecker::DepDistanceStrideAndSizeInfo>
1913MemoryDepChecker::getDependenceDistanceStrideAndSize(
1914const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
1915const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
1916const DenseMap<Value *, SmallVector<const Value *, 16>>
1917&UnderlyingObjects) {
1918auto &DL = InnermostLoop->getHeader()->getDataLayout();
1919auto &SE = *PSE.getSE();
1920auto [APtr, AIsWrite] = A;
1921auto [BPtr, BIsWrite] = B;
1922
1923// Two reads are independent.
1924if (!AIsWrite && !BIsWrite)
1925return MemoryDepChecker::Dependence::NoDep;
1926
1927Type *ATy = getLoadStoreType(AInst);
1928Type *BTy = getLoadStoreType(BInst);
1929
1930// We cannot check pointers in different address spaces.
1931if (APtr->getType()->getPointerAddressSpace() !=
1932BPtr->getType()->getPointerAddressSpace())
1933return MemoryDepChecker::Dependence::Unknown;
1934
1935int64_t StrideAPtr =
1936getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true)
1937.value_or(0);
1938int64_t StrideBPtr =
1939getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true)
1940.value_or(0);
1941
1942const SCEV *Src = PSE.getSCEV(APtr);
1943const SCEV *Sink = PSE.getSCEV(BPtr);
1944
1945// If the induction step is negative we have to invert source and sink of the
1946// dependence when measuring the distance between them. We should not swap
1947// AIsWrite with BIsWrite, as their uses expect them in program order.
1948if (StrideAPtr < 0) {
1949std::swap(Src, Sink);
1950std::swap(AInst, BInst);
1951}
1952
1953const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1954
1955LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1956<< "(Induction step: " << StrideAPtr << ")\n");
1957LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1958<< ": " << *Dist << "\n");
1959
// We need accesses where the addresses of the accessed underlying objects do
// not change within the loop.
1962if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1963InnermostLoop) ||
1964isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1965InnermostLoop))
1966return MemoryDepChecker::Dependence::IndirectUnsafe;
1967
1968// Check if we can prove that Sink only accesses memory after Src's end or
// vice versa. At the moment this is limited to cases where either the source
// or the sink is loop invariant, to avoid compile-time increases. This is not
1971// required for correctness.
1972if (SE.isLoopInvariant(Src, InnermostLoop) ||
1973SE.isLoopInvariant(Sink, InnermostLoop)) {
1974const auto &[SrcStart, SrcEnd] =
1975getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
1976const auto &[SinkStart, SinkEnd] =
1977getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
1978if (!isa<SCEVCouldNotCompute>(SrcStart) &&
1979!isa<SCEVCouldNotCompute>(SrcEnd) &&
1980!isa<SCEVCouldNotCompute>(SinkStart) &&
1981!isa<SCEVCouldNotCompute>(SinkEnd)) {
1982if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1983return MemoryDepChecker::Dependence::NoDep;
1984if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
1985return MemoryDepChecker::Dependence::NoDep;
1986}
1987}
1988
1989// Need accesses with constant strides and the same direction. We don't want
1990// to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
1991// could wrap in the address space.
1992if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
1993(StrideAPtr < 0 && StrideBPtr > 0)) {
1994LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1995return MemoryDepChecker::Dependence::Unknown;
1996}
1997
1998uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1999bool HasSameSize =
2000DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
2001if (!HasSameSize)
2002TypeByteSize = 0;
2003return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
2004std::abs(StrideBPtr), TypeByteSize,
2005AIsWrite, BIsWrite);
2006}
2007
2008MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
2009const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
2010unsigned BIdx,
2011const DenseMap<Value *, SmallVector<const Value *, 16>>
2012&UnderlyingObjects) {
2013assert(AIdx < BIdx && "Must pass arguments in program order");
2014
2015// Get the dependence distance, stride, type size and what access writes for
2016// the dependence between A and B.
2017auto Res = getDependenceDistanceStrideAndSize(
2018A, InstMap[AIdx], B, InstMap[BIdx], UnderlyingObjects);
2019if (std::holds_alternative<Dependence::DepType>(Res))
2020return std::get<Dependence::DepType>(Res);
2021
2022auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2023std::get<DepDistanceStrideAndSizeInfo>(Res);
2024bool HasSameSize = TypeByteSize > 0;
2025
2026std::optional<uint64_t> CommonStride =
2027StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2028if (isa<SCEVCouldNotCompute>(Dist)) {
2029// TODO: Relax requirement that there is a common stride to retry with
2030// non-constant distance dependencies.
2031FoundNonConstantDistanceDependence |= CommonStride.has_value();
2032LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2033return Dependence::Unknown;
2034}
2035
2036ScalarEvolution &SE = *PSE.getSE();
2037auto &DL = InnermostLoop->getHeader()->getDataLayout();
2038uint64_t MaxStride = std::max(StrideA, StrideB);
2039
// If the distance between the accesses is larger than their maximum absolute
// stride multiplied by the symbolic maximum backedge taken count (which is an
// upper bound on the number of iterations), the accesses are independent, i.e.
// they are far enough apart that they won't access the same location
// across all loop iterations.
2045if (HasSameSize && isSafeDependenceDistance(
2046DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()),
2047*Dist, MaxStride, TypeByteSize))
2048return Dependence::NoDep;
2049
2050const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2051
2052// Attempt to prove strided accesses independent.
2053if (C) {
2054const APInt &Val = C->getAPInt();
2055int64_t Distance = Val.getSExtValue();
2056
2057// If the distance between accesses and their strides are known constants,
2058// check whether the accesses interlace each other.
2059if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2060HasSameSize &&
2061areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2062TypeByteSize)) {
2063LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2064return Dependence::NoDep;
2065}
2066} else
2067Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2068
2069// Negative distances are not plausible dependencies.
2070if (SE.isKnownNonPositive(Dist)) {
2071if (SE.isKnownNonNegative(Dist)) {
2072if (HasSameSize) {
2073// Write to the same location with the same size.
2074return Dependence::Forward;
2075}
2076LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2077"different type sizes\n");
2078return Dependence::Unknown;
2079}
2080
2081bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2082// Check if the first access writes to a location that is read in a later
2083// iteration, where the distance between them is not a multiple of a vector
2084// factor and relatively small.
2085//
2086// NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2087// couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2088// forward dependency will allow vectorization using any width.
2089
2090if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2091if (!C) {
2092// TODO: FoundNonConstantDistanceDependence is used as a necessary
2093// condition to consider retrying with runtime checks. Historically, we
2094// did not set it when strides were different but there is no inherent
2095// reason to.
2096FoundNonConstantDistanceDependence |= CommonStride.has_value();
2097return Dependence::Unknown;
2098}
2099if (!HasSameSize ||
2100couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2101TypeByteSize)) {
2102LLVM_DEBUG(
2103dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2104return Dependence::ForwardButPreventsForwarding;
2105}
2106}
2107
2108LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2109return Dependence::Forward;
2110}
2111
2112int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2113// Below we only handle strictly positive distances.
2114if (MinDistance <= 0) {
2115FoundNonConstantDistanceDependence |= CommonStride.has_value();
2116return Dependence::Unknown;
2117}
2118
2119if (!isa<SCEVConstant>(Dist)) {
2120// Previously this case would be treated as Unknown, possibly setting
2121// FoundNonConstantDistanceDependence to force re-trying with runtime
2122// checks. Until the TODO below is addressed, set it here to preserve
2123// original behavior w.r.t. re-trying with runtime checks.
2124// TODO: FoundNonConstantDistanceDependence is used as a necessary
2125// condition to consider retrying with runtime checks. Historically, we
2126// did not set it when strides were different but there is no inherent
2127// reason to.
2128FoundNonConstantDistanceDependence |= CommonStride.has_value();
2129}
2130
2131if (!HasSameSize) {
2132LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2133"different type sizes\n");
2134return Dependence::Unknown;
2135}
2136
2137if (!CommonStride)
2138return Dependence::Unknown;
2139
2140// Bail out early if passed-in parameters make vectorization not feasible.
2141unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2142VectorizerParams::VectorizationFactor : 1);
2143unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2144VectorizerParams::VectorizationInterleave : 1);
2145// The minimum number of iterations for a vectorized/unrolled version.
2146unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2147
// It's not vectorizable if the distance is smaller than the minimum distance
// needed for a vectorized/unrolled version. Vectorizing one iteration in
// front needs TypeByteSize * Stride. Vectorizing the last iteration needs
// TypeByteSize (no need to add the last gap distance).
2152//
2153// E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2154// foo(int *A) {
2155// int *B = (int *)((char *)A + 14);
2156// for (i = 0 ; i < 1024 ; i += 2)
2157// B[i] = A[i] + 1;
2158// }
2159//
2160// Two accesses in memory (stride is 2):
2161// | A[0] | | A[2] | | A[4] | | A[6] | |
2162// | B[0] | | B[2] | | B[4] |
2163//
// The minimum distance needed for vectorizing every iteration except the
// last one is 4 * 2 * (MinNumIter - 1); the last iteration needs 4 more.
// So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
//
// If MinNumIter is 2, it is vectorizable as the minimum distance needed is
// 12, which is less than the distance of 14.
//
// If MinNumIter is 4 (say, if a user forces the vectorization factor to be 4),
// the minimum distance needed is 28, which is greater than the distance of 14.
// It is not safe to do vectorization.
2174
2175// We know that Dist is positive, but it may not be constant. Use the signed
2176// minimum for computations below, as this ensures we compute the closest
2177// possible dependence distance.
2178uint64_t MinDistanceNeeded =
2179TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2180if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2181if (!isa<SCEVConstant>(Dist)) {
2182// For non-constant distances, we checked the lower bound of the
2183// dependence distance and the distance may be larger at runtime (and safe
2184// for vectorization). Classify it as Unknown, so we re-try with runtime
2185// checks.
2186return Dependence::Unknown;
2187}
2188LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2189<< MinDistance << '\n');
2190return Dependence::Backward;
2191}
2192
// Unsafe if the minimum distance needed is greater than the smallest
// dependence distance.
2195if (MinDistanceNeeded > MinDepDistBytes) {
2196LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2197<< MinDistanceNeeded << " size in bytes\n");
2198return Dependence::Backward;
2199}
2200
2201// Positive distance bigger than max vectorization factor.
2202// FIXME: Should use max factor instead of max distance in bytes, which could
2203// not handle different types.
2204// E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2205// void foo (int *A, char *B) {
2206// for (unsigned i = 0; i < 1024; i++) {
2207// A[i+2] = A[i] + 1;
2208// B[i+2] = B[i] + 1;
2209// }
2210// }
2211//
// This case is currently unsafe according to the max safe distance. If we
// analyze the two accesses on array B, the max safe dependence distance
// is 2. Then we analyze the accesses on array A, where the minimum distance
// needed is 8, which is greater than 2, so vectorization is forbidden. But
// actually both A and B could be vectorized with a factor of 2 iterations.
2217MinDepDistBytes =
2218std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2219
2220bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2221uint64_t MinDepDistBytesOld = MinDepDistBytes;
2222if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2223isa<SCEVConstant>(Dist) &&
2224couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2225// Sanity check that we didn't update MinDepDistBytes when calling
2226// couldPreventStoreLoadForward
2227assert(MinDepDistBytes == MinDepDistBytesOld &&
2228"An update to MinDepDistBytes requires an update to "
2229"MaxSafeVectorWidthInBits");
2230(void)MinDepDistBytesOld;
2231return Dependence::BackwardVectorizableButPreventsForwarding;
2232}
2233
2234// An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2235// since there is a backwards dependency.
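// For example, with MinDepDistBytes = 16, a common stride of 1 and a 4-byte
// type, MaxVF is 4 elements and MaxVFInBits is 128.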
2236uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2237LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2238<< " with max VF = " << MaxVF << '\n');
2239
2240uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2241if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2242// For non-constant distances, we checked the lower bound of the dependence
2243// distance and the distance may be larger at runtime (and safe for
2244// vectorization). Classify it as Unknown, so we re-try with runtime checks.
2245return Dependence::Unknown;
2246}
2247
2248MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2249return Dependence::BackwardVectorizable;
2250}
2251
2252bool MemoryDepChecker::areDepsSafe(
2253DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2254const DenseMap<Value *, SmallVector<const Value *, 16>>
2255&UnderlyingObjects) {
2256
2257MinDepDistBytes = -1;
2258SmallPtrSet<MemAccessInfo, 8> Visited;
2259for (MemAccessInfo CurAccess : CheckDeps) {
2260if (Visited.count(CurAccess))
2261continue;
2262
2263// Get the relevant memory access set.
2264EquivalenceClasses<MemAccessInfo>::iterator I =
2265AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2266
2267// Check accesses within this set.
2268EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2269AccessSets.member_begin(I);
2270EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2271AccessSets.member_end();
2272
2273// Check every access pair.
2274while (AI != AE) {
2275Visited.insert(*AI);
2276bool AIIsWrite = AI->getInt();
// Check loads only against the next equivalence class, but stores also
// against other stores in the same equivalence class - to the same address.
2279EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2280(AIIsWrite ? AI : std::next(AI));
2281while (OI != AE) {
2282// Check every accessing instruction pair in program order.
2283for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2284I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
// Scan all accesses of another equivalence class, but only the subsequent
// accesses of the same equivalence class.
2287for (std::vector<unsigned>::iterator
2288I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2289I2E = (OI == AI ? I1E : Accesses[*OI].end());
2290I2 != I2E; ++I2) {
2291auto A = std::make_pair(&*AI, *I1);
2292auto B = std::make_pair(&*OI, *I2);
2293
2294assert(*I1 != *I2);
2295if (*I1 > *I2)
2296std::swap(A, B);
2297
2298Dependence::DepType Type = isDependent(*A.first, A.second, *B.first,
2299B.second, UnderlyingObjects);
2300mergeInStatus(Dependence::isSafeForVectorization(Type));
2301
2302// Gather dependences unless we accumulated MaxDependences
2303// dependences. In that case return as soon as we find the first
2304// unsafe dependence. This puts a limit on this quadratic
2305// algorithm.
2306if (RecordDependences) {
2307if (Type != Dependence::NoDep)
2308Dependences.push_back(Dependence(A.second, B.second, Type));
2309
2310if (Dependences.size() >= MaxDependences) {
2311RecordDependences = false;
2312Dependences.clear();
2313LLVM_DEBUG(dbgs()
2314<< "Too many dependences, stopped recording\n");
2315}
2316}
2317if (!RecordDependences && !isSafeForVectorization())
2318return false;
2319}
2320++OI;
2321}
2322++AI;
2323}
2324}
2325
2326LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2327return isSafeForVectorization();
2328}
2329
2330SmallVector<Instruction *, 4>
2331MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2332MemAccessInfo Access(Ptr, IsWrite);
2333auto &IndexVector = Accesses.find(Access)->second;
2334
2335SmallVector<Instruction *, 4> Insts;
2336transform(IndexVector,
2337std::back_inserter(Insts),
2338[&](unsigned Idx) { return this->InstMap[Idx]; });
2339return Insts;
2340}
2341
2342const char *MemoryDepChecker::Dependence::DepName[] = {
2343"NoDep",
2344"Unknown",
2345"IndirectUnsafe",
2346"Forward",
2347"ForwardButPreventsForwarding",
2348"Backward",
2349"BackwardVectorizable",
2350"BackwardVectorizableButPreventsForwarding"};
2351
2352void MemoryDepChecker::Dependence::print(
2353raw_ostream &OS, unsigned Depth,
2354const SmallVectorImpl<Instruction *> &Instrs) const {
2355OS.indent(Depth) << DepName[Type] << ":\n";
2356OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2357OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2358}
2359
2360bool LoopAccessInfo::canAnalyzeLoop() {
2361// We need to have a loop header.
2362LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2363<< TheLoop->getHeader()->getParent()->getName() << "' from "
2364<< TheLoop->getLocStr() << "\n");
2365
2366// We can only analyze innermost loops.
2367if (!TheLoop->isInnermost()) {
2368LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2369recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2370return false;
2371}
2372
2373// We must have a single backedge.
2374if (TheLoop->getNumBackEdges() != 1) {
2375LLVM_DEBUG(
2376dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2377recordAnalysis("CFGNotUnderstood")
2378<< "loop control flow is not understood by analyzer";
2379return false;
2380}
2381
2382// ScalarEvolution needs to be able to find the symbolic max backedge taken
2383// count, which is an upper bound on the number of loop iterations. The loop
2384// may execute fewer iterations, if it exits via an uncountable exit.
2385const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2386if (isa<SCEVCouldNotCompute>(ExitCount)) {
2387recordAnalysis("CantComputeNumberOfIterations")
2388<< "could not determine number of loop iterations";
2389LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2390return false;
2391}
2392
2393LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2394<< TheLoop->getHeader()->getName() << "\n");
2395return true;
2396}
2397
2398bool LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2399const TargetLibraryInfo *TLI,
2400DominatorTree *DT) {
2401// Holds the Load and Store instructions.
2402SmallVector<LoadInst *, 16> Loads;
2403SmallVector<StoreInst *, 16> Stores;
2404SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2405
2406// Holds all the different accesses in the loop.
2407unsigned NumReads = 0;
2408unsigned NumReadWrites = 0;
2409
2410bool HasComplexMemInst = false;
2411
2412// A runtime check is only legal to insert if there are no convergent calls.
2413HasConvergentOp = false;
2414
2415PtrRtChecking->Pointers.clear();
2416PtrRtChecking->Need = false;
2417
2418const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2419
2420const bool EnableMemAccessVersioningOfLoop =
2421EnableMemAccessVersioning &&
2422!TheLoop->getHeader()->getParent()->hasOptSize();
2423
2424// Traverse blocks in fixed RPOT order, regardless of their storage in the
2425// loop info, as it may be arbitrary.
2426LoopBlocksRPO RPOT(TheLoop);
2427RPOT.perform(LI);
2428for (BasicBlock *BB : RPOT) {
2429// Scan the BB and collect legal loads and stores. Also detect any
2430// convergent instructions.
2431for (Instruction &I : *BB) {
2432if (auto *Call = dyn_cast<CallBase>(&I)) {
2433if (Call->isConvergent())
2434HasConvergentOp = true;
2435}
2436
// With both a non-vectorizable memory instruction and a convergent
// operation found in this loop, there is no reason to continue the search.
2439if (HasComplexMemInst && HasConvergentOp)
2440return false;
2441
2442// Avoid hitting recordAnalysis multiple times.
2443if (HasComplexMemInst)
2444continue;
2445
2446// Record alias scopes defined inside the loop.
2447if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2448for (Metadata *Op : Decl->getScopeList()->operands())
2449LoopAliasScopes.insert(cast<MDNode>(Op));
2450
2451// Many math library functions read the rounding mode. We will only
2452// vectorize a loop if it contains known function calls that don't set
2453// the flag. Therefore, it is safe to ignore this read from memory.
2454auto *Call = dyn_cast<CallInst>(&I);
2455if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2456continue;
2457
2458// If this is a load, save it. If this instruction can read from memory
2459// but is not a load, then we quit. Notice that we don't handle function
2460// calls that read or write.
2461if (I.mayReadFromMemory()) {
2462// If the function has an explicit vectorized counterpart, we can safely
2463// assume that it can be vectorized.
2464if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2465!VFDatabase::getMappings(*Call).empty())
2466continue;
2467
2468auto *Ld = dyn_cast<LoadInst>(&I);
2469if (!Ld) {
2470recordAnalysis("CantVectorizeInstruction", Ld)
2471<< "instruction cannot be vectorized";
2472HasComplexMemInst = true;
2473continue;
2474}
2475if (!Ld->isSimple() && !IsAnnotatedParallel) {
2476recordAnalysis("NonSimpleLoad", Ld)
2477<< "read with atomic ordering or volatile read";
2478LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2479HasComplexMemInst = true;
2480continue;
2481}
2482NumLoads++;
2483Loads.push_back(Ld);
2484DepChecker->addAccess(Ld);
2485if (EnableMemAccessVersioningOfLoop)
2486collectStridedAccess(Ld);
2487continue;
2488}
2489
2490// Save 'store' instructions. Abort if other instructions write to memory.
2491if (I.mayWriteToMemory()) {
2492auto *St = dyn_cast<StoreInst>(&I);
2493if (!St) {
2494recordAnalysis("CantVectorizeInstruction", St)
2495<< "instruction cannot be vectorized";
2496HasComplexMemInst = true;
2497continue;
2498}
2499if (!St->isSimple() && !IsAnnotatedParallel) {
2500recordAnalysis("NonSimpleStore", St)
2501<< "write with atomic ordering or volatile write";
2502LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2503HasComplexMemInst = true;
2504continue;
2505}
2506NumStores++;
2507Stores.push_back(St);
2508DepChecker->addAccess(St);
2509if (EnableMemAccessVersioningOfLoop)
2510collectStridedAccess(St);
2511}
2512} // Next instr.
2513} // Next block.
2514
2515if (HasComplexMemInst)
2516return false;
2517
2518// Now we have two lists that hold the loads and the stores.
2519// Next, we find the pointers that they use.
2520
2521// Check if we see any stores. If there are no stores, then we don't
2522// care if the pointers are *restrict*.
2523if (!Stores.size()) {
2524LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2525return true;
2526}
2527
2528MemoryDepChecker::DepCandidates DependentAccesses;
2529AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2530LoopAliasScopes);
2531
2532// Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2533// multiple times on the same object. If the ptr is accessed twice, once
2534// for read and once for write, it will only appear once (on the write
2535// list). This is okay, since we are going to check for conflicts between
2536// writes and between reads and writes, but not between reads and reads.
2537SmallSet<std::pair<Value *, Type *>, 16> Seen;
2538
2539// Record uniform store addresses to identify if we have multiple stores
2540// to the same address.
2541SmallPtrSet<Value *, 16> UniformStores;
2542
2543for (StoreInst *ST : Stores) {
2544Value *Ptr = ST->getPointerOperand();
2545
2546if (isInvariant(Ptr)) {
2547// Record store instructions to loop invariant addresses
2548StoresToInvariantAddresses.push_back(ST);
2549HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2550!UniformStores.insert(Ptr).second;
2551}
2552
// If we did *not* see this pointer before, insert it into the read-write
// list. At this phase it is only a 'write' list.
2555Type *AccessTy = getLoadStoreType(ST);
2556if (Seen.insert({Ptr, AccessTy}).second) {
2557++NumReadWrites;
2558
2559MemoryLocation Loc = MemoryLocation::get(ST);
2560// The TBAA metadata could have a control dependency on the predication
2561// condition, so we cannot rely on it when determining whether or not we
2562// need runtime pointer checks.
2563if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2564Loc.AATags.TBAA = nullptr;
2565
2566visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2567[&Accesses, AccessTy, Loc](Value *Ptr) {
2568MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2569Accesses.addStore(NewLoc, AccessTy);
2570});
2571}
2572}
2573
2574if (IsAnnotatedParallel) {
2575LLVM_DEBUG(
2576dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2577<< "checks.\n");
2578return true;
2579}
2580
2581for (LoadInst *LD : Loads) {
2582Value *Ptr = LD->getPointerOperand();
// If we did *not* see this pointer before, insert it into the
// read list. If we *did* see it before, then it is already in
// the read-write list. This allows us to vectorize expressions
// such as A[i] += x, because the address of A[i] is a read-write
// pointer. This only works if the index of A[i] is consecutive.
// If the address of i is unknown (for example A[B[i]]) then we may
// read a few words, modify them, and write a few words, and some of the
// words may be written to the same address.
2591bool IsReadOnlyPtr = false;
2592Type *AccessTy = getLoadStoreType(LD);
2593if (Seen.insert({Ptr, AccessTy}).second ||
2594!getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2595++NumReads;
2596IsReadOnlyPtr = true;
2597}
2598
// See if there is an unsafe dependency between a load from a uniform address
// and a store to the same uniform address.
2601if (UniformStores.count(Ptr)) {
2602LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2603"load and uniform store to the same address!\n");
2604HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2605}
2606
2607MemoryLocation Loc = MemoryLocation::get(LD);
2608// The TBAA metadata could have a control dependency on the predication
2609// condition, so we cannot rely on it when determining whether or not we
2610// need runtime pointer checks.
2611if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2612Loc.AATags.TBAA = nullptr;
2613
2614visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2615[&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2616MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2617Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2618});
2619}
2620
// If we write (or read-write) to a single destination and there are no
// other reads in this loop then it is safe to vectorize.
2623if (NumReadWrites == 1 && NumReads == 0) {
2624LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2625return true;
2626}
2627
2628// Build dependence sets and check whether we need a runtime pointer bounds
2629// check.
2630Accesses.buildDependenceSets();
2631
2632// Find pointers with computable bounds. We are going to use this information
2633// to place a runtime bound check.
2634Value *UncomputablePtr = nullptr;
2635bool CanDoRTIfNeeded =
2636Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2637SymbolicStrides, UncomputablePtr, false);
2638if (!CanDoRTIfNeeded) {
2639auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2640recordAnalysis("CantIdentifyArrayBounds", I)
2641<< "cannot identify array bounds";
2642LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2643<< "the array bounds.\n");
2644return false;
2645}
2646
2647LLVM_DEBUG(
2648dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2649
2650bool DepsAreSafe = true;
2651if (Accesses.isDependencyCheckNeeded()) {
2652LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2653DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
2654Accesses.getDependenciesToCheck(),
2655Accesses.getUnderlyingObjects());
2656
2657if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
2658LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2659
2660// Clear the dependency checks. We assume they are not needed.
2661Accesses.resetDepChecks(*DepChecker);
2662
2663PtrRtChecking->reset();
2664PtrRtChecking->Need = true;
2665
2666auto *SE = PSE->getSE();
2667UncomputablePtr = nullptr;
2668CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2669*PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2670
2671// Check that we found the bounds for the pointer.
2672if (!CanDoRTIfNeeded) {
2673auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2674recordAnalysis("CantCheckMemDepsAtRunTime", I)
2675<< "cannot check memory dependencies at runtime";
2676LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2677return false;
2678}
2679DepsAreSafe = true;
2680}
2681}
2682
2683if (HasConvergentOp) {
2684recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2685<< "cannot add control dependency to convergent operation";
2686LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2687"would be needed with a convergent operation\n");
2688return false;
2689}
2690
2691if (DepsAreSafe) {
2692LLVM_DEBUG(
2693dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2694<< (PtrRtChecking->Need ? "" : " don't")
2695<< " need runtime memory checks.\n");
2696return true;
2697}
2698
2699emitUnsafeDependenceRemark();
2700return false;
2701}
2702
2703void LoopAccessInfo::emitUnsafeDependenceRemark() {
2704const auto *Deps = getDepChecker().getDependences();
2705if (!Deps)
2706return;
2707const auto *Found =
2708llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2709return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2710MemoryDepChecker::VectorizationSafetyStatus::Safe;
2711});
2712if (Found == Deps->end())
2713return;
2714MemoryDepChecker::Dependence Dep = *Found;
2715
2716LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2717
2718// Emit remark for first unsafe dependence
2719bool HasForcedDistribution = false;
2720std::optional<const MDOperand *> Value =
2721findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2722if (Value) {
2723const MDOperand *Op = *Value;
2724assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2725HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2726}
2727
2728const std::string Info =
2729HasForcedDistribution
2730? "unsafe dependent memory operations in loop."
2731: "unsafe dependent memory operations in loop. Use "
2732"#pragma clang loop distribute(enable) to allow loop distribution "
2733"to attempt to isolate the offending operations into a separate "
2734"loop";
2735OptimizationRemarkAnalysis &R =
2736recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2737
2738switch (Dep.Type) {
2739case MemoryDepChecker::Dependence::NoDep:
2740case MemoryDepChecker::Dependence::Forward:
2741case MemoryDepChecker::Dependence::BackwardVectorizable:
2742llvm_unreachable("Unexpected dependence");
2743case MemoryDepChecker::Dependence::Backward:
2744R << "\nBackward loop carried data dependence.";
2745break;
2746case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2747R << "\nForward loop carried data dependence that prevents "
2748"store-to-load forwarding.";
2749break;
2750case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2751R << "\nBackward loop carried data dependence that prevents "
2752"store-to-load forwarding.";
2753break;
2754case MemoryDepChecker::Dependence::IndirectUnsafe:
2755R << "\nUnsafe indirect dependence.";
2756break;
2757case MemoryDepChecker::Dependence::Unknown:
2758R << "\nUnknown data dependence.";
2759break;
2760}
2761
2762if (Instruction *I = Dep.getSource(getDepChecker())) {
2763DebugLoc SourceLoc = I->getDebugLoc();
2764if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2765SourceLoc = DD->getDebugLoc();
2766if (SourceLoc)
2767R << " Memory location is the same as accessed at "
2768<< ore::NV("Location", SourceLoc);
2769}
2770}
2771
2772bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2773DominatorTree *DT) {
2774assert(TheLoop->contains(BB) && "Unknown block used");
2775
2776// Blocks that do not dominate the latch need predication.
2777BasicBlock* Latch = TheLoop->getLoopLatch();
2778return !DT->dominates(BB, Latch);
2779}
2780
2781OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2782Instruction *I) {
2783assert(!Report && "Multiple reports generated");
2784
2785Value *CodeRegion = TheLoop->getHeader();
2786DebugLoc DL = TheLoop->getStartLoc();
2787
2788if (I) {
2789CodeRegion = I->getParent();
// If there is no debug location attached to the instruction, fall back to
// using the loop's.
2792if (I->getDebugLoc())
2793DL = I->getDebugLoc();
2794}
2795
2796Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2797CodeRegion);
2798return *Report;
2799}
2800
2801bool LoopAccessInfo::isInvariant(Value *V) const {
2802auto *SE = PSE->getSE();
2803// TODO: Is this really what we want? Even without FP SCEV, we may want some
2804// trivially loop-invariant FP values to be considered invariant.
2805if (!SE->isSCEVable(V->getType()))
2806return false;
2807const SCEV *S = SE->getSCEV(V);
2808return SE->isLoopInvariant(S, TheLoop);
2809}
2810
2811/// Find the operand of the GEP that should be checked for consecutive
2812/// stores. This ignores trailing indices that have no effect on the final
2813/// pointer.
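/// For example (illustrative): for "getelementptr i32, ptr %A, i64 %i" the
/// induction operand is %i (operand 1); for "getelementptr { i32 }, ptr %A,
/// i64 %i, i32 0" the trailing zero index does not change the 4-byte element
/// size, so it is peeled off and operand 1 is returned as well.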
2814static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2815const DataLayout &DL = Gep->getDataLayout();
2816unsigned LastOperand = Gep->getNumOperands() - 1;
2817TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2818
2819// Walk backwards and try to peel off zeros.
2820while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2821// Find the type we're currently indexing into.
2822gep_type_iterator GEPTI = gep_type_begin(Gep);
2823std::advance(GEPTI, LastOperand - 2);
2824
2825// If it's a type with the same allocation size as the result of the GEP we
2826// can peel off the zero index.
2827TypeSize ElemSize = GEPTI.isStruct()
2828? DL.getTypeAllocSize(GEPTI.getIndexedType())
2829: GEPTI.getSequentialElementStride(DL);
2830if (ElemSize != GEPAllocSize)
2831break;
2832--LastOperand;
2833}
2834
2835return LastOperand;
2836}
2837
2838/// If the argument is a GEP, then returns the operand identified by
2839/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2840/// operand, it returns that instead.
2841static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2842GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2843if (!GEP)
2844return Ptr;
2845
2846unsigned InductionOperand = getGEPInductionOperand(GEP);
2847
2848// Check that all of the gep indices are uniform except for our induction
2849// operand.
2850for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2851if (I != InductionOperand &&
2852!SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2853return Ptr;
2854return GEP->getOperand(InductionOperand);
2855}
2856
2857/// Get the stride of a pointer access in a loop. Looks for symbolic
2858/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
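/// For example (illustrative), for a C-style access "A[i * Stride]" the GEP
/// is stripped first, leaving the index expression i * Stride; its SCEV is an
/// AddRec whose step is the loop-invariant SCEVUnknown %Stride, which is what
/// gets returned and later speculated to be 1 by stride versioning.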
2859static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2860auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2861if (!PtrTy || PtrTy->isAggregateType())
2862return nullptr;
2863
// Try to remove a GEP instruction to make the pointer (actually the index at
// this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
// the pointer; otherwise, we are analyzing the index.
2867Value *OrigPtr = Ptr;
2868
2869// The size of the pointer access.
2870int64_t PtrAccessSize = 1;
2871
2872Ptr = stripGetElementPtr(Ptr, SE, Lp);
2873const SCEV *V = SE->getSCEV(Ptr);
2874
2875if (Ptr != OrigPtr)
2876// Strip off casts.
2877while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2878V = C->getOperand();
2879
2880const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2881if (!S)
2882return nullptr;
2883
2884// If the pointer is invariant then there is no stride and it makes no
2885// sense to add it here.
2886if (Lp != S->getLoop())
2887return nullptr;
2888
2889V = S->getStepRecurrence(*SE);
2890if (!V)
2891return nullptr;
2892
2893// Strip off the size of access multiplication if we are still analyzing the
2894// pointer.
2895if (OrigPtr == Ptr) {
2896if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2897if (M->getOperand(0)->getSCEVType() != scConstant)
2898return nullptr;
2899
2900const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2901
2902// Huge step value - give up.
2903if (APStepVal.getBitWidth() > 64)
2904return nullptr;
2905
2906int64_t StepVal = APStepVal.getSExtValue();
2907if (PtrAccessSize != StepVal)
2908return nullptr;
2909V = M->getOperand(1);
2910}
2911}
2912
// Note that the restrictions after this loop invariant check are only
// profitability restrictions.
2915if (!SE->isLoopInvariant(V, Lp))
2916return nullptr;
2917
2918// Look for the loop invariant symbolic value.
2919if (isa<SCEVUnknown>(V))
2920return V;
2921
2922if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2923if (isa<SCEVUnknown>(C->getOperand()))
2924return V;
2925
2926return nullptr;
2927}
2928
2929void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2930Value *Ptr = getLoadStorePointerOperand(MemAccess);
2931if (!Ptr)
2932return;
2933
2934// Note: getStrideFromPointer is a *profitability* heuristic. We
2935// could broaden the scope of values returned here - to anything
2936// which happens to be loop invariant and contributes to the
2937// computation of an interesting IV - but we chose not to as we
2938// don't have a cost model here, and broadening the scope exposes
2939// far too many unprofitable cases.
2940const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2941if (!StrideExpr)
2942return;
2943
2944LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2945"versioning:");
2946LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2947
2948if (!SpeculateUnitStride) {
2949LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2950return;
2951}
2952
2953// Avoid adding the "Stride == 1" predicate when we know that
2954// Stride >= Trip-Count. Such a predicate will effectively optimize a single
2955// or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense for the
  // users of this analysis (such as the vectorizer) to trigger it, based on
  // their specific cost considerations; for example, in cases where stride
  // versioning does not help resolve memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();

  // Match the types so we can compare the stride and the MaxBTC.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
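  // E.g. (illustrative types): an i32 stride compared against an i64 MaxBTC
  // is sign-extended to i64, while a MaxBTC narrower than the stride is
  // zero-extended to the stride's type, so the subtraction below is done in
  // a single type.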
  const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = MaxBTC;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - MaxBTC > 0
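  // (MaxBTC == TripCount - 1, so Stride - MaxBTC > 0 is the same as
  // Stride > TripCount - 1, i.e. Stride >= TripCount for integer values.)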
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetTransformInfo *TTI,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr), TheLoop(L) {
  unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  if (TTI) {
    TypeSize FixedWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
    if (FixedWidth.isNonZero()) {
      // Scale the vector width by 2 as a rough estimate to also consider
      // interleaving.
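      // E.g. (illustrative): 256-bit fixed-width vector registers give the
      // dependence checker below a budget of 512 bits.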
      MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
    }

    TypeSize ScalableWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
    if (ScalableWidth.isNonZero())
      MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  }
  DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
                                                  MaxTargetVectorWidthInBits);
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop())
    CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    const MemoryDepChecker &DC = getDepChecker();
    if (!DC.isSafeForAnyVectorWidth())
      OS << " with a maximum safe vector width of "
         << DC.getMaxSafeVectorWidthInBits() << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth)
      << "Non vectorizable stores to invariant address were "
      << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
                  HasLoadStoreDependenceInvolvingLoopInvariantAddress
              ? ""
              : "not ")
      << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
  auto [It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});

  if (Inserted)
    It->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);

  return *It->second;
}

void LoopAccessInfoManager::clear() {
  SmallVector<Loop *> ToRemove;
  // Collect LoopAccessInfo entries that may keep references to IR outside the
  // analyzed loop or SCEVs that may have been modified or invalidated. At the
  // moment, that is loops requiring memory or SCEV runtime checks, as those
  // cache SCEVs, e.g. for pointer expressions.
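  // For example (illustrative): an entry whose predicated SCEV carries a
  // "Stride == 1" assumption, or whose pointer checking recorded runtime
  // checks, is dropped below; an entry with no checks and an always-true
  // predicate is kept.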
  for (const auto &[L, LAI] : LoopAccessInfoMap) {
    if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
        LAI->getPSE().getPredicate().isAlwaysTrue())
      continue;
    ToRemove.push_back(L);
  }

  for (Loop *L : ToRemove)
    LoopAccessInfoMap.erase(L);
}

bool LoopAccessInfoManager::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<LoopAccessAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  // Skip checking TargetLibraryAnalysis as it is immutable and can't become
  // invalid.
  return Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
                                              FunctionAnalysisManager &FAM) {
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
  return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
}

AnalysisKey LoopAccessAnalysis::Key;