//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation for the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::HoistRuntimeChecks;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const DenseMap<Value *, const SCEV *> &PtrToStride,
                                            Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  const SCEV *StrideSCEV = SI->second;
  // Note: This assert is both overly strong and overly weak.  The actual
  // invariant here is that StrideSCEV should be loop invariant.  The only
  // such invariant strides we happen to speculate right now are unknowns
  // and thus this is a reasonable proxy of the actual invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const auto *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
  auto *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N.
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of a single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
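///
/// Worked example (illustrative numbers only): for an add-recurrence
/// {A,+,4} with a backedge-taken count of 7 and a 4-byte access,
///   B     = A + 4*7        = A + 28
///   Start = UMIN(A, B)     = A
///   End   = UMAX(A, B) + 4 = A + 32,
/// so the half-open range [A, A+32) covers every byte the access may touch
/// across the loop, and two accesses conflict only if their ranges overlap.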
static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
    PredicatedScalarEvolution &PSE,
    DenseMap<const SCEV *, std::pair<const SCEV *, const SCEV *>>
        &PointerBounds) {
  ScalarEvolution *SE = PSE.getSE();

  auto [Iter, Ins] = PointerBounds.insert(
      {PtrExpr, {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
  if (!Ins)
    return Iter->second;

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
    const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  } else
    return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};

  assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
  assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");

  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getDataLayout();
  Type *IdxTy = DL.getIndexType(PtrExpr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Iter->second = {ScStart, ScEnd};
  return Iter->second;
}

/// Calculate Start and End points of memory access using
/// getStartAndEndForAccess.
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
      Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}

bool RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  // If either group contains multiple different pointers, bail out.
  // TODO: Support multiple pointers by using the minimum or maximum pointer,
  // depending on src & sink.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
    return false;

  PointerInfo *Src = &Pointers[CGI.Members[0]];
  PointerInfo *Sink = &Pointers[CGJ.Members[0]];

  // If either pointer is read and written, multiple checks may be needed. Bail
  // out.
  if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
      !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
    return false;

  ArrayRef<unsigned> AccSrc =
      DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
  ArrayRef<unsigned> AccSink =
      DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
  // If either pointer is accessed multiple times, there may not be a clear
  // src/sink relation. Bail out for now.
  if (AccSrc.size() != 1 || AccSink.size() != 1)
    return false;

  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
  auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
  if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
      SinkAR->getLoop() != DC.getInnermostLoop())
    return false;

  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return false;

  const DataLayout &DL =
      SinkAR->getLoop()->getHeader()->getDataLayout();
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));

  // Only constant steps that match between Src and Sink and are equal to the
  // AllocSize are supported at the moment. This simplifies the difference
  // computation. Can be extended in the future.
  auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
  if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
      Step->getAPInt().abs() != AllocSize)
    return false;

  IntegerType *IntTy =
      IntegerType::get(Src->PointerValue->getContext(),
                       DL.getPointerSizeInBits(CGI.AddressSpace));

  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkAR, SrcAR);

  const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
  const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt))
    return false;

  const Loop *InnerLoop = SrcAR->getLoop();
  // If the start values for both Src and Sink also vary according to an outer
  // loop, then it's probably better to avoid creating diff checks because
  // they may not be hoisted. We should instead let llvm::addRuntimeChecks
  // do the expanded full range overlap checks, which can be hoisted.
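  // Illustrative (hypothetical) case: for nested accesses such as
  //   for (i) for (j) A[i*M + j] = B[i*K + j];
  // the inner-loop start addresses are themselves add-recurrences in the
  // outer loop. If their outer-loop steps differ (M != K above), a
  // SinkStart - SrcStart diff check changes on every outer iteration and
  // cannot be hoisted, so we return false and rely on the full range
  // overlap checks instead.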
  if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
      isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
    auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
    auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
    const Loop *StartARLoop = SrcStartAR->getLoop();
    if (StartARLoop == SinkStartAR->getLoop() &&
        StartARLoop == InnerLoop->getParentLoop() &&
        // If the diff check would already be loop invariant (due to the
        // recurrences being the same), then we prefer to keep the diff checks
        // because they are cheaper.
        SrcStartAR->getStepRecurrence(*SE) !=
            SinkStartAR->getStepRecurrence(*SE)) {
      LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
                           "cannot be hoisted out of the outer loop\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
                    << "SrcStart: " << *SrcStartInt << '\n'
                    << "SinkStartInt: " << *SinkStartInt << '\n');
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
  return true;
}

SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
        Checks.push_back(std::make_pair(&CGI, &CGJ));
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (const auto &I : M.Members)
    for (const auto &J : N.Members)
      if (needsChecking(I, J))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
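/// For example (illustrative): with I = %a and J = %a + 16, Diff is the
/// constant 16, which is not negative, so I is returned; if the difference
/// is not a constant (e.g. %a vs. %b), nullptr is returned.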
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  return C->getValue()->isNegative() ? J : I;
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
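  // For illustration (assumed values): if this group currently spans
  // [Low, High) = [%a, %a+64) and the new pointer spans [%a+32, %a+96),
  // both differences are constant, so the pointer is merged and the group
  // bounds widen to [%a, %a+96). If either difference is not a constant
  // SCEV, we give up here and the caller starts a new group instead.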
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.
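  //
  // For example (illustrative), the pointers for a[i], a[i+1] and a[i+2]
  // differ from the group bounds by constants, so all three end up in a
  // single group whose [Low, High) bounds are widened to cover them, and
  // one check against another group replaces three separate checks.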

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
    auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
    It->second.push_back(Index);
  }

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Mark this pointer as seen.
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          // This should limit the cost of grouping the pointers to something
          // reasonable.  If we do end up hitting this threshold, the algorithm
          // will create separate groups for all remaining pointers.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // for the number of comparisons has been reached. Create a new group
          // to hold the current pointer.
          Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
      }
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &[Check1, Check2] : Checks) {
    const auto &First = Check1->Members, &Second = Check2->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
    for (unsigned K : First)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
    for (unsigned K : Second)
      OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (const auto &CG : CheckingGroups) {
    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned Member : CG.Members) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
        LoopAliasScopes(LoopAliasScopes) {
    // We're analyzing dependences across loop iterations.
    BAA.enableCrossIterationMode();
  }

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(adjustLoc(Loc));
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
                       Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used.  Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

  const DenseMap<Value *, SmallVector<const Value *, 16>> &
  getUnderlyingObjects() {
    return UnderlyingObjects;
  }

private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

  /// Adjust the MemoryLocation so that it represents accesses to this
  /// location across all iterations, rather than a single one.
  MemoryLocation adjustLoc(MemoryLocation Loc) const {
    // The accessed location varies within the loop, but remains within the
    // underlying object.
    Loc.Size = LocationSize::beforeOrAfterPointer();
    Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
    Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
    return Loc;
  }

  /// Drop alias scopes that are only valid within a single loop iteration.
  MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
    if (!ScopeList)
      return nullptr;

    // For the sake of simplicity, drop the whole scope list if any scope is
    // iteration-local.
    if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
          return LoopAliasScopes.contains(cast<MDNode>(Scope));
        }))
      return nullptr;

    return ScopeList;
  }

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Map of all accesses. Values are the types used to access memory pointed to
  /// by the pointer.
  PtrAccessMap Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// Batched alias analysis results.
  BatchAAResults BAA;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks.  Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded = false;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;

  DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;

  /// Alias scopes that are declared inside the loop, and as such not valid
  /// across iterations.
  SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
                     Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}
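
// Illustrative (assumed IR) example for visitPointers below: a non-header
// phi merging two pointers inside the loop body, e.g.
//   %p = phi ptr [ %a.gep, %then ], [ %b.gep, %else ]
//   %v = load i32, ptr %p
// cannot be analyzed as a single SCEV, so %a.gep and %b.gep are handed to
// AddPointer as two separate pointer accesses instead.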

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}

// Walk back through the IR for a pointer, looking for a select like the
// following:
//
//  %offset = select i1 %cmp, i64 %a, i64 %b
//  %addr = getelementptr double, double* %base, i64 %offset
//  %ld = load double, double* %addr, align 8
//
// We won't be able to form a single SCEVAddRecExpr from this since the
// address for each loop iteration depends on %cmp. We could potentially
// produce multiple valid SCEVAddRecExprs, though, and check all of them for
// memory safety/aliasing if needed.
//
// If we encounter some IR we don't yet handle, or something obviously fine
// like a constant, then we just add the SCEV for that term to the list passed
// in by the caller. If we have a node that may potentially yield a valid
// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
// ourselves before adding to the list.
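//
// For the select example above (illustrative, sizes assumed), the GEP case
// below would yield the two SCEVs
//   (%base + 8 * %a) and (%base + 8 * %b),
// one for each side of the select feeding the offset, which the caller can
// then check independently.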
891
static void findForkedSCEVs(
892
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
893
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
894
    unsigned Depth) {
895
  // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
896
  // we've exceeded our limit on recursion, just return whatever we have
897
  // regardless of whether it can be used for a forked pointer or not, along
898
  // with an indication of whether it might be a poison or undef value.
899
  const SCEV *Scev = SE->getSCEV(Ptr);
900
  if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
901
      !isa<Instruction>(Ptr) || Depth == 0) {
902
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
903
    return;
904
  }
905

906
  Depth--;
907

908
  auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
909
    return get<1>(S);
910
  };
911

912
  auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
913
    switch (Opcode) {
914
    case Instruction::Add:
915
      return SE->getAddExpr(L, R);
916
    case Instruction::Sub:
917
      return SE->getMinusSCEV(L, R);
918
    default:
919
      llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
920
    }
921
  };
922

923
  Instruction *I = cast<Instruction>(Ptr);
924
  unsigned Opcode = I->getOpcode();
925
  switch (Opcode) {
926
  case Instruction::GetElementPtr: {
927
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
928
    Type *SourceTy = GEP->getSourceElementType();
929
    // We only handle base + single offset GEPs here for now.
930
    // Not dealing with preexisting gathers yet, so no vectors.
931
    if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
932
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
933
      break;
934
    }
935
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
936
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
937
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
938
    findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
939

940
    // See if we need to freeze our fork...
941
    bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
942
                       any_of(OffsetScevs, UndefPoisonCheck);
943

944
    // Check that we only have a single fork, on either the base or the offset.
945
    // Copy the SCEV across for the one without a fork in order to generate
946
    // the full SCEV for both sides of the GEP.
947
    if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
948
      BaseScevs.push_back(BaseScevs[0]);
949
    else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
950
      OffsetScevs.push_back(OffsetScevs[0]);
951
    else {
952
      ScevList.emplace_back(Scev, NeedsFreeze);
953
      break;
954
    }
955

956
    // Find the pointer type we need to extend to.
957
    Type *IntPtrTy = SE->getEffectiveSCEVType(
958
        SE->getSCEV(GEP->getPointerOperand())->getType());
959

960
    // Find the size of the type being pointed to. We only have a single
961
    // index term (guarded above) so we don't need to index into arrays or
962
    // structures, just get the size of the scalar value.
963
    const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
964

965
    // Scale up the offsets by the size of the type, then add to the bases.
966
    const SCEV *Scaled1 = SE->getMulExpr(
967
        Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
968
    const SCEV *Scaled2 = SE->getMulExpr(
969
        Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
970
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
971
                          NeedsFreeze);
972
    ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
973
                          NeedsFreeze);
974
    break;
975
  }
976
  case Instruction::Select: {
977
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
978
    // A select means we've found a forked pointer, but we currently only
979
    // support a single select per pointer so if there's another behind this
980
    // then we just bail out and return the generic SCEV.
981
    findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
982
    findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
983
    if (ChildScevs.size() == 2) {
984
      ScevList.push_back(ChildScevs[0]);
985
      ScevList.push_back(ChildScevs[1]);
986
    } else
987
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
988
    break;
989
  }
990
  case Instruction::PHI: {
991
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
992
    // A phi means we've found a forked pointer, but we currently only
993
    // support a single phi per pointer so if there's another behind this
994
    // then we just bail out and return the generic SCEV.
995
    if (I->getNumOperands() == 2) {
996
      findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
997
      findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
998
    }
999
    if (ChildScevs.size() == 2) {
1000
      ScevList.push_back(ChildScevs[0]);
1001
      ScevList.push_back(ChildScevs[1]);
1002
    } else
1003
      ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1004
    break;
1005
  }
1006
  case Instruction::Add:
1007
  case Instruction::Sub: {
1008
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
1009
    SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
1010
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1011
    findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1012

1013
    // See if we need to freeze our fork...
1014
    bool NeedsFreeze =
1015
        any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1016

1017
    // Check that we only have a single fork, on either the left or right side.
1018
    // Copy the SCEV across for the one without a fork in order to generate
1019
    // the full SCEV for both sides of the BinOp.
1020
    if (LScevs.size() == 2 && RScevs.size() == 1)
1021
      RScevs.push_back(RScevs[0]);
1022
    else if (RScevs.size() == 2 && LScevs.size() == 1)
1023
      LScevs.push_back(LScevs[0]);
1024
    else {
1025
      ScevList.emplace_back(Scev, NeedsFreeze);
1026
      break;
1027
    }
1028

1029
    ScevList.emplace_back(
1030
        GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1031
        NeedsFreeze);
1032
    ScevList.emplace_back(
1033
        GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1034
        NeedsFreeze);
1035
    break;
1036
  }
1037
  default:
1038
    // Just return the current SCEV if we haven't handled the instruction yet.
1039
    LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1040
    ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1041
    break;
1042
  }
1043
}
1044

1045
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
1046
findForkedPointer(PredicatedScalarEvolution &PSE,
1047
                  const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1048
                  const Loop *L) {
1049
  ScalarEvolution *SE = PSE.getSE();
1050
  assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1051
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
1052
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1053

1054
  // For now, we will only accept a forked pointer with two possible SCEVs
1055
  // that are either SCEVAddRecExprs or loop invariant.
1056
  if (Scevs.size() == 2 &&
1057
      (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1058
       SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1059
      (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1060
       SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1061
    LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1062
    LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1063
    LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1064
    return Scevs;
1065
  }
1066

1067
  return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1068
}
1069

1070
bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1071
                                          MemAccessInfo Access, Type *AccessTy,
1072
                                          const DenseMap<Value *, const SCEV *> &StridesMap,
1073
                                          DenseMap<Value *, unsigned> &DepSetId,
1074
                                          Loop *TheLoop, unsigned &RunningDepId,
1075
                                          unsigned ASId, bool ShouldCheckWrap,
1076
                                          bool Assume) {
1077
  Value *Ptr = Access.getPointer();
1078

1079
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
1080
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1081

1082
  for (auto &P : TranslatedPtrs) {
1083
    const SCEV *PtrExpr = get<0>(P);
1084
    if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1085
      return false;
1086

1087
    // When we run after a failing dependency check we have to make sure
1088
    // we don't have wrapping pointers.
1089
    if (ShouldCheckWrap) {
1090
      // Skip wrap checking when translating pointers.
1091
      if (TranslatedPtrs.size() > 1)
1092
        return false;
1093

1094
      if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1095
        auto *Expr = PSE.getSCEV(Ptr);
1096
        if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1097
          return false;
1098
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1099
      }
1100
    }
1101
    // If there's only one option for Ptr, look it up after bounds and wrap
1102
    // checking, because assumptions might have been added to PSE.
1103
    if (TranslatedPtrs.size() == 1)
1104
      TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1105
                           false};
1106
  }
1107

1108
  for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1109
    // The id of the dependence set.
1110
    unsigned DepId;
1111

1112
    if (isDependencyCheckNeeded()) {
1113
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1114
      unsigned &LeaderId = DepSetId[Leader];
1115
      if (!LeaderId)
1116
        LeaderId = RunningDepId++;
1117
      DepId = LeaderId;
1118
    } else
1119
      // Each access has its own dependence set.
1120
      DepId = RunningDepId++;
1121

1122
    bool IsWrite = Access.getInt();
1123
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1124
                   NeedsFreeze);
1125
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1126
  }
1127

1128
  return true;
1129
}
1130

1131
bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1132
                                     ScalarEvolution *SE, Loop *TheLoop,
1133
                                     const DenseMap<Value *, const SCEV *> &StridesMap,
1134
                                     Value *&UncomputablePtr, bool ShouldCheckWrap) {
1135
  // Find pointers with computable bounds. We are going to use this information
1136
  // to place a runtime bound check.
1137
  bool CanDoRT = true;
1138

1139
  bool MayNeedRTCheck = false;
1140
  if (!IsRTCheckAnalysisNeeded) return true;
1141

1142
  bool IsDepCheckNeeded = isDependencyCheckNeeded();
1143

1144
  // We assign a consecutive id to access from different alias sets.
1145
  // Accesses between different groups doesn't need to be checked.
1146
  unsigned ASId = 0;
1147
  for (auto &AS : AST) {
1148
    int NumReadPtrChecks = 0;
1149
    int NumWritePtrChecks = 0;
1150
    bool CanDoAliasSetRT = true;
1151
    ++ASId;
1152
    auto ASPointers = AS.getPointers();
1153

1154
    // We assign consecutive id to access from different dependence sets.
1155
    // Accesses within the same set don't need a runtime check.
1156
    unsigned RunningDepId = 1;
1157
    DenseMap<Value *, unsigned> DepSetId;
1158

1159
    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1160

1161
    // First, count how many write and read accesses are in the alias set. Also
1162
    // collect MemAccessInfos for later.
1163
    SmallVector<MemAccessInfo, 4> AccessInfos;
1164
    for (const Value *ConstPtr : ASPointers) {
1165
      Value *Ptr = const_cast<Value *>(ConstPtr);
1166
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1167
      if (IsWrite)
1168
        ++NumWritePtrChecks;
1169
      else
1170
        ++NumReadPtrChecks;
1171
      AccessInfos.emplace_back(Ptr, IsWrite);
1172
    }
1173

1174
    // We do not need runtime checks for this alias set, if there are no writes
1175
    // or a single write and no reads.
1176
    if (NumWritePtrChecks == 0 ||
1177
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1178
      assert((ASPointers.size() <= 1 ||
1179
              all_of(ASPointers,
1180
                     [this](const Value *Ptr) {
1181
                       MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1182
                                                 true);
1183
                       return DepCands.findValue(AccessWrite) == DepCands.end();
1184
                     })) &&
1185
             "Can only skip updating CanDoRT below, if all entries in AS "
1186
             "are reads or there is at most 1 entry");
1187
      continue;
1188
    }
1189

1190
    for (auto &Access : AccessInfos) {
1191
      for (const auto &AccessTy : Accesses[Access]) {
1192
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1193
                                  DepSetId, TheLoop, RunningDepId, ASId,
1194
                                  ShouldCheckWrap, false)) {
1195
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1196
                            << *Access.getPointer() << '\n');
1197
          Retries.push_back({Access, AccessTy});
1198
          CanDoAliasSetRT = false;
1199
        }
1200
      }
1201
    }
1202

1203
    // Note that this function computes CanDoRT and MayNeedRTCheck
1204
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1205
    // we have a pointer for which we couldn't find the bounds but we don't
1206
    // actually need to emit any checks so it does not matter.
1207
    //
1208
    // We need runtime checks for this alias set, if there are at least 2
1209
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1210
    // any bound checks (because in that case the number of dependence sets is
1211
    // incomplete).
1212
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1213

1214
    // We need to perform run-time alias checks, but some pointers had bounds
1215
    // that couldn't be checked.
1216
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1217
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
1218
      // We know that we need these checks, so we can now be more aggressive
1219
      // and add further checks if required (overflow checks).
1220
      CanDoAliasSetRT = true;
1221
      for (const auto &[Access, AccessTy] : Retries) {
1222
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1223
                                  DepSetId, TheLoop, RunningDepId, ASId,
1224
                                  ShouldCheckWrap, /*Assume=*/true)) {
1225
          CanDoAliasSetRT = false;
1226
          UncomputablePtr = Access.getPointer();
1227
          break;
1228
        }
1229
      }
1230
    }
1231

1232
    CanDoRT &= CanDoAliasSetRT;
1233
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
1234
    ++ASId;
1235
  }
1236

1237
  // If the pointers that we would use for the bounds comparison have different
1238
  // address spaces, assume the values aren't directly comparable, so we can't
1239
  // use them for the runtime check. We also have to assume they could
1240
  // overlap. In the future there should be metadata for whether address spaces
1241
  // are disjoint.
1242
  unsigned NumPointers = RtCheck.Pointers.size();
1243
  for (unsigned i = 0; i < NumPointers; ++i) {
1244
    for (unsigned j = i + 1; j < NumPointers; ++j) {
1245
      // Only need to check pointers between two different dependency sets.
1246
      if (RtCheck.Pointers[i].DependencySetId ==
1247
          RtCheck.Pointers[j].DependencySetId)
1248
       continue;
1249
      // Only need to check pointers in the same alias set.
1250
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1251
        continue;
1252

1253
      Value *PtrI = RtCheck.Pointers[i].PointerValue;
1254
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1255

1256
      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1257
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1258
      if (ASi != ASj) {
1259
        LLVM_DEBUG(
1260
            dbgs() << "LAA: Runtime check would require comparison between"
1261
                      " different address spaces\n");
1262
        return false;
1263
      }
1264
    }
1265
  }
1266

1267
  if (MayNeedRTCheck && CanDoRT)
1268
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1269

1270
  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1271
                    << " pointer comparisons.\n");
1272

1273
  // If we can do run-time checks, but there are no checks, no runtime checks
1274
  // are needed. This can happen when all pointers point to the same underlying
1275
  // object for example.
1276
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1277

1278
  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1279
  if (!CanDoRTIfNeeded)
1280
    RtCheck.reset();
1281
  return CanDoRTIfNeeded;
1282
}
1283

1284
void AccessAnalysis::processMemAccesses() {
1285
  // We process the set twice: first we process read-write pointers, last we
1286
  // process read-only pointers. This allows us to skip dependence tests for
1287
  // read-only pointers.
1288

1289
  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1290
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
1291
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
1292
  LLVM_DEBUG({
1293
    for (const auto &[A, _] : Accesses)
1294
      dbgs() << "\t" << *A.getPointer() << " ("
1295
             << (A.getInt() ? "write"
1296
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1297
                                                                 : "read"))
1298
             << ")\n";
1299
  });
1300

1301
  // The AliasSetTracker has nicely partitioned our pointers by metadata
1302
  // compatibility and potential for underlying-object overlap. As a result, we
1303
  // only need to check for potential pointer dependencies within each alias
1304
  // set.
1305
  for (const auto &AS : AST) {
1306
    // Note that both the alias-set tracker and the alias sets themselves use
1307
    // ordered collections internally and so the iteration order here is
1308
    // deterministic.
1309
    auto ASPointers = AS.getPointers();
1310

1311
    bool SetHasWrite = false;
1312

1313
    // Map of pointers to last access encountered.
1314
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1315
    UnderlyingObjToAccessMap ObjToLastAccess;
1316

1317
    // Set of accesses to check after all writes have been processed.
1318
    PtrAccessMap DeferredAccesses;
1319

1320
    // Iterate over each alias set twice, once to process read/write pointers,
1321
    // and then to process read-only pointers.
1322
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1323
      bool UseDeferred = SetIteration > 0;
1324
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1325

1326
      for (const Value *ConstPtr : ASPointers) {
1327
        Value *Ptr = const_cast<Value *>(ConstPtr);
1328

1329
        // For a single memory access in AliasSetTracker, Accesses may contain
1330
        // both read and write, and they both need to be handled for CheckDeps.
1331
        for (const auto &[AC, _] : S) {
1332
          if (AC.getPointer() != Ptr)
1333
            continue;
1334

1335
          bool IsWrite = AC.getInt();
1336

1337
          // If we're using the deferred access set, then it contains only
1338
          // reads.
1339
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1340
          if (UseDeferred && !IsReadOnlyPtr)
1341
            continue;
1342
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
1343
          // read or a write.
1344
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1345
                  S.count(MemAccessInfo(Ptr, false))) &&
1346
                 "Alias-set pointer not in the access set?");
1347

1348
          MemAccessInfo Access(Ptr, IsWrite);
1349
          DepCands.insert(Access);
1350

1351
          // Memorize read-only pointers for later processing and skip them in
1352
          // the first round (they need to be checked after we have seen all
1353
          // write pointers). Note: we also mark pointers that are not
1354
          // consecutive as "read-only" pointers (so that we check
1355
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1356
          if (!UseDeferred && IsReadOnlyPtr) {
1357
            // We only use the pointer keys, the types vector values don't
1358
            // matter.
1359
            DeferredAccesses.insert({Access, {}});
1360
            continue;
1361
          }
1362

1363
          // If this is a write - check other reads and writes for conflicts. If
1364
          // this is a read, only check other writes for conflicts (but only if
1365
          // there is no other write to the ptr - this is an optimization to
1366
          // catch "a[i] = a[i] + " without having to do a dependence check).
1367
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1368
            CheckDeps.push_back(Access);
1369
            IsRTCheckAnalysisNeeded = true;
1370
          }
1371

1372
          if (IsWrite)
1373
            SetHasWrite = true;
1374

1375
          // Create sets of pointers connected by a shared alias set and
1376
          // underlying object.
1377
          typedef SmallVector<const Value *, 16> ValueVector;
1378
          ValueVector TempObjects;
1379

1380
          UnderlyingObjects[Ptr] = {};
1381
          SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1382
          ::getUnderlyingObjects(Ptr, UOs, LI);
1383
          LLVM_DEBUG(dbgs()
1384
                     << "Underlying objects for pointer " << *Ptr << "\n");
1385
          for (const Value *UnderlyingObj : UOs) {
1386
            // nullptr never aliases; don't join sets for pointers that have "null"
1387
            // in their UnderlyingObjects list.
1388
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
1389
                !NullPointerIsDefined(
1390
                    TheLoop->getHeader()->getParent(),
1391
                    UnderlyingObj->getType()->getPointerAddressSpace()))
1392
              continue;
1393

1394
            UnderlyingObjToAccessMap::iterator Prev =
1395
                ObjToLastAccess.find(UnderlyingObj);
1396
            if (Prev != ObjToLastAccess.end())
1397
              DepCands.unionSets(Access, Prev->second);
1398

1399
            ObjToLastAccess[UnderlyingObj] = Access;
1400
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
1401
          }
1402
        }
1403
      }
1404
    }
1405
  }
1406
}
1407

1408
/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1409
/// i.e. monotonically increasing/decreasing.
1410
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1411
                           PredicatedScalarEvolution &PSE, const Loop *L) {
1412

1413
  // FIXME: This should probably only return true for NUW.
1414
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1415
    return true;
1416

1417
  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1418
    return true;
1419

1420
  // Scalar evolution does not propagate the non-wrapping flags to values that
1421
  // are derived from a non-wrapping induction variable because non-wrapping
1422
  // could be flow-sensitive.
1423
  //
1424
  // Look through the potentially overflowing instruction to try to prove
1425
  // non-wrapping for the *specific* value of Ptr.
1426

1427
  // The arithmetic implied by an inbounds GEP can't overflow.
1428
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1429
  if (!GEP || !GEP->isInBounds())
1430
    return false;
1431

1432
  // Make sure there is only one non-const index and analyze that.
1433
  Value *NonConstIndex = nullptr;
1434
  for (Value *Index : GEP->indices())
1435
    if (!isa<ConstantInt>(Index)) {
1436
      if (NonConstIndex)
1437
        return false;
1438
      NonConstIndex = Index;
1439
    }
1440
  if (!NonConstIndex)
1441
    // The recurrence is on the pointer, ignore for now.
1442
    return false;
1443

1444
  // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
1445
  // AddRec using a NSW operation.
1446
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1447
    if (OBO->hasNoSignedWrap() &&
1448
        // Assume a constant for the other operand so that the AddRec can be
1449
        // easily found.
1450
        isa<ConstantInt>(OBO->getOperand(1))) {
1451
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1452

1453
      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1454
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1455
    }
1456

1457
  return false;
1458
}
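
// Illustrative (hypothetical) source-level example of the pattern the code
// above looks for:
//
//   for (int64_t i = 0; i < n; ++i)
//     A[3 * i + 7] = 0;
//
// The pointer SCEV for &A[3 * i + 7] may not carry no-wrap flags itself, but
// the GEP is inbounds and its single non-constant index is an nsw add of a
// constant to a value whose SCEV is an NSW AddRec on the loop, which is the
// case handled by the OverflowingBinaryOperator check above.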
1459

1460
/// Check whether the access through \p Ptr has a constant stride.
1461
std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1462
                                          Type *AccessTy, Value *Ptr,
1463
                                          const Loop *Lp,
1464
                                          const DenseMap<Value *, const SCEV *> &StridesMap,
1465
                                          bool Assume, bool ShouldCheckWrap) {
1466
  Type *Ty = Ptr->getType();
1467
  assert(Ty->isPointerTy() && "Unexpected non-ptr");
1468

1469
  if (isa<ScalableVectorType>(AccessTy)) {
1470
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1471
                      << "\n");
1472
    return std::nullopt;
1473
  }
1474

1475
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1476

1477
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1478
  if (Assume && !AR)
1479
    AR = PSE.getAsAddRec(Ptr);
1480

1481
  if (!AR) {
1482
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1483
                      << " SCEV: " << *PtrScev << "\n");
1484
    return std::nullopt;
1485
  }
1486

1487
  // The access function must stride over the innermost loop.
1488
  if (Lp != AR->getLoop()) {
1489
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1490
                      << *Ptr << " SCEV: " << *AR << "\n");
1491
    return std::nullopt;
1492
  }
1493

1494
  // Check the step is constant.
1495
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1496

1497
  // Calculate the pointer stride and check if it is constant.
1498
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1499
  if (!C) {
1500
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1501
                      << " SCEV: " << *AR << "\n");
1502
    return std::nullopt;
1503
  }
1504

1505
  auto &DL = Lp->getHeader()->getDataLayout();
1506
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1507
  int64_t Size = AllocSize.getFixedValue();
1508
  const APInt &APStepVal = C->getAPInt();
1509

1510
  // Huge step value - give up.
1511
  if (APStepVal.getBitWidth() > 64)
1512
    return std::nullopt;
1513

1514
  int64_t StepVal = APStepVal.getSExtValue();
1515

1516
  // Strided access.
1517
  int64_t Stride = StepVal / Size;
1518
  int64_t Rem = StepVal % Size;
1519
  if (Rem)
1520
    return std::nullopt;
1521

1522
  if (!ShouldCheckWrap)
1523
    return Stride;
1524

1525
  // The address calculation must not wrap. Otherwise, a dependence could be
1526
  // inverted.
1527
  if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1528
    return Stride;
1529

1530
  // An inbounds getelementptr that is an AddRec with a unit stride
1531
  // cannot wrap per definition.  If it did, the result would be poison
1532
  // and any memory access dependent on it would be immediate UB
1533
  // when executed.
1534
  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1535
      GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1536
    return Stride;
1537

1538
  // If the null pointer is undefined, then an access sequence which would
1539
  // otherwise access it can be assumed not to unsigned wrap.  Note that this
1540
  // assumes the object in memory is aligned to the natural alignment.
1541
  unsigned AddrSpace = Ty->getPointerAddressSpace();
1542
  if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1543
      (Stride == 1 || Stride == -1))
1544
    return Stride;
1545

1546
  if (Assume) {
1547
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1548
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1549
                      << "LAA:   Pointer: " << *Ptr << "\n"
1550
                      << "LAA:   SCEV: " << *AR << "\n"
1551
                      << "LAA:   Added an overflow assumption\n");
1552
    return Stride;
1553
  }
1554
  LLVM_DEBUG(
1555
      dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1556
             << *Ptr << " SCEV: " << *AR << "\n");
1557
  return std::nullopt;
1558
}
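
// A minimal standalone sketch (hypothetical, not used above) of the final
// arithmetic in getPtrStride: a constant step in bytes yields an element
// stride only when it is an exact multiple of the access type's alloc size.
static std::optional<int64_t> sketchStrideFromConstantStep(int64_t StepInBytes,
                                                           int64_t AllocSize) {
  if (AllocSize <= 0 || StepInBytes % AllocSize != 0)
    return std::nullopt;          // e.g. a step of 6 bytes over a 4-byte type
  return StepInBytes / AllocSize; // e.g. a step of -8 bytes over i32 -> -2
}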
1559

1560
std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1561
                                         Type *ElemTyB, Value *PtrB,
1562
                                         const DataLayout &DL,
1563
                                         ScalarEvolution &SE, bool StrictCheck,
1564
                                         bool CheckType) {
1565
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1566

1567
  // Make sure that A and B are different pointers.
1568
  if (PtrA == PtrB)
1569
    return 0;
1570

1571
  // Make sure that the element types are the same if required.
1572
  if (CheckType && ElemTyA != ElemTyB)
1573
    return std::nullopt;
1574

1575
  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1576
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1577

1578
  // Check that the address spaces match.
1579
  if (ASA != ASB)
1580
    return std::nullopt;
1581
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1582

1583
  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1584
  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1585
  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1586

1587
  int Val;
1588
  if (PtrA1 == PtrB1) {
1589
    // Retrieve the address space again as pointer stripping now tracks through
1590
    // `addrspacecast`.
1591
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1592
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1593
    // Check that the address spaces match and that the pointers are valid.
1594
    if (ASA != ASB)
1595
      return std::nullopt;
1596

1597
    IdxWidth = DL.getIndexSizeInBits(ASA);
1598
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1599
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1600

1601
    OffsetB -= OffsetA;
1602
    Val = OffsetB.getSExtValue();
1603
  } else {
1604
    // Otherwise compute the distance with SCEV between the base pointers.
1605
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1606
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1607
    const auto *Diff =
1608
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1609
    if (!Diff)
1610
      return std::nullopt;
1611
    Val = Diff->getAPInt().getSExtValue();
1612
  }
1613
  int Size = DL.getTypeStoreSize(ElemTyA);
1614
  int Dist = Val / Size;
1615

1616
  // Ensure that the calculated distance matches the type-based one after all
1617
  // the bitcasts have been removed from the provided pointers.
1618
  if (!StrictCheck || Dist * Size == Val)
1619
    return Dist;
1620
  return std::nullopt;
1621
}
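
// A standalone sketch (hypothetical) of the tail of getPointersDiff: a byte
// difference Val between the two pointers becomes an element distance only
// if, under StrictCheck, it is an exact multiple of the element store size.
static std::optional<int> sketchPointersDiffInElements(int64_t Val,
                                                       int ElemStoreSize,
                                                       bool StrictCheck) {
  int Dist = Val / ElemStoreSize;
  if (!StrictCheck || int64_t(Dist) * ElemStoreSize == Val)
    return Dist;       // e.g. Val = 8 with 4-byte elements -> distance 2
  return std::nullopt; // e.g. Val = 6 with 4-byte elements is rejected
}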
1622

1623
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1624
                           const DataLayout &DL, ScalarEvolution &SE,
1625
                           SmallVectorImpl<unsigned> &SortedIndices) {
1626
  assert(llvm::all_of(
1627
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1628
         "Expected list of pointer operands.");
1629
  // Walk over the pointers, and map each of them to an offset relative to
1630
  // the first pointer in the array.
1631
  Value *Ptr0 = VL[0];
1632

1633
  using DistOrdPair = std::pair<int64_t, int>;
1634
  auto Compare = llvm::less_first();
1635
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1636
  Offsets.emplace(0, 0);
1637
  bool IsConsecutive = true;
1638
  for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1639
    std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1640
                                              /*StrictCheck=*/true);
1641
    if (!Diff)
1642
      return false;
1643

1644
    // Check if the pointer with the same offset is found.
1645
    int64_t Offset = *Diff;
1646
    auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1647
    if (!IsInserted)
1648
      return false;
1649
    // Consecutive order if the inserted element is the last one.
1650
    IsConsecutive &= std::next(It) == Offsets.end();
1651
  }
1652
  SortedIndices.clear();
1653
  if (!IsConsecutive) {
1654
    // Fill SortedIndices array only if it is non-consecutive.
1655
    SortedIndices.resize(VL.size());
1656
    for (auto [Idx, Off] : enumerate(Offsets))
1657
      SortedIndices[Idx] = Off.second;
1658
  }
1659
  return true;
1660
}
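
// Worked example (hypothetical input) for sortPtrAccesses: with pointers whose
// byte offsets from VL[0] are {0, 8, 4}, the offset set orders the accesses as
// (0,0), (4,2), (8,1). The insertion of offset 4 is not at the end of the set,
// so the original order is not consecutive and SortedIndices becomes {0, 2, 1}.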
1661

1662
/// Returns true if the memory operations \p A and \p B are consecutive.
1663
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1664
                               ScalarEvolution &SE, bool CheckType) {
1665
  Value *PtrA = getLoadStorePointerOperand(A);
1666
  Value *PtrB = getLoadStorePointerOperand(B);
1667
  if (!PtrA || !PtrB)
1668
    return false;
1669
  Type *ElemTyA = getLoadStoreType(A);
1670
  Type *ElemTyB = getLoadStoreType(B);
1671
  std::optional<int> Diff =
1672
      getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1673
                      /*StrictCheck=*/true, CheckType);
1674
  return Diff && *Diff == 1;
1675
}
1676

1677
void MemoryDepChecker::addAccess(StoreInst *SI) {
1678
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
1679
                [this, SI](Value *Ptr) {
1680
                  Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1681
                  InstMap.push_back(SI);
1682
                  ++AccessIdx;
1683
                });
1684
}
1685

1686
void MemoryDepChecker::addAccess(LoadInst *LI) {
1687
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
1688
                [this, LI](Value *Ptr) {
1689
                  Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1690
                  InstMap.push_back(LI);
1691
                  ++AccessIdx;
1692
                });
1693
}
1694

1695
MemoryDepChecker::VectorizationSafetyStatus
1696
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1697
  switch (Type) {
1698
  case NoDep:
1699
  case Forward:
1700
  case BackwardVectorizable:
1701
    return VectorizationSafetyStatus::Safe;
1702

1703
  case Unknown:
1704
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1705
  case ForwardButPreventsForwarding:
1706
  case Backward:
1707
  case BackwardVectorizableButPreventsForwarding:
1708
  case IndirectUnsafe:
1709
    return VectorizationSafetyStatus::Unsafe;
1710
  }
1711
  llvm_unreachable("unexpected DepType!");
1712
}
1713

1714
bool MemoryDepChecker::Dependence::isBackward() const {
1715
  switch (Type) {
1716
  case NoDep:
1717
  case Forward:
1718
  case ForwardButPreventsForwarding:
1719
  case Unknown:
1720
  case IndirectUnsafe:
1721
    return false;
1722

1723
  case BackwardVectorizable:
1724
  case Backward:
1725
  case BackwardVectorizableButPreventsForwarding:
1726
    return true;
1727
  }
1728
  llvm_unreachable("unexpected DepType!");
1729
}
1730

1731
bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1732
  return isBackward() || Type == Unknown;
1733
}
1734

1735
bool MemoryDepChecker::Dependence::isForward() const {
1736
  switch (Type) {
1737
  case Forward:
1738
  case ForwardButPreventsForwarding:
1739
    return true;
1740

1741
  case NoDep:
1742
  case Unknown:
1743
  case BackwardVectorizable:
1744
  case Backward:
1745
  case BackwardVectorizableButPreventsForwarding:
1746
  case IndirectUnsafe:
1747
    return false;
1748
  }
1749
  llvm_unreachable("unexpected DepType!");
1750
}
1751

1752
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1753
                                                    uint64_t TypeByteSize) {
1754
  // If loads occur at a distance that is not a multiple of a feasible vector
1755
  // factor, store-load forwarding does not take place.
1756
  // Positive dependences might cause troubles because vectorizing them might
1757
  // prevent store-load forwarding making vectorized code run a lot slower.
1758
  //   a[i] = a[i-3] ^ a[i-8];
1759
  //   The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1760
  //   hence on your typical architecture store-load forwarding does not take
1761
  //   place. Vectorizing in such cases does not make sense.
1762
  // Store-load forwarding distance.
1763

1764
  // After this many iterations store-to-load forwarding conflicts should not
1765
  // cause any slowdowns.
1766
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1767
  // Maximum vector factor.
1768
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
1769
      VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1770

1771
  // Compute the smallest VF at which the store and load would be misaligned.
1772
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1773
       VF *= 2) {
1774
    // If the number of vector iterations between the store and the load is
1775
    // small, we could incur conflicts.
1776
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1777
      MaxVFWithoutSLForwardIssues = (VF >> 1);
1778
      break;
1779
    }
1780
  }
1781

1782
  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1783
    LLVM_DEBUG(
1784
        dbgs() << "LAA: Distance " << Distance
1785
               << " that could cause a store-load forwarding conflict\n");
1786
    return true;
1787
  }
1788

1789
  if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1790
      MaxVFWithoutSLForwardIssues !=
1791
          VectorizerParams::MaxVectorWidth * TypeByteSize)
1792
    MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1793
  return false;
1794
}
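
// A simplified standalone sketch (hypothetical helper) of the search above:
// find the largest power-of-two VF, in bytes, at which a store->load distance
// does not leave the load misaligned with a recent store. MaxVectorWidthBytes
// stands in for VectorizerParams::MaxVectorWidth * TypeByteSize.
static uint64_t sketchMaxVFWithoutSLForwardIssues(uint64_t Distance,
                                                  uint64_t TypeByteSize,
                                                  uint64_t MaxVectorWidthBytes) {
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  uint64_t MaxVF = MaxVectorWidthBytes;
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVF; VF *= 2)
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVF = VF / 2;
      break;
    }
  return MaxVF; // a result below 2 * TypeByteSize means forwarding conflicts
}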
1795

1796
void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1797
  if (Status < S)
1798
    Status = S;
1799
}
1800

1801
/// Given a dependence-distance \p Dist between two
1802
/// memory accesses, that have strides in the same direction whose absolute
1803
/// value of the maximum stride is given in \p MaxStride, and that have the same
1804
/// type size \p TypeByteSize, in a loop whose maximum backedge taken count is
1805
/// \p MaxBTC, check if it is possible to prove statically that the dependence
1806
/// distance is larger than the range that the accesses will travel through the
1807
/// execution of the loop. If so, return true; false otherwise. This is useful
1808
/// for example in loops such as the following (PR31098):
1809
///     for (i = 0; i < D; ++i) {
1810
///                = out[i];
1811
///       out[i+D] =
1812
///     }
1813
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1814
                                     const SCEV &MaxBTC, const SCEV &Dist,
1815
                                     uint64_t MaxStride,
1816
                                     uint64_t TypeByteSize) {
1817

1818
  // If we can prove that
1819
  //      (**) |Dist| > MaxBTC * Step
1820
  // where Step is the absolute stride of the memory accesses in bytes,
1821
  // then there is no dependence.
1822
  //
1823
  // Rationale:
1824
  // We basically want to check if the absolute distance (|Dist/Step|)
1825
  // is >= the loop iteration count (or > MaxBTC).
1826
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1827
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1828
  // that the dependence distance is >= VF; This is checked elsewhere.
1829
  // But in some cases we can prune dependence distances early, and
1830
  // even before selecting the VF, and without a runtime test, by comparing
1831
  // the distance against the loop iteration count. Since the vectorized code
1832
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1833
  // also guarantees that distance >= VF.
1834
  //
1835
  const uint64_t ByteStride = MaxStride * TypeByteSize;
1836
  const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
1837
  const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1838

1839
  const SCEV *CastedDist = &Dist;
1840
  const SCEV *CastedProduct = Product;
1841
  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1842
  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1843

1844
  // The dependence distance can be positive/negative, so we sign extend Dist;
1845
  // The multiplication of the absolute stride in bytes and the
1846
  // backedgeTakenCount is non-negative, so we zero extend Product.
1847
  if (DistTypeSizeBits > ProductTypeSizeBits)
1848
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1849
  else
1850
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1851

1852
  // Is  Dist - (MaxBTC * Step) > 0 ?
1853
  // (If so, then we have proven (**) because |Dist| >= Dist)
1854
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1855
  if (SE.isKnownPositive(Minus))
1856
    return true;
1857

1858
  // Second try: Is  -Dist - (MaxBTC * Step) > 0 ?
1859
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1860
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1861
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1862
  return SE.isKnownPositive(Minus);
1863
}
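
// A standalone sketch (hypothetical, plain integers instead of SCEVs) of test
// (**) above: the accesses are independent if |Dist| > MaxBTC * ByteStride.
// Assumes the product and negation do not overflow a signed 64-bit value.
static bool sketchSafeDependenceDistance(int64_t Dist, int64_t MaxBTC,
                                         int64_t ByteStride) {
  int64_t Product = MaxBTC * ByteStride;
  return Dist > Product || -Dist > Product;
}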
1864

1865
/// Check the dependence for two accesses with the same stride \p Stride.
1866
/// \p Distance is the positive distance and \p TypeByteSize is type size in
1867
/// bytes.
1868
///
1869
/// \returns true if they are independent.
1870
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1871
                                          uint64_t TypeByteSize) {
1872
  assert(Stride > 1 && "The stride must be greater than 1");
1873
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1874
  assert(Distance > 0 && "The distance must be non-zero");
1875

1876
  // Skip if the distance is not a multiple of the type byte size.
1877
  if (Distance % TypeByteSize)
1878
    return false;
1879

1880
  uint64_t ScaledDist = Distance / TypeByteSize;
1881

1882
  // No dependence if the scaled distance is not a multiple of the stride.
1883
  // E.g.
1884
  //      for (i = 0; i < 1024 ; i += 4)
1885
  //        A[i+2] = A[i] + 1;
1886
  //
1887
  // Two accesses in memory (scaled distance is 2, stride is 4):
1888
  //     | A[0] |      |      |      | A[4] |      |      |      |
1889
  //     |      |      | A[2] |      |      |      | A[6] |      |
1890
  //
1891
  // E.g.
1892
  //      for (i = 0; i < 1024 ; i += 3)
1893
  //        A[i+4] = A[i] + 1;
1894
  //
1895
  // Two accesses in memory (scaled distance is 4, stride is 3):
1896
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
1897
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
1898
  return ScaledDist % Stride;
1899
}
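
// Worked numbers for the two examples above, assuming 4-byte ints: the first
// loop has Distance = 8 bytes, so ScaledDist = 2 and 2 % 4 != 0 (independent);
// the second has Distance = 16 bytes, so ScaledDist = 4 and 4 % 3 != 0
// (also independent).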
1900

1901
/// Returns true if any of the underlying objects has a loop varying address,
1902
/// i.e. may change in \p L.
1903
static bool
1904
isLoopVariantIndirectAddress(ArrayRef<const Value *> UnderlyingObjects,
1905
                             ScalarEvolution &SE, const Loop *L) {
1906
  return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1907
    return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1908
  });
1909
}
1910

1911
std::variant<MemoryDepChecker::Dependence::DepType,
1912
             MemoryDepChecker::DepDistanceStrideAndSizeInfo>
1913
MemoryDepChecker::getDependenceDistanceStrideAndSize(
1914
    const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
1915
    const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
1916
    const DenseMap<Value *, SmallVector<const Value *, 16>>
1917
        &UnderlyingObjects) {
1918
  auto &DL = InnermostLoop->getHeader()->getDataLayout();
1919
  auto &SE = *PSE.getSE();
1920
  auto [APtr, AIsWrite] = A;
1921
  auto [BPtr, BIsWrite] = B;
1922

1923
  // Two reads are independent.
1924
  if (!AIsWrite && !BIsWrite)
1925
    return MemoryDepChecker::Dependence::NoDep;
1926

1927
  Type *ATy = getLoadStoreType(AInst);
1928
  Type *BTy = getLoadStoreType(BInst);
1929

1930
  // We cannot check pointers in different address spaces.
1931
  if (APtr->getType()->getPointerAddressSpace() !=
1932
      BPtr->getType()->getPointerAddressSpace())
1933
    return MemoryDepChecker::Dependence::Unknown;
1934

1935
  int64_t StrideAPtr =
1936
      getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true)
1937
          .value_or(0);
1938
  int64_t StrideBPtr =
1939
      getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true)
1940
          .value_or(0);
1941

1942
  const SCEV *Src = PSE.getSCEV(APtr);
1943
  const SCEV *Sink = PSE.getSCEV(BPtr);
1944

1945
  // If the induction step is negative we have to invert source and sink of the
1946
  // dependence when measuring the distance between them. We should not swap
1947
  // AIsWrite with BIsWrite, as their uses expect them in program order.
1948
  if (StrideAPtr < 0) {
1949
    std::swap(Src, Sink);
1950
    std::swap(AInst, BInst);
1951
  }
1952

1953
  const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1954

1955
  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1956
                    << "(Induction step: " << StrideAPtr << ")\n");
1957
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1958
                    << ": " << *Dist << "\n");
1959

1960
  // Need accesses where the addresses of the accessed underlying objects do
1961
  // not change within the loop.
1962
  if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1963
                                   InnermostLoop) ||
1964
      isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1965
                                   InnermostLoop))
1966
    return MemoryDepChecker::Dependence::IndirectUnsafe;
1967

1968
  // Check if we can prove that Sink only accesses memory after Src's end or
1969
  // vice versa. At the moment this is limited to cases where either source or
1970
  // sink are loop invariant to avoid compile-time increases. This is not
1971
  // required for correctness.
1972
  if (SE.isLoopInvariant(Src, InnermostLoop) ||
1973
      SE.isLoopInvariant(Sink, InnermostLoop)) {
1974
    const auto &[SrcStart, SrcEnd] =
1975
        getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
1976
    const auto &[SinkStart, SinkEnd] =
1977
        getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
1978
    if (!isa<SCEVCouldNotCompute>(SrcStart) &&
1979
        !isa<SCEVCouldNotCompute>(SrcEnd) &&
1980
        !isa<SCEVCouldNotCompute>(SinkStart) &&
1981
        !isa<SCEVCouldNotCompute>(SinkEnd)) {
1982
      if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1983
        return MemoryDepChecker::Dependence::NoDep;
1984
      if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
1985
        return MemoryDepChecker::Dependence::NoDep;
1986
    }
1987
  }
1988

1989
  // Need accesses with constant strides and the same direction. We don't want
1990
  // to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
1991
  // could wrap in the address space.
1992
  if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
1993
      (StrideAPtr < 0 && StrideBPtr > 0)) {
1994
    LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1995
    return MemoryDepChecker::Dependence::Unknown;
1996
  }
1997

1998
  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1999
  bool HasSameSize =
2000
      DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
2001
  if (!HasSameSize)
2002
    TypeByteSize = 0;
2003
  return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
2004
                                      std::abs(StrideBPtr), TypeByteSize,
2005
                                      AIsWrite, BIsWrite);
2006
}
2007

2008
MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
2009
    const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
2010
    unsigned BIdx,
2011
    const DenseMap<Value *, SmallVector<const Value *, 16>>
2012
        &UnderlyingObjects) {
2013
  assert(AIdx < BIdx && "Must pass arguments in program order");
2014

2015
  // Get the dependence distance, stride, type size and what access writes for
2016
  // the dependence between A and B.
2017
  auto Res = getDependenceDistanceStrideAndSize(
2018
      A, InstMap[AIdx], B, InstMap[BIdx], UnderlyingObjects);
2019
  if (std::holds_alternative<Dependence::DepType>(Res))
2020
    return std::get<Dependence::DepType>(Res);
2021

2022
  auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2023
      std::get<DepDistanceStrideAndSizeInfo>(Res);
2024
  bool HasSameSize = TypeByteSize > 0;
2025

2026
  std::optional<uint64_t> CommonStride =
2027
      StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2028
  if (isa<SCEVCouldNotCompute>(Dist)) {
2029
    // TODO: Relax requirement that there is a common stride to retry with
2030
    // non-constant distance dependencies.
2031
    FoundNonConstantDistanceDependence |= CommonStride.has_value();
2032
    LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2033
    return Dependence::Unknown;
2034
  }
2035

2036
  ScalarEvolution &SE = *PSE.getSE();
2037
  auto &DL = InnermostLoop->getHeader()->getDataLayout();
2038
  uint64_t MaxStride = std::max(StrideA, StrideB);
2039

2040
  // If the distance between the accesses is larger than their maximum absolute
2041
  // stride multiplied by the symbolic maximum backedge taken count (which is an
2042
  // upper bound of the number of iterations), the accesses are independent, i.e.
2043
  // they are far enough apart that accesses won't access the same location
2044
  // across all loop iterations.
2045
  if (HasSameSize && isSafeDependenceDistance(
2046
                         DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()),
2047
                         *Dist, MaxStride, TypeByteSize))
2048
    return Dependence::NoDep;
2049

2050
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2051

2052
  // Attempt to prove strided accesses independent.
2053
  if (C) {
2054
    const APInt &Val = C->getAPInt();
2055
    int64_t Distance = Val.getSExtValue();
2056

2057
    // If the distance between accesses and their strides are known constants,
2058
    // check whether the accesses interlace each other.
2059
    if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2060
        HasSameSize &&
2061
        areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2062
                                      TypeByteSize)) {
2063
      LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2064
      return Dependence::NoDep;
2065
    }
2066
  } else
2067
    Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2068

2069
  // Negative distances are not plausible dependencies.
2070
  if (SE.isKnownNonPositive(Dist)) {
2071
    if (SE.isKnownNonNegative(Dist)) {
2072
      if (HasSameSize) {
2073
        // Write to the same location with the same size.
2074
        return Dependence::Forward;
2075
      }
2076
      LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2077
                           "different type sizes\n");
2078
      return Dependence::Unknown;
2079
    }
2080

2081
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2082
    // Check if the first access writes to a location that is read in a later
2083
    // iteration, where the distance between them is not a multiple of a vector
2084
    // factor and relatively small.
2085
    //
2086
    // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2087
    // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2088
    // forward dependency will allow vectorization using any width.
2089

2090
    if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2091
      if (!C) {
2092
        // TODO: FoundNonConstantDistanceDependence is used as a necessary
2093
        // condition to consider retrying with runtime checks. Historically, we
2094
        // did not set it when strides were different but there is no inherent
2095
        // reason to.
2096
        FoundNonConstantDistanceDependence |= CommonStride.has_value();
2097
        return Dependence::Unknown;
2098
      }
2099
      if (!HasSameSize ||
2100
          couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2101
                                       TypeByteSize)) {
2102
        LLVM_DEBUG(
2103
            dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2104
        return Dependence::ForwardButPreventsForwarding;
2105
      }
2106
    }
2107

2108
    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2109
    return Dependence::Forward;
2110
  }
2111

2112
  int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2113
  // Below we only handle strictly positive distances.
2114
  if (MinDistance <= 0) {
2115
    FoundNonConstantDistanceDependence |= CommonStride.has_value();
2116
    return Dependence::Unknown;
2117
  }
2118

2119
  if (!isa<SCEVConstant>(Dist)) {
2120
    // Previously this case would be treated as Unknown, possibly setting
2121
    // FoundNonConstantDistanceDependence to force re-trying with runtime
2122
    // checks. Until the TODO below is addressed, set it here to preserve
2123
    // original behavior w.r.t. re-trying with runtime checks.
2124
    // TODO: FoundNonConstantDistanceDependence is used as a necessary
2125
    // condition to consider retrying with runtime checks. Historically, we
2126
    // did not set it when strides were different but there is no inherent
2127
    // reason to.
2128
    FoundNonConstantDistanceDependence |= CommonStride.has_value();
2129
  }
2130

2131
  if (!HasSameSize) {
2132
    LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2133
                         "different type sizes\n");
2134
    return Dependence::Unknown;
2135
  }
2136

2137
  if (!CommonStride)
2138
    return Dependence::Unknown;
2139

2140
  // Bail out early if passed-in parameters make vectorization not feasible.
2141
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2142
                           VectorizerParams::VectorizationFactor : 1);
2143
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2144
                           VectorizerParams::VectorizationInterleave : 1);
2145
  // The minimum number of iterations for a vectorized/unrolled version.
2146
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2147

2148
  // It's not vectorizable if the distance is smaller than the minimum distance
2149
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
2150
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2151
  // TypeByteSize (no need to add the last gap distance).
2152
  //
2153
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2154
  //      foo(int *A) {
2155
  //        int *B = (int *)((char *)A + 14);
2156
  //        for (i = 0 ; i < 1024 ; i += 2)
2157
  //          B[i] = A[i] + 1;
2158
  //      }
2159
  //
2160
  // Two accesses in memory (stride is 2):
2161
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
2162
  //                              | B[0] |      | B[2] |      | B[4] |
2163
  //
2164
  // The minimum distance needed for vectorizing all iterations except the last:
2165
  // 4 * 2 * (MinNumIter - 1). The distance needed for the last iteration: 4.
2166
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2167
  //
2168
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2169
  // 12, which is less than distance.
2170
  //
2171
  // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2172
  // the minimum distance needed is 28, which is greater than distance. It is
2173
  // not safe to do vectorization.
2174

2175
  // We know that Dist is positive, but it may not be constant. Use the signed
2176
  // minimum for computations below, as this ensures we compute the closest
2177
  // possible dependence distance.
2178
  uint64_t MinDistanceNeeded =
2179
      TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2180
  if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2181
    if (!isa<SCEVConstant>(Dist)) {
2182
      // For non-constant distances, we checked the lower bound of the
2183
      // dependence distance and the distance may be larger at runtime (and safe
2184
      // for vectorization). Classify it as Unknown, so we re-try with runtime
2185
      // checks.
2186
      return Dependence::Unknown;
2187
    }
2188
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2189
                      << MinDistance << '\n');
2190
    return Dependence::Backward;
2191
  }
2192

2193
  // Unsafe if the minimum distance needed is greater than the smallest dependence
2194
  // distance.
2195
  if (MinDistanceNeeded > MinDepDistBytes) {
2196
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2197
                      << MinDistanceNeeded << " size in bytes\n");
2198
    return Dependence::Backward;
2199
  }
2200

2201
  // Positive distance bigger than max vectorization factor.
2202
  // FIXME: Should use max factor instead of max distance in bytes, which could
2203
  // not handle different types.
2204
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2205
  //      void foo (int *A, char *B) {
2206
  //        for (unsigned i = 0; i < 1024; i++) {
2207
  //          A[i+2] = A[i] + 1;
2208
  //          B[i+2] = B[i] + 1;
2209
  //        }
2210
  //      }
2211
  //
2212
  // This case is currently unsafe according to the max safe distance. If we
2213
  // analyze the two accesses on array B, the max safe dependence distance
2214
  // is 2. Then we analyze the accesses on array A, the minimum distance needed
2215
  // is 8, which is greater than 2 and forbids vectorization. But actually
2216
  // both A and B could be vectorized by 2 iterations.
2217
  MinDepDistBytes =
2218
      std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2219

2220
  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2221
  uint64_t MinDepDistBytesOld = MinDepDistBytes;
2222
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2223
      isa<SCEVConstant>(Dist) &&
2224
      couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2225
    // Sanity check that we didn't update MinDepDistBytes when calling
2226
    // couldPreventStoreLoadForward
2227
    assert(MinDepDistBytes == MinDepDistBytesOld &&
2228
           "An update to MinDepDistBytes requires an update to "
2229
           "MaxSafeVectorWidthInBits");
2230
    (void)MinDepDistBytesOld;
2231
    return Dependence::BackwardVectorizableButPreventsForwarding;
2232
  }
2233

2234
  // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2235
  // since there is a backwards dependency.
2236
  uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2237
  LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2238
                    << " with max VF = " << MaxVF << '\n');
2239

2240
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2241
  if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2242
    // For non-constant distances, we checked the lower bound of the dependence
2243
    // distance and the distance may be larger at runtime (and safe for
2244
    // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2245
    return Dependence::Unknown;
2246
  }
2247

2248
  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2249
  return Dependence::BackwardVectorizable;
2250
}
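
// Standalone sketches (hypothetical helpers) of the two formulas used above.
// For the example in the comments (4-byte ints, stride 2, MinNumIter 2),
// sketchMinDistanceNeeded(4, 2, 2) == 12, which is below the 14-byte distance,
// so that loop is vectorizable with two iterations per vector.
static uint64_t sketchMinDistanceNeeded(uint64_t TypeByteSize, uint64_t Stride,
                                        unsigned MinNumIter) {
  return TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
}
static uint64_t sketchMaxSafeVF(uint64_t MinDepDistBytes, uint64_t TypeByteSize,
                                uint64_t Stride) {
  return MinDepDistBytes / (TypeByteSize * Stride);
}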
2251

2252
bool MemoryDepChecker::areDepsSafe(
2253
    DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2254
    const DenseMap<Value *, SmallVector<const Value *, 16>>
2255
        &UnderlyingObjects) {
2256

2257
  MinDepDistBytes = -1;
2258
  SmallPtrSet<MemAccessInfo, 8> Visited;
2259
  for (MemAccessInfo CurAccess : CheckDeps) {
2260
    if (Visited.count(CurAccess))
2261
      continue;
2262

2263
    // Get the relevant memory access set.
2264
    EquivalenceClasses<MemAccessInfo>::iterator I =
2265
      AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2266

2267
    // Check accesses within this set.
2268
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2269
        AccessSets.member_begin(I);
2270
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2271
        AccessSets.member_end();
2272

2273
    // Check every access pair.
2274
    while (AI != AE) {
2275
      Visited.insert(*AI);
2276
      bool AIIsWrite = AI->getInt();
2277
      // Check loads only against the next equivalence class, but stores also against
2278
      // other stores in the same equivalence class - to the same address.
2279
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2280
          (AIIsWrite ? AI : std::next(AI));
2281
      while (OI != AE) {
2282
        // Check every accessing instruction pair in program order.
2283
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2284
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2285
          // Scan all accesses of another equivalence class, but only the next
2286
          // accesses of the same equivalence class.
2287
          for (std::vector<unsigned>::iterator
2288
                   I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2289
                   I2E = (OI == AI ? I1E : Accesses[*OI].end());
2290
               I2 != I2E; ++I2) {
2291
            auto A = std::make_pair(&*AI, *I1);
2292
            auto B = std::make_pair(&*OI, *I2);
2293

2294
            assert(*I1 != *I2);
2295
            if (*I1 > *I2)
2296
              std::swap(A, B);
2297

2298
            Dependence::DepType Type = isDependent(*A.first, A.second, *B.first,
2299
                                                   B.second, UnderlyingObjects);
2300
            mergeInStatus(Dependence::isSafeForVectorization(Type));
2301

2302
            // Gather dependences unless we accumulated MaxDependences
2303
            // dependences.  In that case return as soon as we find the first
2304
            // unsafe dependence.  This puts a limit on this quadratic
2305
            // algorithm.
2306
            if (RecordDependences) {
2307
              if (Type != Dependence::NoDep)
2308
                Dependences.push_back(Dependence(A.second, B.second, Type));
2309

2310
              if (Dependences.size() >= MaxDependences) {
2311
                RecordDependences = false;
2312
                Dependences.clear();
2313
                LLVM_DEBUG(dbgs()
2314
                           << "Too many dependences, stopped recording\n");
2315
              }
2316
            }
2317
            if (!RecordDependences && !isSafeForVectorization())
2318
              return false;
2319
          }
2320
        ++OI;
2321
      }
2322
      ++AI;
2323
    }
2324
  }
2325

2326
  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2327
  return isSafeForVectorization();
2328
}
2329

2330
SmallVector<Instruction *, 4>
2331
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2332
  MemAccessInfo Access(Ptr, IsWrite);
2333
  auto &IndexVector = Accesses.find(Access)->second;
2334

2335
  SmallVector<Instruction *, 4> Insts;
2336
  transform(IndexVector,
2337
            std::back_inserter(Insts),
2338
            [&](unsigned Idx) { return this->InstMap[Idx]; });
2339
  return Insts;
2340
}
2341

2342
const char *MemoryDepChecker::Dependence::DepName[] = {
2343
    "NoDep",
2344
    "Unknown",
2345
    "IndirectUnsafe",
2346
    "Forward",
2347
    "ForwardButPreventsForwarding",
2348
    "Backward",
2349
    "BackwardVectorizable",
2350
    "BackwardVectorizableButPreventsForwarding"};
2351

2352
void MemoryDepChecker::Dependence::print(
2353
    raw_ostream &OS, unsigned Depth,
2354
    const SmallVectorImpl<Instruction *> &Instrs) const {
2355
  OS.indent(Depth) << DepName[Type] << ":\n";
2356
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2357
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2358
}
2359

2360
bool LoopAccessInfo::canAnalyzeLoop() {
2361
  // We need to have a loop header.
2362
  LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2363
                    << TheLoop->getHeader()->getParent()->getName() << "' from "
2364
                    << TheLoop->getLocStr() << "\n");
2365

2366
  // We can only analyze innermost loops.
2367
  if (!TheLoop->isInnermost()) {
2368
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2369
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2370
    return false;
2371
  }
2372

2373
  // We must have a single backedge.
2374
  if (TheLoop->getNumBackEdges() != 1) {
2375
    LLVM_DEBUG(
2376
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2377
    recordAnalysis("CFGNotUnderstood")
2378
        << "loop control flow is not understood by analyzer";
2379
    return false;
2380
  }
2381

2382
  // ScalarEvolution needs to be able to find the symbolic max backedge taken
2383
  // count, which is an upper bound on the number of loop iterations. The loop
2384
  // may execute fewer iterations, if it exits via an uncountable exit.
2385
  const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2386
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
2387
    recordAnalysis("CantComputeNumberOfIterations")
2388
        << "could not determine number of loop iterations";
2389
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2390
    return false;
2391
  }
2392

2393
  LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2394
                    << TheLoop->getHeader()->getName() << "\n");
2395
  return true;
2396
}
2397

2398
bool LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2399
                                 const TargetLibraryInfo *TLI,
2400
                                 DominatorTree *DT) {
2401
  // Holds the Load and Store instructions.
2402
  SmallVector<LoadInst *, 16> Loads;
2403
  SmallVector<StoreInst *, 16> Stores;
2404
  SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2405

2406
  // Holds all the different accesses in the loop.
2407
  unsigned NumReads = 0;
2408
  unsigned NumReadWrites = 0;
2409

2410
  bool HasComplexMemInst = false;
2411

2412
  // A runtime check is only legal to insert if there are no convergent calls.
2413
  HasConvergentOp = false;
2414

2415
  PtrRtChecking->Pointers.clear();
2416
  PtrRtChecking->Need = false;
2417

2418
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2419

2420
  const bool EnableMemAccessVersioningOfLoop =
2421
      EnableMemAccessVersioning &&
2422
      !TheLoop->getHeader()->getParent()->hasOptSize();
2423

2424
  // Traverse blocks in fixed RPOT order, regardless of their storage in the
2425
  // loop info, as it may be arbitrary.
2426
  LoopBlocksRPO RPOT(TheLoop);
2427
  RPOT.perform(LI);
2428
  for (BasicBlock *BB : RPOT) {
2429
    // Scan the BB and collect legal loads and stores. Also detect any
2430
    // convergent instructions.
2431
    for (Instruction &I : *BB) {
2432
      if (auto *Call = dyn_cast<CallBase>(&I)) {
2433
        if (Call->isConvergent())
2434
          HasConvergentOp = true;
2435
      }
2436

2437
      // With both a non-vectorizable memory instruction and a convergent
2438
      // operation found in this loop, there is no reason to continue the search.
2439
      if (HasComplexMemInst && HasConvergentOp)
2440
        return false;
2441

2442
      // Avoid hitting recordAnalysis multiple times.
2443
      if (HasComplexMemInst)
2444
        continue;
2445

2446
      // Record alias scopes defined inside the loop.
2447
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2448
        for (Metadata *Op : Decl->getScopeList()->operands())
2449
          LoopAliasScopes.insert(cast<MDNode>(Op));
2450

2451
      // Many math library functions read the rounding mode. We will only
2452
      // vectorize a loop if it contains known function calls that don't set
2453
      // the flag. Therefore, it is safe to ignore this read from memory.
2454
      auto *Call = dyn_cast<CallInst>(&I);
2455
      if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2456
        continue;
2457

2458
      // If this is a load, save it. If this instruction can read from memory
2459
      // but is not a load, then we quit. Notice that we don't handle function
2460
      // calls that read or write.
2461
      if (I.mayReadFromMemory()) {
2462
        // If the function has an explicit vectorized counterpart, we can safely
2463
        // assume that it can be vectorized.
2464
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2465
            !VFDatabase::getMappings(*Call).empty())
2466
          continue;
2467

2468
        auto *Ld = dyn_cast<LoadInst>(&I);
2469
        if (!Ld) {
2470
          recordAnalysis("CantVectorizeInstruction", Ld)
2471
            << "instruction cannot be vectorized";
2472
          HasComplexMemInst = true;
2473
          continue;
2474
        }
2475
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
2476
          recordAnalysis("NonSimpleLoad", Ld)
2477
              << "read with atomic ordering or volatile read";
2478
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2479
          HasComplexMemInst = true;
2480
          continue;
2481
        }
2482
        NumLoads++;
2483
        Loads.push_back(Ld);
2484
        DepChecker->addAccess(Ld);
2485
        if (EnableMemAccessVersioningOfLoop)
2486
          collectStridedAccess(Ld);
2487
        continue;
2488
      }
2489

2490
      // Save 'store' instructions. Abort if other instructions write to memory.
2491
      if (I.mayWriteToMemory()) {
2492
        auto *St = dyn_cast<StoreInst>(&I);
2493
        if (!St) {
2494
          recordAnalysis("CantVectorizeInstruction", St)
2495
              << "instruction cannot be vectorized";
2496
          HasComplexMemInst = true;
2497
          continue;
2498
        }
2499
        if (!St->isSimple() && !IsAnnotatedParallel) {
2500
          recordAnalysis("NonSimpleStore", St)
2501
              << "write with atomic ordering or volatile write";
2502
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2503
          HasComplexMemInst = true;
2504
          continue;
2505
        }
2506
        NumStores++;
2507
        Stores.push_back(St);
2508
        DepChecker->addAccess(St);
2509
        if (EnableMemAccessVersioningOfLoop)
2510
          collectStridedAccess(St);
2511
      }
2512
    } // Next instr.
2513
  } // Next block.
2514

2515
  if (HasComplexMemInst)
2516
    return false;
2517

2518
  // Now we have two lists that hold the loads and the stores.
2519
  // Next, we find the pointers that they use.
2520

2521
  // Check if we see any stores. If there are no stores, then we don't
2522
  // care if the pointers are *restrict*.
2523
  if (!Stores.size()) {
2524
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2525
    return true;
2526
  }
2527

2528
  MemoryDepChecker::DepCandidates DependentAccesses;
2529
  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2530
                          LoopAliasScopes);
2531

2532
  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2533
  // multiple times on the same object. If the ptr is accessed twice, once
2534
  // for read and once for write, it will only appear once (on the write
2535
  // list). This is okay, since we are going to check for conflicts between
2536
  // writes and between reads and writes, but not between reads and reads.
2537
  SmallSet<std::pair<Value *, Type *>, 16> Seen;
2538

2539
  // Record uniform store addresses to identify if we have multiple stores
2540
  // to the same address.
2541
  SmallPtrSet<Value *, 16> UniformStores;
2542

2543
  for (StoreInst *ST : Stores) {
2544
    Value *Ptr = ST->getPointerOperand();
2545

2546
    if (isInvariant(Ptr)) {
2547
      // Record store instructions to loop invariant addresses
2548
      StoresToInvariantAddresses.push_back(ST);
2549
      HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2550
          !UniformStores.insert(Ptr).second;
2551
    }
2552

2553
    // If we did *not* see this pointer before, insert it into the read-write
2554
    // list. At this phase it is only a 'write' list.
2555
    Type *AccessTy = getLoadStoreType(ST);
2556
    if (Seen.insert({Ptr, AccessTy}).second) {
2557
      ++NumReadWrites;
2558

2559
      MemoryLocation Loc = MemoryLocation::get(ST);
2560
      // The TBAA metadata could have a control dependency on the predication
2561
      // condition, so we cannot rely on it when determining whether or not we
2562
      // need runtime pointer checks.
2563
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2564
        Loc.AATags.TBAA = nullptr;
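
      // visitPointers may expand a pointer phi defined inside the loop into
      // its incoming values, registering each one as a separate write.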
      visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                    [&Accesses, AccessTy, Loc](Value *Ptr) {
                      MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                      Accesses.addStore(NewLoc, AccessTy);
                    });
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    return true;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it into the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    Type *AccessTy = getLoadStoreType(LD);
    if (Seen.insert({Ptr, AccessTy}).second ||
        !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load from a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
                  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
                    MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
                    Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
                  });
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    return true;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  Value *UncomputablePtr = nullptr;
  bool CanDoRTIfNeeded =
      Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
                               SymbolicStrides, UncomputablePtr, false);
  if (!CanDoRTIfNeeded) {
    auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
    recordAnalysis("CantIdentifyArrayBounds", I)
        << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    return false;
  }

  LLVM_DEBUG(
    dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  bool DepsAreSafe = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
                                          Accesses.getDependenciesToCheck(),
                                          Accesses.getUnderlyingObjects());

    if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      UncomputablePtr = nullptr;
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
          *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
        recordAnalysis("CantCheckMemDepsAtRunTime", I)
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        return false;
      }
      DepsAreSafe = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    return false;
  }

  if (DepsAreSafe) {
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
    return true;
  }

  emitUnsafeDependenceRemark();
  return false;
}

void LoopAccessInfo::emitUnsafeDependenceRemark() {
  const auto *Deps = getDepChecker().getDependences();
  if (!Deps)
    return;
  const auto *Found =
      llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
        return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
               MemoryDepChecker::VectorizationSafetyStatus::Safe;
      });
  if (Found == Deps->end())
    return;
  MemoryDepChecker::Dependence Dep = *Found;

  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");

  // Emit remark for first unsafe dependence
  bool HasForcedDistribution = false;
  std::optional<const MDOperand *> Value =
      findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
  if (Value) {
    const MDOperand *Op = *Value;
    assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
    HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
  }
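
  // Suggest enabling loop distribution only if the user has not already
  // forced it via the llvm.loop.distribute.enable metadata.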
  const std::string Info =
      HasForcedDistribution
          ? "unsafe dependent memory operations in loop."
          : "unsafe dependent memory operations in loop. Use "
            "#pragma clang loop distribute(enable) to allow loop distribution "
            "to attempt to isolate the offending operations into a separate "
            "loop";
  OptimizationRemarkAnalysis &R =
      recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;

  switch (Dep.Type) {
  case MemoryDepChecker::Dependence::NoDep:
  case MemoryDepChecker::Dependence::Forward:
  case MemoryDepChecker::Dependence::BackwardVectorizable:
    llvm_unreachable("Unexpected dependence");
  case MemoryDepChecker::Dependence::Backward:
    R << "\nBackward loop carried data dependence.";
    break;
  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
    R << "\nForward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
    R << "\nBackward loop carried data dependence that prevents "
         "store-to-load forwarding.";
    break;
  case MemoryDepChecker::Dependence::IndirectUnsafe:
    R << "\nUnsafe indirect dependence.";
    break;
  case MemoryDepChecker::Dependence::Unknown:
    R << "\nUnknown data dependence.";
    break;
  }

  if (Instruction *I = Dep.getSource(getDepChecker())) {
    DebugLoc SourceLoc = I->getDebugLoc();
    if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
      SourceLoc = DD->getDebugLoc();
    if (SourceLoc)
      R << " Memory location is the same as accessed at "
        << ore::NV("Location", SourceLoc);
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isInvariant(Value *V) const {
  auto *SE = PSE->getSE();
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered invariant.
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);
  return SE->isLoopInvariant(S, TheLoop);
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    TypeSize ElemSize = GEPTI.isStruct()
                            ? DL.getTypeAllocSize(GEPTI.getIndexedType())
                            : GEPTI.getSequentialElementStride(DL);
    if (ElemSize != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
    if (I != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  // If the pointer is invariant then there is no stride and it makes no
  // sense to add it here.
  if (Lp != S->getLoop())
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Note that the restrictions after this loop-invariance check are only
  // profitability restrictions.
  if (!SE->isLoopInvariant(V, Lp))
    return nullptr;

  // Look for the loop invariant symbolic value.
  if (isa<SCEVUnknown>(V))
    return V;

  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
    if (isa<SCEVUnknown>(C->getOperand()))
      return V;

  return nullptr;
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  // Note: getStrideFromPointer is a *profitability* heuristic.  We
  // could broaden the scope of values returned here - to anything
  // which happens to be loop invariant and contributes to the
  // computation of an interesting IV - but we chose not to as we
  // don't have a cost model here, and broadening the scope exposes
  // far too many unprofitable cases.
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; for example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();

  // Match the types so we can compare the stride and the MaxBTC.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
  const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = MaxBTC;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - MaxBTC > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetTransformInfo *TTI,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr), TheLoop(L) {
  unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  if (TTI) {
    TypeSize FixedWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
    if (FixedWidth.isNonZero()) {
      // Scale the vector width by 2 as rough estimate to also consider
      // interleaving.
      MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
    }
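
    // With scalable vectors the actual register width is not known at compile
    // time, so conservatively treat the maximum vector width as unbounded.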
    TypeSize ScalableWidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
    if (ScalableWidth.isNonZero())
      MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
  }
  DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
                                                  MaxTargetVectorWidthInBits);
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop())
    CanVecMem = analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    const MemoryDepChecker &DC = getDepChecker();
    if (!DC.isSafeForAnyVectorWidth())
      OS << " with a maximum safe vector width of "
         << DC.getMaxSafeVectorWidthInBits() << " bits";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth)
      << "Non vectorizable stores to invariant address were "
      << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
                  HasLoadStoreDependenceInvolvingLoopInvariantAddress
              ? ""
              : "not ")
      << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
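  // Insert a placeholder so that the LoopAccessInfo is computed only on the
  // first query for this loop; later queries return the cached result.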
  auto [It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});

  if (Inserted)
    It->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);

  return *It->second;
}

void LoopAccessInfoManager::clear() {
  SmallVector<Loop *> ToRemove;
  // Collect LoopAccessInfo entries that may keep references to IR outside the
  // analyzed loop or SCEVs that may have been modified or invalidated. At the
  // moment, that is loops requiring memory or SCEV runtime checks, as those
  // cache SCEVs, e.g. for pointer expressions.
  for (const auto &[L, LAI] : LoopAccessInfoMap) {
    if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
        LAI->getPSE().getPredicate().isAlwaysTrue())
      continue;
    ToRemove.push_back(L);
  }

  for (Loop *L : ToRemove)
    LoopAccessInfoMap.erase(L);
}

bool LoopAccessInfoManager::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<LoopAccessAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  // Skip checking TargetLibraryAnalysis as it is immutable and can't become
  // invalid.
  return Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
                                              FunctionAnalysisManager &FAM) {
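  // Gather the function-level analyses that the per-loop computation depends
  // on; LoopAccessInfo for individual loops is then computed lazily via
  // getInfo().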
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
  return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
}

AnalysisKey LoopAccessAnalysis::Key;