//===- MemoryLocation.cpp - Memory location descriptions -------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include <optional>
using namespace llvm;

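// Print a LocationSize in a human-readable form for debugging output.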
void LocationSize::print(raw_ostream &OS) const {
  OS << "LocationSize::";
  if (*this == beforeOrAfterPointer())
    OS << "beforeOrAfterPointer";
  else if (*this == afterPointer())
    OS << "afterPointer";
  else if (*this == mapEmpty())
    OS << "mapEmpty";
  else if (*this == mapTombstone())
    OS << "mapTombstone";
  else if (isPrecise())
    OS << "precise(" << getValue() << ')';
  else
    OS << "upperBound(" << getValue() << ')';
}

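// The get() overloads below describe the single location accessed by an
// instruction: its pointer operand, with a precise size taken from the
// DataLayout where the accessed type is known. va_arg has no statically known
// size, so it uses an unbounded location after the pointer.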
MemoryLocation MemoryLocation::get(const LoadInst *LI) {
  const auto &DL = LI->getDataLayout();

  return MemoryLocation(
      LI->getPointerOperand(),
      LocationSize::precise(DL.getTypeStoreSize(LI->getType())),
      LI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const StoreInst *SI) {
  const auto &DL = SI->getDataLayout();

  return MemoryLocation(SI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            SI->getValueOperand()->getType())),
                        SI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const VAArgInst *VI) {
  return MemoryLocation(VI->getPointerOperand(),
                        LocationSize::afterPointer(), VI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {
  const auto &DL = CXI->getDataLayout();

  return MemoryLocation(CXI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            CXI->getCompareOperand()->getType())),
                        CXI->getAAMetadata());
}

MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
  const auto &DL = RMWI->getDataLayout();

  return MemoryLocation(RMWI->getPointerOperand(),
                        LocationSize::precise(DL.getTypeStoreSize(
                            RMWI->getValOperand()->getType())),
                        RMWI->getAAMetadata());
}

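// Dispatch on the opcode and return the accessed location for the memory
// instructions handled above, or std::nullopt for anything else. A minimal
// usage sketch (hypothetical caller):
//   if (std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I))
//     /* query alias analysis with *Loc */;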
std::optional<MemoryLocation>
MemoryLocation::getOrNone(const Instruction *Inst) {
  switch (Inst->getOpcode()) {
  case Instruction::Load:
    return get(cast<LoadInst>(Inst));
  case Instruction::Store:
    return get(cast<StoreInst>(Inst));
  case Instruction::VAArg:
    return get(cast<VAArgInst>(Inst));
  case Instruction::AtomicCmpXchg:
    return get(cast<AtomicCmpXchgInst>(Inst));
  case Instruction::AtomicRMW:
    return get(cast<AtomicRMWInst>(Inst));
  default:
    return std::nullopt;
  }
}

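// getForSource() describes the memory read by a memory transfer intrinsic
// (memcpy/memmove and their atomic variants): the source pointer, argument 1.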
MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
  return getForSource(cast<AnyMemTransferInst>(MTI));
}

MemoryLocation MemoryLocation::getForSource(const AtomicMemTransferInst *MTI) {
  return getForSource(cast<AnyMemTransferInst>(MTI));
}

MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
  assert(MTI->getRawSource() == MTI->getArgOperand(1));
  return getForArgument(MTI, 1, nullptr);
}

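// getForDest() describes the memory written by a memory intrinsic
// (memset/memcpy/memmove and their atomic variants): the destination pointer,
// argument 0.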
MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
  return getForDest(cast<AnyMemIntrinsic>(MI));
}

MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
  return getForDest(cast<AnyMemIntrinsic>(MI));
}

MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
  assert(MI->getRawDest() == MI->getArgOperand(0));
  return getForArgument(MI, 0, nullptr);
}

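// For a general call that only accesses argument memory, try to describe
// everything the call may write as a single location: the unique pointer
// argument it can write through. Returns std::nullopt if no such location can
// be found.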
std::optional<MemoryLocation>
MemoryLocation::getForDest(const CallBase *CB, const TargetLibraryInfo &TLI) {
  if (!CB->onlyAccessesArgMemory())
    return std::nullopt;

  if (CB->hasOperandBundles())
    // TODO: remove implementation restriction
    return std::nullopt;

  Value *UsedV = nullptr;
  std::optional<unsigned> UsedIdx;
  for (unsigned i = 0; i < CB->arg_size(); i++) {
    if (!CB->getArgOperand(i)->getType()->isPointerTy())
      continue;
    if (CB->onlyReadsMemory(i))
      continue;
    if (!UsedV) {
      // First potentially writing parameter
      UsedV = CB->getArgOperand(i);
      UsedIdx = i;
      continue;
    }
    UsedIdx = std::nullopt;
    if (UsedV != CB->getArgOperand(i))
      // Can't describe writing to two distinct locations.
      // TODO: This results in an imprecision when two values derived from the
      // same object are passed as arguments to the same function.
      return std::nullopt;
  }
  if (!UsedV)
    // We don't currently have a way to represent a "does not write" result
    // and thus have to be conservative and return unknown.
    return std::nullopt;

  if (UsedIdx)
    return getForArgument(CB, *UsedIdx, &TLI);
  return MemoryLocation::getBeforeOrAfter(UsedV, CB->getAAMetadata());
}

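// Describe the memory accessed through a particular call argument. For known
// intrinsics and recognized library calls, an exact or bounded size can often
// be derived from the call's length operand; otherwise the location is
// unbounded around the pointer.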
MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
                                              unsigned ArgIdx,
                                              const TargetLibraryInfo *TLI) {
  AAMDNodes AATags = Call->getAAMetadata();
  const Value *Arg = Call->getArgOperand(ArgIdx);

  // We may be able to produce an exact size for known intrinsics.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
    const DataLayout &DL = II->getDataLayout();

    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);

    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      assert(ArgIdx == 1 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::precise(
              cast<ConstantInt>(II->getArgOperand(0))->getZExtValue()),
          AATags);

    case Intrinsic::masked_load:
      assert(ArgIdx == 0 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::upperBound(DL.getTypeStoreSize(II->getType())),
          AATags);

    case Intrinsic::masked_store:
      assert(ArgIdx == 1 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::upperBound(
              DL.getTypeStoreSize(II->getArgOperand(0)->getType())),
          AATags);

    case Intrinsic::invariant_end:
      // The first argument to an invariant.end is a "descriptor" type (e.g. a
      // pointer to an empty struct) which is never actually dereferenced.
      if (ArgIdx == 0)
        return MemoryLocation(Arg, LocationSize::precise(0), AATags);
      assert(ArgIdx == 2 && "Invalid argument index");
      return MemoryLocation(
          Arg,
          LocationSize::precise(
              cast<ConstantInt>(II->getArgOperand(1))->getZExtValue()),
          AATags);

    case Intrinsic::arm_neon_vld1:
      assert(ArgIdx == 0 && "Invalid argument index");
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      return MemoryLocation(
          Arg, LocationSize::precise(DL.getTypeStoreSize(II->getType())),
          AATags);

    case Intrinsic::arm_neon_vst1:
      assert(ArgIdx == 0 && "Invalid argument index");
      return MemoryLocation(Arg,
                            LocationSize::precise(DL.getTypeStoreSize(
                                II->getArgOperand(1)->getType())),
                            AATags);
    }

    assert(
        !isa<AnyMemTransferInst>(II) &&
        "all memory transfer intrinsics should be handled by the switch above");
  }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  LibFunc F;
  if (TLI && TLI->getLibFunc(*Call, F) && TLI->has(F)) {
    switch (F) {
    case LibFunc_strcpy:
    case LibFunc_strcat:
    case LibFunc_strncat:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for str function");
      return MemoryLocation::getAfter(Arg, AATags);

    case LibFunc_memset_chk:
      assert(ArgIdx == 0 && "Invalid argument index for memset_chk");
      [[fallthrough]];
    case LibFunc_memcpy_chk: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memcpy_chk");
      LocationSize Size = LocationSize::afterPointer();
      if (const auto *Len = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
        // memset_chk writes at most Len bytes, memcpy_chk reads/writes at most
        // Len bytes. They may read/write less, if Len exceeds the specified max
        // size and aborts.
        Size = LocationSize::upperBound(Len->getZExtValue());
      }
      return MemoryLocation(Arg, Size, AATags);
    }
    case LibFunc_strncpy: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for strncpy");
      LocationSize Size = LocationSize::afterPointer();
      if (const auto *Len = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
        // strncpy is guaranteed to write Len bytes, but only reads up to Len
        // bytes.
        Size = ArgIdx == 0 ? LocationSize::precise(Len->getZExtValue())
                           : LocationSize::upperBound(Len->getZExtValue());
      }
      return MemoryLocation(Arg, Size, AATags);
    }
    case LibFunc_memset_pattern16:
    case LibFunc_memset_pattern4:
    case LibFunc_memset_pattern8:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memset_pattern16");
      if (ArgIdx == 1) {
        unsigned Size = 16;
        if (F == LibFunc_memset_pattern4)
          Size = 4;
        else if (F == LibFunc_memset_pattern8)
          Size = 8;
        return MemoryLocation(Arg, LocationSize::precise(Size), AATags);
      }
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_bcmp:
    case LibFunc_memcmp:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memcmp/bcmp");
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_memchr:
      assert((ArgIdx == 0) && "Invalid argument index for memchr");
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(2)))
        return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                              AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    case LibFunc_memccpy:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memccpy");
      // We only know an upper bound on the number of bytes read/written.
      if (const ConstantInt *LenCI =
              dyn_cast<ConstantInt>(Call->getArgOperand(3)))
        return MemoryLocation(
            Arg, LocationSize::upperBound(LenCI->getZExtValue()), AATags);
      return MemoryLocation::getAfter(Arg, AATags);
    default:
      break;
    }
  }

  return MemoryLocation::getBeforeOrAfter(Call->getArgOperand(ArgIdx), AATags);
}