//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME:  __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = llvm::PointerType::get(
          CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = Addr.getPointer();
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = Addr.getPointer();
    }
  // Otherwise, the standard logic requires a helper function.
  } else {
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(Type));
    Func = CodeGenFunction(CGM)
           .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                  CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}
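
// Illustration: on Itanium-family targets with __cxa_atexit enabled,
// registerGlobalDtor above typically lowers to a runtime call of the form
//   __cxa_atexit((void (*)(void *))dtor, argument, &__dso_handle);
// while with -fno-use-cxa-atexit a stub registered through atexit() is used
// instead (see createAtExitStub below).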

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  assert(Addr->getType()->isPointerTy() && "Address must be a pointer");
  llvm::Type *ObjectPtr[1] = {Addr->getType()};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Int64Ty, Width), Addr};
  Builder.CreateCall(InvariantStart, Args);
}
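
// Illustration: for a 4-byte constant global @g, the call created above would
// look roughly like
//   %inv = call ptr @llvm.invariant.start.p0(i64 4, ptr @g)
// telling the optimizer that the pointed-to memory no longer changes.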

void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getTypes().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = GV->getAddressSpace();
  llvm::Constant *DeclPtr = GV;
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::PointerType *PTy =
        llvm::PointerType::get(getLLVMContext(), ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(
      DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    bool NeedsDtor =
        D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (D.getType().isConstantStorage(getContext(), true, !NeedsDtor))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}
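
// Illustration: for a global `Foo g;` the stub built above is conceptually
//   static void __stub_for_g(void) { g.~Foo(); }   // name is illustrative
// and its address is what later gets handed to atexit()/unatexit() or placed
// into llvm.global_dtors.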

/// Create a stub function, suitable for being passed to __pt_atexit_np,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createTLSAtExitStub(
    const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
    llvm::FunctionCallee &AtExit) {
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&D, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
      getContext().IntTy, FnInfoOpts::None, {getContext().IntTy},
      FunctionType::ExtInfo(), {}, RequiredArgs::All);

  // Get the stub function type, int(*)(int,...).
  llvm::FunctionType *StubTy =
      llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy}, true);

  llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
      StubTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  FunctionArgList Args;
  ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
                        ImplicitParamKind::Other);
  Args.push_back(&IPD);
  QualType ResTy = CGM.getContext().IntTy;

  CGF.StartFunction(GlobalDecl(&D, DynamicInitKind::AtExit), ResTy, DtorStub,
                    FI, Args, D.getLocation(), D.getInit()->getExprLoc());

  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(Dtor, Addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *DtorFn = dyn_cast<llvm::Function>(
          Dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(DtorFn->getCallingConv());

  // Return 0 from the function.
  CGF.Builder.CreateStore(llvm::Constant::getNullValue(CGM.IntTy),
                          CGF.ReturnValue);

  CGF.FinishFunction();

  return DtorStub;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

/// Register a global destructor using the LLVM 'llvm.global_dtors' global.
void CodeGenFunction::registerGlobalDtorWithLLVM(const VarDecl &VD,
                                                 llvm::FunctionCallee Dtor,
                                                 llvm::Constant *Addr) {
  // Create a function which calls the destructor.
  llvm::Function *dtorStub = createAtExitStub(VD, Dtor, Addr);
  CGM.AddGlobalDtor(dtorStub);
}

void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination and the unatexit returns a value of 0, otherwise a non-zero
  // value is returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case;  we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    //   1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}
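
// Illustration: for a function-local static (InitsPerLocalVar == 1 << 20) the
// branch above is annotated with weights roughly equivalent to
//   !{!"branch_weights", i32 1, i32 1048575}
// so the already-initialized fast path is treated as overwhelmingly likely.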

llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
  llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  if (Linkage == llvm::GlobalVariable::InternalLinkage)
    SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
      !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::NumericalStability) &&
      !isInNoSanitizeList(SanitizerKind::NumericalStability, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeNumericalStability);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable.  The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    // Contract with backend that "init_seg(compiler)" corresponds to priority
    // 200 and "init_seg(lib)" corresponds to priority 400.
    int Priority = -1;
    if (ISA->getSection() == ".CRT$XCC")
      Priority = 200;
    else if (ISA->getSection() == ".CRT$XCL")
      Priority = 400;

    if (Priority != -1)
      AddGlobalCtor(Fn, Priority, ~0U, COMDATKey);
    else
      EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR ||
             D->hasAttr<SelectAnyAttr>()) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized.  On most platforms, this is a
    // minor startup time optimization.  In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    //
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    I = DelayedCXXInitPosition.find(D);
    // CXXGlobalInits.size() is the lex order number for the next deferred
    // VarDecl. Use it when the current VarDecl is non-deferred. Although this
    // lex order number is shared between current VarDecl and some following
    // VarDecls, their order of insertion into `llvm.global_ctors` is the same
    // as the lexing order and the following stable sort would preserve such
    // order.
    unsigned LexOrder =
        I == DelayedCXXInitPosition.end() ? CXXGlobalInits.size() : I->second;
    AddGlobalCtor(Fn, 65535, LexOrder, COMDATKey);
    if (COMDATKey && (getTriple().isOSBinFormatELF() ||
                      getTarget().getCXXABI().isMicrosoft())) {
      // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
      // llvm.used to prevent linker GC.
      addUsedGlobal(COMDATKey);
    }

    // If we used a COMDAT key for the global ctor, the init function can be
    // discarded if the global ctor entry is discarded.
    // FIXME: Do we need to restrict this to ELF and Wasm?
    llvm::Comdat *C = Addr->getComdat();
    if (COMDATKey && C &&
        (getTarget().getTriple().isOSBinFormatELF() ||
         getTarget().getTriple().isOSBinFormatWasm())) {
      Fn->setComdat(C);
    }
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/* Build the initializer for a C++20 module:
   This is arranged to be run only once regardless of how many times the module
   might be included transitively.  This is arranged by using a guard variable.

   If there are no initializers at all (and also no imported modules) we reduce
   this to an empty function (since the Itanium ABI requires that this function
   be available to a caller, which might be produced by a different
   implementation).

   First we call any initializers for imported modules.
   We then call initializers for the Global Module Fragment (if present)
   We then call initializers for the current module.
   We then call initializers for the Private Module Fragment (if present)
*/
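
// Illustration: for a named module such as `export module M;`, the function
// built below carries the Itanium-mangled module initializer name (of the
// form _ZGIW1M for this example), and the guard is a one-byte internal global
// checked and set on first entry so repeated calls from importers are no-ops.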

void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
  assert(Primary->isInterfaceOrPartition() &&
         "The function should only be called for C++20 named module interface"
         " or partition.");

  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // As noted above, we create the function, even if it is empty.
  // Module initializers for imported modules are emitted first.

  // Collect all the modules that we import.
  llvm::SmallSetVector<Module *, 8> AllImports;
  // Ones that we export.
  for (auto I : Primary->Exports)
    AllImports.insert(I.getPointer());
  // Ones that we only import.
  for (Module *M : Primary->Imports)
    AllImports.insert(M);
  // Ones that we import in the global module fragment or the private module
  // fragment.
  for (Module *SubM : Primary->submodules()) {
    assert((SubM->isGlobalModule() || SubM->isPrivateModule()) &&
           "The submodules of a C++20 module unit should only be global module "
           "fragments or private module fragments.");
    assert(SubM->Exports.empty() &&
           "The global module fragments and the private module fragments are "
           "not allowed to export imported modules.");
    for (Module *M : SubM->Imports)
      AllImports.insert(M);
  }

  SmallVector<llvm::Function *, 8> ModuleInits;
  for (Module *M : AllImports) {
    // No Itanium initializer in header-like modules.
    if (M->isHeaderLikeModule())
      continue; // TODO: warn of mixed use of module map modules and C++20?
    // We're allowed to skip the initialization if we are sure it doesn't
    // do anything.
    if (!M->isNamedModuleInterfaceHasInit())
      continue;
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
    SmallString<256> FnName;
    {
      llvm::raw_svector_ostream Out(FnName);
      cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
          .mangleModuleInitializer(M, Out);
    }
    assert(!GetGlobalValue(FnName.str()) &&
           "We should only have one use of the initializer call");
    llvm::Function *Fn = llvm::Function::Create(
        FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
    ModuleInits.push_back(Fn);
  }

  // Add any initializers with specified priority; this uses the same approach
  // as EmitCXXGlobalInitFunc().
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      for (; I < PrioE; ++I)
        ModuleInits.push_back(I->second);
    }
  }

  // Now append the ones without specified priority.
  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(F);

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // We now build the initializer for this module, which has a mangled name
  // as per the Itanium ABI.  The action of the initializer is guarded so that
  // each init is run just once (even though a module might be imported
  // multiple times via nested use).
  llvm::Function *Fn;
  {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(Primary, Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);

    // If we have a completely empty initializer then we do not want to create
    // the guard variable.
    ConstantAddress GuardAddr = ConstantAddress::invalid();
    if (!ModuleInits.empty()) {
      // Create the guard var.
      llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
          getModule(), Int8Ty, /*isConstant=*/false,
          llvm::GlobalVariable::InternalLinkage,
          llvm::ConstantInt::get(Int8Ty, 0), InitFnName.str() + "__in_chrg");
      CharUnits GuardAlign = CharUnits::One();
      Guard->setAlignment(GuardAlign.getAsAlign());
      GuardAddr = ConstantAddress(Guard, Int8Ty, GuardAlign);
    }
    CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits,
                                                     GuardAddr);
  }

  // We allow for the case that a module object is added to a linked binary
  // without a specific call to the initializer.  This also ensures that
  // implementation partition initializers are called when the partition
  // is not imported as an interface.
  AddGlobalCtor(Fn);

  // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
  // functions.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  // We are done with the inits.
  AllImports.clear();
  PrioritizedCXXGlobalInits.clear();
  CXXGlobalInits.clear();
  ModuleInits.clear();
}

static SmallString<128> getTransformedFileName(llvm::Module &M) {
  SmallString<128> FileName = llvm::sys::path::filename(M.getName());

  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  return FileName;
}
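
// Illustration: getTransformedFileName maps e.g. "some-file.cpp" to
// "some_file.cpp", which yields a fallback initializer named
// "_GLOBAL__sub_I_some_file.cpp" in EmitCXXGlobalInitFunc below.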

static std::string getPrioritySuffix(unsigned int Priority) {
  assert(Priority <= 65535 && "Priority should always be <= 65535.");

  // Compute the function suffix from priority. Prepend with zeroes to make
  // sure the function names are also ordered as priorities.
  std::string PrioritySuffix = llvm::utostr(Priority);
  PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;

  return PrioritySuffix;
}
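
// Illustration: getPrioritySuffix(200) yields "000200", so the helper named
// "_GLOBAL__I_000200" sorts lexicographically before "_GLOBAL__I_000400",
// matching the priority order used when these are added to llvm.global_ctors.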

void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // When we import C++20 modules, we must run their initializers first.
  SmallVector<llvm::Function *, 8> ModuleInits;
  if (CXX20ModuleInits)
    for (Module *M : ImportedModules) {
      // No Itanium initializer in header-like modules.
      if (M->isHeaderLikeModule())
        continue;
      // We're allowed to skip the initialization if we are sure it doesn't
      // do anything.
      if (!M->isNamedModuleInterfaceHasInit())
        continue;
      llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
      SmallString<256> FnName;
      {
        llvm::raw_svector_ostream Out(FnName);
        cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
            .mangleModuleInitializer(M, Out);
      }
      assert(!GetGlobalValue(FnName.str()) &&
             "We should only have one use of the initializer call");
      llvm::Function *Fn = llvm::Function::Create(
          FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
      ModuleInits.push_back(Fn);
    }

  if (ModuleInits.empty() && CXXGlobalInits.empty() &&
      PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Note - everything is sorted first by
    // priority, second - by lex order, so we emit ctor functions in proper
    // order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      // Prepend the module inits to the highest priority set.
      if (!ModuleInits.empty()) {
        for (auto *F : ModuleInits)
          LocalCXXGlobalInits.push_back(F);
        ModuleInits.clear();
      }

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
      CXXGlobalInits.empty())
    return;

  for (auto *F : CXXGlobalInits)
    ModuleInits.push_back(F);
  CXXGlobalInits.clear();

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above.  Module implementation units behave the same
  // way as a non-modular TU with imports.
  llvm::Function *Fn;
  if (CXX20ModuleInits && getContext().getCurrentNamedModule() &&
      !getContext().getCurrentNamedModule()->isModuleImplementation()) {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(getContext().getCurrentNamedModule(), Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);
  } else
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy,
        llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
        FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits);
  AddGlobalCtor(Fn);

  // In OpenCL, global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function makes use of function pointer
  // callbacks. We can't support function pointers, especially between host
  // and device. However, global destruction has little meaning without any
  // dynamic resource allocation on the device, and program-scope variables
  // are destroyed by the runtime when the program is released.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  ModuleInits.clear();
}

void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
                         PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with the same priority and emit each
    // chunk into a separate function. Note - everything is sorted first by
    // priority, second - by lex order, so we emit dtor functions in proper
    // order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
                                             DtorFn.getCallee(), nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, LocalCXXStermFinalizers);
      AddGlobalDtor(Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  if (getLangOpts().HLSL)
    CGM.getHLSLRuntime().annotateHLSLResource(D, Addr);

  FinishFunction();
}

void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object.  The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamKind::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
                getContext().VoidTy, fn, FI, args);
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}
