//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;
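  // Everything pushed so far other than 'this' (i.e. the implicit parameter,
  // e.g. the VTT, when present) counts as a prefix argument.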

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = 0;
    if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(CE)) {
      if (const auto *M = dyn_cast<CXXMethodDecl>(Op->getCalleeDecl()))
        ArgsToSkip =
            static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
    }
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}

RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
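// For example (illustrative):
//   struct S { S(int); };
//   void f(S *p) { p->S::S(42); }  // re-runs the constructor on *p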
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

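  // For example (illustrative), a virtual call through a reference to a
  // final class can be devirtualized:
  //   struct B { virtual void f(); };
  //   struct D final : B { void f() override; };
  //   void g(D &d) { d.f(); }  // may call D::f directly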
  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
                          BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
        /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

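    // For example (illustrative), for a trivially copyable type T,
    //   t1 = t2;
    // is emitted as a direct aggregate copy rather than as a call to
    // T::operator=(const T &).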
    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }

  if (sanitizePerformTypeCheck())
    EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                  This.emitRawPointer(*this),
                  C.getRecordType(CalleeDecl->getParent()),
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else {
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
      }

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else {
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
    }
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
  else
    This = EmitLValue(BaseExpr, KnownNonNull).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E == MustTailCall,
                  E->getExprLoc());
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = DestPtr.withElementType(CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

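  // For example (MS ABI, illustrative): given
  //   struct A { int a; };
  //   struct B : virtual A { int b; };
  // the B base subobject is zeroed with stores that skip over its vbptr.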
  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr(NullVariable, CGF.Int8Ty, Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
    case CXXConstructionKind::Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructionKind::VirtualBase:
    case CXXConstructionKind::NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object
    //        is passed directly as the first argument to the constructor.
    //        This should also handle stepping through implicit casts and
    //        conversion sequences which involve two steps, with a
    //        conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(0);
    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    EmitAggExpr(SrcObj, Dest);
    return;
  }

  if (const ArrayType *arrayType =
          getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructionKind::Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructionKind::VirtualBase:
      ForVirtualBase = true;
      [[fallthrough]];

    case CXXConstructionKind::NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr *E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType()) &&
         "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

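  // For example (illustrative): under the Itanium ABI, 'new T[n]' for a T
  // with a non-trivial destructor prepends a size_t cookie holding n, so
  // that 'delete[]' knows how many elements to destroy.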
  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie =
        llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
      ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned =
      (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType =
      cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT =
             CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

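  // For example (illustrative): for 'new int[2][3]' with 32-bit int, the
  // allocated type is int[3], so arraySizeMultiplier is 3, typeSizeMultiplier
  // is 12, and the numElements returned to the caller is 2 * 3 = 6.
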
  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
          dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countl_zero())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize =
        adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      llvm::Value *thresholdV =
          llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.  But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(
            numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(
            numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(
            hasOverflow,
            CGF.Builder.CreateICmpULT(
                numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow =
          CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
          llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
              llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow =
          CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(
          hasOverflow, llvm::Constant::getAllOnesValue(CGF.SizeTy), size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot =
        AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  CleanupDeactivationScope deactivation(*this);
  bool pushedCleanup = false;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
      BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
  const CXXParenListInitExpr *CPLIE = nullptr;
  const StringLiteral *SL = nullptr;
  const ObjCEncodeExpr *OCEE = nullptr;
  const Expr *IgnoreParen = nullptr;
  if (!ILE) {
    IgnoreParen = Init->IgnoreParenImpCasts();
    CPLIE = dyn_cast<CXXParenListInitExpr>(IgnoreParen);
    SL = dyn_cast<StringLiteral>(IgnoreParen);
    OCEE = dyn_cast<ObjCEncodeExpr>(IgnoreParen);
  }

  // If the initializer is an initializer list, first do the explicit elements.
  if (ILE || CPLIE || SL || OCEE) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) {
      if (!ILE)
        Init = IgnoreParen;
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
      CurPtr = Builder.CreateConstInBoundsGEP(
          CurPtr, InitListElements, "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    ArrayRef<const Expr *> InitExprs =
        ILE ? ILE->inits() : CPLIE->getInitExprs();
    InitListElements = InitExprs.size();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = CurPtr.withElementType(ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (DtorKind) {
      AllocaTrackerRAII AllocaTracker(*this);
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      llvm::Instruction *DominatingIP =
          Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                       EndOfInit, ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      cast<EHCleanupScope>(*EHStack.find(EHStack.stable_begin()))
          .AddAuxAllocas(AllocaTracker.Take());
      DeferredDeactivationCleanupStack.push_back(
          {EHStack.stable_begin(), DominatingIP});
      pushedCleanup = true;
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    unsigned i = 0;
    for (const Expr *IE : InitExprs) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                                 CurPtr.emitRawPointer(*this),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((++i) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/ true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitField())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

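  // None of the memset shortcuts applies, so initialize each remaining
  // element individually; e.g. (illustrative) 'new int A::*[n]{}' under the
  // Itanium ABI, where the null member-pointer pattern is all-ones rather
  // than zero.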
  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
      BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
      "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this),
                                                EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
      Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB);

  CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!pushedCleanup && needsEHCleanup(DtorKind)) {
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
    pushRegularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                   CurPtr.emitRawPointer(*this), ElementType,
                                   ElementAlign, getDestroyer(DtorKind));
    DeferredDeactivationCleanupStack.push_back(
        {EHStack.stable_begin(), DominatingIP});
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  deactivation.ForceDeactivate();

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
      ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type, TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name =
      Ctx.DeclarationNames.getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
}

static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }

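  // For example (illustrative): 'void operator delete(void *, std::size_t,
  // std::align_val_t)' yields Size = true and Alignment = true.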
  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}

namespace {
/// A cleanup to call the given 'operator delete' function upon abnormal
/// exit from a new expression. Templated on a traits type that deals with
/// ensuring that the arguments dominate the cleanup if necessary.
template <typename Traits>
class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
  /// Type used to hold llvm::Value*s.
  typedef typename Traits::ValueTy ValueTy;
  /// Type used to hold RValues.
  typedef typename Traits::RValueTy RValueTy;
  struct PlacementArg {
    RValueTy ArgValue;
    QualType ArgType;
  };

  unsigned NumPlacementArgs : 31;
  LLVM_PREFERRED_TYPE(bool)
  unsigned PassAlignmentToPlacementDelete : 1;
  const FunctionDecl *OperatorDelete;
  ValueTy Ptr;
  ValueTy AllocSize;
  CharUnits AllocAlign;

  PlacementArg *getPlacementArgs() {
    return reinterpret_cast<PlacementArg *>(this + 1);
  }

public:
  static size_t getExtraSize(size_t NumPlacementArgs) {
    return NumPlacementArgs * sizeof(PlacementArg);
  }

  CallDeleteDuringNew(size_t NumPlacementArgs,
                      const FunctionDecl *OperatorDelete, ValueTy Ptr,
                      ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                      CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

  void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
    assert(I < NumPlacementArgs && "index out of range");
    getPlacementArgs()[I] = {Arg, Type};
  }

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
    CallArgList DeleteArgs;

    // The first argument is always a void* (or C* for a destroying operator
    // delete for class type C).
    DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

    // Figure out what other parameters we should be implicitly passing.
    UsualDeleteParams Params;
    if (NumPlacementArgs) {
      // A placement deallocation function is implicitly passed an alignment
      // if the placement allocation function was, but is never passed a size.
      Params.Alignment = PassAlignmentToPlacementDelete;
    } else {
      // For a non-placement new-expression, 'operator delete' can take a
      // size and/or an alignment if it has the right parameters.
      Params = getUsualDeleteParams(OperatorDelete);
    }

    assert(!Params.DestroyingDelete &&
           "should not call destroying delete in a new-expression");

    // The second argument can be a std::size_t (for non-placement delete).
    if (Params.Size)
      DeleteArgs.add(Traits::get(CGF, AllocSize),
                     CGF.getContext().getSizeType());

    // The next (second or third) argument can be a std::align_val_t, which
    // is an enum whose underlying type is std::size_t.
    // FIXME: Use the right type as the parameter type. Note that in a call
    // to operator delete(size_t, ...), we may not have it available.
    if (Params.Alignment)
      DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                         CGF.SizeTy, AllocAlign.getQuantity())),
                     CGF.getContext().getSizeType());

    // Pass the rest of the arguments, which must match exactly.
    for (unsigned I = 0; I != NumPlacementArgs; ++I) {
      auto Arg = getPlacementArgs()[I];
      DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
    }

    // Call 'operator delete'.
    EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
  }
};
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
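///
/// For example (illustrative): in 'new T(args)', if T's constructor throws
/// after the allocation succeeds, the matching 'operator delete' must be
/// called to release the storage before the exception propagates
/// (see C++ [expr.new]).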
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack.pushCleanupWithExtra<DirectCleanup>(
        EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
        NewPtr.emitRawPointer(CGF), AllocSize, E->passAlignment(), AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
      DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
  DominatingValue<RValue>::saved_type SavedAllocSize =
      DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup =
      CGF.EHStack.pushCleanupWithExtra<ConditionalCleanup>(
          EHCleanup, E->getNumPlacementArgs(), E->getOperatorDelete(),
          SavedNewPtr, SavedAllocSize, E->passAlignment(), AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
  }

  CGF.initFullExprCleanup();
}
1573
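// Illustrative sketch (hypothetical user code): in a conditionally
// evaluated new-expression such as
//   T *p = cond ? new (buf) T() : nullptr;
// the cleanup can be reached from code that the new-expression does not
// dominate, so the pointer and allocation size are first captured as
// dominating values and restored inside the cleanup when it fires.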
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer or C++20 parenthesized initializer, we
  // cannot allocate fewer elements than there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const Expr *Init = E->getInitializer();
    const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
    const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Init);
    const Expr *IgnoreParen = Init->IgnoreParenImpCasts();
    if ((ILE && ILE->isStringLiteralInit()) ||
        isa<StringLiteral>(IgnoreParen) || isa<ObjCEncodeExpr>(IgnoreParen)) {
      minElements =
          cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
    } else if (ILE || CPLIE) {
      minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
    }
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
      EmitCXXNewAllocSize(*this, E, minElements, numElements,
                          allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
  // Emit the allocation call. If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation.setAlignment(allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation, *this), arg->getType());
    }

  } else {
    const FunctionProtoType *allocatorType =
        allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/ AbstractCallee(), /*ParamsToSkip*/ ParamsToSkip);

    RValue RV =
        EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // Set !heapallocsite metadata on the call to operator new.
    if (getDebugInfo())
      if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
        getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
                                                 E->getExprLoc());

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
  }
  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we either have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = allocation.withElementType(elementTy);

  // Pass the pointer through launder.invariant.group so that vptr
  // information associated with a previous type at this address does not
  // propagate. To avoid breaking LTO between different optimization levels,
  // do this regardless of the optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Builder.CreateLaunderInvariantGroup(result);

  // Emit sanitizer checks for the pointer value now, so that in the case of
  // an array it is checked only once and not at each constructor call. We
  // may have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result, allocType, result.getAlignment(), SkippedChecks,
                numElements);
  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  llvm::Value *resultPtr = result.emitRawPointer(*this);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type. If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (resultPtr->getType() != resultType)
      resultPtr = Builder.CreateBitCast(resultPtr, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
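// Illustrative sketch: on a typical 64-bit Itanium C++ ABI target,
//   int *p = new int(5);
// lowers to roughly
//   %call = call noalias ptr @_Znwm(i64 4)
//   store i32 5, ptr %call
// The throwing 'operator new' reports failure with an exception, so no
// null check is emitted; 'new (std::nothrow) int(5)' would add one.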
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the std::destroying_delete tag if present.
  llvm::AllocaInst *DestroyingDeleteTag = nullptr;
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    llvm::Type *Ty = getTypes().ConvertType(DDTag);
    CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
    DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
    DestroyingDeleteTag->setAlignment(Align.getAsAlign());
    DeleteArgs.add(
        RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
  }

  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign =
        getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
            DeleteTy, /*NeedsPreferredAlignment=*/true));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);

  // If call argument lowering didn't use the destroying_delete_t alloca,
  // remove it again.
  if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
    DestroyingDeleteTag->eraseFromParent();
}
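// Illustrative sketch: for an over-aligned type such as
//   struct alignas(32) V { char data[32]; };
//   void f(V *v) { delete v; }
// the usual deallocation function can be
//   operator delete(void *, std::size_t, std::align_val_t)
// in which case the size (32) and alignment (32) computed above are passed
// along with the pointer.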
namespace {
/// Calls the given 'operator delete' on a single object.
struct CallObjectDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;
  const FunctionDecl *OperatorDelete;
  QualType ElementType;

  CallObjectDelete(llvm::Value *Ptr,
                   const FunctionDecl *OperatorDelete,
                   QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
  }
};
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}
/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.emitRawPointer(CGF),
                       ElementType);
}
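// Illustrative sketch: with a destroying operator delete,
//   struct S {
//     void operator delete(S *, std::destroying_delete_t);
//     ~S();
//   };
//   void f(S *s) { delete s; }
// no destructor call is emitted here; S's 'operator delete' receives a
// pointer to the not-yet-destroyed object and is itself responsible for
// running ~S() before deallocating.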
/// Emit the code for deleting a single object.
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
/// if not.
static bool EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType,
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr,
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable. If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        bool UseVirtualCall = true;
        const Expr *Base = DE->getArgument();
        if (auto *DevirtualizedDtor = dyn_cast_or_null<const CXXDestructorDecl>(
                Dtor->getDevirtualizedMethod(
                    Base, CGF.CGM.getLangOpts().AppleKext))) {
          UseVirtualCall = false;
          const CXXRecordDecl *DevirtualizedClass =
              DevirtualizedDtor->getParent();
          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
            // Devirtualized to the class of the base type (the type of the
            // whole expression).
            Dtor = DevirtualizedDtor;
          } else {
            // Devirtualized to some other type. Would need to cast the this
            // pointer to that type but we don't have support for that yet, so
            // do a virtual call. FIXME: handle the case where it is
            // devirtualized to the derived type (the type of the inner
            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
            UseVirtualCall = true;
          }
        }
        if (UseVirtualCall) {
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr,
                                                      ElementType, Dtor);
          return false;
        }
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(
      NormalAndEHCleanup, Ptr.emitRawPointer(CGF), OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  // When optimizing for size, call 'operator delete' unconditionally.
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
    CGF.EmitBlock(UnconditionalDeleteBlock);
    CGF.PopCleanupBlock();
    return true;
  }

  CGF.PopCleanupBlock();
  return false;
}
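// Illustrative sketch: given
//   struct B { virtual ~B(); };
//   struct D final : B { ~D(); };
//   void f(D *p) { delete p; }
// the destructor call can be devirtualized to D::~D() because D is final,
// after which the pushed cleanup calls the usual 'operator delete' even if
// the destructor throws.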
namespace {
/// Calls the given 'operator delete' on an array of objects.
struct CallArrayDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;
  const FunctionDecl *OperatorDelete;
  llvm::Value *NumElements;
  QualType ElementType;
  CharUnits CookieSize;

  CallArrayDelete(llvm::Value *Ptr,
                  const FunctionDecl *OperatorDelete,
                  llvm::Value *NumElements,
                  QualType ElementType,
                  CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                       CookieSize);
  }
};
}
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
    llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
        deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
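// Illustrative sketch: under the Itanium C++ ABI, 'new T[n]' for a T with
// a non-trivial destructor prefixes the elements with a cookie holding n
// (padded to the element alignment):
//   [ size_t n | padding? | T[0] ... T[n-1] ]
//                           ^-- pointer seen by the program
// EmitArrayDelete reads n back out of the cookie, destroys the elements,
// and then frees the original allocation pointer, not the element pointer.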
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  //
  // We could avoid this null check if we can determine that the object
  // destruction is trivial and doesn't require an array cookie; we can
  // unconditionally perform the operator delete call in that case. For now, we
  // assume that deleted pointers are null rarely enough that it's better to
  // keep the branch. This might be worth revisiting for a -O0 code size win.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);
  Ptr.setKnownNonNull();

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array. If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value *, 8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr =
               getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
                                    Ptr.getAlignment(), "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
  } else {
    if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
      EmitBlock(DeleteEnd);
  }
}
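// Illustrative sketch: for a multidimensional array-of-arrays delete,
//   A (*p)[7] = new A[3][7];
//   delete [] p;
// the destroyed type A[7] is unpeeled layer by layer down to A, and the
// pointer is GEP'd to the first A element before EmitArrayDelete runs.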
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy,
                                         bool HasNullCheck) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the
  //   constructor or destructor's class nor one of its bases, the behavior is
  //   undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr, SrcRecordTy);

  // Whether we need an explicit null pointer check. For example, with the
  // Microsoft ABI, if this is a call to __RTtypeid, the null pointer check
  // and the exception throw are inside the __RTtypeid(nullptr) call itself.
  if (HasNullCheck &&
      CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
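// Illustrative sketch: for a polymorphic class P,
//   const std::type_info &ti = typeid(*p);
// must throw std::bad_typeid when p is null. With the Itanium C++ ABI the
// typeid.bad_typeid block above calls __cxa_bad_typeid(); with the
// Microsoft ABI the check happens inside __RTtypeid itself, so no explicit
// branch is emitted here.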
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  // Ideally, we would like to use GlobalsInt8PtrTy here, however, we cannot,
  // primarily because the result of applying typeid is a value of type
  // type_info, which is declared & defined by the standard library
  // implementation and expects to operate on the generic (default) AS.
  // https://reviews.llvm.org/D157452 has more context, and a possible solution.
  llvm::Type *PtrTy = Int8PtrTy;
  LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);

  auto MaybeASCast = [=](auto &&TypeInfo) {
    if (GlobAS == LangAS::Default)
      return TypeInfo;
    return getTargetHooks().performAddrSpaceCast(CGM, TypeInfo, GlobAS,
                                                 LangAS::Default, PtrTy);
  };

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return MaybeASCast(TypeInfo);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  // If the operand is already the most derived object, there is no need to
  // look up the vtable.
  if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
    return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy,
                                E->hasNullCheck());

  QualType OperandTy = E->getExprOperand()->getType();
  return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(OperandTy));
}
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.Builder.ClearInsertionPoint();
  return llvm::PoisonValue::get(DestLTy);
}
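// Illustrative sketch ('Unrelated' is a hypothetical class name): a
// dynamic_cast that fails folds to a constant for pointers but must throw
// for references:
//   Unrelated *u = dynamic_cast<Unrelated *>(b);  // null pointer constant
//   Unrelated &r = dynamic_cast<Unrelated &>(*b); // calls the bad_cast
//                                                 // handler, e.g.
//                                                 // __cxa_bad_cast()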
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (IsDynamicCastToVoid) {
    SrcRecordTy = SrcTy->getPointeeType();
    // No DestRecordTy.
  } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor's own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy);

  if (DCE->isAlwaysNull()) {
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
      // Expression emission is expected to retain a valid insertion point.
      if (!Builder.GetInsertBlock())
        EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
      return T;
    }
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // If the destination is effectively final, the cast succeeds if and only
  // if the dynamic type of the pointer is exactly the destination type.
  bool IsExact = !IsDynamicCastToVoid &&
                 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
                 DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
                 CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
                     SrcTy->isPointerType(), SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }
  llvm::Value *Value;
  if (IsDynamicCastToVoid) {
    Value = CGM.getCXXABI().emitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy);
  } else if (IsExact) {
    // If the destination type is effectively final, this pointer points to the
    // right type if and only if its vptr has the right value.
    Value = CGM.getCXXABI().emitExactDynamicCast(
        *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().emitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
  }
  CastNotNull = Builder.GetInsertBlock();

  llvm::Value *NullValue = nullptr;
  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    NullValue = EmitDynamicCastToNull(*this, DestTy);
    CastNull = Builder.GetInsertBlock();

    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (CastNull) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(NullValue, CastNull);

    Value = PHI;
  }

  return Value;
}
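// Illustrative sketch: with optimization enabled,
//   struct Base { virtual ~Base(); };
//   struct Leaf final : Base {};
//   Leaf *l = dynamic_cast<Leaf *>(b);
// takes the IsExact path: the vptr of *b is compared against Leaf's vtable
// directly instead of calling the generic runtime entry point
// (__dynamic_cast under the Itanium C++ ABI).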