llvm-project
2212 lines · 83.3 KB
1//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"14#include "CGObjCRuntime.h"15#include "CodeGenFunction.h"16#include "CodeGenModule.h"17#include "ConstantEmitter.h"18#include "EHScopeStack.h"19#include "TargetInfo.h"20#include "clang/AST/ASTContext.h"21#include "clang/AST/Attr.h"22#include "clang/AST/DeclCXX.h"23#include "clang/AST/DeclTemplate.h"24#include "clang/AST/StmtVisitor.h"25#include "llvm/IR/Constants.h"26#include "llvm/IR/Function.h"27#include "llvm/IR/GlobalVariable.h"28#include "llvm/IR/Instruction.h"29#include "llvm/IR/IntrinsicInst.h"30#include "llvm/IR/Intrinsics.h"31using namespace clang;32using namespace CodeGen;33
34//===----------------------------------------------------------------------===//
35// Aggregate Expression Emitter
36//===----------------------------------------------------------------------===//
37
38namespace llvm {39extern cl::opt<bool> EnableSingleByteCoverage;40} // namespace llvm41
namespace {
/// Visitor that emits an aggregate-typed expression into a destination slot.
/// Each Visit* method either forwards to a sub-expression, loads an l-value,
/// or emits directly into `Dest`.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;  // Function being emitted into.
  CGBuilderTy &Builder;  // IR builder, shared with CGF.
  AggValueSlot Dest;     // Destination slot; may be "ignored" (no address).
  bool IsResultUnused;   // True if the expression result is discarded.

  /// Return `Dest` if it has a real address, otherwise a fresh temporary.
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  /// Force `Dest` itself to have a real address, creating a temporary if
  /// it is currently ignored.
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         CodeGenFunction::ExprValueKind SrcValueKind =
                             CodeGenFunction::EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  /// Emit initialization of the array at DestPtr from the given initializer
  /// arguments, filling trailing elements with ArrayFiller.
  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  /// Whether copies of T need ObjC garbage-collection write barriers.
  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // Attach a debug location to every expression before dispatching.
  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      Address StoreDest = Dest.getAddress();
      // The emitted value is guaranteed to have the same size as the
      // destination but can have a different type. Just do a bitcast in this
      // case to avoid incorrect GEPs.
      if (Result->getType() != StoreDest.getType())
        StoreDest = StoreDest.withElementType(Result->getType());

      CGF.EmitAggregateStore(Result, StoreDest,
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    // Non-trivial C structs need an explicit destructor cleanup if no one
    // else has taken responsibility for destroying the slot.
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
  void VisitPackIndexingExpr(PackIndexingExpr *E) {
    Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.
246//===----------------------------------------------------------------------===//
247// Utilities
248//===----------------------------------------------------------------------===//
249
250/// EmitAggLoadOfLValue - Given an expression with aggregate type that
251/// represents a value lvalue, this method emits the address of the lvalue,
252/// then loads the result into DestPtr.
253void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {254LValue LV = CGF.EmitLValue(E);255
256// If the type of the l-value is atomic, then do an atomic load.257if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {258CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);259return;260}261
262EmitFinalDestCopy(E->getType(), LV);263}
264
265/// True if the given aggregate type requires special GC API calls.
266bool AggExprEmitter::TypeRequiresGCollection(QualType T) {267// Only record types have members that might require garbage collection.268const RecordType *RecordTy = T->getAs<RecordType>();269if (!RecordTy) return false;270
271// Don't mess with non-trivial C++ types.272RecordDecl *Record = RecordTy->getDecl();273if (isa<CXXRecordDecl>(Record) &&274(cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||275!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))276return false;277
278// Check whether the type has an object member.279return Record->hasObjectMember();280}
281
/// Invoke `EmitCall` with a usable return-value slot, introducing a temporary
/// (with lifetime markers and, if needed, a destructor cleanup) when emitting
/// directly into `Dest` would be unsafe, then copy the result into `Dest`.
void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  // Non-trivial C structs need destruction unless someone else owns it.
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && Dest.isIgnored());

  Address RetAddr = Address::invalid();
  RawAddress RetAllocaAddr = RawAddress::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    // Safe to let the callee write straight into the destination.
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      // Push a cleanup that emits lifetime.end on exceptional exit paths.
      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  // The temporary must be distinct from the final destination.
  assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
                                 Src.getAggregatePointer(E->getType(), CGF));
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
342
343/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
344void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {345assert(src.isAggregate() && "value must be aggregate value!");346LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);347EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);348}
349
350/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
351void AggExprEmitter::EmitFinalDestCopy(352QualType type, const LValue &src,353CodeGenFunction::ExprValueKind SrcValueKind) {354// If Dest is ignored, then we're evaluating an aggregate expression355// in a context that doesn't care about the result. Note that loads356// from volatile l-values force the existence of a non-ignored357// destination.358if (Dest.isIgnored())359return;360
361// Copy non-trivial C structs here.362LValue DstLV = CGF.MakeAddrLValue(363Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);364
365if (SrcValueKind == CodeGenFunction::EVK_RValue) {366if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {367if (Dest.isPotentiallyAliased())368CGF.callCStructMoveAssignmentOperator(DstLV, src);369else370CGF.callCStructMoveConstructor(DstLV, src);371return;372}373} else {374if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {375if (Dest.isPotentiallyAliased())376CGF.callCStructCopyAssignmentOperator(DstLV, src);377else378CGF.callCStructCopyConstructor(DstLV, src);379return;380}381}382
383AggValueSlot srcAgg = AggValueSlot::forLValue(384src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,385AggValueSlot::MayOverlap);386EmitCopy(type, Dest, srcAgg);387}
388
389/// Perform a copy from the source into the destination.
390///
391/// \param type - the type of the aggregate being copied; qualifiers are
392/// ignored
393void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,394const AggValueSlot &src) {395if (dest.requiresGCollection()) {396CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);397llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());398CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,399dest.getAddress(),400src.getAddress(),401size);402return;403}404
405// If the result of the assignment is used, copy the LHS there also.406// It's volatile if either side is. Use the minimum alignment of407// the two sides.408LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);409LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);410CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),411dest.isVolatile() || src.isVolatile());412}
413
/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // The record is expected to be {const E*, size_t} or {const E*, const E*};
  // walk its fields in order.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Ctx.hasSameType(Field->getType()->getPointeeType(),
                         ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");

  // Start pointer: store the address of the backing array's first element.
  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  // Second field is either a length (size_t) or an end pointer.
  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);

  } else {
    // End pointer.
    assert(Field->getType()->isPointerType() &&
           Ctx.hasSameType(Field->getType()->getPointeeType(),
                           ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    llvm::Value *IdxEnd[] = { Zero, Size };
    // GEP one past the last element of the backing array.
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
        "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");
}
469
470/// Determine if E is a trivial array filler, that is, one that is
471/// equivalent to zero-initialization.
472static bool isTrivialFiller(Expr *E) {473if (!E)474return true;475
476if (isa<ImplicitValueInitExpr>(E))477return true;478
479if (auto *ILE = dyn_cast<InitListExpr>(E)) {480if (ILE->getNumInits())481return false;482return isTrivialFiller(ILE->getArrayFiller());483}484
485if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))486return Cons->getConstructor()->isDefaultConstructor() &&487Cons->getConstructor()->isTrivial();488
489// FIXME: Are there other cases where we can avoid emitting an initializer?490return false;491}
492
/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  // #embed expressions expand to multiple data elements; account for them
  // when counting explicit initializers (clamped to the array length).
  for (const auto *Init : Args) {
    if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
      NumInitElements += Embed->getDataElementCount() - 1;
      if (NumInitElements > NumArrayElements) {
        NumInitElements = NumArrayElements;
        break;
      }
    }
  }

  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
        CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
        CGM.GetGlobalConstantAddressSpace());
    LangAS AS = GVArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  CodeGenFunction::CleanupDeactivationScope deactivation(CGF);

  llvm::Value *begin = DestPtr.emitRawPointer(CGF);
  if (dtorKind) {
    CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    llvm::Instruction *dominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
        .AddAuxAllocas(allocaTracker.Take());

    // Defer deactivation of the cleanup until the full expression ends.
    CGF.DeferredDeactivationCleanupStack.push_back(
        {CGF.EHStack.stable_begin(), dominatingIP});
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // Emit one explicit initializer into the element at ArrayIndex.
  auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
    llvm::Value *element = begin;
    if (ArrayIndex > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, begin,
          llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid())
        Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Init, elementLV);
    return true;
  };

  unsigned ArrayIndex = 0;
  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    if (ArrayIndex >= NumInitElements)
      break;
    // #embed expands to several elements; let it advance ArrayIndex itself.
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
      EmbedS->doForEachDataElement(Emit, ArrayIndex);
    } else {
      Emit(Args[i], ArrayIndex);
      ArrayIndex++;
    }
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if the we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    llvm::Value *element = begin;
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element,
          llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
          "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }
}
682
683//===----------------------------------------------------------------------===//
684// Visitor Methods
685//===----------------------------------------------------------------------===//
686
// A materialized temporary of aggregate type is emitted by simply emitting
// its sub-expression into the current destination slot.
void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}
690
691void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {692// If this is a unique OVE, just visit its source expression.693if (e->isUnique())694Visit(e->getSourceExpr());695else696EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));697}
698
// Emit a compound literal into the destination slot, registering a C-scope
// destructor cleanup when no one else will destroy it.
void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  // Push the scope-end destroy only if the type actually needs destruction.
  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
726
727/// Attempt to look through various unimportant expressions to find a
728/// cast of the given kind.
729static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {730op = op->IgnoreParenNoopCasts(ctx);731if (auto castE = dyn_cast<CastExpr>(op)) {732if (castE->getCastKind() == kind)733return castE->getSubExpr();734}735return nullptr;736}
737
738void AggExprEmitter::VisitCastExpr(CastExpr *E) {739if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))740CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);741switch (E->getCastKind()) {742case CK_Dynamic: {743// FIXME: Can this actually happen? We have no test coverage for it.744assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");745LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),746CodeGenFunction::TCK_Load);747// FIXME: Do we also need to handle property references here?748if (LV.isSimple())749CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));750else751CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");752
753if (!Dest.isIgnored())754CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");755break;756}757
758case CK_ToUnion: {759// Evaluate even if the destination is ignored.760if (Dest.isIgnored()) {761CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),762/*ignoreResult=*/true);763break;764}765
766// GCC union extension767QualType Ty = E->getSubExpr()->getType();768Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));769EmitInitializationToLValue(E->getSubExpr(),770CGF.MakeAddrLValue(CastPtr, Ty));771break;772}773
774case CK_LValueToRValueBitCast: {775if (Dest.isIgnored()) {776CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),777/*ignoreResult=*/true);778break;779}780
781LValue SourceLV = CGF.EmitLValue(E->getSubExpr());782Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);783Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);784llvm::Value *SizeVal = llvm::ConstantInt::get(785CGF.SizeTy,786CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());787Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);788break;789}790
791case CK_DerivedToBase:792case CK_BaseToDerived:793case CK_UncheckedDerivedToBase: {794llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "795"should have been unpacked before we got here");796}797
798case CK_NonAtomicToAtomic:799case CK_AtomicToNonAtomic: {800bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);801
802// Determine the atomic and value types.803QualType atomicType = E->getSubExpr()->getType();804QualType valueType = E->getType();805if (isToAtomic) std::swap(atomicType, valueType);806
807assert(atomicType->isAtomicType());808assert(CGF.getContext().hasSameUnqualifiedType(valueType,809atomicType->castAs<AtomicType>()->getValueType()));810
811// Just recurse normally if we're ignoring the result or the812// atomic type doesn't change representation.813if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {814return Visit(E->getSubExpr());815}816
817CastKind peepholeTarget =818(isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);819
820// These two cases are reverses of each other; try to peephole them.821if (Expr *op =822findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {823assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),824E->getType()) &&825"peephole significantly changed types?");826return Visit(op);827}828
829// If we're converting an r-value of non-atomic type to an r-value830// of atomic type, just emit directly into the relevant sub-object.831if (isToAtomic) {832AggValueSlot valueDest = Dest;833if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {834// Zero-initialize. (Strictly speaking, we only need to initialize835// the padding at the end, but this is simpler.)836if (!Dest.isZeroed())837CGF.EmitNullInitialization(Dest.getAddress(), atomicType);838
839// Build a GEP to refer to the subobject.840Address valueAddr =841CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);842valueDest = AggValueSlot::forAddr(valueAddr,843valueDest.getQualifiers(),844valueDest.isExternallyDestructed(),845valueDest.requiresGCollection(),846valueDest.isPotentiallyAliased(),847AggValueSlot::DoesNotOverlap,848AggValueSlot::IsZeroed);849}850
851CGF.EmitAggExpr(E->getSubExpr(), valueDest);852return;853}854
855// Otherwise, we're converting an atomic type to a non-atomic type.856// Make an atomic temporary, emit into that, and then copy the value out.857AggValueSlot atomicSlot =858CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");859CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);860
861Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);862RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());863return EmitFinalDestCopy(valueType, rvalue);864}865case CK_AddressSpaceConversion:866return Visit(E->getSubExpr());867
868case CK_LValueToRValue:869// If we're loading from a volatile type, force the destination870// into existence.871if (E->getSubExpr()->getType().isVolatileQualified()) {872bool Destruct =873!Dest.isExternallyDestructed() &&874E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;875if (Destruct)876Dest.setExternallyDestructed();877EnsureDest(E->getType());878Visit(E->getSubExpr());879
880if (Destruct)881CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),882E->getType());883
884return;885}886
887[[fallthrough]];888
889case CK_HLSLArrayRValue:890Visit(E->getSubExpr());891break;892
893case CK_NoOp:894case CK_UserDefinedConversion:895case CK_ConstructorConversion:896assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),897E->getType()) &&898"Implicit cast types must be compatible");899Visit(E->getSubExpr());900break;901
902case CK_LValueBitCast:903llvm_unreachable("should not be emitting lvalue bitcast as rvalue");904
905case CK_Dependent:906case CK_BitCast:907case CK_ArrayToPointerDecay:908case CK_FunctionToPointerDecay:909case CK_NullToPointer:910case CK_NullToMemberPointer:911case CK_BaseToDerivedMemberPointer:912case CK_DerivedToBaseMemberPointer:913case CK_MemberPointerToBoolean:914case CK_ReinterpretMemberPointer:915case CK_IntegralToPointer:916case CK_PointerToIntegral:917case CK_PointerToBoolean:918case CK_ToVoid:919case CK_VectorSplat:920case CK_IntegralCast:921case CK_BooleanToSignedIntegral:922case CK_IntegralToBoolean:923case CK_IntegralToFloating:924case CK_FloatingToIntegral:925case CK_FloatingToBoolean:926case CK_FloatingCast:927case CK_CPointerToObjCPointerCast:928case CK_BlockPointerToObjCPointerCast:929case CK_AnyPointerToBlockPointerCast:930case CK_ObjCObjectLValueCast:931case CK_FloatingRealToComplex:932case CK_FloatingComplexToReal:933case CK_FloatingComplexToBoolean:934case CK_FloatingComplexCast:935case CK_FloatingComplexToIntegralComplex:936case CK_IntegralRealToComplex:937case CK_IntegralComplexToReal:938case CK_IntegralComplexToBoolean:939case CK_IntegralComplexCast:940case CK_IntegralComplexToFloatingComplex:941case CK_ARCProduceObject:942case CK_ARCConsumeObject:943case CK_ARCReclaimReturnedObject:944case CK_ARCExtendBlockObject:945case CK_CopyAndAutoreleaseBlockObject:946case CK_BuiltinFnToFnPtr:947case CK_ZeroToOCLOpaqueType:948case CK_MatrixCast:949case CK_HLSLVectorTruncation:950
951case CK_IntToOCLSampler:952case CK_FloatingToFixedPoint:953case CK_FixedPointToFloating:954case CK_FixedPointCast:955case CK_FixedPointToBoolean:956case CK_FixedPointToIntegral:957case CK_IntegralToFixedPoint:958llvm_unreachable("cast kind invalid for aggregate types");959}960}
961
962void AggExprEmitter::VisitCallExpr(const CallExpr *E) {963if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {964EmitAggLoadOfLValue(E);965return;966}967
968withReturnValueSlot(E, [&](ReturnValueSlot Slot) {969return CGF.EmitCallExpr(E, Slot);970});971}
972
// Emit an Objective-C message send whose result is an aggregate, evaluating
// it into a return-value slot (a temporary is created and copied to Dest if
// needed by withReturnValueSlot).
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}
978
// Comma operator: the LHS is evaluated only for its side effects; the RHS
// produces the aggregate result in the current destination.
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}
983
// GNU statement expression ({ ...; expr; }) with aggregate type: emit the
// compound statement so its final expression initializes Dest.
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  // RAII helper scoping the evaluation of a statement expression.
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), /*GetLast=*/true, Dest);
}
988
// The primitive comparison kinds used to synthesize the result of a
// three-way comparison (see EmitCompare / VisitBinCmp below).
enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};
995static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,996const BinaryOperator *E, llvm::Value *LHS,997llvm::Value *RHS, CompareKind Kind,998const char *NameSuffix = "") {999QualType ArgTy = E->getLHS()->getType();1000if (const ComplexType *CT = ArgTy->getAs<ComplexType>())1001ArgTy = CT->getElementType();1002
1003if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {1004assert(Kind == CK_Equal &&1005"member pointers may only be compared for equality");1006return CGF.CGM.getCXXABI().EmitMemberPointerComparison(1007CGF, LHS, RHS, MPT, /*IsInequality*/ false);1008}1009
1010// Compute the comparison instructions for the specified comparison kind.1011struct CmpInstInfo {1012const char *Name;1013llvm::CmpInst::Predicate FCmp;1014llvm::CmpInst::Predicate SCmp;1015llvm::CmpInst::Predicate UCmp;1016};1017CmpInstInfo InstInfo = [&]() -> CmpInstInfo {1018using FI = llvm::FCmpInst;1019using II = llvm::ICmpInst;1020switch (Kind) {1021case CK_Less:1022return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};1023case CK_Greater:1024return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};1025case CK_Equal:1026return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};1027}1028llvm_unreachable("Unrecognised CompareKind enum");1029}();1030
1031if (ArgTy->hasFloatingRepresentation())1032return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,1033llvm::Twine(InstInfo.Name) + NameSuffix);1034if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {1035auto Inst =1036ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;1037return Builder.CreateICmp(Inst, LHS, RHS,1038llvm::Twine(InstInfo.Name) + NameSuffix);1039}1040
1041llvm_unreachable("unsupported aggregate binary expression should have "1042"already been handled");1043}
1044
// Emit a three-way comparison (<=>) whose result is a comparison-category
// class object (e.g. std::strong_ordering): compute the primitive
// comparisons, select the matching category constant, and store it into the
// category object's single field in the destination slot.
void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  // Only scalar-ish operand types are handled here; anything else (e.g. a
  // record operand) is reported as unsupported.
  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  // Complex operands yield a (real, imag) pair; everything else puts its
  // single value in .first.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  // Emit one primitive comparison; complex operands compare both components
  // (equality only) and AND the results.
  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  // Materialize the constant integer encoding a comparison-category value.
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    // nullptr <=> nullptr is always equivalent.
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    // Total ordering: eq ? equal : (lt ? less : greater).
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    // Partial ordering: fall through to unordered when neither ==, >, nor <
    // holds.
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
1124
1125void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {1126if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)1127VisitPointerToDataMemberBinaryOperator(E);1128else1129CGF.ErrorUnsupported(E, "aggregate binary expression");1130}
1131
// Emit ".*" / "->*" with an aggregate result: the operator yields an lvalue
// for the addressed member, which is then copied into the destination.
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}
1137
1138/// Is the value of the given expression possibly a reference to or
1139/// into a __block variable?
1140static bool isBlockVarRef(const Expr *E) {1141// Make sure we look through parens.1142E = E->IgnoreParens();1143
1144// Check for a direct reference to a __block variable.1145if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {1146const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());1147return (var && var->hasAttr<BlocksAttr>());1148}1149
1150// More complicated stuff.1151
1152// Binary operators.1153if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {1154// For an assignment or pointer-to-member operation, just care1155// about the LHS.1156if (op->isAssignmentOp() || op->isPtrMemOp())1157return isBlockVarRef(op->getLHS());1158
1159// For a comma, just care about the RHS.1160if (op->getOpcode() == BO_Comma)1161return isBlockVarRef(op->getRHS());1162
1163// FIXME: pointer arithmetic?1164return false;1165
1166// Check both sides of a conditional operator.1167} else if (const AbstractConditionalOperator *op1168= dyn_cast<AbstractConditionalOperator>(E)) {1169return isBlockVarRef(op->getTrueExpr())1170|| isBlockVarRef(op->getFalseExpr());1171
1172// OVEs are required to support BinaryConditionalOperators.1173} else if (const OpaqueValueExpr *op1174= dyn_cast<OpaqueValueExpr>(E)) {1175if (const Expr *src = op->getSourceExpr())1176return isBlockVarRef(src);1177
1178// Casts are necessary to get things like (*(int*)&var) = foo().1179// We don't really care about the kind of cast here, except1180// we don't want to look through l2r casts, because it's okay1181// to get the *value* in a __block variable.1182} else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {1183if (cast->getCastKind() == CK_LValueToRValue)1184return false;1185return isBlockVarRef(cast->getSubExpr());1186
1187// Handle unary operators. Again, just aggressively look through1188// it, ignoring the operation.1189} else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {1190return isBlockVarRef(uop->getSubExpr());1191
1192// Look into the base of a field access.1193} else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {1194return isBlockVarRef(mem->getBase());1195
1196// Look into the base of a subscript.1197} else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {1198return isBlockVarRef(sub->getBase());1199}1200
1201return false;1202}
1203
// Emit an aggregate assignment: evaluate the RHS directly into the LHS where
// possible, handling __block-variable aliasing, atomic LHS types, volatile
// members, and non-trivial C struct destruction of the result copy.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  // If the result copy is a non-trivial C struct that nobody else will
  // destroy, register a destructor for it.
  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}
1271
// Emit ?: (or GNU ?:) with an aggregate result: branch on the condition and
// emit each arm into the same destination slot, keeping profile counters and
// non-trivial C struct destruction consistent across both arms.
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E->getTrueExpr());
  else
    CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E->getFalseExpr());
  Visit(E->getFalseExpr());
  eval.end(CGF);

  // If this emitter promised destruction of a non-trivial C struct result,
  // push the destructor now that both arms have initialized the slot.
  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);
}
1326
// __builtin_choose_expr: only the statically chosen arm is ever emitted.
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}
1330
// Emit va_arg with an aggregate type directly into the destination slot.
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  CGF.EmitVAArg(VE, ArgValue, Dest);

  // If EmitVAArg fails, emit an error.
  if (!ArgValue.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }
}
1341
// Emit a temporary-binding expression: evaluate the subexpression into the
// destination and arrange for the temporary's destructor to run, unless a
// surrounding context already promised to destroy the slot.
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
1357
1358void
1359AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {1360AggValueSlot Slot = EnsureSlot(E->getType());1361CGF.EmitCXXConstructExpr(E, Slot);1362}
1363
// Emit construction via an inheriting constructor (C++11 "using Base::Base"),
// forwarding the virtual-base flags from the expression.
void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}
1371
1372void
1373AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {1374AggValueSlot Slot = EnsureSlot(E->getType());1375LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());1376
1377// We'll need to enter cleanup scopes in case any of the element1378// initializers throws an exception or contains branch out of the expressions.1379CodeGenFunction::CleanupDeactivationScope scope(CGF);1380
1381CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();1382for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),1383e = E->capture_init_end();1384i != e; ++i, ++CurField) {1385// Emit initialization1386LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);1387if (CurField->hasCapturedVLAType()) {1388CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);1389continue;1390}1391
1392EmitInitializationToLValue(*i, LV);1393
1394// Push a destructor if necessary.1395if (QualType::DestructionKind DtorKind =1396CurField->getType().isDestructedType()) {1397assert(LV.isSimple());1398if (DtorKind)1399CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),1400CurField->getType(),1401CGF.getDestroyer(DtorKind), false);1402}1403}1404}
1405
// Emit the subexpression and run any cleanups it registered when this
// scope exits.
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}
1410
1411void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {1412QualType T = E->getType();1413AggValueSlot Slot = EnsureSlot(T);1414EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));1415}
1416
1417void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {1418QualType T = E->getType();1419AggValueSlot Slot = EnsureSlot(T);1420EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));1421}
1422
/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
    // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
    // Conversions between (possibly-complex) integral, (possibly-complex)
    // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
    // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
    // Language extensions.
  case CK_VectorSplat:
  case CK_MatrixCast:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_HLSLVectorTruncation:
    return true;

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_Dynamic:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
  case CK_HLSLArrayRValue:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
1519
1520/// isSimpleZero - If emitting this value will obviously just cause a store of
1521/// zero to memory, return true. This can return false if uncertain, so it just
1522/// handles simple cases.
1523static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {1524E = E->IgnoreParens();1525while (auto *CE = dyn_cast<CastExpr>(E)) {1526if (!castPreservesZero(CE))1527break;1528E = CE->getSubExpr()->IgnoreParens();1529}1530
1531// 01532if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))1533return IL->getValue() == 0;1534// +0.01535if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))1536return FL->getValue().isPosZero();1537// int()1538if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&1539CGF.getTypes().isZeroInitializable(E->getType()))1540return true;1541// (int*)0 - Null pointer expressions.1542if (const CastExpr *ICE = dyn_cast<CastExpr>(E))1543return ICE->getCastKind() == CK_NullToPointer &&1544CGF.getTypes().isPointerZeroInitializable(E->getType()) &&1545!E->HasSideEffects(CGF.getContext());1546// '\0'1547if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))1548return CL->getValue() == 0;1549
1550// Otherwise, hard case: conservatively return false.1551return false;1552}
1553
1554
/// Emit the initializer E into the lvalue LV, dispatching on the evaluation
/// kind (scalar / complex / aggregate) of the destination type. Stores that
/// would rewrite already-zeroed memory with zero are skipped.
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    // Reference members are bound, not copied.
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
1594
/// Store a null/zero initialization into the given lvalue, choosing between
/// a scalar null-constant store and a full memory null-initialization.
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}
1621
// Parenthesized aggregate initialization, e.g. Aggr(1, 2): shares the
// emission path with braced initializer lists.
void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
  VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
                                  E->getInitializedFieldInUnion(),
                                  E->getArrayFiller());
}
1627
// Emit a braced initializer list into the destination slot.
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
  // GNU [lo ... hi] range designators are not supported here.
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  // A transparent list is just a wrapper around its single initializer.
  if (E->isTransparent())
    return Visit(E->getInit(0));

  VisitCXXParenListOrInitListExpr(
      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
}
1638
// Shared emission for braced init lists and parenthesized aggregate
// initialization: handles constant arrays, VLAs (empty init only), and
// records (bases, unions, then fields in order), registering destructors
// for initialized subobjects as needed.
void AggExprEmitter::VisitCXXParenListOrInitListExpr(
    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C =
          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(ExprToVisit->getType(),
                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
    return;
  }
#endif

  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());

  // Handle initialization of an array.
  if (ExprToVisit->getType()->isConstantArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
                  InitExprs, ArrayFiller);
    return;
  } else if (ExprToVisit->getType()->isVariableArrayType()) {
    // A variable array type that has an initializer can only do empty
    // initialization. And because this feature is not exposed as an extension
    // in C++, we can safely memset the array memory to zero.
    assert(InitExprs.size() == 0 &&
           "you can only use an empty initializer with VLAs");
    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
    return;
  }

  assert(ExprToVisit->getType()->isRecordType() &&
         "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the approprate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = InitExprs.size();
  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(NumInitElements >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType())
        CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!InitializedFieldInUnion) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(
            (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
            "Only unnamed bitfields or anonymous class allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = InitializedFieldInUnion;

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(InitExprs[0], FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
      break;


    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barries for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (dtorKind) {
        CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                            field->getType(),
                                            CGF.getDestroyer(dtorKind), false);
      }
    }
  }
}
1800
/// Emit an ArrayInitLoopExpr: a loop that initializes each element of the
/// destination array by evaluating the sub-expression once per element.
///
/// \param outerBegin When this call is the inner dimension of a
///        multidimensional array initialization, the first element of the
///        *outermost* array; the partial-array EH cleanup is registered over
///        that whole range so only one destructor loop is emitted. Null for
///        the outermost (or only) dimension.
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression once; each iteration re-uses its value
  // through the OpaqueValueExpr binding.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  // Zero-length array: nothing to initialize and no loop to emit.
  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
                                                 destPtr.emitRawPointer(CGF),
                                                 indices, "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body. The loop counter is a PHI that starts at zero and is
  // advanced at the bottom of the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element =
      Builder.CreateInBoundsGEP(llvmElementType, begin, index);

  // Prepare for a cleanup: if element destruction can throw, push a
  // partial-array cleanup that destroys [outerBegin, element) on unwind.
  // When there is an inner loop, the cleanup is owned by the innermost
  // dimension instead, so we skip it here.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    // Record that we did not push a cleanup, so we don't deactivate one below.
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup:
      // recurse with the same outerBegin so the innermost loop guards the
      // whole multidimensional range.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element. NUW is valid: index < numElements <= UINT64_MAX.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one. Once the loop has
  // completed, full-object destruction (if any) takes over.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}
1896
1897void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {1898AggValueSlot Dest = EnsureSlot(E->getType());1899
1900LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());1901EmitInitializationToLValue(E->getBase(), DestLV);1902VisitInitListExpr(E->getUpdater());1903}
1904
1905//===----------------------------------------------------------------------===//
1906// Entry Points into this File
1907//===----------------------------------------------------------------------===//
1908
1909/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1910/// non-zero bytes that will be stored when outputting the initializer for the
1911/// specified initializer expression.
1912static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {1913if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))1914E = MTE->getSubExpr();1915E = E->IgnoreParenNoopCasts(CGF.getContext());1916
1917// 0 and 0.0 won't require any non-zero stores!1918if (isSimpleZero(E, CGF)) return CharUnits::Zero();1919
1920// If this is an initlist expr, sum up the size of sizes of the (present)1921// elements. If this is something weird, assume the whole thing is non-zero.1922const InitListExpr *ILE = dyn_cast<InitListExpr>(E);1923while (ILE && ILE->isTransparent())1924ILE = dyn_cast<InitListExpr>(ILE->getInit(0));1925if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))1926return CGF.getContext().getTypeSizeInChars(E->getType());1927
1928// InitListExprs for structs have to be handled carefully. If there are1929// reference members, we need to consider the size of the reference, not the1930// referencee. InitListExprs for unions and arrays can't have references.1931if (const RecordType *RT = E->getType()->getAs<RecordType>()) {1932if (!RT->isUnionType()) {1933RecordDecl *SD = RT->getDecl();1934CharUnits NumNonZeroBytes = CharUnits::Zero();1935
1936unsigned ILEElement = 0;1937if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))1938while (ILEElement != CXXRD->getNumBases())1939NumNonZeroBytes +=1940GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);1941for (const auto *Field : SD->fields()) {1942// We're done once we hit the flexible array member or run out of1943// InitListExpr elements.1944if (Field->getType()->isIncompleteArrayType() ||1945ILEElement == ILE->getNumInits())1946break;1947if (Field->isUnnamedBitField())1948continue;1949
1950const Expr *E = ILE->getInit(ILEElement++);1951
1952// Reference values are always non-null and have the width of a pointer.1953if (Field->getType()->isReferenceType())1954NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(1955CGF.getTarget().getPointerWidth(LangAS::Default));1956else1957NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);1958}1959
1960return NumNonZeroBytes;1961}1962}1963
1964// FIXME: This overestimates the number of non-zero bytes for bit-fields.1965CharUnits NumNonZeroBytes = CharUnits::Zero();1966for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)1967NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);1968return NumNonZeroBytes;1969}
1970
1971/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1972/// zeros in it, emit a memset and avoid storing the individual zeros.
1973///
1974static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,1975CodeGenFunction &CGF) {1976// If the slot is already known to be zeroed, nothing to do. Don't mess with1977// volatile stores.1978if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())1979return;1980
1981// C++ objects with a user-declared constructor don't need zero'ing.1982if (CGF.getLangOpts().CPlusPlus)1983if (const RecordType *RT = CGF.getContext()1984.getBaseElementType(E->getType())->getAs<RecordType>()) {1985const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());1986if (RD->hasUserDeclaredConstructor())1987return;1988}1989
1990// If the type is 16-bytes or smaller, prefer individual stores over memset.1991CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());1992if (Size <= CharUnits::fromQuantity(16))1993return;1994
1995// Check to see if over 3/4 of the initializer are known to be zero. If so,1996// we prefer to emit memset + individual stores for the rest.1997CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);1998if (NumNonZeroBytes*4 > Size)1999return;2000
2001// Okay, it seems like a good idea to use an initial memset, emit the call.2002llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());2003
2004Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);2005CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);2006
2007// Tell the AggExprEmitter that the slot is known zero.2008Slot.setZeroed();2009}
2010
2011
2012
2013
2014/// EmitAggExpr - Emit the computation of the specified expression of aggregate
2015/// type. The result is computed into DestPtr. Note that if DestPtr is null,
2016/// the value of the aggregate expression is not needed. If VolatileDest is
2017/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  // Only aggregate-kind expressions may be emitted through this path, and an
  // un-ignored slot must carry a real destination address.
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible: for large, mostly-zero initializers this
  // emits an up-front memset and marks the slot as zeroed so the emitter can
  // skip the redundant zero stores.
  CheckAggExprForMemSetUse(Slot, E, *this);

  // Dispatch to the AggExprEmitter visitor; the result-unused flag mirrors
  // whether the slot is ignored.
  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}
2029
2030LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {2031assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");2032Address Temp = CreateMemTemp(E->getType());2033LValue LV = MakeAddrLValue(Temp, E->getType());2034EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,2035AggValueSlot::DoesNotNeedGCBarriers,2036AggValueSlot::IsNotAliased,2037AggValueSlot::DoesNotOverlap));2038return LV;2039}
2040
2041void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,2042const LValue &Src,2043ExprValueKind SrcKind) {2044return AggExprEmitter(*this, Dest, Dest.isIgnored())2045.EmitFinalDestCopy(Type, Src, SrcKind);2046}
2047
2048AggValueSlot::Overlap_t2049CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {2050if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())2051return AggValueSlot::DoesNotOverlap;2052
2053// If the field lies entirely within the enclosing class's nvsize, its tail2054// padding cannot overlap any already-initialized object. (The only subobjects2055// with greater addresses that might already be initialized are vbases.)2056const RecordDecl *ClassRD = FD->getParent();2057const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);2058if (Layout.getFieldOffset(FD->getFieldIndex()) +2059getContext().getTypeSize(FD->getType()) <=2060(uint64_t)getContext().toBits(Layout.getNonVirtualSize()))2061return AggValueSlot::DoesNotOverlap;2062
2063// The tail padding may contain values we need to preserve.2064return AggValueSlot::MayOverlap;2065}
2066
2067AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(2068const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {2069// If the most-derived object is a field declared with [[no_unique_address]],2070// the tail padding of any virtual base could be reused for other subobjects2071// of that field's class.2072if (IsVirtual)2073return AggValueSlot::MayOverlap;2074
2075// If the base class is laid out entirely within the nvsize of the derived2076// class, its tail padding cannot yet be initialized, so we can issue2077// stores at the full width of the base class.2078const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);2079if (Layout.getBaseClassOffset(BaseRD) +2080getContext().getASTRecordLayout(BaseRD).getSize() <=2081Layout.getNonVirtualSize())2082return AggValueSlot::DoesNotOverlap;2083
2084// The tail padding may contain values we need to preserve.2085return AggValueSlot::MayOverlap;2086}
2087
/// Emit a bitwise copy of an aggregate from Src to Dest, normally as an
/// llvm.memcpy (or an ObjC GC "memmove collectable" call, or a CUDA
/// surface/texture copy when the target hooks claim the copy).
///
/// \param MayOverlap If the destination may be a potentially-overlapping
///        subobject, only the type's data size is copied so that tail
///        padding owned by another object is left untouched.
/// \param isVolatile True when either side of the copy is volatile.
void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress();
  Address SrcPtr = Src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // A bitwise copy is only valid for types whose copy/move can be done
      // trivially (or that opt in via [[trivial_abi]] / are unions).
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // On the CUDA device side, builtin surface/texture types may need a
  // target-specific copy; if the hook handles it, we are done.
  if (getLangOpts().CUDAIsDevice) {
    if (Ty->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    } else if (Ty->isCUDADeviceBuiltinTextureType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in anyway the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.Width.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    // For a VLA the copy size is (runtime element count) * (element size).
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.Width.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = DestPtr.withElementType(Int8Ty);
  SrcPtr = SrcPtr.withElementType(Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    // Under ObjC GC, records containing object members must be copied with
    // the runtime's collectable-aware memmove.
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    // Likewise for arrays whose ultimate element type contains object members.
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it in to scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    // New struct-path TBAA: tag the memcpy with access info merged from both
    // sides of the transfer.
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}
2213