//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
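  // Where the result of the aggregate expression being visited should end up,
  // and whether the enclosing context actually uses that result.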
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         CodeGenFunction::ExprValueKind SrcValueKind =
                             CodeGenFunction::EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      Address StoreDest = Dest.getAddress();
      // The emitted value is guaranteed to have the same size as the
      // destination but can have a different type. Just do a bitcast in this
      // case to avoid incorrect GEPs.
      if (Result->getType() != StoreDest.getType())
        StoreDest = StoreDest.withElementType(Result->getType());

      CGF.EmitAggregateStore(Result, StoreDest,
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
  void VisitPackIndexingExpr(PackIndexingExpr *E) {
    Visit(E->getSelectedExpr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && Dest.isIgnored());

  Address RetAddr = Address::invalid();
  RawAddress RetAllocaAddr = RawAddress::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
                                 Src.getAggregatePointer(E->getType(), CGF));
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(
    QualType type, const LValue &src,
    CodeGenFunction::ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == CodeGenFunction::EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
      AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
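/// For example, `std::initializer_list<int> il = {1, 2, 3};` is emitted as a
/// backing array of three ints plus a record holding the array's start pointer
/// and either its length or its end pointer, whichever layout the record uses.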
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Ctx.hasSameType(Field->getType()->getPointeeType(),
                         ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");

  // Start pointer.
  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);

  } else {
    // End pointer.
    assert(Field->getType()->isPointerType() &&
           Ctx.hasSameType(Field->getType()->getPointeeType(),
                           ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
        "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");
}

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
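/// For example, in `int x[8] = {1, 2};` the six trailing elements have a
/// trivial filler, so they can be zero-initialized in bulk.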
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
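  // A C23 #embed in the braces expands to one initializer per data element,
  // so count those toward the number of explicitly initialized elements; the
  // EmbedExpr itself already occupies one slot in Args.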
  for (const auto *Init : Args) {
    if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
      NumInitElements += Embed->getDataElementCount() - 1;
      if (NumInitElements > NumArrayElements) {
        NumInitElements = NumArrayElements;
        break;
      }
    }
  }

  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
        CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
        CGM.GetGlobalConstantAddressSpace());
    LangAS AS = GVArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  CodeGenFunction::CleanupDeactivationScope deactivation(CGF);

  llvm::Value *begin = DestPtr.emitRawPointer(CGF);
  if (dtorKind) {
    CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    llvm::Instruction *dominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
        .AddAuxAllocas(allocaTracker.Take());

    CGF.DeferredDeactivationCleanupStack.push_back(
        {CGF.EHStack.stable_begin(), dominatingIP});
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
    llvm::Value *element = begin;
    if (ArrayIndex > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, begin,
          llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid())
        Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Init, elementLV);
    return true;
  };

  unsigned ArrayIndex = 0;
  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    if (ArrayIndex >= NumInitElements)
      break;
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
      EmbedS->doForEachDataElement(Emit, ArrayIndex);
    } else {
      Emit(Args[i], ArrayIndex);
      ArrayIndex++;
    }
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    llvm::Value *element = begin;
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element,
          llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
          "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
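/// This is used below to collapse matched conversion pairs, e.g.
/// NonAtomicToAtomic(AtomicToNonAtomic(x)), back down to x.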
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
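    // e.g. `(union U)x`, which initializes the union member whose type
    // matches the operand.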
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
    Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
     return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    [[fallthrough]];

  case CK_HLSLArrayRValue:
    Visit(E->getSubExpr());
    break;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:

  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
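  // A GNU statement expression, e.g. `({ struct S s = f(); s; })`; the value
  // of its last statement is emitted directly into the destination slot.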
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

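  // Lower the <=> result to nested selects over the category's constant
  // values: for a total ordering, (l == r) ? equal : ((l < r) ? less :
  // greater), with an additional unordered arm when the ordering is partial.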
  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have a volatile member.
1257
  if (!LHSSlot.isVolatile() &&
1258
      CGF.hasVolatileMember(E->getLHS()->getType()))
1259
    LHSSlot.setVolatile(true);
1260

1261
  CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1262

1263
  // Copy into the destination if the assignment isn't ignored.
1264
  EmitFinalDestCopy(E->getType(), LHS);
1265

1266
  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1267
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
1268
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1269
                    E->getType());
1270
}
1271

1272
void AggExprEmitter::
1273
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1274
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1275
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1276
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1277

1278
  // Bind the common expression if necessary.
1279
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1280

1281
  CodeGenFunction::ConditionalEvaluation eval(CGF);
1282
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1283
                           CGF.getProfileCount(E));
1284

1285
  // Save whether the destination's lifetime is externally managed.
1286
  bool isExternallyDestructed = Dest.isExternallyDestructed();
1287
  bool destructNonTrivialCStruct =
1288
      !isExternallyDestructed &&
1289
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
1290
  isExternallyDestructed |= destructNonTrivialCStruct;
1291
  Dest.setExternallyDestructed(isExternallyDestructed);
1292

1293
  eval.begin(CGF);
1294
  CGF.EmitBlock(LHSBlock);
1295
  if (llvm::EnableSingleByteCoverage)
1296
    CGF.incrementProfileCounter(E->getTrueExpr());
1297
  else
1298
    CGF.incrementProfileCounter(E);
1299
  Visit(E->getTrueExpr());
1300
  eval.end(CGF);
1301

1302
  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1303
  CGF.Builder.CreateBr(ContBlock);
1304

1305
  // If the result of an agg expression is unused, then the emission
1306
  // of the LHS might need to create a destination slot.  That's fine
1307
  // with us, and we can safely emit the RHS into the same slot, but
1308
  // we shouldn't claim that it's already being destructed.
1309
  Dest.setExternallyDestructed(isExternallyDestructed);
1310

1311
  eval.begin(CGF);
1312
  CGF.EmitBlock(RHSBlock);
1313
  if (llvm::EnableSingleByteCoverage)
1314
    CGF.incrementProfileCounter(E->getFalseExpr());
1315
  Visit(E->getFalseExpr());
1316
  eval.end(CGF);
1317

1318
  if (destructNonTrivialCStruct)
1319
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
1320
                    E->getType());
1321

1322
  CGF.EmitBlock(ContBlock);
1323
  if (llvm::EnableSingleByteCoverage)
1324
    CGF.incrementProfileCounter(E);
1325
}
1326

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  CGF.EmitVAArg(VE, ArgValue, Dest);

  // If EmitVAArg fails, emit an error.
  if (!ArgValue.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
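// For example, in:
//
//   struct S { S(); ~S(); };
//   void g(const S &);
//   void f() { g(S()); }
//
// the temporary S() is wrapped in a CXXBindTemporaryExpr: the code above
// emits it into a slot and then pushes a cleanup running ~S() at the end of
// the full-expression, unless an enclosing context already promised to
// destroy the slot.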

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception or contains a branch out of the
  // expression.
  CodeGenFunction::CleanupDeactivationScope scope(CGF);

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization.
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (DtorKind)
        CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                            CurField->getType(),
                                            CGF.getDestroyer(DtorKind), false);
    }
  }
}
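// For example, for:
//
//   struct S { S(const S &); ~S(); };
//   void f(S s) { auto l = [s] {}; }
//
// the closure object is initialized field by field: the capture of `s`
// copy-constructs the corresponding closure field, and a deferred cleanup is
// pushed so that field is destroyed if a later capture's initializer throws.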

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
    // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
    // Conversions between (possibly-complex) integral, (possibly-complex)
    // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
    // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
    // Language extensions.
  case CK_VectorSplat:
  case CK_MatrixCast:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_HLSLVectorTruncation:
    return true;

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_Dynamic:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
  case CK_HLSLArrayRValue:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
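// For example, an initializer element such as `(long)(short)0` is still
// all-zero-bits after the integral casts, so it can be folded into a
// surrounding memset.  Member-pointer conversions are excluded above because
// a null member pointer need not be all-zero-bits; e.g. the Itanium C++ ABI
// represents a null data member pointer as -1.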

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();
  while (auto *CE = dyn_cast<CastExpr>(E)) {
    if (!castPreservesZero(CE))
      break;
    E = CE->getSubExpr()->IgnoreParens();
  }

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
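// For example, given:
//
//   struct P { int i; float f; char c; int *p; };
//   struct P p = { 0, 0.0f, '\0', (int *)0 };
//
// every element is a simple zero, so when the destination is already zeroed
// no stores need to be emitted at all.  Note that -0.0 is not a simple zero:
// its bit pattern has the sign bit set.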

void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zeroed memory location is a no-op.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
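// For example, when initializing:
//
//   struct S { int i; _Complex double c; struct T t; };
//   struct S s = { 1, 2.0, { 3 } };
//
// the three members take the TEK_Scalar, TEK_Complex, and TEK_Aggregate
// paths above, respectively.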

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
  VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
                                  E->getInitializedFieldInUnion(),
                                  E->getArrayFiller());
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  VisitCXXParenListOrInitListExpr(
      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
}

void AggExprEmitter::VisitCXXParenListOrInitListExpr(
    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C =
          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(ExprToVisit->getType(),
                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
    return;
  }
#endif

  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());

  // Handle initialization of an array.
  if (ExprToVisit->getType()->isConstantArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
                  InitExprs, ArrayFiller);
    return;
  } else if (ExprToVisit->getType()->isVariableArrayType()) {
    // A variable array type that has an initializer can only do empty
    // initialization. And because this feature is not exposed as an extension
    // in C++, we can safely memset the array memory to zero.
    assert(InitExprs.size() == 0 &&
           "you can only use an empty initializer with VLAs");
    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
    return;
  }

  assert(ExprToVisit->getType()->isRecordType() &&
         "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = InitExprs.size();
  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(NumInitElements >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType())
        CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!InitializedFieldInUnion) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(
            (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
            "Only unnamed bitfields or anonymous classes allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = InitializedFieldInUnion;

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[0], FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (dtorKind) {
        CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                            field->getType(),
                                            CGF.getDestroyer(dtorKind), false);
      }
    }
  }
}
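// For example, for:
//
//   struct Big { int a; int rest[100]; };
//   struct Big b = { 1 };
//
// only the store of 1 into `a` is emitted explicitly: once the explicit
// initializers run out, the field loop stops early because the slot was
// already zeroed via CheckAggExprForMemSetUse (below) and the remaining
// fields are zero-initializable.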

void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
                                                 destPtr.emitRawPointer(CGF),
                                                 indices, "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element =
      Builder.CreateInBoundsGEP(llvmElementType, begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}
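// For example, capturing an array by value:
//
//   int arr[8];
//   auto l = [arr] {};
//
// produces an ArrayInitLoopExpr that copies `arr` into the closure field;
// the code above emits it as a counted loop over arrayinit.index rather than
// eight unrolled element copies.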

void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}
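// This typically arises (as a C extension) when a designator overwrites part
// of a subobject whose base initializer has side effects, e.g.:
//
//   struct T { int x, y; };
//   struct T get(void);
//   struct S { struct T t; } s = { .t = get(), .t.y = 7 };
//
// The base (`get()`) is emitted into the slot first, then the updater init
// list overwrites the designated pieces in place.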

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
    E = MTE->getSubExpr();
  E = E->IgnoreParenNoopCasts(CGF.getContext());

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  while (ILE && ILE->isTransparent())
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitField())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(LangAS::Default));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  // FIXME: This overestimates the number of non-zero bytes for bit-fields.
  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
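// For example, for `int arr[100] = { 1, 2 };` this returns 8 (two non-zero
// 4-byte stores) against a 400-byte total, so the caller below decides to
// memset the whole array and then emit just the two explicit stores.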

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}
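// For example, `struct { int a; char pad[60]; } x = { 5 };` is 64 bytes with
// only 4 known-non-zero bytes; since 4*4 <= 64, a single 64-byte memset is
// emitted followed by one store of 5 into x.a.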

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed; otherwise
/// the slot must have a valid address.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased,
                                         AggValueSlot::DoesNotOverlap));
  return LV;
}

void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
                                           const LValue &Src,
                                           ExprValueKind SrcKind) {
  return AggExprEmitter(*this, Dest, Dest.isIgnored())
      .EmitFinalDestCopy(Type, Src, SrcKind);
}

AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
    return AggValueSlot::DoesNotOverlap;

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object. (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *ClassRD = FD->getParent();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
          getContext().getTypeSize(FD->getType()) <=
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}
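// For example, with:
//
//   struct B { int i; char c; };                      // size 8, data size 5
//   struct D { [[no_unique_address]] B b; char x; };  // x may sit at offset 5
//
// `b` still gets DoesNotOverlap: fields are initialized in declaration order,
// so nothing can live in `b`'s tail padding yet when `b` is emitted.
// MayOverlap is needed only when the field reaches beyond the enclosing
// class's nvsize, where already-constructed virtual bases may reside.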

AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (IsVirtual)
    return AggValueSlot::MayOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}
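// For example, with:
//
//   struct Base { int i; char c; };     // size 8, data size 5
//   struct Derived : Base { char d; };  // d laid out in Base's tail padding
//
// the non-virtual Base subobject is initialized before Derived's fields, so
// stores at Base's full width are still safe and DoesNotOverlap is returned.
// A virtual base gets MayOverlap because its tail padding may be shared.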

void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress();
  Address SrcPtr = Src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  if (getLangOpts().CUDAIsDevice) {
    if (Ty->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    } else if (Ty->isCUDADeviceBuiltinTextureType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.Width.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.Width.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = DestPtr.withElementType(Int8Ty);
  SrcPtr = SrcPtr.withElementType(Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}
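// For example, when copying a potentially-overlapping subobject of type
// `struct B { int i; char c; }`, the memcpy uses B's data size (5 bytes)
// rather than sizeof(B) (8 bytes), so it cannot clobber another object
// placed in B's tail padding.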
