1 | //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This contains code to emit Aggregate Expr nodes as LLVM code. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CGCXXABI.h" |
14 | #include "CGDebugInfo.h" |
15 | #include "CGHLSLRuntime.h" |
16 | #include "CGObjCRuntime.h" |
17 | #include "CGRecordLayout.h" |
18 | #include "CodeGenFunction.h" |
19 | #include "CodeGenModule.h" |
20 | #include "ConstantEmitter.h" |
21 | #include "EHScopeStack.h" |
22 | #include "TargetInfo.h" |
23 | #include "clang/AST/ASTContext.h" |
24 | #include "clang/AST/Attr.h" |
25 | #include "clang/AST/DeclCXX.h" |
26 | #include "clang/AST/DeclTemplate.h" |
27 | #include "clang/AST/StmtVisitor.h" |
28 | #include "llvm/IR/Constants.h" |
29 | #include "llvm/IR/Function.h" |
30 | #include "llvm/IR/GlobalVariable.h" |
31 | #include "llvm/IR/Instruction.h" |
32 | #include "llvm/IR/IntrinsicInst.h" |
33 | #include "llvm/IR/Intrinsics.h" |
34 | using namespace clang; |
35 | using namespace CodeGen; |
36 | |
37 | //===----------------------------------------------------------------------===// |
38 | // Aggregate Expression Emitter |
39 | //===----------------------------------------------------------------------===// |
40 | |
41 | namespace llvm { |
42 | extern cl::opt<bool> EnableSingleByteCoverage; |
43 | } // namespace llvm |
44 | |
45 | namespace { |
46 | class AggExprEmitter : public StmtVisitor<AggExprEmitter> { |
47 | CodeGenFunction &CGF; |
48 | CGBuilderTy &Builder; |
49 | AggValueSlot Dest; |
50 | bool IsResultUnused; |
51 | |
52 | AggValueSlot EnsureSlot(QualType T) { |
53 | if (!Dest.isIgnored()) return Dest; |
54 | return CGF.CreateAggTemp(T, Name: "agg.tmp.ensured" ); |
55 | } |
56 | void EnsureDest(QualType T) { |
57 | if (!Dest.isIgnored()) return; |
58 | Dest = CGF.CreateAggTemp(T, Name: "agg.tmp.ensured" ); |
59 | } |
60 | |
61 | // Calls `Fn` with a valid return value slot, potentially creating a temporary |
62 | // to do so. If a temporary is created, an appropriate copy into `Dest` will |
63 | // be emitted, as will lifetime markers. |
64 | // |
65 | // The given function should take a ReturnValueSlot, and return an RValue that |
66 | // points to said slot. |
67 | void withReturnValueSlot(const Expr *E, |
68 | llvm::function_ref<RValue(ReturnValueSlot)> Fn); |
69 | |
70 | void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd, |
71 | const FieldDecl *NextField); |
72 | |
73 | public: |
74 | AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) |
75 | : CGF(cgf), Builder(CGF.Builder), Dest(Dest), |
76 | IsResultUnused(IsResultUnused) { } |
77 | |
78 | //===--------------------------------------------------------------------===// |
79 | // Utilities |
80 | //===--------------------------------------------------------------------===// |
81 | |
82 | /// EmitAggLoadOfLValue - Given an expression with aggregate type that |
83 | /// represents a value lvalue, this method emits the address of the lvalue, |
84 | /// then loads the result into DestPtr. |
85 | void EmitAggLoadOfLValue(const Expr *E); |
86 | |
87 | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
88 | /// SrcIsRValue is true if source comes from an RValue. |
89 | void EmitFinalDestCopy(QualType type, const LValue &src, |
90 | CodeGenFunction::ExprValueKind SrcValueKind = |
91 | CodeGenFunction::EVK_NonRValue); |
92 | void EmitFinalDestCopy(QualType type, RValue src); |
93 | void EmitCopy(QualType type, const AggValueSlot &dest, |
94 | const AggValueSlot &src); |
95 | |
96 | void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy, |
97 | Expr *ExprToVisit, ArrayRef<Expr *> Args, |
98 | Expr *ArrayFiller); |
99 | |
100 | AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { |
101 | if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T)) |
102 | return AggValueSlot::NeedsGCBarriers; |
103 | return AggValueSlot::DoesNotNeedGCBarriers; |
104 | } |
105 | |
106 | bool TypeRequiresGCollection(QualType T); |
107 | |
108 | //===--------------------------------------------------------------------===// |
109 | // Visitor Methods |
110 | //===--------------------------------------------------------------------===// |
111 | |
112 | void Visit(Expr *E) { |
113 | ApplyDebugLocation DL(CGF, E); |
114 | StmtVisitor<AggExprEmitter>::Visit(S: E); |
115 | } |
116 | |
117 | void VisitStmt(Stmt *S) { |
118 | CGF.ErrorUnsupported(S, Type: "aggregate expression" ); |
119 | } |
120 | void VisitParenExpr(ParenExpr *PE) { Visit(E: PE->getSubExpr()); } |
121 | void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { |
122 | Visit(E: GE->getResultExpr()); |
123 | } |
124 | void VisitCoawaitExpr(CoawaitExpr *E) { |
125 | CGF.EmitCoawaitExpr(E: *E, aggSlot: Dest, ignoreResult: IsResultUnused); |
126 | } |
127 | void VisitCoyieldExpr(CoyieldExpr *E) { |
128 | CGF.EmitCoyieldExpr(E: *E, aggSlot: Dest, ignoreResult: IsResultUnused); |
129 | } |
130 | void VisitUnaryCoawait(UnaryOperator *E) { Visit(E: E->getSubExpr()); } |
131 | void VisitUnaryExtension(UnaryOperator *E) { Visit(E: E->getSubExpr()); } |
132 | void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { |
133 | return Visit(E: E->getReplacement()); |
134 | } |
135 | |
136 | void VisitConstantExpr(ConstantExpr *E) { |
137 | EnsureDest(T: E->getType()); |
138 | |
139 | if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(CE: E)) { |
140 | CGF.CreateCoercedStore( |
141 | Src: Result, Dst: Dest.getAddress(), |
142 | DstSize: llvm::TypeSize::getFixed( |
143 | ExactSize: Dest.getPreferredSize(Ctx&: CGF.getContext(), Type: E->getType()) |
144 | .getQuantity()), |
145 | DstIsVolatile: E->getType().isVolatileQualified()); |
146 | return; |
147 | } |
148 | return Visit(E: E->getSubExpr()); |
149 | } |
150 | |
151 | // l-values. |
152 | void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); } |
153 | void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(E: ME); } |
154 | void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); } |
155 | void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); } |
156 | void VisitCompoundLiteralExpr(CompoundLiteralExpr *E); |
157 | void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { |
158 | EmitAggLoadOfLValue(E); |
159 | } |
160 | void VisitPredefinedExpr(const PredefinedExpr *E) { |
161 | EmitAggLoadOfLValue(E); |
162 | } |
163 | |
164 | // Operators. |
165 | void VisitCastExpr(CastExpr *E); |
166 | void VisitCallExpr(const CallExpr *E); |
167 | void VisitStmtExpr(const StmtExpr *E); |
168 | void VisitBinaryOperator(const BinaryOperator *BO); |
169 | void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO); |
170 | void VisitBinAssign(const BinaryOperator *E); |
171 | void VisitBinComma(const BinaryOperator *E); |
172 | void VisitBinCmp(const BinaryOperator *E); |
173 | void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { |
174 | Visit(E: E->getSemanticForm()); |
175 | } |
176 | |
177 | void VisitObjCMessageExpr(ObjCMessageExpr *E); |
178 | void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { |
179 | EmitAggLoadOfLValue(E); |
180 | } |
181 | |
182 | void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E); |
183 | void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO); |
184 | void VisitChooseExpr(const ChooseExpr *CE); |
185 | void VisitInitListExpr(InitListExpr *E); |
186 | void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args, |
187 | FieldDecl *InitializedFieldInUnion, |
188 | Expr *ArrayFiller); |
189 | void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
190 | llvm::Value *outerBegin = nullptr); |
191 | void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); |
192 | void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing. |
193 | void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { |
194 | CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE); |
195 | Visit(E: DAE->getExpr()); |
196 | } |
197 | void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { |
198 | CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); |
199 | Visit(E: DIE->getExpr()); |
200 | } |
201 | void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); |
202 | void VisitCXXConstructExpr(const CXXConstructExpr *E); |
203 | void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); |
204 | void VisitLambdaExpr(LambdaExpr *E); |
205 | void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E); |
206 | void VisitExprWithCleanups(ExprWithCleanups *E); |
207 | void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); |
208 | void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); } |
209 | void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E); |
210 | void VisitOpaqueValueExpr(OpaqueValueExpr *E); |
211 | |
212 | void VisitPseudoObjectExpr(PseudoObjectExpr *E) { |
213 | if (E->isGLValue()) { |
214 | LValue LV = CGF.EmitPseudoObjectLValue(e: E); |
215 | return EmitFinalDestCopy(type: E->getType(), src: LV); |
216 | } |
217 | |
218 | AggValueSlot Slot = EnsureSlot(T: E->getType()); |
219 | bool NeedsDestruction = |
220 | !Slot.isExternallyDestructed() && |
221 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; |
222 | if (NeedsDestruction) |
223 | Slot.setExternallyDestructed(); |
224 | CGF.EmitPseudoObjectRValue(e: E, slot: Slot); |
225 | if (NeedsDestruction) |
226 | CGF.pushDestroy(dtorKind: QualType::DK_nontrivial_c_struct, addr: Slot.getAddress(), |
227 | type: E->getType()); |
228 | } |
229 | |
230 | void VisitVAArgExpr(VAArgExpr *E); |
231 | void VisitCXXParenListInitExpr(CXXParenListInitExpr *E); |
232 | void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args, |
233 | Expr *ArrayFiller); |
234 | |
235 | void EmitInitializationToLValue(Expr *E, LValue Address); |
236 | void EmitNullInitializationToLValue(LValue Address); |
237 | // case Expr::ChooseExprClass: |
238 | void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); } |
239 | void VisitAtomicExpr(AtomicExpr *E) { |
240 | RValue Res = CGF.EmitAtomicExpr(E); |
241 | EmitFinalDestCopy(type: E->getType(), src: Res); |
242 | } |
243 | void VisitPackIndexingExpr(PackIndexingExpr *E) { |
244 | Visit(E: E->getSelectedExpr()); |
245 | } |
246 | }; |
247 | } // end anonymous namespace. |
248 | |
249 | //===----------------------------------------------------------------------===// |
250 | // Utilities |
251 | //===----------------------------------------------------------------------===// |
252 | |
253 | /// EmitAggLoadOfLValue - Given an expression with aggregate type that |
254 | /// represents a value lvalue, this method emits the address of the lvalue, |
255 | /// then loads the result into DestPtr. |
256 | void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { |
257 | LValue LV = CGF.EmitLValue(E); |
258 | |
259 | // If the type of the l-value is atomic, then do an atomic load. |
260 | if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(Src: LV)) { |
261 | CGF.EmitAtomicLoad(LV, SL: E->getExprLoc(), Slot: Dest); |
262 | return; |
263 | } |
264 | |
265 | EmitFinalDestCopy(type: E->getType(), src: LV); |
266 | } |
267 | |
268 | /// True if the given aggregate type requires special GC API calls. |
269 | bool AggExprEmitter::TypeRequiresGCollection(QualType T) { |
270 | // Only record types have members that might require garbage collection. |
271 | const RecordType *RecordTy = T->getAs<RecordType>(); |
272 | if (!RecordTy) return false; |
273 | |
274 | // Don't mess with non-trivial C++ types. |
275 | RecordDecl *Record = RecordTy->getDecl(); |
276 | if (isa<CXXRecordDecl>(Val: Record) && |
277 | (cast<CXXRecordDecl>(Val: Record)->hasNonTrivialCopyConstructor() || |
278 | !cast<CXXRecordDecl>(Val: Record)->hasTrivialDestructor())) |
279 | return false; |
280 | |
281 | // Check whether the type has an object member. |
282 | return Record->hasObjectMember(); |
283 | } |
284 | |
285 | void AggExprEmitter::withReturnValueSlot( |
286 | const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) { |
287 | QualType RetTy = E->getType(); |
288 | bool RequiresDestruction = |
289 | !Dest.isExternallyDestructed() && |
290 | RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct; |
291 | |
292 | // If it makes no observable difference, save a memcpy + temporary. |
293 | // |
294 | // We need to always provide our own temporary if destruction is required. |
295 | // Otherwise, EmitCall will emit its own, notice that it's "unused", and end |
296 | // its lifetime before we have the chance to emit a proper destructor call. |
297 | bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() || |
298 | (RequiresDestruction && Dest.isIgnored()); |
299 | |
300 | Address RetAddr = Address::invalid(); |
301 | |
302 | EHScopeStack::stable_iterator LifetimeEndBlock; |
303 | llvm::Value *LifetimeSizePtr = nullptr; |
304 | llvm::IntrinsicInst *LifetimeStartInst = nullptr; |
305 | if (!UseTemp) { |
306 | RetAddr = Dest.getAddress(); |
307 | } else { |
308 | RetAddr = CGF.CreateMemTempWithoutCast(T: RetTy, Name: "tmp" ); |
309 | llvm::TypeSize Size = |
310 | CGF.CGM.getDataLayout().getTypeAllocSize(Ty: CGF.ConvertTypeForMem(T: RetTy)); |
311 | LifetimeSizePtr = CGF.EmitLifetimeStart(Size, Addr: RetAddr.getBasePointer()); |
312 | if (LifetimeSizePtr) { |
313 | LifetimeStartInst = |
314 | cast<llvm::IntrinsicInst>(Val: std::prev(x: Builder.GetInsertPoint())); |
315 | assert(LifetimeStartInst->getIntrinsicID() == |
316 | llvm::Intrinsic::lifetime_start && |
317 | "Last insertion wasn't a lifetime.start?" ); |
318 | |
319 | CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>( |
320 | kind: NormalEHLifetimeMarker, A: RetAddr, A: LifetimeSizePtr); |
321 | LifetimeEndBlock = CGF.EHStack.stable_begin(); |
322 | } |
323 | } |
324 | |
325 | RValue Src = |
326 | EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused, |
327 | Dest.isExternallyDestructed())); |
328 | |
329 | if (!UseTemp) |
330 | return; |
331 | |
332 | assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) != |
333 | Src.getAggregatePointer(E->getType(), CGF)); |
334 | EmitFinalDestCopy(type: E->getType(), src: Src); |
335 | |
336 | if (!RequiresDestruction && LifetimeStartInst) { |
337 | // If there's no dtor to run, the copy was the last use of our temporary. |
338 | // Since we're not guaranteed to be in an ExprWithCleanups, clean up |
339 | // eagerly. |
340 | CGF.DeactivateCleanupBlock(Cleanup: LifetimeEndBlock, DominatingIP: LifetimeStartInst); |
341 | CGF.EmitLifetimeEnd(Size: LifetimeSizePtr, Addr: RetAddr.getBasePointer()); |
342 | } |
343 | } |
344 | |
345 | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
346 | void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) { |
347 | assert(src.isAggregate() && "value must be aggregate value!" ); |
348 | LValue srcLV = CGF.MakeAddrLValue(Addr: src.getAggregateAddress(), T: type); |
349 | EmitFinalDestCopy(type, src: srcLV, SrcValueKind: CodeGenFunction::EVK_RValue); |
350 | } |
351 | |
352 | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
353 | void AggExprEmitter::EmitFinalDestCopy( |
354 | QualType type, const LValue &src, |
355 | CodeGenFunction::ExprValueKind SrcValueKind) { |
356 | // If Dest is ignored, then we're evaluating an aggregate expression |
357 | // in a context that doesn't care about the result. Note that loads |
358 | // from volatile l-values force the existence of a non-ignored |
359 | // destination. |
360 | if (Dest.isIgnored()) |
361 | return; |
362 | |
363 | // Copy non-trivial C structs here. |
364 | LValue DstLV = CGF.MakeAddrLValue( |
365 | Addr: Dest.getAddress(), T: Dest.isVolatile() ? type.withVolatile() : type); |
366 | |
367 | if (SrcValueKind == CodeGenFunction::EVK_RValue) { |
368 | if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { |
369 | if (Dest.isPotentiallyAliased()) |
370 | CGF.callCStructMoveAssignmentOperator(Dst: DstLV, Src: src); |
371 | else |
372 | CGF.callCStructMoveConstructor(Dst: DstLV, Src: src); |
373 | return; |
374 | } |
375 | } else { |
376 | if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
377 | if (Dest.isPotentiallyAliased()) |
378 | CGF.callCStructCopyAssignmentOperator(Dst: DstLV, Src: src); |
379 | else |
380 | CGF.callCStructCopyConstructor(Dst: DstLV, Src: src); |
381 | return; |
382 | } |
383 | } |
384 | |
385 | AggValueSlot srcAgg = AggValueSlot::forLValue( |
386 | LV: src, isDestructed: AggValueSlot::IsDestructed, needsGC: needsGC(T: type), isAliased: AggValueSlot::IsAliased, |
387 | mayOverlap: AggValueSlot::MayOverlap); |
388 | EmitCopy(type, dest: Dest, src: srcAgg); |
389 | } |
390 | |
391 | /// Perform a copy from the source into the destination. |
392 | /// |
393 | /// \param type - the type of the aggregate being copied; qualifiers are |
394 | /// ignored |
395 | void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest, |
396 | const AggValueSlot &src) { |
397 | if (dest.requiresGCollection()) { |
398 | CharUnits sz = dest.getPreferredSize(Ctx&: CGF.getContext(), Type: type); |
399 | llvm::Value *size = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: sz.getQuantity()); |
400 | CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, |
401 | DestPtr: dest.getAddress(), |
402 | SrcPtr: src.getAddress(), |
403 | Size: size); |
404 | return; |
405 | } |
406 | |
407 | // If the result of the assignment is used, copy the LHS there also. |
408 | // It's volatile if either side is. Use the minimum alignment of |
409 | // the two sides. |
410 | LValue DestLV = CGF.MakeAddrLValue(Addr: dest.getAddress(), T: type); |
411 | LValue SrcLV = CGF.MakeAddrLValue(Addr: src.getAddress(), T: type); |
412 | CGF.EmitAggregateCopy(Dest: DestLV, Src: SrcLV, EltTy: type, MayOverlap: dest.mayOverlap(), |
413 | isVolatile: dest.isVolatile() || src.isVolatile()); |
414 | } |
415 | |
416 | /// Emit the initializer for a std::initializer_list initialized with a |
417 | /// real initializer list. |
418 | void |
419 | AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { |
420 | // Emit an array containing the elements. The array is externally destructed |
421 | // if the std::initializer_list object is. |
422 | ASTContext &Ctx = CGF.getContext(); |
423 | LValue Array = CGF.EmitLValue(E: E->getSubExpr()); |
424 | assert(Array.isSimple() && "initializer_list array not a simple lvalue" ); |
425 | Address ArrayPtr = Array.getAddress(); |
426 | |
427 | const ConstantArrayType *ArrayType = |
428 | Ctx.getAsConstantArrayType(T: E->getSubExpr()->getType()); |
429 | assert(ArrayType && "std::initializer_list constructed from non-array" ); |
430 | |
431 | RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl(); |
432 | RecordDecl::field_iterator Field = Record->field_begin(); |
433 | assert(Field != Record->field_end() && |
434 | Ctx.hasSameType(Field->getType()->getPointeeType(), |
435 | ArrayType->getElementType()) && |
436 | "Expected std::initializer_list first field to be const E *" ); |
437 | |
438 | // Start pointer. |
439 | AggValueSlot Dest = EnsureSlot(T: E->getType()); |
440 | LValue DestLV = CGF.MakeAddrLValue(Addr: Dest.getAddress(), T: E->getType()); |
441 | LValue Start = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field: *Field); |
442 | llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF); |
443 | CGF.EmitStoreThroughLValue(Src: RValue::get(V: ArrayStart), Dst: Start); |
444 | ++Field; |
445 | assert(Field != Record->field_end() && |
446 | "Expected std::initializer_list to have two fields" ); |
447 | |
448 | llvm::Value *Size = Builder.getInt(AI: ArrayType->getSize()); |
449 | LValue EndOrLength = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field: *Field); |
450 | if (Ctx.hasSameType(T1: Field->getType(), T2: Ctx.getSizeType())) { |
451 | // Length. |
452 | CGF.EmitStoreThroughLValue(Src: RValue::get(V: Size), Dst: EndOrLength); |
453 | |
454 | } else { |
455 | // End pointer. |
456 | assert(Field->getType()->isPointerType() && |
457 | Ctx.hasSameType(Field->getType()->getPointeeType(), |
458 | ArrayType->getElementType()) && |
459 | "Expected std::initializer_list second field to be const E *" ); |
460 | llvm::Value *Zero = llvm::ConstantInt::get(Ty: CGF.PtrDiffTy, V: 0); |
461 | llvm::Value *IdxEnd[] = { Zero, Size }; |
462 | llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP( |
463 | Ty: ArrayPtr.getElementType(), Ptr: ArrayPtr.emitRawPointer(CGF), IdxList: IdxEnd, |
464 | Name: "arrayend" ); |
465 | CGF.EmitStoreThroughLValue(Src: RValue::get(V: ArrayEnd), Dst: EndOrLength); |
466 | } |
467 | |
468 | assert(++Field == Record->field_end() && |
469 | "Expected std::initializer_list to only have two fields" ); |
470 | } |
471 | |
472 | /// Determine if E is a trivial array filler, that is, one that is |
473 | /// equivalent to zero-initialization. |
474 | static bool isTrivialFiller(Expr *E) { |
475 | if (!E) |
476 | return true; |
477 | |
478 | if (isa<ImplicitValueInitExpr>(Val: E)) |
479 | return true; |
480 | |
481 | if (auto *ILE = dyn_cast<InitListExpr>(Val: E)) { |
482 | if (ILE->getNumInits()) |
483 | return false; |
484 | return isTrivialFiller(E: ILE->getArrayFiller()); |
485 | } |
486 | |
487 | if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(Val: E)) |
488 | return Cons->getConstructor()->isDefaultConstructor() && |
489 | Cons->getConstructor()->isTrivial(); |
490 | |
491 | // FIXME: Are there other cases where we can avoid emitting an initializer? |
492 | return false; |
493 | } |
494 | |
495 | static void EmitHLSLAggregateSplatCast(CodeGenFunction &CGF, Address DestVal, |
496 | QualType DestTy, llvm::Value *SrcVal, |
497 | QualType SrcTy, SourceLocation Loc) { |
498 | // Flatten our destination |
499 | SmallVector<QualType> DestTypes; // Flattened type |
500 | SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList; |
501 | // ^^ Flattened accesses to DestVal we want to store into |
502 | CGF.FlattenAccessAndType(Addr: DestVal, AddrTy: DestTy, AccessList&: StoreGEPList, FlatTypes&: DestTypes); |
503 | |
504 | assert(SrcTy->isScalarType() && "Invalid HLSL Aggregate splat cast." ); |
505 | for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; ++I) { |
506 | llvm::Value *Cast = |
507 | CGF.EmitScalarConversion(Src: SrcVal, SrcTy, DstTy: DestTypes[I], Loc); |
508 | |
509 | // store back |
510 | llvm::Value *Idx = StoreGEPList[I].second; |
511 | if (Idx) { |
512 | llvm::Value *V = |
513 | CGF.Builder.CreateLoad(Addr: StoreGEPList[I].first, Name: "load.for.insert" ); |
514 | Cast = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx); |
515 | } |
516 | CGF.Builder.CreateStore(Val: Cast, Addr: StoreGEPList[I].first); |
517 | } |
518 | } |
519 | |
520 | // emit a flat cast where the RHS is a scalar, including vector |
521 | static void EmitHLSLScalarFlatCast(CodeGenFunction &CGF, Address DestVal, |
522 | QualType DestTy, llvm::Value *SrcVal, |
523 | QualType SrcTy, SourceLocation Loc) { |
524 | // Flatten our destination |
525 | SmallVector<QualType, 16> DestTypes; // Flattened type |
526 | SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList; |
527 | // ^^ Flattened accesses to DestVal we want to store into |
528 | CGF.FlattenAccessAndType(Addr: DestVal, AddrTy: DestTy, AccessList&: StoreGEPList, FlatTypes&: DestTypes); |
529 | |
530 | assert(SrcTy->isVectorType() && "HLSL Flat cast doesn't handle splatting." ); |
531 | const VectorType *VT = SrcTy->getAs<VectorType>(); |
532 | SrcTy = VT->getElementType(); |
533 | assert(StoreGEPList.size() <= VT->getNumElements() && |
534 | "Cannot perform HLSL flat cast when vector source \ |
535 | object has less elements than flattened destination \ |
536 | object." ); |
537 | for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; I++) { |
538 | llvm::Value *Load = CGF.Builder.CreateExtractElement(Vec: SrcVal, Idx: I, Name: "vec.load" ); |
539 | llvm::Value *Cast = |
540 | CGF.EmitScalarConversion(Src: Load, SrcTy, DstTy: DestTypes[I], Loc); |
541 | |
542 | // store back |
543 | llvm::Value *Idx = StoreGEPList[I].second; |
544 | if (Idx) { |
545 | llvm::Value *V = |
546 | CGF.Builder.CreateLoad(Addr: StoreGEPList[I].first, Name: "load.for.insert" ); |
547 | Cast = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx); |
548 | } |
549 | CGF.Builder.CreateStore(Val: Cast, Addr: StoreGEPList[I].first); |
550 | } |
551 | } |
552 | |
553 | // emit a flat cast where the RHS is an aggregate |
554 | static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal, |
555 | QualType DestTy, Address SrcVal, |
556 | QualType SrcTy, SourceLocation Loc) { |
557 | // Flatten our destination |
558 | SmallVector<QualType, 16> DestTypes; // Flattened type |
559 | SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList; |
560 | // ^^ Flattened accesses to DestVal we want to store into |
561 | CGF.FlattenAccessAndType(Addr: DestVal, AddrTy: DestTy, AccessList&: StoreGEPList, FlatTypes&: DestTypes); |
562 | // Flatten our src |
563 | SmallVector<QualType, 16> SrcTypes; // Flattened type |
564 | SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList; |
565 | // ^^ Flattened accesses to SrcVal we want to load from |
566 | CGF.FlattenAccessAndType(Addr: SrcVal, AddrTy: SrcTy, AccessList&: LoadGEPList, FlatTypes&: SrcTypes); |
567 | |
568 | assert(StoreGEPList.size() <= LoadGEPList.size() && |
569 | "Cannot perform HLSL flat cast when flattened source object \ |
570 | has less elements than flattened destination object." ); |
571 | // apply casts to what we load from LoadGEPList |
572 | // and store result in Dest |
573 | for (unsigned I = 0, E = StoreGEPList.size(); I < E; I++) { |
574 | llvm::Value *Idx = LoadGEPList[I].second; |
575 | llvm::Value *Load = CGF.Builder.CreateLoad(Addr: LoadGEPList[I].first, Name: "load" ); |
576 | Load = |
577 | Idx ? CGF.Builder.CreateExtractElement(Vec: Load, Idx, Name: "vec.extract" ) : Load; |
578 | llvm::Value *Cast = |
579 | CGF.EmitScalarConversion(Src: Load, SrcTy: SrcTypes[I], DstTy: DestTypes[I], Loc); |
580 | |
581 | // store back |
582 | Idx = StoreGEPList[I].second; |
583 | if (Idx) { |
584 | llvm::Value *V = |
585 | CGF.Builder.CreateLoad(Addr: StoreGEPList[I].first, Name: "load.for.insert" ); |
586 | Cast = CGF.Builder.CreateInsertElement(Vec: V, NewElt: Cast, Idx); |
587 | } |
588 | CGF.Builder.CreateStore(Val: Cast, Addr: StoreGEPList[I].first); |
589 | } |
590 | } |
591 | |
592 | /// Emit initialization of an array from an initializer list. ExprToVisit must |
593 | /// be either an InitListEpxr a CXXParenInitListExpr. |
594 | void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, |
595 | QualType ArrayQTy, Expr *ExprToVisit, |
596 | ArrayRef<Expr *> Args, Expr *ArrayFiller) { |
597 | uint64_t NumInitElements = Args.size(); |
598 | |
599 | uint64_t NumArrayElements = AType->getNumElements(); |
600 | for (const auto *Init : Args) { |
601 | if (const auto *Embed = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) { |
602 | NumInitElements += Embed->getDataElementCount() - 1; |
603 | if (NumInitElements > NumArrayElements) { |
604 | NumInitElements = NumArrayElements; |
605 | break; |
606 | } |
607 | } |
608 | } |
609 | |
610 | assert(NumInitElements <= NumArrayElements); |
611 | |
612 | QualType elementType = |
613 | CGF.getContext().getAsArrayType(T: ArrayQTy)->getElementType(); |
614 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: elementType); |
615 | CharUnits elementAlign = |
616 | DestPtr.getAlignment().alignmentOfArrayElement(elementSize); |
617 | llvm::Type *llvmElementType = CGF.ConvertTypeForMem(T: elementType); |
618 | |
619 | // Consider initializing the array by copying from a global. For this to be |
620 | // more efficient than per-element initialization, the size of the elements |
621 | // with explicit initializers should be large enough. |
622 | if (NumInitElements * elementSize.getQuantity() > 16 && |
623 | elementType.isTriviallyCopyableType(Context: CGF.getContext())) { |
624 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
625 | ConstantEmitter Emitter(CGF); |
626 | QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType( |
627 | T: CGM.getContext().removeAddrSpaceQualType(T: ArrayQTy), |
628 | AddressSpace: CGM.GetGlobalConstantAddressSpace()); |
629 | LangAS AS = GVArrayQTy.getAddressSpace(); |
630 | if (llvm::Constant *C = |
631 | Emitter.tryEmitForInitializer(E: ExprToVisit, destAddrSpace: AS, destType: GVArrayQTy)) { |
632 | auto GV = new llvm::GlobalVariable( |
633 | CGM.getModule(), C->getType(), |
634 | /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C, |
635 | "constinit" , |
636 | /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal, |
637 | CGM.getContext().getTargetAddressSpace(AS)); |
638 | Emitter.finalize(global: GV); |
639 | CharUnits Align = CGM.getContext().getTypeAlignInChars(T: GVArrayQTy); |
640 | GV->setAlignment(Align.getAsAlign()); |
641 | Address GVAddr(GV, GV->getValueType(), Align); |
642 | EmitFinalDestCopy(type: ArrayQTy, src: CGF.MakeAddrLValue(Addr: GVAddr, T: GVArrayQTy)); |
643 | return; |
644 | } |
645 | } |
646 | |
647 | // Exception safety requires us to destroy all the |
648 | // already-constructed members if an initializer throws. |
649 | // For that, we'll need an EH cleanup. |
650 | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
651 | Address endOfInit = Address::invalid(); |
652 | CodeGenFunction::CleanupDeactivationScope deactivation(CGF); |
653 | |
654 | llvm::Value *begin = DestPtr.emitRawPointer(CGF); |
655 | if (dtorKind) { |
656 | CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF); |
657 | // In principle we could tell the cleanup where we are more |
658 | // directly, but the control flow can get so varied here that it |
659 | // would actually be quite complex. Therefore we go through an |
660 | // alloca. |
661 | llvm::Instruction *dominatingIP = |
662 | Builder.CreateFlagLoad(Addr: llvm::ConstantInt::getNullValue(Ty: CGF.Int8PtrTy)); |
663 | endOfInit = CGF.CreateTempAlloca(Ty: begin->getType(), align: CGF.getPointerAlign(), |
664 | Name: "arrayinit.endOfInit" ); |
665 | Builder.CreateStore(Val: begin, Addr: endOfInit); |
666 | CGF.pushIrregularPartialArrayCleanup(arrayBegin: begin, arrayEndPointer: endOfInit, elementType, |
667 | elementAlignment: elementAlign, |
668 | destroyer: CGF.getDestroyer(destructionKind: dtorKind)); |
669 | cast<EHCleanupScope>(Val&: *CGF.EHStack.find(sp: CGF.EHStack.stable_begin())) |
670 | .AddAuxAllocas(Allocas: allocaTracker.Take()); |
671 | |
672 | CGF.DeferredDeactivationCleanupStack.push_back( |
673 | Elt: {.Cleanup: CGF.EHStack.stable_begin(), .DominatingIP: dominatingIP}); |
674 | } |
675 | |
676 | llvm::Value *one = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 1); |
677 | |
678 | auto Emit = [&](Expr *Init, uint64_t ArrayIndex) { |
679 | llvm::Value *element = begin; |
680 | if (ArrayIndex > 0) { |
681 | element = Builder.CreateInBoundsGEP( |
682 | Ty: llvmElementType, Ptr: begin, |
683 | IdxList: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: ArrayIndex), Name: "arrayinit.element" ); |
684 | |
685 | // Tell the cleanup that it needs to destroy up to this |
686 | // element. TODO: some of these stores can be trivially |
687 | // observed to be unnecessary. |
688 | if (endOfInit.isValid()) |
689 | Builder.CreateStore(Val: element, Addr: endOfInit); |
690 | } |
691 | |
692 | LValue elementLV = CGF.MakeAddrLValue( |
693 | Addr: Address(element, llvmElementType, elementAlign), T: elementType); |
694 | EmitInitializationToLValue(E: Init, Address: elementLV); |
695 | return true; |
696 | }; |
697 | |
698 | unsigned ArrayIndex = 0; |
699 | // Emit the explicit initializers. |
700 | for (uint64_t i = 0; i != NumInitElements; ++i) { |
701 | if (ArrayIndex >= NumInitElements) |
702 | break; |
703 | if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Args[i]->IgnoreParenImpCasts())) { |
704 | EmbedS->doForEachDataElement(C&: Emit, StartingIndexInArray&: ArrayIndex); |
705 | } else { |
706 | Emit(Args[i], ArrayIndex); |
707 | ArrayIndex++; |
708 | } |
709 | } |
710 | |
711 | // Check whether there's a non-trivial array-fill expression. |
712 | bool hasTrivialFiller = isTrivialFiller(E: ArrayFiller); |
713 | |
714 | // Any remaining elements need to be zero-initialized, possibly |
715 | // using the filler expression. We can skip this if the we're |
716 | // emitting to zeroed memory. |
717 | if (NumInitElements != NumArrayElements && |
718 | !(Dest.isZeroed() && hasTrivialFiller && |
719 | CGF.getTypes().isZeroInitializable(T: elementType))) { |
720 | |
721 | // Use an actual loop. This is basically |
722 | // do { *array++ = filler; } while (array != end); |
723 | |
724 | // Advance to the start of the rest of the array. |
725 | llvm::Value *element = begin; |
726 | if (NumInitElements) { |
727 | element = Builder.CreateInBoundsGEP( |
728 | Ty: llvmElementType, Ptr: element, |
729 | IdxList: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: NumInitElements), |
730 | Name: "arrayinit.start" ); |
731 | if (endOfInit.isValid()) Builder.CreateStore(Val: element, Addr: endOfInit); |
732 | } |
733 | |
734 | // Compute the end of the array. |
735 | llvm::Value *end = Builder.CreateInBoundsGEP( |
736 | Ty: llvmElementType, Ptr: begin, |
737 | IdxList: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: NumArrayElements), Name: "arrayinit.end" ); |
738 | |
739 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
740 | llvm::BasicBlock *bodyBB = CGF.createBasicBlock(name: "arrayinit.body" ); |
741 | |
742 | // Jump into the body. |
743 | CGF.EmitBlock(BB: bodyBB); |
744 | llvm::PHINode *currentElement = |
745 | Builder.CreatePHI(Ty: element->getType(), NumReservedValues: 2, Name: "arrayinit.cur" ); |
746 | currentElement->addIncoming(V: element, BB: entryBB); |
747 | |
748 | // Emit the actual filler expression. |
749 | { |
750 | // C++1z [class.temporary]p5: |
751 | // when a default constructor is called to initialize an element of |
752 | // an array with no corresponding initializer [...] the destruction of |
753 | // every temporary created in a default argument is sequenced before |
754 | // the construction of the next array element, if any |
755 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
756 | LValue elementLV = CGF.MakeAddrLValue( |
757 | Addr: Address(currentElement, llvmElementType, elementAlign), T: elementType); |
758 | if (ArrayFiller) |
759 | EmitInitializationToLValue(E: ArrayFiller, Address: elementLV); |
760 | else |
761 | EmitNullInitializationToLValue(Address: elementLV); |
762 | } |
763 | |
764 | // Move on to the next element. |
765 | llvm::Value *nextElement = Builder.CreateInBoundsGEP( |
766 | Ty: llvmElementType, Ptr: currentElement, IdxList: one, Name: "arrayinit.next" ); |
767 | |
768 | // Tell the EH cleanup that we finished with the last element. |
769 | if (endOfInit.isValid()) Builder.CreateStore(Val: nextElement, Addr: endOfInit); |
770 | |
771 | // Leave the loop if we're done. |
772 | llvm::Value *done = Builder.CreateICmpEQ(LHS: nextElement, RHS: end, |
773 | Name: "arrayinit.done" ); |
774 | llvm::BasicBlock *endBB = CGF.createBasicBlock(name: "arrayinit.end" ); |
775 | Builder.CreateCondBr(Cond: done, True: endBB, False: bodyBB); |
776 | currentElement->addIncoming(V: nextElement, BB: Builder.GetInsertBlock()); |
777 | |
778 | CGF.EmitBlock(BB: endBB); |
779 | } |
780 | } |
781 | |
782 | //===----------------------------------------------------------------------===// |
783 | // Visitor Methods |
784 | //===----------------------------------------------------------------------===// |
785 | |
786 | void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){ |
787 | Visit(E: E->getSubExpr()); |
788 | } |
789 | |
790 | void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) { |
791 | // If this is a unique OVE, just visit its source expression. |
792 | if (e->isUnique()) |
793 | Visit(E: e->getSourceExpr()); |
794 | else |
795 | EmitFinalDestCopy(type: e->getType(), src: CGF.getOrCreateOpaqueLValueMapping(e)); |
796 | } |
797 | |
798 | void |
799 | AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { |
800 | if (Dest.isPotentiallyAliased() && |
801 | E->getType().isPODType(Context: CGF.getContext())) { |
802 | // For a POD type, just emit a load of the lvalue + a copy, because our |
803 | // compound literal might alias the destination. |
804 | EmitAggLoadOfLValue(E); |
805 | return; |
806 | } |
807 | |
808 | AggValueSlot Slot = EnsureSlot(T: E->getType()); |
809 | |
810 | // Block-scope compound literals are destroyed at the end of the enclosing |
811 | // scope in C. |
812 | bool Destruct = |
813 | !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed(); |
814 | if (Destruct) |
815 | Slot.setExternallyDestructed(); |
816 | |
817 | CGF.EmitAggExpr(E: E->getInitializer(), AS: Slot); |
818 | |
819 | if (Destruct) |
820 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
821 | CGF.pushLifetimeExtendedDestroy( |
822 | kind: CGF.getCleanupKind(kind: DtorKind), addr: Slot.getAddress(), type: E->getType(), |
823 | destroyer: CGF.getDestroyer(destructionKind: DtorKind), useEHCleanupForArray: DtorKind & EHCleanup); |
824 | } |
825 | |
826 | /// Attempt to look through various unimportant expressions to find a |
827 | /// cast of the given kind. |
828 | static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) { |
829 | op = op->IgnoreParenNoopCasts(Ctx: ctx); |
830 | if (auto castE = dyn_cast<CastExpr>(Val: op)) { |
831 | if (castE->getCastKind() == kind) |
832 | return castE->getSubExpr(); |
833 | } |
834 | return nullptr; |
835 | } |
836 | |
837 | void AggExprEmitter::VisitCastExpr(CastExpr *E) { |
838 | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Val: E)) |
839 | CGF.CGM.EmitExplicitCastExprType(E: ECE, CGF: &CGF); |
840 | switch (E->getCastKind()) { |
841 | case CK_Dynamic: { |
842 | // FIXME: Can this actually happen? We have no test coverage for it. |
843 | assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?" ); |
844 | LValue LV = CGF.EmitCheckedLValue(E: E->getSubExpr(), |
845 | TCK: CodeGenFunction::TCK_Load); |
846 | // FIXME: Do we also need to handle property references here? |
847 | if (LV.isSimple()) |
848 | CGF.EmitDynamicCast(V: LV.getAddress(), DCE: cast<CXXDynamicCastExpr>(Val: E)); |
849 | else |
850 | CGF.CGM.ErrorUnsupported(S: E, Type: "non-simple lvalue dynamic_cast" ); |
851 | |
852 | if (!Dest.isIgnored()) |
853 | CGF.CGM.ErrorUnsupported(S: E, Type: "lvalue dynamic_cast with a destination" ); |
854 | break; |
855 | } |
856 | |
857 | case CK_ToUnion: { |
858 | // Evaluate even if the destination is ignored. |
859 | if (Dest.isIgnored()) { |
860 | CGF.EmitAnyExpr(E: E->getSubExpr(), aggSlot: AggValueSlot::ignored(), |
861 | /*ignoreResult=*/true); |
862 | break; |
863 | } |
864 | |
865 | // GCC union extension |
866 | QualType Ty = E->getSubExpr()->getType(); |
867 | Address CastPtr = Dest.getAddress().withElementType(ElemTy: CGF.ConvertType(T: Ty)); |
868 | EmitInitializationToLValue(E: E->getSubExpr(), |
869 | Address: CGF.MakeAddrLValue(Addr: CastPtr, T: Ty)); |
870 | break; |
871 | } |
872 | |
873 | case CK_LValueToRValueBitCast: { |
874 | if (Dest.isIgnored()) { |
875 | CGF.EmitAnyExpr(E: E->getSubExpr(), aggSlot: AggValueSlot::ignored(), |
876 | /*ignoreResult=*/true); |
877 | break; |
878 | } |
879 | |
880 | LValue SourceLV = CGF.EmitLValue(E: E->getSubExpr()); |
881 | Address SourceAddress = SourceLV.getAddress().withElementType(ElemTy: CGF.Int8Ty); |
882 | Address DestAddress = Dest.getAddress().withElementType(ElemTy: CGF.Int8Ty); |
883 | llvm::Value *SizeVal = llvm::ConstantInt::get( |
884 | Ty: CGF.SizeTy, |
885 | V: CGF.getContext().getTypeSizeInChars(T: E->getType()).getQuantity()); |
886 | Builder.CreateMemCpy(Dest: DestAddress, Src: SourceAddress, Size: SizeVal); |
887 | break; |
888 | } |
889 | |
890 | case CK_DerivedToBase: |
891 | case CK_BaseToDerived: |
892 | case CK_UncheckedDerivedToBase: { |
893 | llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: " |
894 | "should have been unpacked before we got here" ); |
895 | } |
896 | |
897 | case CK_NonAtomicToAtomic: |
898 | case CK_AtomicToNonAtomic: { |
899 | bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic); |
900 | |
901 | // Determine the atomic and value types. |
902 | QualType atomicType = E->getSubExpr()->getType(); |
903 | QualType valueType = E->getType(); |
904 | if (isToAtomic) std::swap(a&: atomicType, b&: valueType); |
905 | |
906 | assert(atomicType->isAtomicType()); |
907 | assert(CGF.getContext().hasSameUnqualifiedType(valueType, |
908 | atomicType->castAs<AtomicType>()->getValueType())); |
909 | |
910 | // Just recurse normally if we're ignoring the result or the |
911 | // atomic type doesn't change representation. |
912 | if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(type: atomicType)) { |
913 | return Visit(E: E->getSubExpr()); |
914 | } |
915 | |
916 | CastKind peepholeTarget = |
917 | (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic); |
918 | |
919 | // These two cases are reverses of each other; try to peephole them. |
920 | if (Expr *op = |
921 | findPeephole(op: E->getSubExpr(), kind: peepholeTarget, ctx: CGF.getContext())) { |
922 | assert(CGF.getContext().hasSameUnqualifiedType(op->getType(), |
923 | E->getType()) && |
924 | "peephole significantly changed types?" ); |
925 | return Visit(E: op); |
926 | } |
927 | |
928 | // If we're converting an r-value of non-atomic type to an r-value |
929 | // of atomic type, just emit directly into the relevant sub-object. |
930 | if (isToAtomic) { |
931 | AggValueSlot valueDest = Dest; |
932 | if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(type: atomicType)) { |
933 | // Zero-initialize. (Strictly speaking, we only need to initialize |
934 | // the padding at the end, but this is simpler.) |
935 | if (!Dest.isZeroed()) |
936 | CGF.EmitNullInitialization(DestPtr: Dest.getAddress(), Ty: atomicType); |
937 | |
938 | // Build a GEP to refer to the subobject. |
939 | Address valueAddr = |
940 | CGF.Builder.CreateStructGEP(Addr: valueDest.getAddress(), Index: 0); |
941 | valueDest = AggValueSlot::forAddr(addr: valueAddr, |
942 | quals: valueDest.getQualifiers(), |
943 | isDestructed: valueDest.isExternallyDestructed(), |
944 | needsGC: valueDest.requiresGCollection(), |
945 | isAliased: valueDest.isPotentiallyAliased(), |
946 | mayOverlap: AggValueSlot::DoesNotOverlap, |
947 | isZeroed: AggValueSlot::IsZeroed); |
948 | } |
949 | |
950 | CGF.EmitAggExpr(E: E->getSubExpr(), AS: valueDest); |
951 | return; |
952 | } |
953 | |
954 | // Otherwise, we're converting an atomic type to a non-atomic type. |
955 | // Make an atomic temporary, emit into that, and then copy the value out. |
956 | AggValueSlot atomicSlot = |
957 | CGF.CreateAggTemp(T: atomicType, Name: "atomic-to-nonatomic.temp" ); |
958 | CGF.EmitAggExpr(E: E->getSubExpr(), AS: atomicSlot); |
959 | |
960 | Address valueAddr = Builder.CreateStructGEP(Addr: atomicSlot.getAddress(), Index: 0); |
961 | RValue rvalue = RValue::getAggregate(addr: valueAddr, isVolatile: atomicSlot.isVolatile()); |
962 | return EmitFinalDestCopy(type: valueType, src: rvalue); |
963 | } |
964 | case CK_AddressSpaceConversion: |
965 | return Visit(E: E->getSubExpr()); |
966 | |
967 | case CK_LValueToRValue: |
968 | // If we're loading from a volatile type, force the destination |
969 | // into existence. |
970 | if (E->getSubExpr()->getType().isVolatileQualified()) { |
971 | bool Destruct = |
972 | !Dest.isExternallyDestructed() && |
973 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; |
974 | if (Destruct) |
975 | Dest.setExternallyDestructed(); |
976 | EnsureDest(T: E->getType()); |
977 | Visit(E: E->getSubExpr()); |
978 | |
979 | if (Destruct) |
980 | CGF.pushDestroy(dtorKind: QualType::DK_nontrivial_c_struct, addr: Dest.getAddress(), |
981 | type: E->getType()); |
982 | |
983 | return; |
984 | } |
985 | |
986 | [[fallthrough]]; |
987 | |
988 | case CK_HLSLArrayRValue: |
989 | Visit(E: E->getSubExpr()); |
990 | break; |
991 | case CK_HLSLAggregateSplatCast: { |
992 | Expr *Src = E->getSubExpr(); |
993 | QualType SrcTy = Src->getType(); |
994 | RValue RV = CGF.EmitAnyExpr(E: Src); |
995 | QualType DestTy = E->getType(); |
996 | Address DestVal = Dest.getAddress(); |
997 | SourceLocation Loc = E->getExprLoc(); |
998 | |
999 | assert(RV.isScalar() && "RHS of HLSL splat cast must be a scalar." ); |
1000 | llvm::Value *SrcVal = RV.getScalarVal(); |
1001 | EmitHLSLAggregateSplatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); |
1002 | break; |
1003 | } |
1004 | case CK_HLSLElementwiseCast: { |
1005 | Expr *Src = E->getSubExpr(); |
1006 | QualType SrcTy = Src->getType(); |
1007 | RValue RV = CGF.EmitAnyExpr(E: Src); |
1008 | QualType DestTy = E->getType(); |
1009 | Address DestVal = Dest.getAddress(); |
1010 | SourceLocation Loc = E->getExprLoc(); |
1011 | |
1012 | if (RV.isScalar()) { |
1013 | llvm::Value *SrcVal = RV.getScalarVal(); |
1014 | EmitHLSLScalarFlatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); |
1015 | } else { |
1016 | assert(RV.isAggregate() && |
1017 | "Can't perform HLSL Aggregate cast on a complex type." ); |
1018 | Address SrcVal = RV.getAggregateAddress(); |
1019 | EmitHLSLElementwiseCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc); |
1020 | } |
1021 | break; |
1022 | } |
1023 | case CK_NoOp: |
1024 | case CK_UserDefinedConversion: |
1025 | case CK_ConstructorConversion: |
1026 | assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(), |
1027 | E->getType()) && |
1028 | "Implicit cast types must be compatible" ); |
1029 | Visit(E: E->getSubExpr()); |
1030 | break; |
1031 | |
1032 | case CK_LValueBitCast: |
1033 | llvm_unreachable("should not be emitting lvalue bitcast as rvalue" ); |
1034 | |
1035 | case CK_Dependent: |
1036 | case CK_BitCast: |
1037 | case CK_ArrayToPointerDecay: |
1038 | case CK_FunctionToPointerDecay: |
1039 | case CK_NullToPointer: |
1040 | case CK_NullToMemberPointer: |
1041 | case CK_BaseToDerivedMemberPointer: |
1042 | case CK_DerivedToBaseMemberPointer: |
1043 | case CK_MemberPointerToBoolean: |
1044 | case CK_ReinterpretMemberPointer: |
1045 | case CK_IntegralToPointer: |
1046 | case CK_PointerToIntegral: |
1047 | case CK_PointerToBoolean: |
1048 | case CK_ToVoid: |
1049 | case CK_VectorSplat: |
1050 | case CK_IntegralCast: |
1051 | case CK_BooleanToSignedIntegral: |
1052 | case CK_IntegralToBoolean: |
1053 | case CK_IntegralToFloating: |
1054 | case CK_FloatingToIntegral: |
1055 | case CK_FloatingToBoolean: |
1056 | case CK_FloatingCast: |
1057 | case CK_CPointerToObjCPointerCast: |
1058 | case CK_BlockPointerToObjCPointerCast: |
1059 | case CK_AnyPointerToBlockPointerCast: |
1060 | case CK_ObjCObjectLValueCast: |
1061 | case CK_FloatingRealToComplex: |
1062 | case CK_FloatingComplexToReal: |
1063 | case CK_FloatingComplexToBoolean: |
1064 | case CK_FloatingComplexCast: |
1065 | case CK_FloatingComplexToIntegralComplex: |
1066 | case CK_IntegralRealToComplex: |
1067 | case CK_IntegralComplexToReal: |
1068 | case CK_IntegralComplexToBoolean: |
1069 | case CK_IntegralComplexCast: |
1070 | case CK_IntegralComplexToFloatingComplex: |
1071 | case CK_ARCProduceObject: |
1072 | case CK_ARCConsumeObject: |
1073 | case CK_ARCReclaimReturnedObject: |
1074 | case CK_ARCExtendBlockObject: |
1075 | case CK_CopyAndAutoreleaseBlockObject: |
1076 | case CK_BuiltinFnToFnPtr: |
1077 | case CK_ZeroToOCLOpaqueType: |
1078 | case CK_MatrixCast: |
1079 | case CK_HLSLVectorTruncation: |
1080 | |
1081 | case CK_IntToOCLSampler: |
1082 | case CK_FloatingToFixedPoint: |
1083 | case CK_FixedPointToFloating: |
1084 | case CK_FixedPointCast: |
1085 | case CK_FixedPointToBoolean: |
1086 | case CK_FixedPointToIntegral: |
1087 | case CK_IntegralToFixedPoint: |
1088 | llvm_unreachable("cast kind invalid for aggregate types" ); |
1089 | } |
1090 | } |
1091 | |
1092 | void AggExprEmitter::VisitCallExpr(const CallExpr *E) { |
1093 | if (E->getCallReturnType(Ctx: CGF.getContext())->isReferenceType()) { |
1094 | EmitAggLoadOfLValue(E); |
1095 | return; |
1096 | } |
1097 | |
1098 | withReturnValueSlot(E, EmitCall: [&](ReturnValueSlot Slot) { |
1099 | return CGF.EmitCallExpr(E, ReturnValue: Slot); |
1100 | }); |
1101 | } |
1102 | |
1103 | void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) { |
1104 | withReturnValueSlot(E, EmitCall: [&](ReturnValueSlot Slot) { |
1105 | return CGF.EmitObjCMessageExpr(E, Return: Slot); |
1106 | }); |
1107 | } |
1108 | |
1109 | void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { |
1110 | CGF.EmitIgnoredExpr(E: E->getLHS()); |
1111 | Visit(E: E->getRHS()); |
1112 | } |
1113 | |
1114 | void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) { |
1115 | CodeGenFunction::StmtExprEvaluation eval(CGF); |
1116 | CGF.EmitCompoundStmt(S: *E->getSubStmt(), GetLast: true, AVS: Dest); |
1117 | } |
1118 | |
1119 | enum CompareKind { |
1120 | CK_Less, |
1121 | CK_Greater, |
1122 | CK_Equal, |
1123 | }; |
1124 | |
1125 | static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, |
1126 | const BinaryOperator *E, llvm::Value *LHS, |
1127 | llvm::Value *RHS, CompareKind Kind, |
1128 | const char *NameSuffix = "" ) { |
1129 | QualType ArgTy = E->getLHS()->getType(); |
1130 | if (const ComplexType *CT = ArgTy->getAs<ComplexType>()) |
1131 | ArgTy = CT->getElementType(); |
1132 | |
1133 | if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) { |
1134 | assert(Kind == CK_Equal && |
1135 | "member pointers may only be compared for equality" ); |
1136 | return CGF.CGM.getCXXABI().EmitMemberPointerComparison( |
1137 | CGF, L: LHS, R: RHS, MPT, /*IsInequality*/ Inequality: false); |
1138 | } |
1139 | |
1140 | // Compute the comparison instructions for the specified comparison kind. |
1141 | struct CmpInstInfo { |
1142 | const char *Name; |
1143 | llvm::CmpInst::Predicate FCmp; |
1144 | llvm::CmpInst::Predicate SCmp; |
1145 | llvm::CmpInst::Predicate UCmp; |
1146 | }; |
1147 | CmpInstInfo InstInfo = [&]() -> CmpInstInfo { |
1148 | using FI = llvm::FCmpInst; |
1149 | using II = llvm::ICmpInst; |
1150 | switch (Kind) { |
1151 | case CK_Less: |
1152 | return {.Name: "cmp.lt" , .FCmp: FI::FCMP_OLT, .SCmp: II::ICMP_SLT, .UCmp: II::ICMP_ULT}; |
1153 | case CK_Greater: |
1154 | return {.Name: "cmp.gt" , .FCmp: FI::FCMP_OGT, .SCmp: II::ICMP_SGT, .UCmp: II::ICMP_UGT}; |
1155 | case CK_Equal: |
1156 | return {.Name: "cmp.eq" , .FCmp: FI::FCMP_OEQ, .SCmp: II::ICMP_EQ, .UCmp: II::ICMP_EQ}; |
1157 | } |
1158 | llvm_unreachable("Unrecognised CompareKind enum" ); |
1159 | }(); |
1160 | |
1161 | if (ArgTy->hasFloatingRepresentation()) |
1162 | return Builder.CreateFCmp(P: InstInfo.FCmp, LHS, RHS, |
1163 | Name: llvm::Twine(InstInfo.Name) + NameSuffix); |
1164 | if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) { |
1165 | auto Inst = |
1166 | ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp; |
1167 | return Builder.CreateICmp(P: Inst, LHS, RHS, |
1168 | Name: llvm::Twine(InstInfo.Name) + NameSuffix); |
1169 | } |
1170 | |
1171 | llvm_unreachable("unsupported aggregate binary expression should have " |
1172 | "already been handled" ); |
1173 | } |
1174 | |
1175 | void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { |
1176 | using llvm::BasicBlock; |
1177 | using llvm::PHINode; |
1178 | using llvm::Value; |
1179 | assert(CGF.getContext().hasSameType(E->getLHS()->getType(), |
1180 | E->getRHS()->getType())); |
1181 | const ComparisonCategoryInfo &CmpInfo = |
1182 | CGF.getContext().CompCategories.getInfoForType(Ty: E->getType()); |
1183 | assert(CmpInfo.Record->isTriviallyCopyable() && |
1184 | "cannot copy non-trivially copyable aggregate" ); |
1185 | |
1186 | QualType ArgTy = E->getLHS()->getType(); |
1187 | |
1188 | if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() && |
1189 | !ArgTy->isNullPtrType() && !ArgTy->isPointerType() && |
1190 | !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) { |
1191 | return CGF.ErrorUnsupported(S: E, Type: "aggregate three-way comparison" ); |
1192 | } |
1193 | bool IsComplex = ArgTy->isAnyComplexType(); |
1194 | |
1195 | // Evaluate the operands to the expression and extract their values. |
1196 | auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> { |
1197 | RValue RV = CGF.EmitAnyExpr(E); |
1198 | if (RV.isScalar()) |
1199 | return {RV.getScalarVal(), nullptr}; |
1200 | if (RV.isAggregate()) |
1201 | return {RV.getAggregatePointer(PointeeType: E->getType(), CGF), nullptr}; |
1202 | assert(RV.isComplex()); |
1203 | return RV.getComplexVal(); |
1204 | }; |
1205 | auto LHSValues = EmitOperand(E->getLHS()), |
1206 | RHSValues = EmitOperand(E->getRHS()); |
1207 | |
1208 | auto EmitCmp = [&](CompareKind K) { |
1209 | Value *Cmp = EmitCompare(Builder, CGF, E, LHS: LHSValues.first, RHS: RHSValues.first, |
1210 | Kind: K, NameSuffix: IsComplex ? ".r" : "" ); |
1211 | if (!IsComplex) |
1212 | return Cmp; |
1213 | assert(K == CompareKind::CK_Equal); |
1214 | Value *CmpImag = EmitCompare(Builder, CGF, E, LHS: LHSValues.second, |
1215 | RHS: RHSValues.second, Kind: K, NameSuffix: ".i" ); |
1216 | return Builder.CreateAnd(LHS: Cmp, RHS: CmpImag, Name: "and.eq" ); |
1217 | }; |
1218 | auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) { |
1219 | return Builder.getInt(AI: VInfo->getIntValue()); |
1220 | }; |
1221 | |
1222 | Value *Select; |
1223 | if (ArgTy->isNullPtrType()) { |
1224 | Select = EmitCmpRes(CmpInfo.getEqualOrEquiv()); |
1225 | } else if (!CmpInfo.isPartial()) { |
1226 | Value *SelectOne = |
1227 | Builder.CreateSelect(C: EmitCmp(CK_Less), True: EmitCmpRes(CmpInfo.getLess()), |
1228 | False: EmitCmpRes(CmpInfo.getGreater()), Name: "sel.lt" ); |
1229 | Select = Builder.CreateSelect(C: EmitCmp(CK_Equal), |
1230 | True: EmitCmpRes(CmpInfo.getEqualOrEquiv()), |
1231 | False: SelectOne, Name: "sel.eq" ); |
1232 | } else { |
1233 | Value *SelectEq = Builder.CreateSelect( |
1234 | C: EmitCmp(CK_Equal), True: EmitCmpRes(CmpInfo.getEqualOrEquiv()), |
1235 | False: EmitCmpRes(CmpInfo.getUnordered()), Name: "sel.eq" ); |
1236 | Value *SelectGT = Builder.CreateSelect(C: EmitCmp(CK_Greater), |
1237 | True: EmitCmpRes(CmpInfo.getGreater()), |
1238 | False: SelectEq, Name: "sel.gt" ); |
1239 | Select = Builder.CreateSelect( |
1240 | C: EmitCmp(CK_Less), True: EmitCmpRes(CmpInfo.getLess()), False: SelectGT, Name: "sel.lt" ); |
1241 | } |
1242 | // Create the return value in the destination slot. |
1243 | EnsureDest(T: E->getType()); |
1244 | LValue DestLV = CGF.MakeAddrLValue(Addr: Dest.getAddress(), T: E->getType()); |
1245 | |
1246 | // Emit the address of the first (and only) field in the comparison category |
1247 | // type, and initialize it from the constant integer value selected above. |
1248 | LValue FieldLV = CGF.EmitLValueForFieldInitialization( |
1249 | Base: DestLV, Field: *CmpInfo.Record->field_begin()); |
1250 | CGF.EmitStoreThroughLValue(Src: RValue::get(V: Select), Dst: FieldLV, /*IsInit*/ isInit: true); |
1251 | |
1252 | // All done! The result is in the Dest slot. |
1253 | } |
1254 | |
1255 | void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) { |
1256 | if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) |
1257 | VisitPointerToDataMemberBinaryOperator(BO: E); |
1258 | else |
1259 | CGF.ErrorUnsupported(S: E, Type: "aggregate binary expression" ); |
1260 | } |
1261 | |
1262 | void AggExprEmitter::VisitPointerToDataMemberBinaryOperator( |
1263 | const BinaryOperator *E) { |
1264 | LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E); |
1265 | EmitFinalDestCopy(type: E->getType(), src: LV); |
1266 | } |
1267 | |
1268 | /// Is the value of the given expression possibly a reference to or |
1269 | /// into a __block variable? |
1270 | static bool isBlockVarRef(const Expr *E) { |
1271 | // Make sure we look through parens. |
1272 | E = E->IgnoreParens(); |
1273 | |
1274 | // Check for a direct reference to a __block variable. |
1275 | if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E)) { |
1276 | const VarDecl *var = dyn_cast<VarDecl>(Val: DRE->getDecl()); |
1277 | return (var && var->hasAttr<BlocksAttr>()); |
1278 | } |
1279 | |
1280 | // More complicated stuff. |
1281 | |
1282 | // Binary operators. |
1283 | if (const BinaryOperator *op = dyn_cast<BinaryOperator>(Val: E)) { |
// For an assignment or pointer-to-member operation, we only care
// about the LHS.
1286 | if (op->isAssignmentOp() || op->isPtrMemOp()) |
1287 | return isBlockVarRef(E: op->getLHS()); |
1288 | |
// For a comma, we only care about the RHS.
1290 | if (op->getOpcode() == BO_Comma) |
1291 | return isBlockVarRef(E: op->getRHS()); |
1292 | |
1293 | // FIXME: pointer arithmetic? |
1294 | return false; |
1295 | |
1296 | // Check both sides of a conditional operator. |
1297 | } else if (const AbstractConditionalOperator *op |
1298 | = dyn_cast<AbstractConditionalOperator>(Val: E)) { |
1299 | return isBlockVarRef(E: op->getTrueExpr()) |
1300 | || isBlockVarRef(E: op->getFalseExpr()); |
1301 | |
1302 | // OVEs are required to support BinaryConditionalOperators. |
1303 | } else if (const OpaqueValueExpr *op |
1304 | = dyn_cast<OpaqueValueExpr>(Val: E)) { |
1305 | if (const Expr *src = op->getSourceExpr()) |
1306 | return isBlockVarRef(E: src); |
1307 | |
1308 | // Casts are necessary to get things like (*(int*)&var) = foo(). |
1309 | // We don't really care about the kind of cast here, except |
1310 | // we don't want to look through l2r casts, because it's okay |
1311 | // to get the *value* in a __block variable. |
1312 | } else if (const CastExpr *cast = dyn_cast<CastExpr>(Val: E)) { |
1313 | if (cast->getCastKind() == CK_LValueToRValue) |
1314 | return false; |
1315 | return isBlockVarRef(E: cast->getSubExpr()); |
1316 | |
1317 | // Handle unary operators. Again, just aggressively look through |
1318 | // it, ignoring the operation. |
1319 | } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(Val: E)) { |
1320 | return isBlockVarRef(E: uop->getSubExpr()); |
1321 | |
1322 | // Look into the base of a field access. |
1323 | } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(Val: E)) { |
1324 | return isBlockVarRef(E: mem->getBase()); |
1325 | |
1326 | // Look into the base of a subscript. |
1327 | } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(Val: E)) { |
1328 | return isBlockVarRef(E: sub->getBase()); |
1329 | } |
1330 | |
1331 | return false; |
1332 | } |
1333 | |
1334 | void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { |
1335 | ApplyAtomGroup Grp(CGF.getDebugInfo()); |
1336 | // For an assignment to work, the value on the right has |
1337 | // to be compatible with the value on the left. |
1338 | assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), |
1339 | E->getRHS()->getType()) |
1340 | && "Invalid assignment" ); |
1341 | |
1342 | // If the LHS might be a __block variable, and the RHS can |
1343 | // potentially cause a block copy, we need to evaluate the RHS first |
// so that the assignment goes to the right place.
1345 | // This is pretty semantically fragile. |
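// Illustrative example (not from the original source):
//
//   __block Agg a;
//   a = makeAgg(^{ use(a); });  // the callee may copy the block
//
// Copying the block moves `a` to the heap, so an LHS address computed
// before the call would be stale; evaluating the RHS first avoids that.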
1346 | if (isBlockVarRef(E: E->getLHS()) && |
1347 | E->getRHS()->HasSideEffects(Ctx: CGF.getContext())) { |
1348 | // Ensure that we have a destination, and evaluate the RHS into that. |
1349 | EnsureDest(T: E->getRHS()->getType()); |
1350 | Visit(E: E->getRHS()); |
1351 | |
1352 | // Now emit the LHS and copy into it. |
1353 | LValue LHS = CGF.EmitCheckedLValue(E: E->getLHS(), TCK: CodeGenFunction::TCK_Store); |
1354 | |
1355 | // That copy is an atomic copy if the LHS is atomic. |
1356 | if (LHS.getType()->isAtomicType() || |
1357 | CGF.LValueIsSuitableForInlineAtomic(Src: LHS)) { |
1358 | CGF.EmitAtomicStore(rvalue: Dest.asRValue(), lvalue: LHS, /*isInit*/ false); |
1359 | return; |
1360 | } |
1361 | |
1362 | EmitCopy(type: E->getLHS()->getType(), |
1363 | dest: AggValueSlot::forLValue(LV: LHS, isDestructed: AggValueSlot::IsDestructed, |
1364 | needsGC: needsGC(T: E->getLHS()->getType()), |
1365 | isAliased: AggValueSlot::IsAliased, |
1366 | mayOverlap: AggValueSlot::MayOverlap), |
1367 | src: Dest); |
1368 | return; |
1369 | } |
1370 | |
1371 | LValue LHS = CGF.EmitLValue(E: E->getLHS()); |
1372 | |
1373 | // If we have an atomic type, evaluate into the destination and then |
1374 | // do an atomic copy. |
1375 | if (LHS.getType()->isAtomicType() || |
1376 | CGF.LValueIsSuitableForInlineAtomic(Src: LHS)) { |
1377 | EnsureDest(T: E->getRHS()->getType()); |
1378 | Visit(E: E->getRHS()); |
1379 | CGF.EmitAtomicStore(rvalue: Dest.asRValue(), lvalue: LHS, /*isInit*/ false); |
1380 | return; |
1381 | } |
1382 | |
1383 | // Codegen the RHS so that it stores directly into the LHS. |
1384 | AggValueSlot LHSSlot = AggValueSlot::forLValue( |
1385 | LV: LHS, isDestructed: AggValueSlot::IsDestructed, needsGC: needsGC(T: E->getLHS()->getType()), |
1386 | isAliased: AggValueSlot::IsAliased, mayOverlap: AggValueSlot::MayOverlap); |
// A non-volatile aggregate destination might have a volatile member.
1388 | if (!LHSSlot.isVolatile() && |
1389 | CGF.hasVolatileMember(T: E->getLHS()->getType())) |
1390 | LHSSlot.setVolatile(true); |
1391 | |
1392 | CGF.EmitAggExpr(E: E->getRHS(), AS: LHSSlot); |
1393 | |
1394 | // Copy into the destination if the assignment isn't ignored. |
1395 | EmitFinalDestCopy(type: E->getType(), src: LHS); |
1396 | |
1397 | if (!Dest.isIgnored() && !Dest.isExternallyDestructed() && |
1398 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct) |
1399 | CGF.pushDestroy(dtorKind: QualType::DK_nontrivial_c_struct, addr: Dest.getAddress(), |
1400 | type: E->getType()); |
1401 | } |
1402 | |
1403 | void AggExprEmitter:: |
1404 | VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { |
1405 | llvm::BasicBlock *LHSBlock = CGF.createBasicBlock(name: "cond.true" ); |
1406 | llvm::BasicBlock *RHSBlock = CGF.createBasicBlock(name: "cond.false" ); |
1407 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock(name: "cond.end" ); |
1408 | |
1409 | // Bind the common expression if necessary. |
1410 | CodeGenFunction::OpaqueValueMapping binding(CGF, E); |
1411 | |
1412 | CodeGenFunction::ConditionalEvaluation eval(CGF); |
1413 | CGF.EmitBranchOnBoolExpr(Cond: E->getCond(), TrueBlock: LHSBlock, FalseBlock: RHSBlock, |
1414 | TrueCount: CGF.getProfileCount(S: E)); |
1415 | |
1416 | // Save whether the destination's lifetime is externally managed. |
1417 | bool isExternallyDestructed = Dest.isExternallyDestructed(); |
1418 | bool destructNonTrivialCStruct = |
1419 | !isExternallyDestructed && |
1420 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct; |
1421 | isExternallyDestructed |= destructNonTrivialCStruct; |
1422 | Dest.setExternallyDestructed(isExternallyDestructed); |
1423 | |
1424 | eval.begin(CGF); |
1425 | CGF.EmitBlock(BB: LHSBlock); |
1426 | if (llvm::EnableSingleByteCoverage) |
1427 | CGF.incrementProfileCounter(S: E->getTrueExpr()); |
1428 | else |
1429 | CGF.incrementProfileCounter(S: E); |
1430 | Visit(E: E->getTrueExpr()); |
1431 | eval.end(CGF); |
1432 | |
1433 | assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!" ); |
1434 | CGF.Builder.CreateBr(Dest: ContBlock); |
1435 | |
1436 | // If the result of an agg expression is unused, then the emission |
1437 | // of the LHS might need to create a destination slot. That's fine |
1438 | // with us, and we can safely emit the RHS into the same slot, but |
1439 | // we shouldn't claim that it's already being destructed. |
1440 | Dest.setExternallyDestructed(isExternallyDestructed); |
1441 | |
1442 | eval.begin(CGF); |
1443 | CGF.EmitBlock(BB: RHSBlock); |
1444 | if (llvm::EnableSingleByteCoverage) |
1445 | CGF.incrementProfileCounter(S: E->getFalseExpr()); |
1446 | Visit(E: E->getFalseExpr()); |
1447 | eval.end(CGF); |
1448 | |
1449 | if (destructNonTrivialCStruct) |
1450 | CGF.pushDestroy(dtorKind: QualType::DK_nontrivial_c_struct, addr: Dest.getAddress(), |
1451 | type: E->getType()); |
1452 | |
1453 | CGF.EmitBlock(BB: ContBlock); |
1454 | if (llvm::EnableSingleByteCoverage) |
1455 | CGF.incrementProfileCounter(S: E); |
1456 | } |
1457 | |
1458 | void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) { |
1459 | Visit(E: CE->getChosenSubExpr()); |
1460 | } |
1461 | |
1462 | void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { |
1463 | Address ArgValue = Address::invalid(); |
1464 | CGF.EmitVAArg(VE, VAListAddr&: ArgValue, Slot: Dest); |
1465 | |
1466 | // If EmitVAArg fails, emit an error. |
1467 | if (!ArgValue.isValid()) { |
1468 | CGF.ErrorUnsupported(S: VE, Type: "aggregate va_arg expression" ); |
1469 | return; |
1470 | } |
1471 | } |
1472 | |
1473 | void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
1474 | // Ensure that we have a slot, but if we already do, remember |
1475 | // whether it was externally destructed. |
1476 | bool wasExternallyDestructed = Dest.isExternallyDestructed(); |
1477 | EnsureDest(T: E->getType()); |
1478 | |
1479 | // We're going to push a destructor if there isn't already one. |
1480 | Dest.setExternallyDestructed(); |
1481 | |
1482 | Visit(E: E->getSubExpr()); |
1483 | |
1484 | // Push that destructor we promised. |
1485 | if (!wasExternallyDestructed) |
1486 | CGF.EmitCXXTemporary(Temporary: E->getTemporary(), TempType: E->getType(), Ptr: Dest.getAddress()); |
1487 | } |
1488 | |
1489 | void |
1490 | AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { |
1491 | AggValueSlot Slot = EnsureSlot(T: E->getType()); |
1492 | CGF.EmitCXXConstructExpr(E, Dest: Slot); |
1493 | } |
1494 | |
1495 | void AggExprEmitter::VisitCXXInheritedCtorInitExpr( |
1496 | const CXXInheritedCtorInitExpr *E) { |
1497 | AggValueSlot Slot = EnsureSlot(T: E->getType()); |
1498 | CGF.EmitInheritedCXXConstructorCall( |
1499 | D: E->getConstructor(), ForVirtualBase: E->constructsVBase(), This: Slot.getAddress(), |
1500 | InheritedFromVBase: E->inheritedFromVBase(), E); |
1501 | } |
1502 | |
1503 | void |
1504 | AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { |
1505 | AggValueSlot Slot = EnsureSlot(T: E->getType()); |
1506 | LValue SlotLV = CGF.MakeAddrLValue(Addr: Slot.getAddress(), T: E->getType()); |
1507 | |
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception or contains a branch out of the expression.
1510 | CodeGenFunction::CleanupDeactivationScope scope(CGF); |
1511 | |
1512 | CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); |
1513 | for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), |
1514 | e = E->capture_init_end(); |
1515 | i != e; ++i, ++CurField) { |
1516 | // Emit initialization |
1517 | LValue LV = CGF.EmitLValueForFieldInitialization(Base: SlotLV, Field: *CurField); |
1518 | if (CurField->hasCapturedVLAType()) { |
1519 | CGF.EmitLambdaVLACapture(VAT: CurField->getCapturedVLAType(), LV); |
1520 | continue; |
1521 | } |
1522 | |
1523 | EmitInitializationToLValue(E: *i, Address: LV); |
1524 | |
1525 | // Push a destructor if necessary. |
1526 | if (QualType::DestructionKind DtorKind = |
1527 | CurField->getType().isDestructedType()) { |
1528 | assert(LV.isSimple()); |
1529 | if (DtorKind) |
1530 | CGF.pushDestroyAndDeferDeactivation(cleanupKind: NormalAndEHCleanup, addr: LV.getAddress(), |
1531 | type: CurField->getType(), |
1532 | destroyer: CGF.getDestroyer(destructionKind: DtorKind), useEHCleanupForArray: false); |
1533 | } |
1534 | } |
1535 | } |
1536 | |
1537 | void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { |
1538 | CodeGenFunction::RunCleanupsScope cleanups(CGF); |
1539 | Visit(E: E->getSubExpr()); |
1540 | } |
1541 | |
1542 | void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { |
1543 | QualType T = E->getType(); |
1544 | AggValueSlot Slot = EnsureSlot(T); |
1545 | EmitNullInitializationToLValue(Address: CGF.MakeAddrLValue(Addr: Slot.getAddress(), T)); |
1546 | } |
1547 | |
1548 | void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { |
1549 | QualType T = E->getType(); |
1550 | AggValueSlot Slot = EnsureSlot(T); |
1551 | EmitNullInitializationToLValue(Address: CGF.MakeAddrLValue(Addr: Slot.getAddress(), T)); |
1552 | } |
1553 | |
1554 | /// Determine whether the given cast kind is known to always convert values |
1555 | /// with all zero bits in their value representation to values with all zero |
1556 | /// bits in their value representation. |
1557 | static bool castPreservesZero(const CastExpr *CE) { |
1558 | switch (CE->getCastKind()) { |
1559 | // No-ops. |
1560 | case CK_NoOp: |
1561 | case CK_UserDefinedConversion: |
1562 | case CK_ConstructorConversion: |
1563 | case CK_BitCast: |
1564 | case CK_ToUnion: |
1565 | case CK_ToVoid: |
1566 | // Conversions between (possibly-complex) integral, (possibly-complex) |
1567 | // floating-point, and bool. |
1568 | case CK_BooleanToSignedIntegral: |
1569 | case CK_FloatingCast: |
1570 | case CK_FloatingComplexCast: |
1571 | case CK_FloatingComplexToBoolean: |
1572 | case CK_FloatingComplexToIntegralComplex: |
1573 | case CK_FloatingComplexToReal: |
1574 | case CK_FloatingRealToComplex: |
1575 | case CK_FloatingToBoolean: |
1576 | case CK_FloatingToIntegral: |
1577 | case CK_IntegralCast: |
1578 | case CK_IntegralComplexCast: |
1579 | case CK_IntegralComplexToBoolean: |
1580 | case CK_IntegralComplexToFloatingComplex: |
1581 | case CK_IntegralComplexToReal: |
1582 | case CK_IntegralRealToComplex: |
1583 | case CK_IntegralToBoolean: |
1584 | case CK_IntegralToFloating: |
1585 | // Reinterpreting integers as pointers and vice versa. |
1586 | case CK_IntegralToPointer: |
1587 | case CK_PointerToIntegral: |
1588 | // Language extensions. |
1589 | case CK_VectorSplat: |
1590 | case CK_MatrixCast: |
1591 | case CK_NonAtomicToAtomic: |
1592 | case CK_AtomicToNonAtomic: |
1593 | case CK_HLSLVectorTruncation: |
1594 | case CK_HLSLElementwiseCast: |
1595 | case CK_HLSLAggregateSplatCast: |
1596 | return true; |
1597 | |
1598 | case CK_BaseToDerivedMemberPointer: |
1599 | case CK_DerivedToBaseMemberPointer: |
1600 | case CK_MemberPointerToBoolean: |
1601 | case CK_NullToMemberPointer: |
1602 | case CK_ReinterpretMemberPointer: |
1603 | // FIXME: ABI-dependent. |
1604 | return false; |
1605 | |
1606 | case CK_AnyPointerToBlockPointerCast: |
1607 | case CK_BlockPointerToObjCPointerCast: |
1608 | case CK_CPointerToObjCPointerCast: |
1609 | case CK_ObjCObjectLValueCast: |
1610 | case CK_IntToOCLSampler: |
1611 | case CK_ZeroToOCLOpaqueType: |
1612 | // FIXME: Check these. |
1613 | return false; |
1614 | |
1615 | case CK_FixedPointCast: |
1616 | case CK_FixedPointToBoolean: |
1617 | case CK_FixedPointToFloating: |
1618 | case CK_FixedPointToIntegral: |
1619 | case CK_FloatingToFixedPoint: |
1620 | case CK_IntegralToFixedPoint: |
1621 | // FIXME: Do all fixed-point types represent zero as all 0 bits? |
1622 | return false; |
1623 | |
1624 | case CK_AddressSpaceConversion: |
1625 | case CK_BaseToDerived: |
1626 | case CK_DerivedToBase: |
1627 | case CK_Dynamic: |
1628 | case CK_NullToPointer: |
1629 | case CK_PointerToBoolean: |
1630 | // FIXME: Preserves zeroes only if zero pointers and null pointers have the |
1631 | // same representation in all involved address spaces. |
1632 | return false; |
1633 | |
1634 | case CK_ARCConsumeObject: |
1635 | case CK_ARCExtendBlockObject: |
1636 | case CK_ARCProduceObject: |
1637 | case CK_ARCReclaimReturnedObject: |
1638 | case CK_CopyAndAutoreleaseBlockObject: |
1639 | case CK_ArrayToPointerDecay: |
1640 | case CK_FunctionToPointerDecay: |
1641 | case CK_BuiltinFnToFnPtr: |
1642 | case CK_Dependent: |
1643 | case CK_LValueBitCast: |
1644 | case CK_LValueToRValue: |
1645 | case CK_LValueToRValueBitCast: |
1646 | case CK_UncheckedDerivedToBase: |
1647 | case CK_HLSLArrayRValue: |
1648 | return false; |
1649 | } |
1650 | llvm_unreachable("Unhandled clang::CastKind enum" ); |
1651 | } |
1652 | |
1653 | /// isSimpleZero - If emitting this value will obviously just cause a store of |
1654 | /// zero to memory, return true. This can return false if uncertain, so it just |
1655 | /// handles simple cases. |
1656 | static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) { |
1657 | E = E->IgnoreParens(); |
1658 | while (auto *CE = dyn_cast<CastExpr>(Val: E)) { |
1659 | if (!castPreservesZero(CE)) |
1660 | break; |
1661 | E = CE->getSubExpr()->IgnoreParens(); |
1662 | } |
1663 | |
1664 | // 0 |
1665 | if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Val: E)) |
1666 | return IL->getValue() == 0; |
1667 | // +0.0 |
1668 | if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(Val: E)) |
1669 | return FL->getValue().isPosZero(); |
1670 | // int() |
1671 | if ((isa<ImplicitValueInitExpr>(Val: E) || isa<CXXScalarValueInitExpr>(Val: E)) && |
1672 | CGF.getTypes().isZeroInitializable(T: E->getType())) |
1673 | return true; |
1674 | // (int*)0 - Null pointer expressions. |
1675 | if (const CastExpr *ICE = dyn_cast<CastExpr>(Val: E)) |
1676 | return ICE->getCastKind() == CK_NullToPointer && |
1677 | CGF.getTypes().isPointerZeroInitializable(T: E->getType()) && |
1678 | !E->HasSideEffects(Ctx: CGF.getContext()); |
1679 | // '\0' |
1680 | if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(Val: E)) |
1681 | return CL->getValue() == 0; |
1682 | |
1683 | // Otherwise, hard case: conservatively return false. |
1684 | return false; |
1685 | } |
1686 | |
1687 | |
1688 | void |
1689 | AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { |
1690 | QualType type = LV.getType(); |
1691 | // FIXME: Ignore result? |
1692 | // FIXME: Are initializers affected by volatile? |
1693 | if (Dest.isZeroed() && isSimpleZero(E, CGF)) { |
1694 | // Storing "i32 0" to a zero'd memory location is a noop. |
1695 | return; |
1696 | } else if (isa<ImplicitValueInitExpr>(Val: E) || isa<CXXScalarValueInitExpr>(Val: E)) { |
1697 | return EmitNullInitializationToLValue(Address: LV); |
1698 | } else if (isa<NoInitExpr>(Val: E)) { |
1699 | // Do nothing. |
1700 | return; |
1701 | } else if (type->isReferenceType()) { |
1702 | RValue RV = CGF.EmitReferenceBindingToExpr(E); |
1703 | return CGF.EmitStoreThroughLValue(Src: RV, Dst: LV); |
1704 | } |
1705 | |
1706 | CGF.EmitInitializationToLValue(E, LV, IsZeroed: Dest.isZeroed()); |
1707 | } |
1708 | |
1709 | void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) { |
1710 | QualType type = lv.getType(); |
1711 | |
1712 | // If the destination slot is already zeroed out before the aggregate is |
1713 | // copied into it, we don't have to emit any zeros here. |
1714 | if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T: type)) |
1715 | return; |
1716 | |
1717 | if (CGF.hasScalarEvaluationKind(T: type)) { |
1718 | // For non-aggregates, we can store the appropriate null constant. |
1719 | llvm::Value *null = CGF.CGM.EmitNullConstant(T: type); |
1720 | // Note that the following is not equivalent to |
1721 | // EmitStoreThroughBitfieldLValue for ARC types. |
1722 | if (lv.isBitField()) { |
1723 | CGF.EmitStoreThroughBitfieldLValue(Src: RValue::get(V: null), Dst: lv); |
1724 | } else { |
1725 | assert(lv.isSimple()); |
1726 | CGF.EmitStoreOfScalar(value: null, lvalue: lv, /* isInitialization */ isInit: true); |
1727 | } |
1728 | } else { |
1729 | // There's a potential optimization opportunity in combining |
1730 | // memsets; that would be easy for arrays, but relatively |
1731 | // difficult for structures with the current code. |
1732 | CGF.EmitNullInitialization(DestPtr: lv.getAddress(), Ty: lv.getType()); |
1733 | } |
1734 | } |
1735 | |
1736 | void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) { |
1737 | VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs(), |
1738 | InitializedFieldInUnion: E->getInitializedFieldInUnion(), |
1739 | ArrayFiller: E->getArrayFiller()); |
1740 | } |
1741 | |
1742 | void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { |
1743 | if (E->hadArrayRangeDesignator()) |
1744 | CGF.ErrorUnsupported(S: E, Type: "GNU array range designator extension" ); |
1745 | |
1746 | if (E->isTransparent()) |
1747 | return Visit(E: E->getInit(Init: 0)); |
1748 | |
1749 | VisitCXXParenListOrInitListExpr( |
1750 | ExprToVisit: E, Args: E->inits(), InitializedFieldInUnion: E->getInitializedFieldInUnion(), ArrayFiller: E->getArrayFiller()); |
1751 | } |
1752 | |
1753 | void AggExprEmitter::VisitCXXParenListOrInitListExpr( |
1754 | Expr *ExprToVisit, ArrayRef<Expr *> InitExprs, |
1755 | FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) { |
1756 | #if 0 |
1757 | // FIXME: Assess perf here? Figure out what cases are worth optimizing here |
1758 | // (Length of globals? Chunks of zeroed-out space?). |
1759 | // |
1760 | // If we can, prefer a copy from a global; this is a lot less code for long |
1761 | // globals, and it's easier for the current optimizers to analyze. |
1762 | if (llvm::Constant *C = |
1763 | CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) { |
1764 | llvm::GlobalVariable* GV = |
1765 | new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true, |
1766 | llvm::GlobalValue::InternalLinkage, C, "" ); |
1767 | EmitFinalDestCopy(ExprToVisit->getType(), |
1768 | CGF.MakeAddrLValue(GV, ExprToVisit->getType())); |
1769 | return; |
1770 | } |
1771 | #endif |
1772 | |
1773 | // HLSL initialization lists in the AST are an expansion which can contain |
1774 | // side-effecting expressions wrapped in opaque value expressions. To properly |
1775 | // emit these we need to emit the opaque values before we emit the argument |
// expressions themselves. This is a little hacky, but it prevents us from
// needing to make a bigger AST-level change for a language feature that we
// intend to deprecate in the near future. See related HLSL language proposals:
1779 | // * 0005-strict-initializer-lists.md |
1780 | // * https://github.com/microsoft/hlsl-specs/pull/325 |
1781 | if (CGF.getLangOpts().HLSL && isa<InitListExpr>(Val: ExprToVisit)) |
1782 | CGF.CGM.getHLSLRuntime().emitInitListOpaqueValues( |
1783 | CGF, E: cast<InitListExpr>(Val: ExprToVisit)); |
1784 | |
1785 | AggValueSlot Dest = EnsureSlot(T: ExprToVisit->getType()); |
1786 | |
1787 | LValue DestLV = CGF.MakeAddrLValue(Addr: Dest.getAddress(), T: ExprToVisit->getType()); |
1788 | |
1789 | // Handle initialization of an array. |
1790 | if (ExprToVisit->getType()->isConstantArrayType()) { |
1791 | auto AType = cast<llvm::ArrayType>(Val: Dest.getAddress().getElementType()); |
1792 | EmitArrayInit(DestPtr: Dest.getAddress(), AType, ArrayQTy: ExprToVisit->getType(), ExprToVisit, |
1793 | Args: InitExprs, ArrayFiller); |
1794 | return; |
1795 | } else if (ExprToVisit->getType()->isVariableArrayType()) { |
1796 | // A variable array type that has an initializer can only do empty |
1797 | // initialization. And because this feature is not exposed as an extension |
1798 | // in C++, we can safely memset the array memory to zero. |
1799 | assert(InitExprs.size() == 0 && |
1800 | "you can only use an empty initializer with VLAs" ); |
1801 | CGF.EmitNullInitialization(DestPtr: Dest.getAddress(), Ty: ExprToVisit->getType()); |
1802 | return; |
1803 | } |
1804 | |
1805 | assert(ExprToVisit->getType()->isRecordType() && |
1806 | "Only support structs/unions here!" ); |
1807 | |
1808 | // Do struct initialization; this code just sets each individual member |
// to the appropriate value. This makes bitfield support automatic;
1810 | // the disadvantage is that the generated code is more difficult for |
1811 | // the optimizer, especially with bitfields. |
1812 | unsigned NumInitElements = InitExprs.size(); |
1813 | RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl(); |
1814 | |
1815 | // We'll need to enter cleanup scopes in case any of the element |
1816 | // initializers throws an exception. |
1817 | CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF); |
1818 | |
1819 | unsigned curInitIndex = 0; |
1820 | |
1821 | // Emit initialization of base classes. |
1822 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: record)) { |
1823 | assert(NumInitElements >= CXXRD->getNumBases() && |
1824 | "missing initializer for base class" ); |
1825 | for (auto &Base : CXXRD->bases()) { |
1826 | assert(!Base.isVirtual() && "should not see vbases here" ); |
1827 | auto *BaseRD = Base.getType()->getAsCXXRecordDecl(); |
1828 | Address V = CGF.GetAddressOfDirectBaseInCompleteClass( |
1829 | Value: Dest.getAddress(), Derived: CXXRD, Base: BaseRD, |
1830 | /*isBaseVirtual*/ BaseIsVirtual: false); |
1831 | AggValueSlot AggSlot = AggValueSlot::forAddr( |
1832 | addr: V, quals: Qualifiers(), |
1833 | isDestructed: AggValueSlot::IsDestructed, |
1834 | needsGC: AggValueSlot::DoesNotNeedGCBarriers, |
1835 | isAliased: AggValueSlot::IsNotAliased, |
1836 | mayOverlap: CGF.getOverlapForBaseInit(RD: CXXRD, BaseRD, IsVirtual: Base.isVirtual())); |
1837 | CGF.EmitAggExpr(E: InitExprs[curInitIndex++], AS: AggSlot); |
1838 | |
1839 | if (QualType::DestructionKind dtorKind = |
1840 | Base.getType().isDestructedType()) |
1841 | CGF.pushDestroyAndDeferDeactivation(dtorKind, addr: V, type: Base.getType()); |
1842 | } |
1843 | } |
1844 | |
1845 | // Prepare a 'this' for CXXDefaultInitExprs. |
1846 | CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress()); |
1847 | |
1848 | const bool ZeroInitPadding = |
1849 | CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed(); |
1850 | |
1851 | if (record->isUnion()) { |
1852 | // Only initialize one field of a union. The field itself is |
1853 | // specified by the initializer list. |
1854 | if (!InitializedFieldInUnion) { |
1855 | // Empty union; we have nothing to do. |
1856 | |
1857 | #ifndef NDEBUG |
// Make sure that it's really an empty union and not a failure of
1859 | // semantic analysis. |
1860 | for (const auto *Field : record->fields()) |
1861 | assert( |
1862 | (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) && |
1863 | "Only unnamed bitfields or anonymous class allowed" ); |
1864 | #endif |
1865 | return; |
1866 | } |
1867 | |
1868 | // FIXME: volatility |
1869 | FieldDecl *Field = InitializedFieldInUnion; |
1870 | |
1871 | LValue FieldLoc = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field); |
1872 | if (NumInitElements) { |
1873 | // Store the initializer into the field |
1874 | EmitInitializationToLValue(E: InitExprs[0], LV: FieldLoc); |
1875 | if (ZeroInitPadding) { |
1876 | uint64_t TotalSize = CGF.getContext().toBits( |
1877 | CharSize: Dest.getPreferredSize(Ctx&: CGF.getContext(), Type: DestLV.getType())); |
1878 | uint64_t FieldSize = CGF.getContext().getTypeSize(T: FieldLoc.getType()); |
1879 | DoZeroInitPadding(PaddingStart&: FieldSize, PaddingEnd: TotalSize, NextField: nullptr); |
1880 | } |
1881 | } else { |
1882 | // Default-initialize to null. |
1883 | if (ZeroInitPadding) |
1884 | EmitNullInitializationToLValue(lv: DestLV); |
1885 | else |
1886 | EmitNullInitializationToLValue(lv: FieldLoc); |
1887 | } |
1888 | return; |
1889 | } |
1890 | |
1891 | // Here we iterate over the fields; this makes it simpler to both |
1892 | // default-initialize fields and skip over unnamed fields. |
1893 | const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(D: record); |
1894 | uint64_t PaddingStart = 0; |
1895 | |
1896 | for (const auto *field : record->fields()) { |
1897 | // We're done once we hit the flexible array member. |
1898 | if (field->getType()->isIncompleteArrayType()) |
1899 | break; |
1900 | |
1901 | // Always skip anonymous bitfields. |
1902 | if (field->isUnnamedBitField()) |
1903 | continue; |
1904 | |
1905 | // We're done if we reach the end of the explicit initializers, we |
1906 | // have a zeroed object, and the rest of the fields are |
1907 | // zero-initializable. |
1908 | if (curInitIndex == NumInitElements && Dest.isZeroed() && |
1909 | CGF.getTypes().isZeroInitializable(T: ExprToVisit->getType())) |
1910 | break; |
1911 | |
1912 | if (ZeroInitPadding) |
1913 | DoZeroInitPadding(PaddingStart, |
1914 | PaddingEnd: Layout.getFieldOffset(FieldNo: field->getFieldIndex()), NextField: field); |
1915 | |
1916 | LValue LV = CGF.EmitLValueForFieldInitialization(Base: DestLV, Field: field); |
// We never generate write-barriers for initialized fields.
1918 | LV.setNonGC(true); |
1919 | |
1920 | if (curInitIndex < NumInitElements) { |
1921 | // Store the initializer into the field. |
1922 | EmitInitializationToLValue(E: InitExprs[curInitIndex++], LV); |
1923 | } else { |
1924 | // We're out of initializers; default-initialize to null |
1925 | EmitNullInitializationToLValue(lv: LV); |
1926 | } |
1927 | |
1928 | // Push a destructor if necessary. |
1929 | // FIXME: if we have an array of structures, all explicitly |
1930 | // initialized, we can end up pushing a linear number of cleanups. |
1931 | if (QualType::DestructionKind dtorKind |
1932 | = field->getType().isDestructedType()) { |
1933 | assert(LV.isSimple()); |
1934 | if (dtorKind) { |
1935 | CGF.pushDestroyAndDeferDeactivation(cleanupKind: NormalAndEHCleanup, addr: LV.getAddress(), |
1936 | type: field->getType(), |
1937 | destroyer: CGF.getDestroyer(destructionKind: dtorKind), useEHCleanupForArray: false); |
1938 | } |
1939 | } |
1940 | } |
1941 | if (ZeroInitPadding) { |
1942 | uint64_t TotalSize = CGF.getContext().toBits( |
1943 | CharSize: Dest.getPreferredSize(Ctx&: CGF.getContext(), Type: DestLV.getType())); |
1944 | DoZeroInitPadding(PaddingStart, PaddingEnd: TotalSize, NextField: nullptr); |
1945 | } |
1946 | } |
1947 | |
1948 | void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart, |
1949 | uint64_t PaddingEnd, |
1950 | const FieldDecl *NextField) { |
1951 | |
1952 | auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) { |
1953 | CharUnits Start = CGF.getContext().toCharUnitsFromBits(BitSize: StartBit); |
1954 | CharUnits End = CGF.getContext().toCharUnitsFromBits(BitSize: EndBit); |
1955 | Address Addr = Dest.getAddress().withElementType(ElemTy: CGF.CharTy); |
1956 | if (!Start.isZero()) |
1957 | Addr = Builder.CreateConstGEP(Addr, Index: Start.getQuantity()); |
1958 | llvm::Constant *SizeVal = Builder.getInt64(C: (End - Start).getQuantity()); |
1959 | CGF.Builder.CreateMemSet(Dest: Addr, Value: Builder.getInt8(C: 0), Size: SizeVal, IsVolatile: false); |
1960 | }; |
1961 | |
1962 | if (NextField != nullptr && NextField->isBitField()) { |
// For a bit-field, zero-initialize the whole StorageSize before storing the
// bits, so we don't need to handle big/little endian differences.
1965 | const CGRecordLayout &RL = |
1966 | CGF.getTypes().getCGRecordLayout(NextField->getParent()); |
1967 | const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: NextField); |
1968 | uint64_t StorageStart = CGF.getContext().toBits(CharSize: Info.StorageOffset); |
1969 | if (StorageStart + Info.StorageSize > PaddingStart) { |
1970 | if (StorageStart > PaddingStart) |
1971 | InitBytes(PaddingStart, StorageStart); |
1972 | Address Addr = Dest.getAddress(); |
1973 | if (!Info.StorageOffset.isZero()) |
1974 | Addr = Builder.CreateConstGEP(Addr: Addr.withElementType(ElemTy: CGF.CharTy), |
1975 | Index: Info.StorageOffset.getQuantity()); |
1976 | Addr = Addr.withElementType( |
1977 | ElemTy: llvm::Type::getIntNTy(C&: CGF.getLLVMContext(), N: Info.StorageSize)); |
1978 | Builder.CreateStore(Val: Builder.getIntN(N: Info.StorageSize, C: 0), Addr); |
1979 | PaddingStart = StorageStart + Info.StorageSize; |
1980 | } |
1981 | return; |
1982 | } |
1983 | |
1984 | if (PaddingStart < PaddingEnd) |
1985 | InitBytes(PaddingStart, PaddingEnd); |
1986 | if (NextField != nullptr) |
1987 | PaddingStart = |
1988 | PaddingEnd + CGF.getContext().getTypeSize(T: NextField->getType()); |
1989 | } |
1990 | |
1991 | void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
1992 | llvm::Value *outerBegin) { |
1993 | // Emit the common subexpression. |
1994 | CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr()); |
1995 | |
1996 | Address destPtr = EnsureSlot(T: E->getType()).getAddress(); |
1997 | uint64_t numElements = E->getArraySize().getZExtValue(); |
1998 | |
1999 | if (!numElements) |
2000 | return; |
2001 | |
2002 | // destPtr is an array*. Construct an elementType* by drilling down a level. |
2003 | llvm::Value *zero = llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 0); |
2004 | llvm::Value *indices[] = {zero, zero}; |
2005 | llvm::Value *begin = Builder.CreateInBoundsGEP(Ty: destPtr.getElementType(), |
2006 | Ptr: destPtr.emitRawPointer(CGF), |
2007 | IdxList: indices, Name: "arrayinit.begin" ); |
2008 | |
2009 | // Prepare to special-case multidimensional array initialization: we avoid |
2010 | // emitting multiple destructor loops in that case. |
2011 | if (!outerBegin) |
2012 | outerBegin = begin; |
2013 | ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(Val: E->getSubExpr()); |
2014 | |
2015 | QualType elementType = |
2016 | CGF.getContext().getAsArrayType(T: E->getType())->getElementType(); |
2017 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(T: elementType); |
2018 | CharUnits elementAlign = |
2019 | destPtr.getAlignment().alignmentOfArrayElement(elementSize); |
2020 | llvm::Type *llvmElementType = CGF.ConvertTypeForMem(T: elementType); |
2021 | |
2022 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
2023 | llvm::BasicBlock *bodyBB = CGF.createBasicBlock(name: "arrayinit.body" ); |
2024 | |
2025 | // Jump into the body. |
2026 | CGF.EmitBlock(BB: bodyBB); |
2027 | llvm::PHINode *index = |
2028 | Builder.CreatePHI(Ty: zero->getType(), NumReservedValues: 2, Name: "arrayinit.index" ); |
2029 | index->addIncoming(V: zero, BB: entryBB); |
2030 | llvm::Value *element = |
2031 | Builder.CreateInBoundsGEP(Ty: llvmElementType, Ptr: begin, IdxList: index); |
2032 | |
2033 | // Prepare for a cleanup. |
2034 | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
2035 | EHScopeStack::stable_iterator cleanup; |
2036 | if (CGF.needsEHCleanup(kind: dtorKind) && !InnerLoop) { |
2037 | if (outerBegin->getType() != element->getType()) |
2038 | outerBegin = Builder.CreateBitCast(V: outerBegin, DestTy: element->getType()); |
2039 | CGF.pushRegularPartialArrayCleanup(arrayBegin: outerBegin, arrayEnd: element, elementType, |
2040 | elementAlignment: elementAlign, |
2041 | destroyer: CGF.getDestroyer(destructionKind: dtorKind)); |
2042 | cleanup = CGF.EHStack.stable_begin(); |
2043 | } else { |
2044 | dtorKind = QualType::DK_none; |
2045 | } |
2046 | |
2047 | // Emit the actual filler expression. |
2048 | { |
2049 | // Temporaries created in an array initialization loop are destroyed |
2050 | // at the end of each iteration. |
2051 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
2052 | CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index); |
2053 | LValue elementLV = CGF.MakeAddrLValue( |
2054 | Addr: Address(element, llvmElementType, elementAlign), T: elementType); |
2055 | |
2056 | if (InnerLoop) { |
2057 | // If the subexpression is an ArrayInitLoopExpr, share its cleanup. |
2058 | auto elementSlot = AggValueSlot::forLValue( |
2059 | LV: elementLV, isDestructed: AggValueSlot::IsDestructed, |
2060 | needsGC: AggValueSlot::DoesNotNeedGCBarriers, isAliased: AggValueSlot::IsNotAliased, |
2061 | mayOverlap: AggValueSlot::DoesNotOverlap); |
2062 | AggExprEmitter(CGF, elementSlot, false) |
2063 | .VisitArrayInitLoopExpr(E: InnerLoop, outerBegin); |
2064 | } else |
2065 | EmitInitializationToLValue(E: E->getSubExpr(), LV: elementLV); |
2066 | } |
2067 | |
2068 | // Move on to the next element. |
2069 | llvm::Value *nextIndex = Builder.CreateNUWAdd( |
2070 | LHS: index, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: 1), Name: "arrayinit.next" ); |
2071 | index->addIncoming(V: nextIndex, BB: Builder.GetInsertBlock()); |
2072 | |
2073 | // Leave the loop if we're done. |
2074 | llvm::Value *done = Builder.CreateICmpEQ( |
2075 | LHS: nextIndex, RHS: llvm::ConstantInt::get(Ty: CGF.SizeTy, V: numElements), |
2076 | Name: "arrayinit.done" ); |
2077 | llvm::BasicBlock *endBB = CGF.createBasicBlock(name: "arrayinit.end" ); |
2078 | Builder.CreateCondBr(Cond: done, True: endBB, False: bodyBB); |
2079 | |
2080 | CGF.EmitBlock(BB: endBB); |
2081 | |
2082 | // Leave the partial-array cleanup if we entered one. |
2083 | if (dtorKind) |
2084 | CGF.DeactivateCleanupBlock(Cleanup: cleanup, DominatingIP: index); |
2085 | } |
2086 | |
2087 | void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) { |
2088 | AggValueSlot Dest = EnsureSlot(T: E->getType()); |
2089 | |
2090 | LValue DestLV = CGF.MakeAddrLValue(Addr: Dest.getAddress(), T: E->getType()); |
2091 | EmitInitializationToLValue(E: E->getBase(), LV: DestLV); |
2092 | VisitInitListExpr(E: E->getUpdater()); |
2093 | } |
2094 | |
2095 | //===----------------------------------------------------------------------===// |
2096 | // Entry Points into this File |
2097 | //===----------------------------------------------------------------------===// |
2098 | |
2099 | /// GetNumNonZeroBytesInInit - Get an approximate count of the number of |
2100 | /// non-zero bytes that will be stored when outputting the initializer for the |
2101 | /// specified initializer expression. |
2102 | static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) { |
2103 | if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Val: E)) |
2104 | E = MTE->getSubExpr(); |
2105 | E = E->IgnoreParenNoopCasts(Ctx: CGF.getContext()); |
2106 | |
2107 | // 0 and 0.0 won't require any non-zero stores! |
2108 | if (isSimpleZero(E, CGF)) return CharUnits::Zero(); |
2109 | |
// If this is an init list expr, sum up the sizes of the (present) elements.
// If this is something weird, assume the whole thing is non-zero.
2112 | const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: E); |
2113 | while (ILE && ILE->isTransparent()) |
2114 | ILE = dyn_cast<InitListExpr>(Val: ILE->getInit(Init: 0)); |
2115 | if (!ILE || !CGF.getTypes().isZeroInitializable(T: ILE->getType())) |
2116 | return CGF.getContext().getTypeSizeInChars(T: E->getType()); |
2117 | |
2118 | // InitListExprs for structs have to be handled carefully. If there are |
2119 | // reference members, we need to consider the size of the reference, not the |
// referent. InitListExprs for unions and arrays can't have references.
2121 | if (const RecordType *RT = E->getType()->getAs<RecordType>()) { |
2122 | if (!RT->isUnionType()) { |
2123 | RecordDecl *SD = RT->getDecl(); |
2124 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
2125 | |
2126 | unsigned ILEElement = 0; |
2127 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: SD)) |
2128 | while (ILEElement != CXXRD->getNumBases()) |
2129 | NumNonZeroBytes += |
2130 | GetNumNonZeroBytesInInit(E: ILE->getInit(Init: ILEElement++), CGF); |
2131 | for (const auto *Field : SD->fields()) { |
2132 | // We're done once we hit the flexible array member or run out of |
2133 | // InitListExpr elements. |
2134 | if (Field->getType()->isIncompleteArrayType() || |
2135 | ILEElement == ILE->getNumInits()) |
2136 | break; |
2137 | if (Field->isUnnamedBitField()) |
2138 | continue; |
2139 | |
2140 | const Expr *E = ILE->getInit(Init: ILEElement++); |
2141 | |
2142 | // Reference values are always non-null and have the width of a pointer. |
2143 | if (Field->getType()->isReferenceType()) |
2144 | NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits( |
2145 | BitSize: CGF.getTarget().getPointerWidth(AddrSpace: LangAS::Default)); |
2146 | else |
2147 | NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF); |
2148 | } |
2149 | |
2150 | return NumNonZeroBytes; |
2151 | } |
2152 | } |
2153 | |
2154 | // FIXME: This overestimates the number of non-zero bytes for bit-fields. |
2155 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
2156 | for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) |
2157 | NumNonZeroBytes += GetNumNonZeroBytesInInit(E: ILE->getInit(Init: i), CGF); |
2158 | return NumNonZeroBytes; |
2159 | } |
2160 | |
2161 | /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of |
2162 | /// zeros in it, emit a memset and avoid storing the individual zeros. |
2163 | /// |
2164 | static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, |
2165 | CodeGenFunction &CGF) { |
2166 | // If the slot is already known to be zeroed, nothing to do. Don't mess with |
2167 | // volatile stores. |
2168 | if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid()) |
2169 | return; |
2170 | |
2171 | // C++ objects with a user-declared constructor don't need zero'ing. |
2172 | if (CGF.getLangOpts().CPlusPlus) |
2173 | if (const RecordType *RT = CGF.getContext() |
2174 | .getBaseElementType(QT: E->getType())->getAs<RecordType>()) { |
2175 | const CXXRecordDecl *RD = cast<CXXRecordDecl>(Val: RT->getDecl()); |
2176 | if (RD->hasUserDeclaredConstructor()) |
2177 | return; |
2178 | } |
2179 | |
2180 | // If the type is 16-bytes or smaller, prefer individual stores over memset. |
2181 | CharUnits Size = Slot.getPreferredSize(Ctx&: CGF.getContext(), Type: E->getType()); |
2182 | if (Size <= CharUnits::fromQuantity(Quantity: 16)) |
2183 | return; |
2184 | |
// Check to see if over 3/4 of the initializer is known to be zero. If so,
2186 | // we prefer to emit memset + individual stores for the rest. |
2187 | CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF); |
2188 | if (NumNonZeroBytes*4 > Size) |
2189 | return; |
2190 | |
// Okay, it seems like a good idea to use an initial memset; emit the call.
2192 | llvm::Constant *SizeVal = CGF.Builder.getInt64(C: Size.getQuantity()); |
2193 | |
2194 | Address Loc = Slot.getAddress().withElementType(ElemTy: CGF.Int8Ty); |
2195 | CGF.Builder.CreateMemSet(Dest: Loc, Value: CGF.Builder.getInt8(C: 0), Size: SizeVal, IsVolatile: false); |
2196 | |
2197 | // Tell the AggExprEmitter that the slot is known zero. |
2198 | Slot.setZeroed(); |
2199 | } |
2200 | |
2201 | |
2202 | |
2203 | |
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type. The result is computed into the given slot. Note that if the slot is
/// ignored, the value of the aggregate expression is not needed.
2208 | void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) { |
2209 | assert(E && hasAggregateEvaluationKind(E->getType()) && |
2210 | "Invalid aggregate expression to emit" ); |
2211 | assert((Slot.getAddress().isValid() || Slot.isIgnored()) && |
2212 | "slot has bits but no address" ); |
2213 | |
2214 | // Optimize the slot if possible. |
2215 | CheckAggExprForMemSetUse(Slot, E, CGF&: *this); |
2216 | |
2217 | AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(E: const_cast<Expr*>(E)); |
2218 | } |
2219 | |
2220 | LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) { |
2221 | assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!" ); |
2222 | Address Temp = CreateMemTemp(T: E->getType()); |
2223 | LValue LV = MakeAddrLValue(Addr: Temp, T: E->getType()); |
2224 | EmitAggExpr(E, Slot: AggValueSlot::forLValue(LV, isDestructed: AggValueSlot::IsNotDestructed, |
2225 | needsGC: AggValueSlot::DoesNotNeedGCBarriers, |
2226 | isAliased: AggValueSlot::IsNotAliased, |
2227 | mayOverlap: AggValueSlot::DoesNotOverlap)); |
2228 | return LV; |
2229 | } |
2230 | |
2231 | void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, |
2232 | const LValue &Src, |
2233 | ExprValueKind SrcKind) { |
2234 | return AggExprEmitter(*this, Dest, Dest.isIgnored()) |
2235 | .EmitFinalDestCopy(type: Type, src: Src, SrcValueKind: SrcKind); |
2236 | } |
2237 | |
2238 | AggValueSlot::Overlap_t |
2239 | CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { |
2240 | if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType()) |
2241 | return AggValueSlot::DoesNotOverlap; |
2242 | |
2243 | // Empty fields can overlap earlier fields. |
2244 | if (FD->getType()->getAsCXXRecordDecl()->isEmpty()) |
2245 | return AggValueSlot::MayOverlap; |
2246 | |
2247 | // If the field lies entirely within the enclosing class's nvsize, its tail |
2248 | // padding cannot overlap any already-initialized object. (The only subobjects |
2249 | // with greater addresses that might already be initialized are vbases.) |
2250 | const RecordDecl *ClassRD = FD->getParent(); |
2251 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D: ClassRD); |
2252 | if (Layout.getFieldOffset(FieldNo: FD->getFieldIndex()) + |
2253 | getContext().getTypeSize(T: FD->getType()) <= |
2254 | (uint64_t)getContext().toBits(CharSize: Layout.getNonVirtualSize())) |
2255 | return AggValueSlot::DoesNotOverlap; |
2256 | |
2257 | // The tail padding may contain values we need to preserve. |
2258 | return AggValueSlot::MayOverlap; |
2259 | } |
2260 | |
2261 | AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit( |
2262 | const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) { |
2263 | // If the most-derived object is a field declared with [[no_unique_address]], |
2264 | // the tail padding of any virtual base could be reused for other subobjects |
2265 | // of that field's class. |
2266 | if (IsVirtual) |
2267 | return AggValueSlot::MayOverlap; |
2268 | |
2269 | // Empty bases can overlap earlier bases. |
2270 | if (BaseRD->isEmpty()) |
2271 | return AggValueSlot::MayOverlap; |
2272 | |
2273 | // If the base class is laid out entirely within the nvsize of the derived |
2274 | // class, its tail padding cannot yet be initialized, so we can issue |
2275 | // stores at the full width of the base class. |
2276 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D: RD); |
2277 | if (Layout.getBaseClassOffset(Base: BaseRD) + |
2278 | getContext().getASTRecordLayout(D: BaseRD).getSize() <= |
2279 | Layout.getNonVirtualSize()) |
2280 | return AggValueSlot::DoesNotOverlap; |
2281 | |
2282 | // The tail padding may contain values we need to preserve. |
2283 | return AggValueSlot::MayOverlap; |
2284 | } |
2285 | |
2286 | void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty, |
2287 | AggValueSlot::Overlap_t MayOverlap, |
2288 | bool isVolatile) { |
2289 | assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex" ); |
2290 | |
2291 | Address DestPtr = Dest.getAddress(); |
2292 | Address SrcPtr = Src.getAddress(); |
2293 | |
2294 | if (getLangOpts().CPlusPlus) { |
2295 | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
2296 | CXXRecordDecl *Record = cast<CXXRecordDecl>(Val: RT->getDecl()); |
2297 | assert((Record->hasTrivialCopyConstructor() || |
2298 | Record->hasTrivialCopyAssignment() || |
2299 | Record->hasTrivialMoveConstructor() || |
2300 | Record->hasTrivialMoveAssignment() || |
2301 | Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) && |
2302 | "Trying to aggregate-copy a type without a trivial copy/move " |
2303 | "constructor or assignment operator" ); |
2304 | // Ignore empty classes in C++. |
2305 | if (Record->isEmpty()) |
2306 | return; |
2307 | } |
2308 | } |
2309 | |
2310 | if (getLangOpts().CUDAIsDevice) { |
2311 | if (Ty->isCUDADeviceBuiltinSurfaceType()) { |
2312 | if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(CGF&: *this, Dst: Dest, |
2313 | Src)) |
2314 | return; |
2315 | } else if (Ty->isCUDADeviceBuiltinTextureType()) { |
2316 | if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(CGF&: *this, Dst: Dest, |
2317 | Src)) |
2318 | return; |
2319 | } |
2320 | } |
2321 | |
2322 | // Aggregate assignment turns into llvm.memcpy. This is almost valid per |
2323 | // C99 6.5.16.1p3, which states "If the value being stored in an object is |
// read from another object that overlaps in any way the storage of the first
2325 | // object, then the overlap shall be exact and the two objects shall have |
2326 | // qualified or unqualified versions of a compatible type." |
2327 | // |
2328 | // memcpy is not defined if the source and destination pointers are exactly |
2329 | // equal, but other compilers do this optimization, and almost every memcpy |
2330 | // implementation handles this case safely. If there is a libc that does not |
2331 | // safely handle this, we can add a target hook. |
2332 | |
2333 | // Get data size info for this aggregate. Don't copy the tail padding if this |
2334 | // might be a potentially-overlapping subobject, since the tail padding might |
2335 | // be occupied by a different object. Otherwise, copying it is fine. |
2336 | TypeInfoChars TypeInfo; |
2337 | if (MayOverlap) |
2338 | TypeInfo = getContext().getTypeInfoDataSizeInChars(T: Ty); |
2339 | else |
2340 | TypeInfo = getContext().getTypeInfoInChars(T: Ty); |
2341 | |
2342 | llvm::Value *SizeVal = nullptr; |
2343 | if (TypeInfo.Width.isZero()) { |
2344 | // But note that getTypeInfo returns 0 for a VLA. |
2345 | if (auto *VAT = dyn_cast_or_null<VariableArrayType>( |
2346 | Val: getContext().getAsArrayType(T: Ty))) { |
2347 | QualType BaseEltTy; |
2348 | SizeVal = emitArrayLength(arrayType: VAT, baseType&: BaseEltTy, addr&: DestPtr); |
2349 | TypeInfo = getContext().getTypeInfoInChars(T: BaseEltTy); |
2350 | assert(!TypeInfo.Width.isZero()); |
2351 | SizeVal = Builder.CreateNUWMul( |
2352 | LHS: SizeVal, |
2353 | RHS: llvm::ConstantInt::get(Ty: SizeTy, V: TypeInfo.Width.getQuantity())); |
2354 | } |
2355 | } |
2356 | if (!SizeVal) { |
2357 | SizeVal = llvm::ConstantInt::get(Ty: SizeTy, V: TypeInfo.Width.getQuantity()); |
2358 | } |
2359 | |
2360 | // FIXME: If we have a volatile struct, the optimizer can remove what might |
2361 | // appear to be `extra' memory ops: |
2362 | // |
2363 | // volatile struct { int i; } a, b; |
2364 | // |
2365 | // int main() { |
2366 | // a = b; |
2367 | // a = b; |
2368 | // } |
2369 | // |
2370 | // we need to use a different call here. We use isVolatile to indicate when |
2371 | // either the source or the destination is volatile. |
2372 | |
2373 | DestPtr = DestPtr.withElementType(ElemTy: Int8Ty); |
2374 | SrcPtr = SrcPtr.withElementType(ElemTy: Int8Ty); |
2375 | |
2376 | // Don't do any of the memmove_collectable tests if GC isn't set. |
2377 | if (CGM.getLangOpts().getGC() == LangOptions::NonGC) { |
2378 | // fall through |
2379 | } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) { |
2380 | RecordDecl *Record = RecordTy->getDecl(); |
2381 | if (Record->hasObjectMember()) { |
2382 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF&: *this, DestPtr, SrcPtr, |
2383 | Size: SizeVal); |
2384 | return; |
2385 | } |
2386 | } else if (Ty->isArrayType()) { |
2387 | QualType BaseType = getContext().getBaseElementType(QT: Ty); |
2388 | if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) { |
2389 | if (RecordTy->getDecl()->hasObjectMember()) { |
2390 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF&: *this, DestPtr, SrcPtr, |
2391 | Size: SizeVal); |
2392 | return; |
2393 | } |
2394 | } |
2395 | } |
2396 | |
2397 | auto *Inst = Builder.CreateMemCpy(Dest: DestPtr, Src: SrcPtr, Size: SizeVal, IsVolatile: isVolatile); |
2398 | addInstToCurrentSourceAtom(KeyInstruction: Inst, Backup: nullptr); |
2399 | |
2400 | // Determine the metadata to describe the position of any padding in this |
2401 | // memcpy, as well as the TBAA tags for the members of the struct, in case |
2402 | // the optimizer wishes to expand it in to scalar memory operations. |
2403 | if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(QTy: Ty)) |
2404 | Inst->setMetadata(KindID: llvm::LLVMContext::MD_tbaa_struct, Node: TBAAStructTag); |
2405 | |
2406 | if (CGM.getCodeGenOpts().NewStructPathTBAA) { |
2407 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( |
2408 | DestInfo: Dest.getTBAAInfo(), SrcInfo: Src.getTBAAInfo()); |
2409 | CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo); |
2410 | } |
2411 | } |
2412 | |