1//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Constant Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCXXABI.h"
15#include "CGObjCRuntime.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "CodeGenModule.h"
19#include "ConstantEmitter.h"
20#include "TargetInfo.h"
21#include "clang/AST/APValue.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/RecordLayout.h"
25#include "clang/AST/StmtVisitor.h"
26#include "clang/Basic/Builtins.h"
27#include "llvm/ADT/STLExtras.h"
28#include "llvm/ADT/Sequence.h"
29#include "llvm/Analysis/ConstantFolding.h"
30#include "llvm/IR/Constants.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/GlobalVariable.h"
34#include "llvm/Support/SipHash.h"
35#include <optional>
36using namespace clang;
37using namespace CodeGen;
38
39//===----------------------------------------------------------------------===//
40// ConstantAggregateBuilder
41//===----------------------------------------------------------------------===//
42
43namespace {
44class ConstExprEmitter;
45
46llvm::Constant *getPadding(const CodeGenModule &CGM, CharUnits PadSize) {
47 llvm::Type *Ty = CGM.CharTy;
48 if (PadSize > CharUnits::One())
49 Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: PadSize.getQuantity());
50 if (CGM.shouldZeroInitPadding()) {
51 return llvm::Constant::getNullValue(Ty);
52 }
53 return llvm::UndefValue::get(T: Ty);
54}
55
56struct ConstantAggregateBuilderUtils {
57 CodeGenModule &CGM;
58
59 ConstantAggregateBuilderUtils(CodeGenModule &CGM) : CGM(CGM) {}
60
61 CharUnits getAlignment(const llvm::Constant *C) const {
62 return CharUnits::fromQuantity(
63 Quantity: CGM.getDataLayout().getABITypeAlign(Ty: C->getType()));
64 }
65
66 CharUnits getSize(llvm::Type *Ty) const {
67 return CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getTypeAllocSize(Ty));
68 }
69
70 CharUnits getSize(const llvm::Constant *C) const {
71 return getSize(Ty: C->getType());
72 }
73
74 llvm::Constant *getPadding(CharUnits PadSize) const {
75 return ::getPadding(CGM, PadSize);
76 }
77
78 llvm::Constant *getZeroes(CharUnits ZeroSize) const {
79 llvm::Type *Ty = llvm::ArrayType::get(ElementType: CGM.CharTy, NumElements: ZeroSize.getQuantity());
80 return llvm::ConstantAggregateZero::get(Ty);
81 }
82};
83
/// Incremental builder for an llvm::Constant* holding a struct or array
/// constant.
class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils {
  /// The elements of the constant. These two arrays must have the same size;
  /// Offsets[i] describes the offset of Elems[i] within the constant. The
  /// elements are kept in increasing offset order, and we ensure that there
  /// is no overlap: Offsets[i+1] >= Offsets[i] + getSize(Elems[i]).
  ///
  /// This may contain explicit padding elements (in order to create a
  /// natural layout), but need not. Gaps between elements are implicitly
  /// considered to be filled with undef.
  llvm::SmallVector<llvm::Constant*, 32> Elems;
  llvm::SmallVector<CharUnits, 32> Offsets;

  /// The size of the constant (the maximum end offset of any added element).
  /// May be larger than the end of Elems.back() if we split the last element
  /// and removed some trailing undefs.
  CharUnits Size = CharUnits::Zero();

  /// This is true only if laying out Elems in order as the elements of a
  /// non-packed LLVM struct will give the correct layout.
  bool NaturalLayout = true;

  /// Decompose the element at \p Index into smaller pieces so that \p Hint
  /// can become an element boundary. Returns false if it can't be split.
  bool split(size_t Index, CharUnits Hint);
  /// Find the element index at which \p Pos begins, splitting an overlapping
  /// element if necessary; std::nullopt if that split is impossible.
  std::optional<size_t> splitAt(CharUnits Pos);

  /// Shared worker for build() and condense(): emit \p Elems (at \p Offsets,
  /// rebased against \p StartOffset) as one constant of roughly \p DesiredTy.
  static llvm::Constant *buildFrom(CodeGenModule &CGM,
                                   ArrayRef<llvm::Constant *> Elems,
                                   ArrayRef<CharUnits> Offsets,
                                   CharUnits StartOffset, CharUnits Size,
                                   bool NaturalLayout, llvm::Type *DesiredTy,
                                   bool AllowOversized);

public:
  ConstantAggregateBuilder(CodeGenModule &CGM)
      : ConstantAggregateBuilderUtils(CGM) {}

  /// Update or overwrite the value starting at \p Offset with \c C.
  ///
  /// \param AllowOverwrite If \c true, this constant might overwrite (part of)
  ///        a constant that has already been added. This flag is only used to
  ///        detect bugs.
  bool add(llvm::Constant *C, CharUnits Offset, bool AllowOverwrite);

  /// Update or overwrite the bits starting at \p OffsetInBits with \p Bits.
  bool addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite);

  /// Attempt to condense the value starting at \p Offset to a constant of type
  /// \p DesiredTy.
  void condense(CharUnits Offset, llvm::Type *DesiredTy);

  /// Produce a constant representing the entire accumulated value, ideally of
  /// the specified type. If \p AllowOversized, the constant might be larger
  /// than implied by \p DesiredTy (eg, if there is a flexible array member).
  /// Otherwise, the constant will be of exactly the same size as \p DesiredTy
  /// even if we can't represent it as that type.
  llvm::Constant *build(llvm::Type *DesiredTy, bool AllowOversized) const {
    return buildFrom(CGM, Elems, Offsets, StartOffset: CharUnits::Zero(), Size,
                     NaturalLayout, DesiredTy, AllowOversized);
  }
};
145
/// Replace the elements of \p C in the index range [BeginOff, EndOff) with
/// the values of \p Vals. The replacement may have a different length than
/// the range being replaced, shifting the tail of the container.
template<typename Container, typename Range = std::initializer_list<
                                 typename Container::value_type>>
static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) {
  assert(BeginOff <= EndOff && "invalid replacement range");
  llvm::replace(C, C.begin() + BeginOff, C.begin() + EndOff, Vals);
}
152
/// Add \p C at \p Offset, either by appending it past the current end or by
/// splicing it over existing elements. Returns false if an overlapping
/// element could not be split to make room.
bool ConstantAggregateBuilder::add(llvm::Constant *C, CharUnits Offset,
                                   bool AllowOverwrite) {
  // Common case: appending to a layout.
  if (Offset >= Size) {
    CharUnits Align = getAlignment(C);
    CharUnits AlignedSize = Size.alignTo(Align);
    if (AlignedSize > Offset || Offset.alignTo(Align) != Offset)
      // Natural alignment either overshoots the requested position or the
      // position itself is misaligned, so a non-packed struct can no longer
      // reproduce these offsets as-is.
      NaturalLayout = false;
    else if (AlignedSize < Offset) {
      // Natural alignment leaves a gap before Offset; fill it with explicit
      // padding so the new element still lands at the right place.
      Elems.push_back(Elt: getPadding(PadSize: Offset - Size));
      Offsets.push_back(Elt: Size);
    }
    Elems.push_back(Elt: C);
    Offsets.push_back(Elt: Offset);
    Size = Offset + getSize(C);
    return true;
  }

  // Uncommon case: constant overlaps what we've already created.
  std::optional<size_t> FirstElemToReplace = splitAt(Pos: Offset);
  if (!FirstElemToReplace)
    return false;

  CharUnits CSize = getSize(C);
  std::optional<size_t> LastElemToReplace = splitAt(Pos: Offset + CSize);
  if (!LastElemToReplace)
    return false;

  assert((FirstElemToReplace == LastElemToReplace || AllowOverwrite) &&
         "unexpectedly overwriting field");

  // Drop the elements fully covered by [Offset, Offset + CSize) and install
  // C in their place.
  replace(C&: Elems, BeginOff: *FirstElemToReplace, EndOff: *LastElemToReplace, Vals: {C});
  replace(C&: Offsets, BeginOff: *FirstElemToReplace, EndOff: *LastElemToReplace, Vals: {Offset});
  Size = std::max(a: Size, b: Offset + CSize);
  NaturalLayout = false;
  return true;
}
190
/// Splice \p Bits into the constant starting at bit offset \p OffsetInBits,
/// one char at a time, merging with any integer constant already occupying a
/// partially-covered char. Returns false if a partial char could not be
/// isolated or its existing occupant is not a constant integer.
bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
                                       bool AllowOverwrite) {
  const ASTContext &Context = CGM.getContext();
  const uint64_t CharWidth = CGM.getContext().getCharWidth();

  // Offset of where we want the first bit to go within the bits of the
  // current char.
  unsigned OffsetWithinChar = OffsetInBits % CharWidth;

  // We split bit-fields up into individual bytes. Walk over the bytes and
  // update them.
  for (CharUnits OffsetInChars =
           Context.toCharUnitsFromBits(BitSize: OffsetInBits - OffsetWithinChar);
       /**/; ++OffsetInChars) {
    // Number of bits we want to fill in this char.
    unsigned WantedBits =
        std::min(a: (uint64_t)Bits.getBitWidth(), b: CharWidth - OffsetWithinChar);

    // Get a char containing the bits we want in the right places. The other
    // bits have unspecified values.
    llvm::APInt BitsThisChar = Bits;
    if (BitsThisChar.getBitWidth() < CharWidth)
      BitsThisChar = BitsThisChar.zext(width: CharWidth);
    if (CGM.getDataLayout().isBigEndian()) {
      // Figure out how much to shift by. We may need to left-shift if we have
      // less than one byte of Bits left.
      int Shift = Bits.getBitWidth() - CharWidth + OffsetWithinChar;
      if (Shift > 0)
        BitsThisChar.lshrInPlace(ShiftAmt: Shift);
      else if (Shift < 0)
        BitsThisChar = BitsThisChar.shl(shiftAmt: -Shift);
    } else {
      // Little-endian: the low bits of Bits land at OffsetWithinChar.
      BitsThisChar = BitsThisChar.shl(shiftAmt: OffsetWithinChar);
    }
    if (BitsThisChar.getBitWidth() > CharWidth)
      BitsThisChar = BitsThisChar.trunc(width: CharWidth);

    if (WantedBits == CharWidth) {
      // Got a full byte: just add it directly.
      add(C: llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: BitsThisChar),
          Offset: OffsetInChars, AllowOverwrite);
    } else {
      // Partial byte: update the existing integer if there is one. If we
      // can't split out a 1-CharUnit range to update, then we can't add
      // these bits and fail the entire constant emission.
      std::optional<size_t> FirstElemToUpdate = splitAt(Pos: OffsetInChars);
      if (!FirstElemToUpdate)
        return false;
      std::optional<size_t> LastElemToUpdate =
          splitAt(Pos: OffsetInChars + CharUnits::One());
      if (!LastElemToUpdate)
        return false;
      assert(*LastElemToUpdate - *FirstElemToUpdate < 2 &&
             "should have at most one element covering one byte");

      // Figure out which bits we want and discard the rest.
      llvm::APInt UpdateMask(CharWidth, 0);
      if (CGM.getDataLayout().isBigEndian())
        UpdateMask.setBits(loBit: CharWidth - OffsetWithinChar - WantedBits,
                           hiBit: CharWidth - OffsetWithinChar);
      else
        UpdateMask.setBits(loBit: OffsetWithinChar, hiBit: OffsetWithinChar + WantedBits);
      BitsThisChar &= UpdateMask;

      if (*FirstElemToUpdate == *LastElemToUpdate ||
          Elems[*FirstElemToUpdate]->isNullValue() ||
          isa<llvm::UndefValue>(Val: Elems[*FirstElemToUpdate])) {
        // All existing bits are either zero or undef.
        add(C: llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: BitsThisChar),
            Offset: OffsetInChars, /*AllowOverwrite*/ true);
      } else {
        llvm::Constant *&ToUpdate = Elems[*FirstElemToUpdate];
        // In order to perform a partial update, we need the existing bitwise
        // value, which we can only extract for a constant int.
        auto *CI = dyn_cast<llvm::ConstantInt>(Val: ToUpdate);
        if (!CI)
          return false;
        // Because this is a 1-CharUnit range, the constant occupying it must
        // be exactly one CharUnit wide.
        assert(CI->getBitWidth() == CharWidth && "splitAt failed");
        assert((!(CI->getValue() & UpdateMask) || AllowOverwrite) &&
               "unexpectedly overwriting bitfield");
        // Keep the bits outside UpdateMask and splice in the new ones.
        BitsThisChar |= (CI->getValue() & ~UpdateMask);
        ToUpdate = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: BitsThisChar);
      }
    }

    // Stop if we've added all the bits.
    if (WantedBits == Bits.getBitWidth())
      break;

    // Remove the consumed bits from Bits.
    if (!CGM.getDataLayout().isBigEndian())
      Bits.lshrInPlace(ShiftAmt: WantedBits);
    Bits = Bits.trunc(width: Bits.getBitWidth() - WantedBits);

    // The remaining bits go at the start of the following bytes.
    OffsetWithinChar = 0;
  }

  return true;
}
293
/// Returns a position within Elems and Offsets such that all elements
/// before the returned index end before Pos and all elements at or after
/// the returned index begin at or after Pos. Splits elements as necessary
/// to ensure this. Returns std::nullopt if we find something we can't split.
std::optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
  // Positions at or past the end trivially come after every element.
  if (Pos >= Size)
    return Offsets.size();

  while (true) {
    // First element strictly after Pos; everything before it starts <= Pos.
    auto FirstAfterPos = llvm::upper_bound(Range&: Offsets, Value&: Pos);
    if (FirstAfterPos == Offsets.begin())
      return 0;

    // If we already have an element starting at Pos, we're done.
    size_t LastAtOrBeforePosIndex = FirstAfterPos - Offsets.begin() - 1;
    if (Offsets[LastAtOrBeforePosIndex] == Pos)
      return LastAtOrBeforePosIndex;

    // We found an element starting before Pos. Check for overlap.
    if (Offsets[LastAtOrBeforePosIndex] +
        getSize(C: Elems[LastAtOrBeforePosIndex]) <= Pos)
      return LastAtOrBeforePosIndex + 1;

    // Try to decompose it into smaller constants. On success, loop around to
    // re-search; the split may have produced an element starting at Pos.
    if (!split(Index: LastAtOrBeforePosIndex, Hint: Pos))
      return std::nullopt;
  }
}
322
/// Split the constant at index Index, if possible. Return true if we did.
/// Hint indicates the location at which we'd like to split, but may be
/// ignored.
bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
  // Any split invalidates the one-element-per-field correspondence that a
  // natural struct layout relies on.
  NaturalLayout = false;
  llvm::Constant *C = Elems[Index];
  CharUnits Offset = Offsets[Index];

  if (auto *CA = dyn_cast<llvm::ConstantAggregate>(Val: C)) {
    // Expand the sequence into its contained elements.
    // FIXME: This assumes vector elements are byte-sized.
    replace(C&: Elems, BeginOff: Index, EndOff: Index + 1,
            Vals: llvm::map_range(C: llvm::seq(Begin: 0u, End: CA->getNumOperands()),
                            F: [&](unsigned Op) { return CA->getOperand(i_nocapture: Op); }));
    if (isa<llvm::ArrayType>(Val: CA->getType()) ||
        isa<llvm::VectorType>(Val: CA->getType())) {
      // Array or vector: elements are evenly spaced by the element size.
      llvm::Type *ElemTy =
          llvm::GetElementPtrInst::getTypeAtIndex(Ty: CA->getType(), Idx: (uint64_t)0);
      CharUnits ElemSize = getSize(Ty: ElemTy);
      replace(
          C&: Offsets, BeginOff: Index, EndOff: Index + 1,
          Vals: llvm::map_range(C: llvm::seq(Begin: 0u, End: CA->getNumOperands()),
                          F: [&](unsigned Op) { return Offset + Op * ElemSize; }));
    } else {
      // Must be a struct: element offsets come from the struct layout.
      auto *ST = cast<llvm::StructType>(Val: CA->getType());
      const llvm::StructLayout *Layout =
          CGM.getDataLayout().getStructLayout(Ty: ST);
      replace(C&: Offsets, BeginOff: Index, EndOff: Index + 1,
              Vals: llvm::map_range(
                  C: llvm::seq(Begin: 0u, End: CA->getNumOperands()), F: [&](unsigned Op) {
                    return Offset + CharUnits::fromQuantity(
                                        Quantity: Layout->getElementOffset(Idx: Op));
                  }));
    }
    return true;
  }

  if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(Val: C)) {
    // Expand the sequence into its contained elements.
    // FIXME: This assumes vector elements are byte-sized.
    // FIXME: If possible, split into two ConstantDataSequentials at Hint.
    CharUnits ElemSize = getSize(Ty: CDS->getElementType());
    replace(C&: Elems, BeginOff: Index, EndOff: Index + 1,
            Vals: llvm::map_range(C: llvm::seq(Begin: uint64_t(0u), End: CDS->getNumElements()),
                            F: [&](uint64_t Elem) {
                              return CDS->getElementAsConstant(i: Elem);
                            }));
    replace(C&: Offsets, BeginOff: Index, EndOff: Index + 1,
            Vals: llvm::map_range(
                C: llvm::seq(Begin: uint64_t(0u), End: CDS->getNumElements()),
                F: [&](uint64_t Elem) { return Offset + Elem * ElemSize; }));
    return true;
  }

  if (isa<llvm::ConstantAggregateZero>(Val: C)) {
    // Split into two zeros at the hinted offset.
    CharUnits ElemSize = getSize(C);
    assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
    replace(C&: Elems, BeginOff: Index, EndOff: Index + 1,
            Vals: {getZeroes(ZeroSize: Hint - Offset), getZeroes(ZeroSize: Offset + ElemSize - Hint)});
    replace(C&: Offsets, BeginOff: Index, EndOff: Index + 1, Vals: {Offset, Hint});
    return true;
  }

  if (isa<llvm::UndefValue>(Val: C)) {
    // Drop undef; it doesn't contribute to the final layout.
    replace(C&: Elems, BeginOff: Index, EndOff: Index + 1, Vals: {});
    replace(C&: Offsets, BeginOff: Index, EndOff: Index + 1, Vals: {});
    return true;
  }

  // FIXME: We could split a ConstantInt if the need ever arose.
  // We don't need to do this to handle bit-fields because we always eagerly
  // split them into 1-byte chunks.

  return false;
}
402
/// Forward declaration for use by buildFrom below: emit an array constant
/// from \p Elements, using \p Filler for unlisted trailing elements.
/// (The definition presumably appears later in this file — outside this view.)
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
                  llvm::Type *CommonElementType, uint64_t ArrayBound,
                  SmallVectorImpl<llvm::Constant *> &Elements,
                  llvm::Constant *Filler);
408
/// Emit \p Elems (positioned at \p Offsets, interpreted relative to
/// \p StartOffset) as a single constant of \p Size chars, preferring
/// \p DesiredTy: first as an array if DesiredTy is an array type, otherwise
/// as a (possibly packed) struct with explicit padding where needed.
llvm::Constant *ConstantAggregateBuilder::buildFrom(
    CodeGenModule &CGM, ArrayRef<llvm::Constant *> Elems,
    ArrayRef<CharUnits> Offsets, CharUnits StartOffset, CharUnits Size,
    bool NaturalLayout, llvm::Type *DesiredTy, bool AllowOversized) {
  ConstantAggregateBuilderUtils Utils(CGM);

  // Nothing was added: the whole value is unspecified.
  if (Elems.empty())
    return llvm::UndefValue::get(T: DesiredTy);

  // Offset of element I relative to the start of the constant being built.
  auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; };

  // If we want an array type, see if all the elements are the same type and
  // appropriately spaced.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(Val: DesiredTy)) {
    assert(!AllowOversized && "oversized array emission not supported");

    bool CanEmitArray = true;
    llvm::Type *CommonType = Elems[0]->getType();
    llvm::Constant *Filler = llvm::Constant::getNullValue(Ty: CommonType);
    CharUnits ElemSize = Utils.getSize(Ty: ATy->getElementType());
    SmallVector<llvm::Constant*, 32> ArrayElements;
    for (size_t I = 0; I != Elems.size(); ++I) {
      // Skip zeroes; we'll use a zero value as our array filler.
      if (Elems[I]->isNullValue())
        continue;

      // All remaining elements must be the same type.
      if (Elems[I]->getType() != CommonType ||
          !Offset(I).isMultipleOf(N: ElemSize)) {
        CanEmitArray = false;
        break;
      }
      // Fill any gap since the previous non-zero element with the filler,
      // then place this element at its index.
      ArrayElements.resize(N: Offset(I) / ElemSize + 1, NV: Filler);
      ArrayElements.back() = Elems[I];
    }

    if (CanEmitArray) {
      return EmitArrayConstant(CGM, DesiredType: ATy, CommonElementType: CommonType, ArrayBound: ATy->getNumElements(),
                               Elements&: ArrayElements, Filler);
    }

    // Can't emit as an array, carry on to emit as a struct.
  }

  // The size of the constant we plan to generate. This is usually just
  // the size of the initialized type, but in AllowOversized mode (i.e.
  // flexible array init), it can be larger.
  CharUnits DesiredSize = Utils.getSize(Ty: DesiredTy);
  if (Size > DesiredSize) {
    assert(AllowOversized && "Elems are oversized");
    DesiredSize = Size;
  }

  // The natural alignment of an unpacked LLVM struct with the given elements.
  CharUnits Align = CharUnits::One();
  for (llvm::Constant *C : Elems)
    Align = std::max(a: Align, b: Utils.getAlignment(C));

  // The natural size of an unpacked LLVM struct with the given elements.
  CharUnits AlignedSize = Size.alignTo(Align);

  bool Packed = false;
  ArrayRef<llvm::Constant*> UnpackedElems = Elems;
  llvm::SmallVector<llvm::Constant*, 32> UnpackedElemStorage;
  if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) {
    // The natural layout would be too big; force use of a packed layout.
    NaturalLayout = false;
    Packed = true;
  } else if (DesiredSize > AlignedSize) {
    // The natural layout would be too small. Add padding to fix it. (This
    // is ignored if we choose a packed layout.)
    UnpackedElemStorage.assign(in_start: Elems.begin(), in_end: Elems.end());
    UnpackedElemStorage.push_back(Elt: Utils.getPadding(PadSize: DesiredSize - Size));
    UnpackedElems = UnpackedElemStorage;
  }

  // If we don't have a natural layout, insert padding as necessary.
  // As we go, double-check to see if we can actually just emit Elems
  // as a non-packed struct and do so opportunistically if possible.
  llvm::SmallVector<llvm::Constant*, 32> PackedElems;
  if (!NaturalLayout) {
    CharUnits SizeSoFar = CharUnits::Zero();
    for (size_t I = 0; I != Elems.size(); ++I) {
      CharUnits Align = Utils.getAlignment(C: Elems[I]);
      CharUnits NaturalOffset = SizeSoFar.alignTo(Align);
      CharUnits DesiredOffset = Offset(I);
      assert(DesiredOffset >= SizeSoFar && "elements out of order");

      if (DesiredOffset != NaturalOffset)
        Packed = true;
      if (DesiredOffset != SizeSoFar)
        PackedElems.push_back(Elt: Utils.getPadding(PadSize: DesiredOffset - SizeSoFar));
      PackedElems.push_back(Elt: Elems[I]);
      SizeSoFar = DesiredOffset + Utils.getSize(C: Elems[I]);
    }
    // If we're using the packed layout, pad it out to the desired size if
    // necessary.
    if (Packed) {
      assert(SizeSoFar <= DesiredSize &&
             "requested size is too small for contents");
      if (SizeSoFar < DesiredSize)
        PackedElems.push_back(Elt: Utils.getPadding(PadSize: DesiredSize - SizeSoFar));
    }
  }

  llvm::StructType *STy = llvm::ConstantStruct::getTypeForElements(
      Ctx&: CGM.getLLVMContext(), V: Packed ? PackedElems : UnpackedElems, Packed);

  // Pick the type to use. If the type is layout identical to the desired
  // type then use it, otherwise use whatever the builder produced for us.
  if (llvm::StructType *DesiredSTy = dyn_cast<llvm::StructType>(Val: DesiredTy)) {
    if (DesiredSTy->isLayoutIdentical(Other: STy))
      STy = DesiredSTy;
  }

  return llvm::ConstantStruct::get(T: STy, V: Packed ? PackedElems : UnpackedElems);
}
526
/// Collapse all elements in the range [Offset, Offset + size of DesiredTy)
/// into a single constant, ideally of type DesiredTy. A no-op if the range
/// cannot be isolated or is already a single suitable element.
void ConstantAggregateBuilder::condense(CharUnits Offset,
                                        llvm::Type *DesiredTy) {
  CharUnits Size = getSize(Ty: DesiredTy);

  std::optional<size_t> FirstElemToReplace = splitAt(Pos: Offset);
  if (!FirstElemToReplace)
    return;
  size_t First = *FirstElemToReplace;

  std::optional<size_t> LastElemToReplace = splitAt(Pos: Offset + Size);
  if (!LastElemToReplace)
    return;
  size_t Last = *LastElemToReplace;

  size_t Length = Last - First;
  if (Length == 0)
    return;

  if (Length == 1 && Offsets[First] == Offset &&
      getSize(C: Elems[First]) == Size) {
    // Re-wrap single element structs if necessary. Otherwise, leave any single
    // element constant of the right size alone even if it has the wrong type.
    auto *STy = dyn_cast<llvm::StructType>(Val: DesiredTy);
    if (STy && STy->getNumElements() == 1 &&
        STy->getElementType(N: 0) == Elems[First]->getType())
      Elems[First] = llvm::ConstantStruct::get(T: STy, Vs: Elems[First]);
    return;
  }

  // Build a single constant covering the range and splice it in place of the
  // elements it subsumes.
  llvm::Constant *Replacement = buildFrom(
      CGM, Elems: ArrayRef(Elems).slice(N: First, M: Length),
      Offsets: ArrayRef(Offsets).slice(N: First, M: Length), StartOffset: Offset, Size: getSize(Ty: DesiredTy),
      /*known to have natural layout=*/NaturalLayout: false, DesiredTy, AllowOversized: false);
  replace(C&: Elems, BeginOff: First, EndOff: Last, Vals: {Replacement});
  replace(C&: Offsets, BeginOff: First, EndOff: Last, Vals: {Offset});
}
563
564//===----------------------------------------------------------------------===//
565// ConstStructBuilder
566//===----------------------------------------------------------------------===//
567
/// Builds a struct constant into a ConstantAggregateBuilder, driven either
/// by a syntactic InitListExpr or by an evaluated APValue.
class ConstStructBuilder {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  ConstantAggregateBuilder &Builder;
  // Offset of this struct within the overall constant being built (nonzero
  // when emitting a nested struct or a designated-init update).
  CharUnits StartOffset;

public:
  /// Emit a constant for the struct initializer list \p ILE.
  static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
                                     const InitListExpr *ILE,
                                     QualType StructTy);
  /// Emit a constant for the already-evaluated struct value \p Value.
  static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
                                     const APValue &Value, QualType ValTy);
  /// Overwrite parts of the constant in \p Const at \p Offset according to
  /// the designated-initializer update list \p Updater.
  static bool UpdateStruct(ConstantEmitter &Emitter,
                           ConstantAggregateBuilder &Const, CharUnits Offset,
                           const InitListExpr *Updater);

private:
  ConstStructBuilder(ConstantEmitter &Emitter,
                     ConstantAggregateBuilder &Builder, CharUnits StartOffset)
      : CGM(Emitter.CGM), Emitter(Emitter), Builder(Builder),
        StartOffset(StartOffset) {}

  /// Append a non-bitfield field's constant at its bit offset in the layout.
  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr, bool AllowOverwrite = false);

  /// Append a constant at a char-unit offset relative to StartOffset.
  bool AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst,
                   bool AllowOverwrite = false);

  /// Append a bitfield's value, splicing its bits into the layout.
  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::Constant *InitExpr, bool AllowOverwrite = false);

  bool Build(const InitListExpr *ILE, bool AllowOverwrite);
  bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
             const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
  /// Zero-fill padding preceding \p Field (used when the module requires
  /// zero-initialized padding).
  bool DoZeroInitPadding(const ASTRecordLayout &Layout, unsigned FieldNo,
                         const FieldDecl &Field, bool AllowOverwrite,
                         CharUnits &SizeSoFar, bool &ZeroFieldSize);
  /// Zero-fill trailing padding after the last emitted field.
  bool DoZeroInitPadding(const ASTRecordLayout &Layout, bool AllowOverwrite,
                         CharUnits SizeSoFar);
  llvm::Constant *Finalize(QualType Ty);
};
609
610bool ConstStructBuilder::AppendField(
611 const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitCst,
612 bool AllowOverwrite) {
613 const ASTContext &Context = CGM.getContext();
614
615 CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(BitSize: FieldOffset);
616
617 return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite);
618}
619
620bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
621 llvm::Constant *InitCst,
622 bool AllowOverwrite) {
623 return Builder.add(C: InitCst, Offset: StartOffset + FieldOffsetInChars, AllowOverwrite);
624}
625
/// Append a bitfield's value at its bit offset in the layout. Returns false
/// if \p C cannot be reduced to a constant integer (in which case the
/// initializer requires run-time code).
bool ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset, llvm::Constant *C,
                                        bool AllowOverwrite) {

  llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(Val: C);
  if (!CI) {
    // Constants for long _BitInt types are sometimes split into individual
    // bytes. Try to fold these back into an integer constant. If that doesn't
    // work out, then we are trying to initialize a bitfield with a non-trivial
    // constant, this must require run-time code.
    llvm::Type *LoadType =
        CGM.getTypes().convertTypeForLoadStore(T: Field->getType(), LLVMTy: C->getType());
    llvm::Constant *FoldedConstant = llvm::ConstantFoldLoadFromConst(
        C, Ty: LoadType, Offset: llvm::APInt::getZero(numBits: 32), DL: CGM.getDataLayout());
    CI = dyn_cast_if_present<llvm::ConstantInt>(Val: FoldedConstant);
    if (!CI)
      return false;
  }

  const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: Field);
  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (Info.Size > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(width: Info.Size);

  // Truncate the size of FieldValue to the bit field size.
  if (Info.Size < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(width: Info.Size);

  // Splice the value in bit-by-bit at the field's absolute bit offset.
  return Builder.addBits(Bits: FieldValue,
                         OffsetInBits: CGM.getContext().toBits(CharSize: StartOffset) + FieldOffset,
                         AllowOverwrite);
}
665
/// Apply a designated-initializer update list \p Updater on top of the
/// constant already accumulated in \p Const, starting at \p Offset.
/// Recurses for records and arrays; returns false if any piece cannot be
/// emitted as a constant.
static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
                                      ConstantAggregateBuilder &Const,
                                      CharUnits Offset, QualType Type,
                                      const InitListExpr *Updater) {
  if (Type->isRecordType())
    return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater);

  // Only constant-size arrays can be updated element by element.
  auto CAT = Emitter.CGM.getContext().getAsConstantArrayType(T: Type);
  if (!CAT)
    return false;
  QualType ElemType = CAT->getElementType();
  CharUnits ElemSize = Emitter.CGM.getContext().getTypeSizeInChars(T: ElemType);
  llvm::Type *ElemTy = Emitter.CGM.getTypes().ConvertTypeForMem(T: ElemType);

  // The array filler (if any, and not NoInitExpr) is emitted once and reused
  // for every element beyond the explicit initializers.
  llvm::Constant *FillC = nullptr;
  if (const Expr *Filler = Updater->getArrayFiller()) {
    if (!isa<NoInitExpr>(Val: Filler)) {
      FillC = Emitter.tryEmitAbstractForMemory(E: Filler, T: ElemType);
      if (!FillC)
        return false;
    }
  }

  // With a filler we must touch every element; otherwise only the explicit
  // initializers.
  unsigned NumElementsToUpdate =
      FillC ? CAT->getZExtSize() : Updater->getNumInits();
  for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
    const Expr *Init = nullptr;
    if (I < Updater->getNumInits())
      Init = Updater->getInit(Init: I);

    if (!Init && FillC) {
      if (!Const.add(C: FillC, Offset, AllowOverwrite: true))
        return false;
    } else if (!Init || isa<NoInitExpr>(Val: Init)) {
      // NoInitExpr: leave whatever value is already there untouched.
      continue;
    } else if (const auto *ChildILE = dyn_cast<InitListExpr>(Val: Init)) {
      if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, Type: ElemType,
                                     Updater: ChildILE))
        return false;
      // Attempt to reduce the array element to a single constant if necessary.
      Const.condense(Offset, DesiredTy: ElemTy);
    } else {
      llvm::Constant *Val = Emitter.tryEmitPrivateForMemory(E: Init, T: ElemType);
      if (!Const.add(C: Val, Offset, AllowOverwrite: true))
        return false;
    }
  }

  return true;
}
716
/// Build the struct constant from the fields of \p ILE. Returns false if any
/// field's initializer cannot be emitted as a constant.
bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
  auto *RD = ILE->getType()->castAsRecordDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D: RD);

  // Starts at -1 because it is incremented at the top of the loop body; that
  // keeps it in sync with the field across every 'continue' path below.
  unsigned FieldNo = -1;
  unsigned ElementNo = 0;

  // Bail out if we have base classes. We could support these, but they only
  // arise in C++1z where we will have already constant folded most interesting
  // cases. FIXME: There are still a few more cases we can handle this way.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD))
    if (CXXRD->getNumBases())
      return false;

  const bool ZeroInitPadding = CGM.shouldZeroInitPadding();
  bool ZeroFieldSize = false;
  CharUnits SizeSoFar = CharUnits::Zero();

  for (FieldDecl *Field : RD->fields()) {
    ++FieldNo;

    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() &&
        !declaresSameEntity(D1: ILE->getInitializedFieldInUnion(), D2: Field))
      continue;

    // Don't emit anonymous bitfields.
    if (Field->isUnnamedBitField())
      continue;

    // Get the initializer. A struct can include fields without initializers,
    // we just use explicit null values for them.
    const Expr *Init = nullptr;
    if (ElementNo < ILE->getNumInits())
      Init = ILE->getInit(Init: ElementNo++);
    if (isa_and_nonnull<NoInitExpr>(Val: Init)) {
      // Explicitly uninitialized field: skip it, but still account for (and
      // zero) the padding when zero-init-padding is in effect.
      if (ZeroInitPadding &&
          !DoZeroInitPadding(Layout, FieldNo, Field: *Field, AllowOverwrite, SizeSoFar,
                             ZeroFieldSize))
        return false;
      continue;
    }

    // Zero-sized fields are not emitted, but their initializers may still
    // prevent emission of this struct as a constant.
    if (isEmptyFieldForLayout(Context: CGM.getContext(), FD: Field)) {
      if (Init && Init->HasSideEffects(Ctx: CGM.getContext()))
        return false;
      continue;
    }

    if (ZeroInitPadding &&
        !DoZeroInitPadding(Layout, FieldNo, Field: *Field, AllowOverwrite, SizeSoFar,
                           ZeroFieldSize))
      return false;

    // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
    // represents additional overwriting of our current constant value, and not
    // a new constant to emit independently.
    if (AllowOverwrite &&
        (Field->getType()->isArrayType() || Field->getType()->isRecordType())) {
      if (auto *SubILE = dyn_cast<InitListExpr>(Val: Init)) {
        CharUnits Offset = CGM.getContext().toCharUnitsFromBits(
            BitSize: Layout.getFieldOffset(FieldNo));
        if (!EmitDesignatedInitUpdater(Emitter, Const&: Builder, Offset: StartOffset + Offset,
                                       Type: Field->getType(), Updater: SubILE))
          return false;
        // If we split apart the field's value, try to collapse it down to a
        // single value now.
        Builder.condense(Offset: StartOffset + Offset,
                         DesiredTy: CGM.getTypes().ConvertTypeForMem(T: Field->getType()));
        continue;
      }
    }

    // No initializer means the field gets an explicit null value.
    llvm::Constant *EltInit =
        Init ? Emitter.tryEmitPrivateForMemory(E: Init, T: Field->getType())
             : Emitter.emitNullForMemory(T: Field->getType());
    if (!EltInit)
      return false;

    if (ZeroInitPadding && ZeroFieldSize)
      SizeSoFar += CharUnits::fromQuantity(
          Quantity: CGM.getDataLayout().getTypeAllocSize(Ty: EltInit->getType()));

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(Field, FieldOffset: Layout.getFieldOffset(FieldNo), InitCst: EltInit,
                       AllowOverwrite))
        return false;
      // After emitting a non-empty field with [[no_unique_address]], we may
      // need to overwrite its tail padding.
      if (Field->hasAttr<NoUniqueAddressAttr>())
        AllowOverwrite = true;
    } else {
      // Otherwise we have a bitfield.
      if (!AppendBitField(Field, FieldOffset: Layout.getFieldOffset(FieldNo), C: EltInit,
                          AllowOverwrite))
        return false;
    }
  }

  // Zero any trailing padding after the last emitted field, if requested.
  if (ZeroInitPadding && !DoZeroInitPadding(Layout, AllowOverwrite, SizeSoFar))
    return false;

  return true;
}
824
namespace {
/// Helper used while emitting a constant C++ object: pairs a direct,
/// non-virtual base class with its layout offset and its index in the
/// APValue's base list, so that bases can be visited in address order.
struct BaseInfo {
  BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
    : Decl(Decl), Offset(Offset), Index(Index) {
  }

  const CXXRecordDecl *Decl; // The base class itself.
  CharUnits Offset;          // Offset of this base within the complete object.
  unsigned Index;            // Index into APValue::getStructBase().

  // Sort key: base-class offset, so a stable sort yields address order.
  bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
};
}
838
/// Emit a constant for the already-evaluated record value \p Val at
/// \p Offset within the aggregate being built. For C++ classes this first
/// installs a vtable pointer (when the class has its own vf-pointer) and
/// recursively emits all direct non-virtual bases in address order, then
/// emits the fields. Returns false if any piece cannot be represented as a
/// constant.
bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
                               bool IsPrimaryBase,
                               const CXXRecordDecl *VTableClass,
                               CharUnits Offset) {
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D: RD);

  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // Add a vtable pointer, if we need one and it hasn't already been added.
    if (Layout.hasOwnVFPtr()) {
      llvm::Constant *VTableAddressPoint =
          CGM.getCXXABI().getVTableAddressPoint(Base: BaseSubobject(CD, Offset),
                                                VTableClass);
      // If vtable pointers are signed for this class, wrap the address point
      // in a signed-pointer constant; give up if that cannot be done.
      if (auto Authentication = CGM.getVTablePointerAuthentication(thisClass: CD)) {
        VTableAddressPoint = Emitter.tryEmitConstantSignedPointer(
            Ptr: VTableAddressPoint, Auth: *Authentication);
        if (!VTableAddressPoint)
          return false;
      }
      if (!AppendBytes(FieldOffsetInChars: Offset, InitCst: VTableAddressPoint))
        return false;
    }

    // Accumulate and sort bases, in order to visit them in address order, which
    // may not be the same as declaration order.
    SmallVector<BaseInfo, 8> Bases;
    Bases.reserve(N: CD->getNumBases());
    unsigned BaseNo = 0;
    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
         BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
      assert(!Base->isVirtual() && "should not have virtual bases here");
      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
      CharUnits BaseOffset = Layout.getBaseClassOffset(Base: BD);
      Bases.push_back(Elt: BaseInfo(BD, BaseOffset, BaseNo));
    }
    llvm::stable_sort(Range&: Bases);

    // Recursively emit each base-class subobject at its offset.
    for (const BaseInfo &Base : Bases) {
      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
      if (!Build(Val: Val.getStructBase(i: Base.Index), RD: Base.Decl, IsPrimaryBase,
                 VTableClass, Offset: Offset + Base.Offset))
        return false;
    }
  }

  unsigned FieldNo = 0;
  uint64_t OffsetBits = CGM.getContext().toBits(CharSize: Offset);
  const bool ZeroInitPadding = CGM.shouldZeroInitPadding();
  bool ZeroFieldSize = false;
  // Number of chars of this record accounted for so far; used to decide how
  // much inter-field and trailing padding must be explicitly zeroed.
  CharUnits SizeSoFar = CharUnits::Zero();

  bool AllowOverwrite = false;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && !declaresSameEntity(D1: Val.getUnionField(), D2: *Field))
      continue;

    // Don't emit anonymous bitfields or zero-sized fields.
    if (Field->isUnnamedBitField() ||
        isEmptyFieldForLayout(Context: CGM.getContext(), FD: *Field))
      continue;

    // Emit the value of the initializer.
    const APValue &FieldValue =
        RD->isUnion() ? Val.getUnionValue() : Val.getStructField(i: FieldNo);
    llvm::Constant *EltInit =
        Emitter.tryEmitPrivateForMemory(value: FieldValue, T: Field->getType());
    if (!EltInit)
      return false;

    // PFP (pointer field protection) fields are stored signed: wrap the
    // emitted constant in a ptrauth constant with the right discriminators.
    if (CGM.getContext().isPFPField(Field: *Field)) {
      llvm::ConstantInt *Disc;
      llvm::Constant *AddrDisc;
      if (CGM.getContext().arePFPFieldsTriviallyCopyable(RD)) {
        // Trivially copyable PFP fields use a stable hash of the field's name
        // as the discriminator and no address discrimination.
        uint64_t FieldSignature =
            llvm::getPointerAuthStableSipHash(S: CGM.getPFPFieldName(FD: *Field));
        Disc = llvm::ConstantInt::get(Ty: CGM.Int64Ty, V: FieldSignature);
        AddrDisc = llvm::ConstantPointerNull::get(T: CGM.VoidPtrTy);
      } else if (Emitter.isAbstract()) {
        // isAbstract means that we don't know the global's address. Since we
        // can only form a pointer without knowing the address if the fields are
        // trivially copyable, we need to return false otherwise.
        return false;
      } else {
        Disc = llvm::ConstantInt::get(Ty: CGM.Int64Ty,
                                      V: -(Layout.getFieldOffset(FieldNo) / 8));
        AddrDisc = Emitter.getCurrentAddrPrivate();
      }
      EltInit = llvm::ConstantPtrAuth::get(
          Ptr: EltInit, Key: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 2), Disc, AddrDisc,
          DeactivationSymbol: CGM.getPFPDeactivationSymbol(FD: *Field));
      if (!CGM.getContext().arePFPFieldsTriviallyCopyable(RD))
        Emitter.registerCurrentAddrPrivate(signal: EltInit,
                                           placeholder: cast<llvm::GlobalValue>(Val: AddrDisc));
    }

    // Explicitly zero any padding before this field; for zero-sized fields,
    // account for the size of the constant actually emitted.
    if (ZeroInitPadding) {
      if (!DoZeroInitPadding(Layout, FieldNo, Field: **Field, AllowOverwrite,
                             SizeSoFar, ZeroFieldSize))
        return false;
      if (ZeroFieldSize)
        SizeSoFar += CharUnits::fromQuantity(
            Quantity: CGM.getDataLayout().getTypeAllocSize(Ty: EltInit->getType()));
    }

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(Field: *Field, FieldOffset: Layout.getFieldOffset(FieldNo) + OffsetBits,
                       InitCst: EltInit, AllowOverwrite))
        return false;
      // After emitting a non-empty field with [[no_unique_address]], we may
      // need to overwrite its tail padding.
      if (Field->hasAttr<NoUniqueAddressAttr>())
        AllowOverwrite = true;
    } else {
      // Otherwise we have a bitfield.
      if (!AppendBitField(Field: *Field, FieldOffset: Layout.getFieldOffset(FieldNo) + OffsetBits,
                          C: EltInit, AllowOverwrite))
        return false;
    }
  }
  // Finally, zero-fill the record's tail padding, if any.
  if (ZeroInitPadding && !DoZeroInitPadding(Layout, AllowOverwrite, SizeSoFar))
    return false;

  return true;
}
965
/// Zero-initialize the padding that precedes \p Field (field number
/// \p FieldNo in \p Layout). \p SizeSoFar tracks how many chars of the
/// record have been accounted for; it is advanced past this field's storage.
/// \p ZeroFieldSize is set to true when the field itself occupies no
/// bits/bytes (the caller then accounts for the emitted constant's size
/// instead). Returns false if the padding bytes could not be appended.
bool ConstStructBuilder::DoZeroInitPadding(
    const ASTRecordLayout &Layout, unsigned FieldNo, const FieldDecl &Field,
    bool AllowOverwrite, CharUnits &SizeSoFar, bool &ZeroFieldSize) {
  uint64_t StartBitOffset = Layout.getFieldOffset(FieldNo);
  CharUnits StartOffset = CGM.getContext().toCharUnitsFromBits(BitSize: StartBitOffset);
  // Fill the gap between the last accounted-for byte and this field's start.
  if (SizeSoFar < StartOffset)
    if (!AppendBytes(FieldOffsetInChars: SizeSoFar, InitCst: getPadding(CGM, PadSize: StartOffset - SizeSoFar),
                     AllowOverwrite))
      return false;

  if (!Field.isBitField()) {
    // Ordinary field: advance by the type's size in chars.
    CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(T: Field.getType());
    SizeSoFar = StartOffset + FieldSize;
    ZeroFieldSize = FieldSize.isZero();
  } else {
    // Bitfield: advance to the char containing its last bit, rounding up to
    // a whole char when the bitfield ends mid-byte.
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(Field.getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: &Field);
    uint64_t EndBitOffset = StartBitOffset + Info.Size;
    SizeSoFar = CGM.getContext().toCharUnitsFromBits(BitSize: EndBitOffset);
    if (EndBitOffset % CGM.getContext().getCharWidth() != 0) {
      SizeSoFar++;
    }
    ZeroFieldSize = Info.Size == 0;
  }
  return true;
}
993
994bool ConstStructBuilder::DoZeroInitPadding(const ASTRecordLayout &Layout,
995 bool AllowOverwrite,
996 CharUnits SizeSoFar) {
997 CharUnits TotalSize = Layout.getSize();
998 if (SizeSoFar < TotalSize)
999 if (!AppendBytes(FieldOffsetInChars: SizeSoFar, InitCst: getPadding(CGM, PadSize: TotalSize - SizeSoFar),
1000 AllowOverwrite))
1001 return false;
1002 SizeSoFar = TotalSize;
1003 return true;
1004}
1005
1006llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
1007 Type = Type.getNonReferenceType();
1008 auto *RD = Type->castAsRecordDecl();
1009 llvm::Type *ValTy = CGM.getTypes().ConvertType(T: Type);
1010 return Builder.build(DesiredTy: ValTy, AllowOversized: RD->hasFlexibleArrayMember());
1011}
1012
1013llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
1014 const InitListExpr *ILE,
1015 QualType ValTy) {
1016 ConstantAggregateBuilder Const(Emitter.CGM);
1017 ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
1018
1019 if (!Builder.Build(ILE, /*AllowOverwrite*/false))
1020 return nullptr;
1021
1022 return Builder.Finalize(Type: ValTy);
1023}
1024
1025llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
1026 const APValue &Val,
1027 QualType ValTy) {
1028 ConstantAggregateBuilder Const(Emitter.CGM);
1029 ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
1030
1031 const auto *RD = ValTy->castAsRecordDecl();
1032 const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD);
1033 if (!Builder.Build(Val, RD, IsPrimaryBase: false, VTableClass: CD, Offset: CharUnits::Zero()))
1034 return nullptr;
1035
1036 return Builder.Finalize(Type: ValTy);
1037}
1038
1039bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
1040 ConstantAggregateBuilder &Const,
1041 CharUnits Offset,
1042 const InitListExpr *Updater) {
1043 return ConstStructBuilder(Emitter, Const, Offset)
1044 .Build(ILE: Updater, /*AllowOverwrite*/ true);
1045}
1046
1047//===----------------------------------------------------------------------===//
1048// ConstExprEmitter
1049//===----------------------------------------------------------------------===//
1050
/// Emit (or reuse) an internal global variable holding the constant value of
/// a compound literal. Returns an invalid address when the initializer is
/// not a constant, which is only legal for non-file-scope literals.
static ConstantAddress
tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter,
                             const CompoundLiteralExpr *E) {
  CodeGenModule &CGM = emitter.CGM;
  CharUnits Align = CGM.getContext().getTypeAlignInChars(T: E->getType());
  // Reuse a previously emitted global for this same literal, if any.
  if (llvm::GlobalVariable *Addr =
          CGM.getAddrOfConstantCompoundLiteralIfEmitted(E))
    return ConstantAddress(Addr, Addr->getValueType(), Align);

  LangAS addressSpace = E->getType().getAddressSpace();
  llvm::Constant *C = emitter.tryEmitForInitializer(E: E->getInitializer(),
                                                    destAddrSpace: addressSpace, destType: E->getType());
  if (!C) {
    // File-scope compound literals are required to have constant
    // initializers, so failure can only happen for local literals.
    assert(!E->isFileScope() &&
           "file-scope compound literal did not have constant initializer!");
    return ConstantAddress::invalid();
  }

  auto GV = new llvm::GlobalVariable(
      CGM.getModule(), C->getType(),
      E->getType().isConstantStorage(Ctx: CGM.getContext(), ExcludeCtor: true, ExcludeDtor: false),
      llvm::GlobalValue::InternalLinkage, C, ".compoundliteral", nullptr,
      llvm::GlobalVariable::NotThreadLocal,
      CGM.getContext().getTargetAddressSpace(AS: addressSpace));
  // Finalize before caching, now that the global's address exists.
  emitter.finalize(global: GV);
  GV->setAlignment(Align.getAsAlign());
  CGM.setAddrOfConstantCompoundLiteral(CLE: E, GV);
  return ConstantAddress(GV, GV->getValueType(), Align);
}
1080
/// Build an LLVM constant for an array of \p ArrayBound elements whose
/// explicit initializers are \p Elements, padded out with \p Filler. Emits a
/// plain array constant when every element has the same LLVM type, collapses
/// long runs of trailing zeroes into a zeroinitializer, and falls back to a
/// packed struct when the element types are mixed.
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
                  llvm::Type *CommonElementType, uint64_t ArrayBound,
                  SmallVectorImpl<llvm::Constant *> &Elements,
                  llvm::Constant *Filler) {
  // Figure out how long the initial prefix of non-zero elements is.
  uint64_t NonzeroLength = ArrayBound;
  if (Elements.size() < NonzeroLength && Filler->isNullValue())
    NonzeroLength = Elements.size();
  if (NonzeroLength == Elements.size()) {
    while (NonzeroLength > 0 && Elements[NonzeroLength - 1]->isNullValue())
      --NonzeroLength;
  }

  // All-zero array: a single zeroinitializer of the desired type suffices.
  if (NonzeroLength == 0)
    return llvm::ConstantAggregateZero::get(Ty: DesiredType);

  // Add a zeroinitializer array filler if we have lots of trailing zeroes.
  uint64_t TrailingZeroes = ArrayBound - NonzeroLength;
  if (TrailingZeroes >= 8) {
    assert(Elements.size() >= NonzeroLength &&
           "missing initializer for non-zero element");

    // If all the elements had the same type up to the trailing zeroes, emit a
    // struct of two arrays (the nonzero data and the zeroinitializer).
    if (CommonElementType && NonzeroLength >= 8) {
      llvm::Constant *Initial = llvm::ConstantArray::get(
          T: llvm::ArrayType::get(ElementType: CommonElementType, NumElements: NonzeroLength),
          V: ArrayRef(Elements).take_front(N: NonzeroLength));
      Elements.resize(N: 2);
      Elements[0] = Initial;
    } else {
      Elements.resize(N: NonzeroLength + 1);
    }

    auto *FillerType =
        CommonElementType ? CommonElementType : DesiredType->getElementType();
    FillerType = llvm::ArrayType::get(ElementType: FillerType, NumElements: TrailingZeroes);
    Elements.back() = llvm::ConstantAggregateZero::get(Ty: FillerType);
    // The trailing zeroinitializer has a different type, so the result can
    // no longer be a homogeneous array.
    CommonElementType = nullptr;
  } else if (Elements.size() != ArrayBound) {
    // Otherwise pad to the right size with the filler if necessary.
    Elements.resize(N: ArrayBound, NV: Filler);
    if (Filler->getType() != CommonElementType)
      CommonElementType = nullptr;
  }

  // If all elements have the same type, just emit an array constant.
  if (CommonElementType)
    return llvm::ConstantArray::get(
        T: llvm::ArrayType::get(ElementType: CommonElementType, NumElements: ArrayBound), V: Elements);

  // We have mixed types. Use a packed struct.
  llvm::SmallVector<llvm::Type *, 16> Types;
  Types.reserve(N: Elements.size());
  for (llvm::Constant *Elt : Elements)
    Types.push_back(Elt: Elt->getType());
  llvm::StructType *SType =
      llvm::StructType::get(Context&: CGM.getLLVMContext(), Elements: Types, isPacked: true);
  return llvm::ConstantStruct::get(T: SType, V: Elements);
}
1142
1143// This class only needs to handle arrays, structs and unions. Outside C++11
1144// mode, we don't currently constant fold those types. All other types are
1145// handled by constant folding.
1146//
1147// Constant folding is currently missing support for a few features supported
1148// here: CK_ToUnion, CK_ReinterpretMemberPointer, and DesignatedInitUpdateExpr.
class ConstExprEmitter
    : public ConstStmtVisitor<ConstExprEmitter, llvm::Constant *, QualType> {
  CodeGenModule &CGM;           // Module-level code generation state.
  ConstantEmitter &Emitter;     // Drives emission; tracks abstract mode.
  llvm::LLVMContext &VMContext; // Context in which constants are created.
public:
  ConstExprEmitter(ConstantEmitter &emitter)
    : CGM(emitter.CGM), Emitter(emitter), VMContext(CGM.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  // Default case: this statement kind cannot be emitted as a constant here.
  llvm::Constant *VisitStmt(const Stmt *S, QualType T) { return nullptr; }

  // Prefer a ConstantExpr's pre-computed APValue result; otherwise fall
  // through to visiting its subexpression.
  llvm::Constant *VisitConstantExpr(const ConstantExpr *CE, QualType T) {
    if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
      return Result;
    return Visit(S: CE->getSubExpr(), P: T);
  }

  llvm::Constant *VisitParenExpr(const ParenExpr *PE, QualType T) {
    return Visit(S: PE->getSubExpr(), P: T);
  }

  llvm::Constant *
  VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *PE,
                                    QualType T) {
    return Visit(S: PE->getReplacement(), P: T);
  }

  llvm::Constant *VisitGenericSelectionExpr(const GenericSelectionExpr *GE,
                                            QualType T) {
    return Visit(S: GE->getResultExpr(), P: T);
  }

  llvm::Constant *VisitChooseExpr(const ChooseExpr *CE, QualType T) {
    return Visit(S: CE->getChosenSubExpr(), P: T);
  }

  llvm::Constant *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E,
                                           QualType T) {
    return Visit(S: E->getInitializer(), P: T);
  }

  /// Emit an integer-to-integer conversion of \p E to \p DestType as a
  /// constant, sign- or zero-extending (or truncating) per the source type's
  /// signedness. Returns null when the operand is not a constant integer.
  llvm::Constant *ProduceIntToIntCast(const Expr *E, QualType DestType) {
    QualType FromType = E->getType();
    // See also HandleIntToIntCast in ExprConstant.cpp
    if (FromType->isIntegerType())
      if (llvm::Constant *C = Visit(S: E, P: FromType))
        if (auto *CI = dyn_cast<llvm::ConstantInt>(Val: C)) {
          unsigned SrcWidth = CGM.getContext().getIntWidth(T: FromType);
          unsigned DstWidth = CGM.getContext().getIntWidth(T: DestType);
          if (DstWidth == SrcWidth)
            return CI;
          llvm::APInt A = FromType->isSignedIntegerType()
                              ? CI->getValue().sextOrTrunc(width: DstWidth)
                              : CI->getValue().zextOrTrunc(width: DstWidth);
          return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: A);
        }
    return nullptr;
  }

  // Dispatch over the cast kind; most kinds are either folded elsewhere
  // (return null) or can never appear in a constant initializer.
  llvm::Constant *VisitCastExpr(const CastExpr *E, QualType destType) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Val: E))
      CGM.EmitExplicitCastExprType(E: ECE, CGF: Emitter.CGF);
    const Expr *subExpr = E->getSubExpr();

    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");

      auto field = E->getTargetUnionField();

      auto C = Emitter.tryEmitPrivateForMemory(E: subExpr, T: field->getType());
      if (!C) return nullptr;

      auto destTy = ConvertType(T: destType);
      if (C->getType() == destTy) return C;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size.
      SmallVector<llvm::Constant*, 2> Elts;
      SmallVector<llvm::Type*, 2> Types;
      Elts.push_back(Elt: C);
      Types.push_back(Elt: C->getType());
      unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(Ty: C->getType());
      unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(Ty: destTy);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        llvm::Constant *Padding =
            getPadding(CGM, PadSize: CharUnits::fromQuantity(Quantity: NumPadBytes));
        Elts.push_back(Elt: Padding);
        Types.push_back(Elt: Padding->getType());
      }

      llvm::StructType *STy = llvm::StructType::get(Context&: VMContext, Elements: Types, isPacked: false);
      return llvm::ConstantStruct::get(T: STy, V: Elts);
    }

    case CK_AddressSpaceConversion: {
      llvm::Constant *C = Emitter.tryEmitPrivate(E: subExpr, T: subExpr->getType());
      if (!C)
        return nullptr;
      llvm::Type *destTy = ConvertType(T: E->getType());
      return CGM.performAddrSpaceCast(Src: C, DestTy: destTy);
    }

    case CK_LValueToRValue: {
      // We don't really support doing lvalue-to-rvalue conversions here; any
      // interesting conversions should be done in Evaluate(). But as a
      // special case, allow compound literals to support the gcc extension
      // allowing "struct x {int x;} x = (struct x) {};".
      if (const auto *E =
              dyn_cast<CompoundLiteralExpr>(Val: subExpr->IgnoreParens()))
        return Visit(S: E->getInitializer(), P: destType);
      return nullptr;
    }

    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_ConstructorConversion:
      return Visit(S: subExpr, P: destType);

    case CK_ArrayToPointerDecay:
      if (const auto *S = dyn_cast<StringLiteral>(Val: subExpr))
        return CGM.GetAddrOfConstantStringFromLiteral(S).getPointer();
      return nullptr;
    case CK_NullToPointer:
      if (Visit(S: subExpr, P: destType))
        return CGM.EmitNullConstant(T: destType);
      return nullptr;

    case CK_IntToOCLSampler:
      llvm_unreachable("global sampler variables are not generated");

    case CK_IntegralCast:
      return ProduceIntToIntCast(E: subExpr, DestType: destType);

    case CK_Dependent: llvm_unreachable("saw dependent cast!");

    case CK_BuiltinFnToFnPtr:
      llvm_unreachable("builtin functions are handled elsewhere");

    case CK_ReinterpretMemberPointer:
    case CK_DerivedToBaseMemberPointer:
    case CK_BaseToDerivedMemberPointer: {
      auto C = Emitter.tryEmitPrivate(E: subExpr, T: subExpr->getType());
      if (!C) return nullptr;
      return CGM.getCXXABI().EmitMemberPointerConversion(E, Src: C);
    }

    // These will never be supported.
    case CK_ObjCObjectLValueCast:
    case CK_ARCProduceObject:
    case CK_ARCConsumeObject:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCExtendBlockObject:
    case CK_CopyAndAutoreleaseBlockObject:
      return nullptr;

    // These don't need to be handled here because Evaluate knows how to
    // evaluate them in the cases where they can be folded.
    case CK_BitCast:
    case CK_ToVoid:
    case CK_Dynamic:
    case CK_LValueBitCast:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_UserDefinedConversion:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
    case CK_FunctionToPointerDecay:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_MemberPointerToBoolean:
    case CK_VectorSplat:
    case CK_FloatingRealToComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToIntegralComplex:
    case CK_IntegralRealToComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToFloatingComplex:
    case CK_PointerToIntegral:
    case CK_PointerToBoolean:
    case CK_BooleanToSignedIntegral:
    case CK_IntegralToPointer:
    case CK_IntegralToBoolean:
    case CK_IntegralToFloating:
    case CK_FloatingToIntegral:
    case CK_FloatingToBoolean:
    case CK_FloatingCast:
    case CK_FloatingToFixedPoint:
    case CK_FixedPointToFloating:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToIntegral:
    case CK_IntegralToFixedPoint:
    case CK_ZeroToOCLOpaqueType:
    case CK_MatrixCast:
    case CK_HLSLVectorTruncation:
    case CK_HLSLMatrixTruncation:
    case CK_HLSLArrayRValue:
    case CK_HLSLElementwiseCast:
    case CK_HLSLAggregateSplatCast:
      return nullptr;
    }
    llvm_unreachable("Invalid CastKind");
  }

  llvm::Constant *VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *DIE,
                                          QualType T) {
    // No need for a DefaultInitExprScope: we don't handle 'this' in a
    // constant expression.
    return Visit(S: DIE->getExpr(), P: T);
  }

  llvm::Constant *VisitExprWithCleanups(const ExprWithCleanups *E, QualType T) {
    return Visit(S: E->getSubExpr(), P: T);
  }

  llvm::Constant *VisitIntegerLiteral(const IntegerLiteral *I, QualType T) {
    return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: I->getValue());
  }

  /// Wrap \p Value in an APValue of \p DestType. When the destination is a
  /// floating type, convert the integer using the expression's rounding mode
  /// (NearestTiesToEven if the mode is dynamic). Used for #embed elements.
  static APValue withDestType(ASTContext &Ctx, const Expr *E, QualType SrcType,
                              QualType DestType, const llvm::APSInt &Value) {
    if (!Ctx.hasSameType(T1: SrcType, T2: DestType)) {
      if (DestType->isFloatingType()) {
        llvm::APFloat Result =
            llvm::APFloat(Ctx.getFloatTypeSemantics(T: DestType), 1);
        llvm::RoundingMode RM =
            E->getFPFeaturesInEffect(LO: Ctx.getLangOpts()).getRoundingMode();
        if (RM == llvm::RoundingMode::Dynamic)
          RM = llvm::RoundingMode::NearestTiesToEven;
        Result.convertFromAPInt(Input: Value, IsSigned: Value.isSigned(), RM);
        return APValue(Result);
      }
    }
    return APValue(Value);
  }

  /// Emit a constant for an array initializer list, expanding any #embed
  /// expressions inline and padding remaining elements with the array filler.
  llvm::Constant *EmitArrayInitialization(const InitListExpr *ILE, QualType T) {
    auto *CAT = CGM.getContext().getAsConstantArrayType(T: ILE->getType());
    assert(CAT && "can't emit array init for non-constant-bound array");
    uint64_t NumInitElements = ILE->getNumInits();
    const uint64_t NumElements = CAT->getZExtSize();
    // Each EmbedExpr counts as one init but expands to many elements.
    for (const auto *Init : ILE->inits()) {
      if (const auto *Embed =
              dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) {
        NumInitElements += Embed->getDataElementCount() - 1;
        if (NumInitElements > NumElements) {
          NumInitElements = NumElements;
          break;
        }
      }
    }

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    uint64_t NumInitableElts = std::min<uint64_t>(a: NumInitElements, b: NumElements);

    QualType EltType = CAT->getElementType();

    // Initialize remaining array elements.
    llvm::Constant *fillC = nullptr;
    if (const Expr *filler = ILE->getArrayFiller()) {
      fillC = Emitter.tryEmitAbstractForMemory(E: filler, T: EltType);
      if (!fillC)
        return nullptr;
    }

    // Copy initializer elements.
    SmallVector<llvm::Constant *, 16> Elts;
    if (fillC && fillC->isNullValue())
      Elts.reserve(N: NumInitableElts + 1);
    else
      Elts.reserve(N: NumElements);

    // Track whether all emitted elements share one LLVM type; if not, the
    // result must be emitted as a packed struct instead of an array.
    llvm::Type *CommonElementType = nullptr;
    auto Emit = [&](const Expr *Init, unsigned ArrayIndex) {
      llvm::Constant *C = nullptr;
      C = Emitter.tryEmitPrivateForMemory(E: Init, T: EltType);
      if (!C)
        return false;
      if (ArrayIndex == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(Elt: C);
      return true;
    };

    unsigned ArrayIndex = 0;
    QualType DestTy = CAT->getElementType();
    for (unsigned i = 0; i < ILE->getNumInits(); ++i) {
      const Expr *Init = ILE->getInit(Init: i);
      if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) {
        // Expand the embedded data, converting each code unit to the
        // element type.
        StringLiteral *SL = EmbedS->getDataStringLiteral();
        llvm::APSInt Value(CGM.getContext().getTypeSize(T: DestTy),
                           DestTy->isUnsignedIntegerType());
        llvm::Constant *C;
        for (unsigned I = EmbedS->getStartingElementPos(),
                      N = EmbedS->getDataElementCount();
             I != EmbedS->getStartingElementPos() + N; ++I) {
          Value = SL->getCodeUnit(i: I);
          if (DestTy->isIntegerType()) {
            C = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Value);
          } else {
            C = Emitter.tryEmitPrivateForMemory(
                value: withDestType(Ctx&: CGM.getContext(), E: Init, SrcType: EmbedS->getType(), DestType: DestTy,
                                   Value),
                T: EltType);
          }
          if (!C)
            return nullptr;
          Elts.push_back(Elt: C);
          ArrayIndex++;
        }
        if ((ArrayIndex - EmbedS->getDataElementCount()) == 0)
          CommonElementType = C->getType();
        else if (C->getType() != CommonElementType)
          CommonElementType = nullptr;
      } else {
        if (!Emit(Init, ArrayIndex))
          return nullptr;
        ArrayIndex++;
      }
    }

    llvm::ArrayType *Desired =
        cast<llvm::ArrayType>(Val: CGM.getTypes().ConvertType(T: ILE->getType()));
    return EmitArrayConstant(CGM, DesiredType: Desired, CommonElementType, ArrayBound: NumElements, Elements&: Elts,
                             Filler: fillC);
  }

  llvm::Constant *EmitRecordInitialization(const InitListExpr *ILE,
                                           QualType T) {
    return ConstStructBuilder::BuildStruct(Emitter, ILE, ValTy: T);
  }

  llvm::Constant *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E,
                                             QualType T) {
    return CGM.EmitNullConstant(T);
  }

  llvm::Constant *VisitInitListExpr(const InitListExpr *ILE, QualType T) {
    if (ILE->isTransparent())
      return Visit(S: ILE->getInit(Init: 0), P: T);

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE, T);

    if (ILE->getType()->isRecordType())
      return EmitRecordInitialization(ILE, T);

    return nullptr;
  }

  // Emit the base value, then overwrite the designated pieces in place.
  llvm::Constant *
  VisitDesignatedInitUpdateExpr(const DesignatedInitUpdateExpr *E,
                                QualType destType) {
    auto C = Visit(S: E->getBase(), P: destType);
    if (!C)
      return nullptr;

    ConstantAggregateBuilder Const(CGM);
    Const.add(C, Offset: CharUnits::Zero(), AllowOverwrite: false);

    if (!EmitDesignatedInitUpdater(Emitter, Const, Offset: CharUnits::Zero(), Type: destType,
                                   Updater: E->getUpdater()))
      return nullptr;

    llvm::Type *ValTy = CGM.getTypes().ConvertType(T: destType);
    bool HasFlexibleArray = false;
    if (const auto *RD = destType->getAsRecordDecl())
      HasFlexibleArray = RD->hasFlexibleArrayMember();
    return Const.build(DesiredTy: ValTy, AllowOversized: HasFlexibleArray);
  }

  llvm::Constant *VisitCXXConstructExpr(const CXXConstructExpr *E,
                                        QualType Ty) {
    if (!E->getConstructor()->isTrivial())
      return nullptr;

    // Only default and copy/move constructors can be trivial.
    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
             "trivial ctor has argument but isn't a copy/move ctor");

      const Expr *Arg = E->getArg(Arg: 0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      // Look through the temporary; it's just converting the value to an
      // lvalue to pass it to the constructor.
      if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Val: Arg))
        return Visit(S: MTE->getSubExpr(), P: Ty);
      // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
      return nullptr;
    }

    // Trivial default construction: a null constant of the type.
    return CGM.EmitNullConstant(T: Ty);
  }

  llvm::Constant *VisitStringLiteral(const StringLiteral *E, QualType T) {
    // This is a string literal initializing an array in an initializer.
    return CGM.GetConstantArrayFromStringLiteral(E);
  }

  llvm::Constant *VisitObjCEncodeExpr(const ObjCEncodeExpr *E, QualType T) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str);
    const ConstantArrayType *CAT = CGM.getContext().getAsConstantArrayType(T);
    assert(CAT && "String data not of constant array type!");

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(n: CAT->getZExtSize(), c: '\0');
    return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E, QualType T) {
    return Visit(S: E->getSubExpr(), P: T);
  }

  // Fold unary minus on constant-integer operands only.
  llvm::Constant *VisitUnaryMinus(const UnaryOperator *U, QualType T) {
    if (llvm::Constant *C = Visit(S: U->getSubExpr(), P: T))
      if (auto *CI = dyn_cast<llvm::ConstantInt>(Val: C))
        return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: -CI->getValue());
    return nullptr;
  }

  llvm::Constant *VisitPackIndexingExpr(const PackIndexingExpr *E, QualType T) {
    return Visit(S: E->getSelectedExpr(), P: T);
  }

  // Utility methods
  llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }
};
1606
1607} // end anonymous namespace.
1608
1609llvm::Constant *ConstantEmitter::validateAndPopAbstract(llvm::Constant *C,
1610 AbstractState saved) {
1611 Abstract = saved.OldValue;
1612
1613 assert(saved.OldPlaceholdersSize == PlaceholderAddresses.size() &&
1614 "created a placeholder while doing an abstract emission?");
1615
1616 // No validation necessary for now.
1617 // No cleanup to do for now.
1618 return C;
1619}
1620
1621llvm::Constant *
1622ConstantEmitter::tryEmitAbstractForInitializer(const VarDecl &D) {
1623 auto state = pushAbstract();
1624 auto C = tryEmitPrivateForVarInit(D);
1625 return validateAndPopAbstract(C, saved: state);
1626}
1627
1628llvm::Constant *
1629ConstantEmitter::tryEmitAbstract(const Expr *E, QualType destType) {
1630 auto state = pushAbstract();
1631 auto C = tryEmitPrivate(E, T: destType);
1632 return validateAndPopAbstract(C, saved: state);
1633}
1634
1635llvm::Constant *
1636ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
1637 auto state = pushAbstract();
1638 auto C = tryEmitPrivate(value, T: destType);
1639 return validateAndPopAbstract(C, saved: state);
1640}
1641
1642llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
1643 if (!CE->hasAPValueResult())
1644 return nullptr;
1645
1646 QualType RetType = CE->getType();
1647 if (CE->isGLValue())
1648 RetType = CGM.getContext().getLValueReferenceType(T: RetType);
1649
1650 return tryEmitAbstract(value: CE->getAPValueResult(), destType: RetType);
1651}
1652
1653llvm::Constant *
1654ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
1655 auto state = pushAbstract();
1656 auto C = tryEmitPrivate(E, T: destType);
1657 C = validateAndPopAbstract(C, saved: state);
1658 if (!C) {
1659 CGM.Error(loc: E->getExprLoc(),
1660 error: "internal error: could not emit constant value \"abstractly\"");
1661 C = CGM.EmitNullConstant(T: destType);
1662 }
1663 return C;
1664}
1665
1666llvm::Constant *
1667ConstantEmitter::emitAbstract(SourceLocation loc, const APValue &value,
1668 QualType destType,
1669 bool EnablePtrAuthFunctionTypeDiscrimination) {
1670 auto state = pushAbstract();
1671 auto C =
1672 tryEmitPrivate(value, T: destType, EnablePtrAuthFunctionTypeDiscrimination);
1673 C = validateAndPopAbstract(C, saved: state);
1674 if (!C) {
1675 CGM.Error(loc,
1676 error: "internal error: could not emit constant value \"abstractly\"");
1677 C = CGM.EmitNullConstant(T: destType);
1678 }
1679 return C;
1680}
1681
1682llvm::Constant *ConstantEmitter::tryEmitForInitializer(const VarDecl &D) {
1683 initializeNonAbstract(destAS: D.getType().getAddressSpace());
1684 llvm::Constant *Init = tryEmitPrivateForVarInit(D);
1685
1686 // If a placeholder address was needed for a TLS variable, implying that the
1687 // initializer's value depends on its address, then the object may not be
1688 // initialized in .tdata because the initializer will be memcpy'd to the
1689 // thread's TLS. Instead the initialization must be done in code.
1690 if (!PlaceholderAddresses.empty() && D.getTLSKind() != VarDecl::TLS_None) {
1691 for (auto [_, GV] : PlaceholderAddresses)
1692 GV->eraseFromParent();
1693 PlaceholderAddresses.clear();
1694 Init = nullptr;
1695 }
1696
1697 return markIfFailed(init: Init);
1698}
1699
1700llvm::Constant *ConstantEmitter::tryEmitForInitializer(const Expr *E,
1701 LangAS destAddrSpace,
1702 QualType destType) {
1703 initializeNonAbstract(destAS: destAddrSpace);
1704 return markIfFailed(init: tryEmitPrivateForMemory(E, T: destType));
1705}
1706
1707llvm::Constant *ConstantEmitter::emitForInitializer(const APValue &value,
1708 LangAS destAddrSpace,
1709 QualType destType) {
1710 initializeNonAbstract(destAS: destAddrSpace);
1711 auto C = tryEmitPrivateForMemory(value, T: destType);
1712 assert(C && "couldn't emit constant value non-abstractly?");
1713 return C;
1714}
1715
1716llvm::GlobalValue *ConstantEmitter::getCurrentAddrPrivate() {
1717 assert(!Abstract && "cannot get current address for abstract constant");
1718
1719
1720
1721 // Make an obviously ill-formed global that should blow up compilation
1722 // if it survives.
1723 auto global = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, true,
1724 llvm::GlobalValue::PrivateLinkage,
1725 /*init*/ nullptr,
1726 /*name*/ "",
1727 /*before*/ nullptr,
1728 llvm::GlobalVariable::NotThreadLocal,
1729 CGM.getContext().getTargetAddressSpace(AS: DestAddressSpace));
1730
1731 PlaceholderAddresses.push_back(Elt: std::make_pair(x: nullptr, y&: global));
1732
1733 return global;
1734}
1735
void ConstantEmitter::registerCurrentAddrPrivate(llvm::Constant *signal,
                                                 llvm::GlobalValue *placeholder) {
  // Fill in the signal constant for the placeholder most recently created
  // by getCurrentAddrPrivate(); the two calls must pair up in LIFO order.
  assert(!PlaceholderAddresses.empty());
  assert(PlaceholderAddresses.back().first == nullptr);
  assert(PlaceholderAddresses.back().second == placeholder);
  PlaceholderAddresses.back().first = signal;
}
1743
1744namespace {
  /// Rewrites the placeholder globals created during self-referential
  /// constant emission into real GEPs that point at the placeholder's
  /// position inside the finished global initializer.
  struct ReplacePlaceholders {
    CodeGenModule &CGM;

    /// The base address of the global.
    llvm::Constant *Base;
    llvm::Type *BaseValueTy = nullptr;

    /// The placeholder addresses that were registered during emission.
    llvm::DenseMap<llvm::Constant*, llvm::GlobalVariable*> PlaceholderAddresses;

    /// The locations of the placeholder signals.
    llvm::DenseMap<llvm::GlobalVariable*, llvm::Constant*> Locations;

    /// The current index stack. We use a simple unsigned stack because
    /// we assume that placeholders will be relatively sparse in the
    /// initializer, but we cache the index values we find just in case.
    llvm::SmallVector<unsigned, 8> Indices;
    llvm::SmallVector<llvm::Constant*, 8> IndexValues;

    ReplacePlaceholders(CodeGenModule &CGM, llvm::Constant *base,
                        ArrayRef<std::pair<llvm::Constant*,
                                           llvm::GlobalVariable*>> addresses)
      : CGM(CGM), Base(base),
        PlaceholderAddresses(addresses.begin(), addresses.end()) {
    }

    /// Walk 'init', find every registered placeholder signal, and replace
    /// each placeholder global with a GEP to its position within 'init'.
    void replaceInInitializer(llvm::Constant *init) {
      // Remember the type of the top-most initializer.
      BaseValueTy = init->getType();

      // Initialize the stack.
      Indices.push_back(Elt: 0);
      IndexValues.push_back(Elt: nullptr);

      // Recurse into the initializer.
      findLocations(init);

      // Check invariants.
      assert(IndexValues.size() == Indices.size() && "mismatch");
      assert(Indices.size() == 1 && "didn't pop all indices");

      // Do the replacement; this basically invalidates 'init'.
      assert(Locations.size() == PlaceholderAddresses.size() &&
             "missed a placeholder?");

      // We're iterating over a hashtable, so this would be a source of
      // non-determinism in compiler output *except* that we're just
      // messing around with llvm::Constant structures, which never itself
      // does anything that should be visible in compiler output.
      for (auto &entry : Locations) {
        assert(entry.first->getName() == "" && "not a placeholder!");
        entry.first->replaceAllUsesWith(V: entry.second);
        entry.first->eraseFromParent();
      }
    }

  private:
    /// Depth-first search for placeholder signals. 'Indices' tracks the
    /// path of aggregate operand indices to the current position.
    void findLocations(llvm::Constant *init) {
      // Recurse into aggregates.
      if (auto agg = dyn_cast<llvm::ConstantAggregate>(Val: init)) {
        for (unsigned i = 0, e = agg->getNumOperands(); i != e; ++i) {
          Indices.push_back(Elt: i);
          IndexValues.push_back(Elt: nullptr);

          findLocations(init: agg->getOperand(i_nocapture: i));

          IndexValues.pop_back();
          Indices.pop_back();
        }
        return;
      }

      // Otherwise, check for registered constants.
      while (true) {
        auto it = PlaceholderAddresses.find(Val: init);
        if (it != PlaceholderAddresses.end()) {
          setLocation(it->second);
          break;
        }

        // Look through bitcasts or other expressions.
        if (auto expr = dyn_cast<llvm::ConstantExpr>(Val: init)) {
          init = expr->getOperand(i_nocapture: 0);
        } else {
          break;
        }
      }
    }

    /// Record the GEP from Base along the current 'Indices' path as the
    /// location that 'placeholder' should resolve to.
    void setLocation(llvm::GlobalVariable *placeholder) {
      assert(!Locations.contains(placeholder) &&
             "already found location for placeholder!");

      // Lazily fill in IndexValues with the values from Indices.
      // We do this in reverse because we should always have a strict
      // prefix of indices from the start.
      assert(Indices.size() == IndexValues.size());
      for (size_t i = Indices.size() - 1; i != size_t(-1); --i) {
        if (IndexValues[i]) {
#ifndef NDEBUG
          // Sanity-check the cached prefix matches the live index stack.
          for (size_t j = 0; j != i + 1; ++j) {
            assert(IndexValues[j] &&
                   isa<llvm::ConstantInt>(IndexValues[j]) &&
                   cast<llvm::ConstantInt>(IndexValues[j])->getZExtValue()
                     == Indices[j]);
          }
#endif
          break;
        }

        IndexValues[i] = llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: Indices[i]);
      }

      llvm::Constant *location = llvm::ConstantExpr::getInBoundsGetElementPtr(
          Ty: BaseValueTy, C: Base, IdxList: IndexValues);

      Locations.insert(KV: {placeholder, location});
    }
  };
1864}
1865
// Resolve any self-referential placeholders in 'global''s initializer and
// mark this emitter as finished.
void ConstantEmitter::finalize(llvm::GlobalVariable *global) {
  assert(InitializedNonAbstract &&
         "finalizing emitter that was used for abstract emission?");
  assert(!Finalized && "finalizing emitter multiple times");
  assert(global->getInitializer());

  // Note that we might also be Failed.
  Finalized = true;

  if (!PlaceholderAddresses.empty()) {
    ReplacePlaceholders(CGM, global, PlaceholderAddresses)
        .replaceInInitializer(init: global->getInitializer());
    PlaceholderAddresses.clear(); // satisfy the assertion in the destructor
  }
}
1881
ConstantEmitter::~ConstantEmitter() {
  // A non-abstract emitter must be explicitly finalized (or marked failed)
  // before destruction, and must not leave placeholder globals behind.
  assert((!InitializedNonAbstract || Finalized || Failed) &&
         "not finalized after being initialized for non-abstract emission");
  assert(PlaceholderAddresses.empty() && "unhandled placeholders");
}
1887
1888static QualType getNonMemoryType(CodeGenModule &CGM, QualType type) {
1889 if (auto AT = type->getAs<AtomicType>()) {
1890 return CGM.getContext().getQualifiedType(T: AT->getValueType(),
1891 Qs: type.getQualifiers());
1892 }
1893 return type;
1894}
1895
llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
  // Make a quick check if variable can be default NULL initialized
  // and avoid going through rest of code which may do, for c++11,
  // initialization of memory to all NULLs.
  if (!D.hasLocalStorage()) {
    QualType Ty = CGM.getContext().getBaseElementType(QT: D.getType());
    if (Ty->isRecordType())
      if (const CXXConstructExpr *E =
              dyn_cast_or_null<CXXConstructExpr>(Val: D.getInit())) {
        const CXXConstructorDecl *CD = E->getConstructor();
        // A trivial default constructor is equivalent to zero-initialization
        // for static storage, so skip the expensive paths entirely.
        if (CD->isTrivial() && CD->isDefaultConstructor())
          return CGM.EmitNullConstant(T: D.getType());
      }
  }
  InConstantContext = D.hasConstantInitialization();

  QualType destType = D.getType();
  const Expr *E = D.getInit();
  assert(E && "No initializer to emit");

  // First attempt: fold the initializer expression directly with the
  // ConstExprEmitter (references are handled by the APValue path below).
  if (!destType->isReferenceType()) {
    QualType nonMemoryDestType = getNonMemoryType(CGM, type: destType);
    if (llvm::Constant *C = ConstExprEmitter(*this).Visit(S: E, P: nonMemoryDestType))
      return emitForMemory(C, T: destType);
  }

  // Try to emit the initializer. Note that this can allow some things that
  // are not allowed by tryEmitPrivateForMemory alone.
  if (APValue *value = D.evaluateValue()) {
    assert(!value->allowConstexprUnknown() &&
           "Constexpr unknown values are not allowed in CodeGen");
    return tryEmitPrivateForMemory(value: *value, T: destType);
  }

  return nullptr;
}
1932
1933llvm::Constant *
1934ConstantEmitter::tryEmitAbstractForMemory(const Expr *E, QualType destType) {
1935 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1936 auto C = tryEmitAbstract(E, destType: nonMemoryDestType);
1937 return (C ? emitForMemory(C, T: destType) : nullptr);
1938}
1939
1940llvm::Constant *
1941ConstantEmitter::tryEmitAbstractForMemory(const APValue &value,
1942 QualType destType) {
1943 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1944 auto C = tryEmitAbstract(value, destType: nonMemoryDestType);
1945 return (C ? emitForMemory(C, T: destType) : nullptr);
1946}
1947
1948llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const Expr *E,
1949 QualType destType) {
1950 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1951 llvm::Constant *C = tryEmitPrivate(E, T: nonMemoryDestType);
1952 return (C ? emitForMemory(C, T: destType) : nullptr);
1953}
1954
1955llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const APValue &value,
1956 QualType destType) {
1957 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1958 auto C = tryEmitPrivate(value, T: nonMemoryDestType);
1959 return (C ? emitForMemory(C, T: destType) : nullptr);
1960}
1961
/// Try to emit a constant signed pointer, given a raw pointer and the
/// destination ptrauth qualifier.
///
/// This can fail if the qualifier needs address discrimination and the
/// emitter is in an abstract mode.
llvm::Constant *
ConstantEmitter::tryEmitConstantSignedPointer(llvm::Constant *UnsignedPointer,
                                              PointerAuthQualifier Schema) {
  assert(Schema && "applying trivial ptrauth schema");

  // Key 'none' means the pointer is left unsigned.
  if (Schema.hasKeyNone())
    return UnsignedPointer;

  unsigned Key = Schema.getKey();

  // Create an address placeholder if we're using address discrimination.
  llvm::GlobalValue *StorageAddress = nullptr;
  if (Schema.isAddressDiscriminated()) {
    // We can't do this if the emitter is in an abstract state.
    if (isAbstract())
      return nullptr;

    StorageAddress = getCurrentAddrPrivate();
  }

  llvm::ConstantInt *Discriminator =
      llvm::ConstantInt::get(Ty: CGM.IntPtrTy, V: Schema.getExtraDiscriminator());

  llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
      Pointer: UnsignedPointer, Key, StorageAddress, OtherDiscriminator: Discriminator);

  // Pair the placeholder with the signed-pointer signal so finalize() can
  // patch in the real storage address later.
  if (Schema.isAddressDiscriminated())
    registerCurrentAddrPrivate(signal: SignedPointer, placeholder: StorageAddress);

  return SignedPointer;
}
1998
// Convert a constant in its value representation to the representation
// used when it is stored in memory (atomic tail padding, bool widening,
// _BitInt load/store form).
llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
                                               llvm::Constant *C,
                                               QualType destType) {
  // For an _Atomic-qualified constant, we may need to add tail padding.
  if (auto AT = destType->getAs<AtomicType>()) {
    QualType destValueType = AT->getValueType();
    // Recurse first so the inner value itself is in memory form.
    C = emitForMemory(CGM, C, destType: destValueType);

    uint64_t innerSize = CGM.getContext().getTypeSize(T: destValueType);
    uint64_t outerSize = CGM.getContext().getTypeSize(T: destType);
    if (innerSize == outerSize)
      return C;

    assert(innerSize < outerSize && "emitted over-large constant for atomic");
    // Pad the value out to the atomic's full size with a zeroed byte array.
    llvm::Constant *elts[] = {
      C,
      llvm::ConstantAggregateZero::get(
        Ty: llvm::ArrayType::get(ElementType: CGM.Int8Ty, NumElements: (outerSize - innerSize) / 8))
    };
    return llvm::ConstantStruct::getAnon(V: elts);
  }

  // Zero-extend bool.
  // In HLSL bool vectors are stored in memory as a vector of i32
  if ((C->getType()->isIntegerTy(Bitwidth: 1) && !destType->isBitIntType()) ||
      (destType->isExtVectorBoolType() &&
       !destType->isPackedVectorBoolType(ctx: CGM.getContext()))) {
    llvm::Type *boolTy = CGM.getTypes().ConvertTypeForMem(T: destType);
    llvm::Constant *Res = llvm::ConstantFoldCastOperand(
        Opcode: llvm::Instruction::ZExt, C, DestTy: boolTy, DL: CGM.getDataLayout());
    assert(Res && "Constant folding must succeed");
    return Res;
  }

  if (destType->isBitIntType()) {
    ConstantAggregateBuilder Builder(CGM);
    llvm::Type *LoadStoreTy = CGM.getTypes().convertTypeForLoadStore(T: destType);
    // ptrtoint/inttoptr should not involve _BitInt in constant expressions, so
    // casting to ConstantInt is safe here.
    auto *CI = cast<llvm::ConstantInt>(Val: C);
    // Extend to the load/store width, respecting the _BitInt's signedness.
    llvm::Constant *Res = llvm::ConstantFoldCastOperand(
        Opcode: destType->isSignedIntegerOrEnumerationType() ? llvm::Instruction::SExt
                                                    : llvm::Instruction::ZExt,
        C: CI, DestTy: LoadStoreTy, DL: CGM.getDataLayout());
    if (CGM.getTypes().typeRequiresSplitIntoByteArray(ASTTy: destType, LLVMTy: C->getType())) {
      // Long _BitInt has array of bytes as in-memory type.
      // So, split constant into individual bytes.
      llvm::Type *DesiredTy = CGM.getTypes().ConvertTypeForMem(T: destType);
      llvm::APInt Value = cast<llvm::ConstantInt>(Val: Res)->getValue();
      Builder.addBits(Bits: Value, /*OffsetInBits=*/0, /*AllowOverwrite=*/false);
      return Builder.build(DesiredTy, /*AllowOversized*/ false);
    }
    return Res;
  }

  // Everything else has identical value and memory representations.
  return C;
}
2056
2057llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
2058 QualType destType) {
2059 assert(!destType->isVoidType() && "can't emit a void constant");
2060
2061 if (!destType->isReferenceType())
2062 if (llvm::Constant *C = ConstExprEmitter(*this).Visit(S: E, P: destType))
2063 return C;
2064
2065 Expr::EvalResult Result;
2066
2067 bool Success = false;
2068
2069 if (destType->isReferenceType())
2070 Success = E->EvaluateAsLValue(Result, Ctx: CGM.getContext());
2071 else
2072 Success = E->EvaluateAsRValue(Result, Ctx: CGM.getContext(), InConstantContext);
2073
2074 if (Success && !Result.HasSideEffects)
2075 return tryEmitPrivate(value: Result.Val, T: destType);
2076
2077 return nullptr;
2078}
2079
// Produce the null-pointer constant for QT in pointer type T. Delegated to
// the target, since null need not be the zero bit-pattern in every address
// space (see the non-zero-null FIXME in ConstantLValueEmitter::tryEmit).
llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T, QualType QT) {
  return getTargetCodeGenInfo().getNullPointer(CGM: *this, T, QT);
}
2083
2084namespace {
/// A struct which can be used to peephole certain kinds of finalization
/// that normally happen during l-value emission.
struct ConstantLValue {
  // The emitted constant, or null if emission failed.
  llvm::Constant *Value;
  // True if the APValue's l-value offset has already been folded in.
  bool HasOffsetApplied;
  // True if the destination's pointer-auth signing has already been done.
  bool HasDestPointerAuth;

  /*implicit*/ ConstantLValue(llvm::Constant *value,
                              bool hasOffsetApplied = false,
                              bool hasDestPointerAuth = false)
      : Value(value), HasOffsetApplied(hasOffsetApplied),
        HasDestPointerAuth(hasDestPointerAuth) {}

  /*implicit*/ ConstantLValue(ConstantAddress address)
      : ConstantLValue(address.getPointer()) {}
};
2101
/// A helper class for emitting constant l-values.
class ConstantLValueEmitter : public ConstStmtVisitor<ConstantLValueEmitter,
                                                      ConstantLValue> {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  // The evaluated l-value (base + offset) being emitted.
  const APValue &Value;
  // The type the resulting constant must be converted to (pointer or
  // integer; see tryEmit).
  QualType DestType;
  bool EnablePtrAuthFunctionTypeDiscrimination;

  // Befriend StmtVisitorBase so that we don't have to expose Visit*.
  friend StmtVisitorBase;

public:
  ConstantLValueEmitter(ConstantEmitter &emitter, const APValue &value,
                        QualType destType,
                        bool EnablePtrAuthFunctionTypeDiscrimination = true)
    : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType),
      EnablePtrAuthFunctionTypeDiscrimination(
          EnablePtrAuthFunctionTypeDiscrimination) {}

  /// Entry point: emit Value as a constant of DestType, or null on failure.
  llvm::Constant *tryEmit();

private:
  llvm::Constant *tryEmitAbsolute(llvm::Type *destTy);
  ConstantLValue tryEmitBase(const APValue::LValueBase &base);

  // Unhandled statement kinds fail emission.
  ConstantLValue VisitStmt(const Stmt *S) { return nullptr; }
  ConstantLValue VisitConstantExpr(const ConstantExpr *E);
  ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
  ConstantLValue VisitStringLiteral(const StringLiteral *E);
  ConstantLValue VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
  ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
  ConstantLValue VisitObjCStringLiteral(const ObjCStringLiteral *E);
  ConstantLValue VisitPredefinedExpr(const PredefinedExpr *E);
  ConstantLValue VisitAddrLabelExpr(const AddrLabelExpr *E);
  ConstantLValue VisitCallExpr(const CallExpr *E);
  ConstantLValue VisitBlockExpr(const BlockExpr *E);
  ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E);
  ConstantLValue VisitMaterializeTemporaryExpr(
                                         const MaterializeTemporaryExpr *E);

  // Helpers for __builtin_ptrauth_sign_constant.
  ConstantLValue emitPointerAuthSignConstant(const CallExpr *E);
  llvm::Constant *emitPointerAuthPointer(const Expr *E);
  unsigned emitPointerAuthKey(const Expr *E);
  std::pair<llvm::Constant *, llvm::ConstantInt *>
  emitPointerAuthDiscriminator(const Expr *E);

  bool hasNonZeroOffset() const {
    return !Value.getLValueOffset().isZero();
  }

  /// Return the value offset.
  llvm::Constant *getOffset() {
    return llvm::ConstantInt::get(Ty: CGM.Int64Ty,
                                  V: Value.getLValueOffset().getQuantity());
  }

  /// Apply the value offset to the given constant.
  llvm::Constant *applyOffset(llvm::Constant *C) {
    if (!hasNonZeroOffset())
      return C;

    return llvm::ConstantExpr::getPtrAdd(Ptr: C, Offset: getOffset());
  }
};
2167
2168}
2169
llvm::Constant *ConstantLValueEmitter::tryEmit() {
  const APValue::LValueBase &base = Value.getLValueBase();

  // The destination type should be a pointer or reference
  // type, but it might also be a cast thereof.
  //
  // FIXME: the chain of casts required should be reflected in the APValue.
  // We need this in order to correctly handle things like a ptrtoint of a
  // non-zero null pointer and addrspace casts that aren't trivially
  // represented in LLVM IR.
  auto destTy = CGM.getTypes().ConvertTypeForMem(T: DestType);
  assert(isa<llvm::IntegerType>(destTy) || isa<llvm::PointerType>(destTy));

  // If there's no base at all, this is a null or absolute pointer,
  // possibly cast back to an integer type.
  if (!base) {
    return tryEmitAbsolute(destTy);
  }

  // Otherwise, try to emit the base.
  ConstantLValue result = tryEmitBase(base);

  // If that failed, we're done.
  llvm::Constant *value = result.Value;
  if (!value) return nullptr;

  // Apply the offset if necessary and not already done.
  // (A visitor may have folded the offset in already; see HasOffsetApplied.)
  if (!result.HasOffsetApplied) {
    value = applyOffset(C: value);
  }

  // Apply pointer-auth signing from the destination type.
  // Signing must come after the offset so the signed value is final.
  if (PointerAuthQualifier PointerAuth = DestType.getPointerAuth();
      PointerAuth && !result.HasDestPointerAuth) {
    value = Emitter.tryEmitConstantSignedPointer(UnsignedPointer: value, Schema: PointerAuth);
    if (!value)
      return nullptr;
  }

  // Convert to the appropriate type; this could be an lvalue for
  // an integer. FIXME: performAddrSpaceCast
  if (isa<llvm::PointerType>(Val: destTy))
    return llvm::ConstantExpr::getPointerCast(C: value, Ty: destTy);

  return llvm::ConstantExpr::getPtrToInt(C: value, Ty: destTy);
}
2216
/// Try to emit an absolute l-value, such as a null pointer or an integer
/// bitcast to pointer type.
llvm::Constant *
ConstantLValueEmitter::tryEmitAbsolute(llvm::Type *destTy) {
  // If we're producing a pointer, this is easy.
  auto destPtrTy = cast<llvm::PointerType>(Val: destTy);
  if (Value.isNullPointer()) {
    // FIXME: integer offsets from non-zero null pointers.
    return CGM.getNullPointer(T: destPtrTy, QT: DestType);
  }

  // Convert the integer to a pointer-sized integer before converting it
  // to a pointer.
  // FIXME: signedness depends on the original integer type.
  auto intptrTy = CGM.getDataLayout().getIntPtrType(destPtrTy);
  llvm::Constant *C;
  // The absolute address is stored as the l-value offset.
  C = llvm::ConstantFoldIntegerCast(C: getOffset(), DestTy: intptrTy, /*isSigned*/ IsSigned: false,
                                    DL: CGM.getDataLayout());
  assert(C && "Must have folded, as Offset is a ConstantInt");
  C = llvm::ConstantExpr::getIntToPtr(C, Ty: destPtrTy);
  return C;
}
2239
// Emit the base of an l-value: a declaration, a typeid descriptor, or an
// arbitrary expression (dispatched through the statement visitor).
ConstantLValue
ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
  // Handle values.
  if (const ValueDecl *D = base.dyn_cast<const ValueDecl*>()) {
    // The constant always points to the canonical declaration. We want to look
    // at properties of the most recent declaration at the point of emission.
    D = cast<ValueDecl>(Val: D->getMostRecentDecl());

    if (D->hasAttr<WeakRefAttr>())
      return CGM.GetWeakRefReference(VD: D).getPointer();

    // Shared signing logic for function pointers: a ptrauth qualifier on
    // the destination wins; otherwise fall back to function-type
    // discrimination if enabled. Both paths fold the offset themselves.
    auto PtrAuthSign = [&](llvm::Constant *C) {
      if (PointerAuthQualifier PointerAuth = DestType.getPointerAuth()) {
        C = applyOffset(C);
        C = Emitter.tryEmitConstantSignedPointer(UnsignedPointer: C, Schema: PointerAuth);
        return ConstantLValue(C, /*applied offset*/ true, /*signed*/ true);
      }

      CGPointerAuthInfo AuthInfo;

      if (EnablePtrAuthFunctionTypeDiscrimination)
        AuthInfo = CGM.getFunctionPointerAuthInfo(T: DestType);

      if (AuthInfo) {
        // A non-zero offset into a signed function pointer cannot be
        // represented; fail emission.
        if (hasNonZeroOffset())
          return ConstantLValue(nullptr);

        C = applyOffset(C);
        C = CGM.getConstantSignedPointer(
            Pointer: C, Key: AuthInfo.getKey(), StorageAddress: nullptr,
            OtherDiscriminator: cast_or_null<llvm::ConstantInt>(Val: AuthInfo.getDiscriminator()));
        return ConstantLValue(C, /*applied offset*/ true, /*signed*/ true);
      }

      return ConstantLValue(C);
    };

    if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      llvm::Constant *C = CGM.getRawFunctionPointer(GD: FD);
      if (FD->getType()->isCFIUncheckedCalleeFunctionType())
        C = llvm::NoCFIValue::get(GV: cast<llvm::GlobalValue>(Val: C));
      return PtrAuthSign(C);
    }

    if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
      // We can never refer to a variable with local storage.
      if (!VD->hasLocalStorage()) {
        if (VD->isFileVarDecl() || VD->hasExternalStorage())
          return CGM.GetAddrOfGlobalVar(D: VD);

        // Function-local statics get (or create) their backing global.
        if (VD->isLocalVarDecl()) {
          return CGM.getOrCreateStaticVarDecl(
              D: *VD, Linkage: CGM.getLLVMLinkageVarDefinition(VD));
        }
      }
    }

    if (const auto *GD = dyn_cast<MSGuidDecl>(Val: D))
      return CGM.GetAddrOfMSGuidDecl(GD);

    if (const auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(Val: D))
      return CGM.GetAddrOfUnnamedGlobalConstantDecl(GCD);

    if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: D))
      return CGM.GetAddrOfTemplateParamObject(TPO);

    return nullptr;
  }

  // Handle typeid(T).
  if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>())
    return CGM.GetAddrOfRTTIDescriptor(Ty: QualType(TI.getType(), 0));

  // Otherwise, it must be an expression.
  return Visit(S: base.get<const Expr*>());
}
2316
2317ConstantLValue
2318ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
2319 if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE: E))
2320 return Result;
2321 return Visit(S: E->getSubExpr());
2322}
2323
2324ConstantLValue
2325ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
2326 ConstantEmitter CompoundLiteralEmitter(CGM, Emitter.CGF);
2327 CompoundLiteralEmitter.setInConstantContext(Emitter.isInConstantContext());
2328 return tryEmitGlobalCompoundLiteral(emitter&: CompoundLiteralEmitter, E);
2329}
2330
// A string literal's address is the address of its uniqued global.
ConstantLValue
ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *E) {
  return CGM.GetAddrOfConstantStringFromLiteral(S: E);
}
2335
// @encode(T) used as an l-value: the address of the encoding string.
ConstantLValue
ConstantLValueEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
  return CGM.GetAddrOfConstantStringFromObjCEncode(E);
}
2340
2341static ConstantLValue emitConstantObjCStringLiteral(const StringLiteral *S,
2342 QualType T,
2343 CodeGenModule &CGM) {
2344 auto C = CGM.getObjCRuntime().GenerateConstantString(S);
2345 return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T));
2346}
2347
// @"..." literal: delegate to the runtime's constant-string emitter.
ConstantLValue
ConstantLValueEmitter::VisitObjCStringLiteral(const ObjCStringLiteral *E) {
  return emitConstantObjCStringLiteral(S: E->getString(), T: E->getType(), CGM);
}
2352
2353ConstantLValue
2354ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
2355 assert(E->isExpressibleAsConstantInitializer() &&
2356 "this boxed expression can't be emitted as a compile-time constant");
2357 const auto *SL = cast<StringLiteral>(Val: E->getSubExpr()->IgnoreParenCasts());
2358 return emitConstantObjCStringLiteral(S: SL, T: E->getType(), CGM);
2359}
2360
// __func__ and friends: the address of the computed function-name string.
ConstantLValue
ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
  return CGM.GetAddrOfConstantStringFromLiteral(S: E->getFunctionName());
}
2365
2366ConstantLValue
2367ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) {
2368 assert(Emitter.CGF && "Invalid address of label expression outside function");
2369 llvm::Constant *Ptr = Emitter.CGF->GetAddrOfLabel(L: E->getLabel());
2370 return Ptr;
2371}
2372
2373ConstantLValue
2374ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
2375 unsigned builtin = E->getBuiltinCallee();
2376 if (builtin == Builtin::BI__builtin_function_start)
2377 return CGM.GetFunctionStart(
2378 Decl: E->getArg(Arg: 0)->getAsBuiltinConstantDeclRef(Context: CGM.getContext()));
2379
2380 if (builtin == Builtin::BI__builtin_ptrauth_sign_constant)
2381 return emitPointerAuthSignConstant(E);
2382
2383 if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
2384 builtin != Builtin::BI__builtin___NSStringMakeConstantString)
2385 return nullptr;
2386
2387 const auto *Literal = cast<StringLiteral>(Val: E->getArg(Arg: 0)->IgnoreParenCasts());
2388 if (builtin == Builtin::BI__builtin___NSStringMakeConstantString) {
2389 return CGM.getObjCRuntime().GenerateConstantString(Literal);
2390 } else {
2391 // FIXME: need to deal with UCN conversion issues.
2392 return CGM.GetAddrOfConstantCFString(Literal);
2393 }
2394}
2395
// __builtin_ptrauth_sign_constant(ptr, key, discriminator): sign the
// constant pointer with the given key and (optionally blended) discriminator.
ConstantLValue
ConstantLValueEmitter::emitPointerAuthSignConstant(const CallExpr *E) {
  llvm::Constant *UnsignedPointer = emitPointerAuthPointer(E: E->getArg(Arg: 0));
  unsigned Key = emitPointerAuthKey(E: E->getArg(Arg: 1));
  // The discriminator splits into an address part and an extra integer part.
  auto [StorageAddress, OtherDiscriminator] =
      emitPointerAuthDiscriminator(E: E->getArg(Arg: 2));

  llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
      Pointer: UnsignedPointer, Key, StorageAddress, OtherDiscriminator);
  return SignedPointer;
}
2407
// Emit the pointer operand of __builtin_ptrauth_sign_constant as an
// unsigned (raw) constant pointer.
llvm::Constant *ConstantLValueEmitter::emitPointerAuthPointer(const Expr *E) {
  Expr::EvalResult Result;
  bool Succeeded = E->EvaluateAsRValue(Result, Ctx: CGM.getContext());
  assert(Succeeded);
  (void)Succeeded;

  // The assertions here are all checked by Sema.
  assert(Result.Val.isLValue());
  if (isa<FunctionDecl>(Val: Result.Val.getLValueBase().get<const ValueDecl *>()))
    assert(Result.Val.getLValueOffset().isZero());
  // Disable function-type discrimination: the raw pointer must stay
  // unsigned so the builtin's explicit signing applies.
  return ConstantEmitter(CGM, Emitter.CGF)
      .emitAbstract(loc: E->getExprLoc(), value: Result.Val, destType: E->getType(), EnablePtrAuthFunctionTypeDiscrimination: false);
}
2421
// Evaluate the key operand of __builtin_ptrauth_sign_constant to an
// integer key ID.
unsigned ConstantLValueEmitter::emitPointerAuthKey(const Expr *E) {
  return E->EvaluateKnownConstInt(Ctx: CGM.getContext()).getZExtValue();
}
2425
// Split a ptrauth discriminator operand into {storage address, extra
// integer discriminator}. A blend builtin yields both components; a bare
// operand is classified by its type (pointer vs. integer).
std::pair<llvm::Constant *, llvm::ConstantInt *>
ConstantLValueEmitter::emitPointerAuthDiscriminator(const Expr *E) {
  E = E->IgnoreParens();

  // __builtin_ptrauth_blend_discriminator(ptr, int) carries both parts.
  if (const auto *Call = dyn_cast<CallExpr>(Val: E)) {
    if (Call->getBuiltinCallee() ==
            Builtin::BI__builtin_ptrauth_blend_discriminator) {
      llvm::Constant *Pointer = ConstantEmitter(CGM).emitAbstract(
          E: Call->getArg(Arg: 0), destType: Call->getArg(Arg: 0)->getType());
      auto *Extra = cast<llvm::ConstantInt>(Val: ConstantEmitter(CGM).emitAbstract(
          E: Call->getArg(Arg: 1), destType: Call->getArg(Arg: 1)->getType()));
      return {Pointer, Extra};
    }
  }

  llvm::Constant *Result = ConstantEmitter(CGM).emitAbstract(E, destType: E->getType());
  if (Result->getType()->isPointerTy())
    return {Result, nullptr};
  return {nullptr, cast<llvm::ConstantInt>(Val: Result)};
}
2446
2447ConstantLValue
2448ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) {
2449 StringRef functionName;
2450 if (auto CGF = Emitter.CGF)
2451 functionName = CGF->CurFn->getName();
2452 else
2453 functionName = "global";
2454
2455 return CGM.GetAddrOfGlobalBlock(BE: E, Name: functionName);
2456}
2457
2458ConstantLValue
2459ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
2460 QualType T;
2461 if (E->isTypeOperand())
2462 T = E->getTypeOperand(Context: CGM.getContext());
2463 else
2464 T = E->getExprOperand()->getType();
2465 return CGM.GetAddrOfRTTIDescriptor(Ty: T);
2466}
2467
/// Emit the address of a lifetime-extended temporary, which lives in a
/// global variable.
ConstantLValue
ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *E) {
  // Only static-storage-duration temporaries can appear in a constant lvalue.
  assert(E->getStorageDuration() == SD_Static);
  // Skip casts and subobject adjustments to reach the expression whose value
  // the global temporary actually holds.
  const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
  return CGM.GetAddrOfGlobalTemporary(E, Inner);
}
2475
/// Lower an already-evaluated APValue to an LLVM constant in its abstract
/// (scalar, not in-memory) representation.
///
/// \param Value the evaluated constant to lower.
/// \param DestType the Clang type the constant is emitted as.
/// \param EnablePtrAuthFunctionTypeDiscrimination passed through to the
///        lvalue emitter to control ptrauth function-type discrimination.
/// \returns the constant, or null if it cannot be emitted statically.
llvm::Constant *
ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType,
                                bool EnablePtrAuthFunctionTypeDiscrimination) {
  switch (Value.getKind()) {
  case APValue::None:
  case APValue::Indeterminate:
    // Out-of-lifetime and indeterminate values can be modeled as 'undef'.
    return llvm::UndefValue::get(T: CGM.getTypes().ConvertType(T: DestType));
  case APValue::LValue:
    // Addresses (of globals, functions, labels, ...) need their own emitter.
    return ConstantLValueEmitter(*this, Value, DestType,
                                 EnablePtrAuthFunctionTypeDiscrimination)
        .tryEmit();
  case APValue::Int:
    // A __ptrauth-qualified integer cannot be emitted as a plain constant
    // unless it is a null value that does not itself get signed.
    if (PointerAuthQualifier PointerAuth = DestType.getPointerAuth();
        PointerAuth &&
        (PointerAuth.authenticatesNullValues() || Value.getInt() != 0))
      return nullptr;
    return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Value.getInt());
  case APValue::FixedPoint:
    // Fixed-point values are represented by their underlying integer bits.
    return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                  V: Value.getFixedPoint().getValue());
  case APValue::ComplexInt: {
    // _Complex int lowers to a two-element struct { real, imag }.
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                        V: Value.getComplexIntReal());
    Complex[1] = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                        V: Value.getComplexIntImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy =
        llvm::StructType::get(elt1: Complex[0]->getType(), elts: Complex[1]->getType());
    return llvm::ConstantStruct::get(T: STy, V: Complex);
  }
  case APValue::Float: {
    const llvm::APFloat &Init = Value.getFloat();
    // Half is emitted as an i16 bit pattern when the target has no native
    // half type and uses conversion intrinsics instead.
    if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() &&
        !CGM.getContext().getLangOpts().NativeHalfType &&
        CGM.getContext().getTargetInfo().useFP16ConversionIntrinsics())
      return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                    V: Init.bitcastToAPInt());
    else
      return llvm::ConstantFP::get(Context&: CGM.getLLVMContext(), V: Init);
  }
  case APValue::ComplexFloat: {
    // _Complex float/double lowers to a two-element struct { real, imag }.
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantFP::get(Context&: CGM.getLLVMContext(),
                                       V: Value.getComplexFloatReal());
    Complex[1] = llvm::ConstantFP::get(Context&: CGM.getLLVMContext(),
                                       V: Value.getComplexFloatImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy =
        llvm::StructType::get(elt1: Complex[0]->getType(), elts: Complex[1]->getType());
    return llvm::ConstantStruct::get(T: STy, V: Complex);
  }
  case APValue::Vector: {
    unsigned NumElts = Value.getVectorLength();
    SmallVector<llvm::Constant *, 4> Inits(NumElts);

    // Emit each element; indeterminate elements become undef of the
    // vector's element type.
    for (unsigned I = 0; I != NumElts; ++I) {
      const APValue &Elt = Value.getVectorElt(I);
      if (Elt.isInt())
        Inits[I] = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Elt.getInt());
      else if (Elt.isFloat())
        Inits[I] = llvm::ConstantFP::get(Context&: CGM.getLLVMContext(), V: Elt.getFloat());
      else if (Elt.isIndeterminate())
        Inits[I] = llvm::UndefValue::get(T: CGM.getTypes().ConvertType(
            T: DestType->castAs<VectorType>()->getElementType()))
      else
        llvm_unreachable("unsupported vector element type");
    }
    return llvm::ConstantVector::get(V: Inits);
  }
  case APValue::AddrLabelDiff: {
    const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
    const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
    llvm::Constant *LHS = tryEmitPrivate(E: LHSExpr, destType: LHSExpr->getType());
    llvm::Constant *RHS = tryEmitPrivate(E: RHSExpr, destType: RHSExpr->getType());
    if (!LHS || !RHS) return nullptr;

    // Compute difference
    llvm::Type *ResultType = CGM.getTypes().ConvertType(T: DestType);
    LHS = llvm::ConstantExpr::getPtrToInt(C: LHS, Ty: CGM.IntPtrTy);
    RHS = llvm::ConstantExpr::getPtrToInt(C: RHS, Ty: CGM.IntPtrTy);
    llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(C1: LHS, C2: RHS);

    // LLVM is a bit sensitive about the exact format of the
    // address-of-label difference; make sure to truncate after
    // the subtraction.
    return llvm::ConstantExpr::getTruncOrBitCast(C: AddrLabelDiff, Ty: ResultType);
  }
  case APValue::Struct:
  case APValue::Union:
    return ConstStructBuilder::BuildStruct(Emitter&: *this, Val: Value, ValTy: DestType);
  case APValue::Array: {
    const ArrayType *ArrayTy = CGM.getContext().getAsArrayType(T: DestType);
    unsigned NumElements = Value.getArraySize();
    unsigned NumInitElts = Value.getArrayInitializedElts();

    // Emit array filler, if there is one.
    llvm::Constant *Filler = nullptr;
    if (Value.hasArrayFiller()) {
      Filler = tryEmitAbstractForMemory(value: Value.getArrayFiller(),
                                        destType: ArrayTy->getElementType());
      if (!Filler)
        return nullptr;
    }

    // Emit initializer elements. A null filler will be folded away, so only
    // reserve room for the explicitly initialized prefix in that case.
    SmallVector<llvm::Constant*, 16> Elts;
    if (Filler && Filler->isNullValue())
      Elts.reserve(N: NumInitElts + 1);
    else
      Elts.reserve(N: NumElements);

    // Track whether all elements share one LLVM type; EmitArrayConstant can
    // then emit a homogeneous array rather than a struct.
    llvm::Type *CommonElementType = nullptr;
    for (unsigned I = 0; I < NumInitElts; ++I) {
      llvm::Constant *C = tryEmitPrivateForMemory(
          value: Value.getArrayInitializedElt(I), destType: ArrayTy->getElementType());
      if (!C) return nullptr;

      if (I == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(Elt: C);
    }

    llvm::ArrayType *Desired =
        cast<llvm::ArrayType>(Val: CGM.getTypes().ConvertType(T: DestType));

    // Fix the type of incomplete arrays if the initializer isn't empty.
    if (DestType->isIncompleteArrayType() && !Elts.empty())
      Desired = llvm::ArrayType::get(ElementType: Desired->getElementType(), NumElements: Elts.size());

    return EmitArrayConstant(CGM, DesiredType: Desired, CommonElementType, ArrayBound: NumElements, Elements&: Elts,
                             Filler);
  }
  case APValue::MemberPointer:
    // Member pointers are ABI-specific; delegate to the C++ ABI.
    return CGM.getCXXABI().EmitMemberPointer(MP: Value, MPT: DestType);
  }
  llvm_unreachable("Unknown APValue kind");
}
2621
2622llvm::GlobalVariable *CodeGenModule::getAddrOfConstantCompoundLiteralIfEmitted(
2623 const CompoundLiteralExpr *E) {
2624 return EmittedCompoundLiterals.lookup(Val: E);
2625}
2626
2627void CodeGenModule::setAddrOfConstantCompoundLiteral(
2628 const CompoundLiteralExpr *CLE, llvm::GlobalVariable *GV) {
2629 bool Ok = EmittedCompoundLiterals.insert(KV: std::make_pair(x&: CLE, y&: GV)).second;
2630 (void)Ok;
2631 assert(Ok && "CLE has already been emitted!");
2632}
2633
/// Return the address of a global holding the value of a file-scope
/// compound literal, emitting it on first use.
ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
  assert(E->isFileScope() && "not a file-scope compound literal expr");
  ConstantEmitter emitter(*this);
  return tryEmitGlobalCompoundLiteral(emitter, E);
}
2640
/// Emit the constant for a member-pointer expression of the form &C::member.
///
/// \param uo the address-of unary operator whose operand is a DeclRefExpr
///           naming the member.
llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form.
  const MemberPointerType *type = cast<MemberPointerType>(Val: uo->getType());
  const ValueDecl *decl = cast<DeclRefExpr>(Val: uo->getSubExpr())->getDecl();

  // A member function pointer.
  if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(Val: decl))
    return getCXXABI().EmitMemberFunctionPointer(MD: method);

  // Otherwise, a member data pointer: its representation is derived from the
  // field's byte offset within the class, as decided by the C++ ABI.
  getContext().recordMemberDataPointerEvaluation(VD: decl);
  uint64_t fieldOffset = getContext().getFieldOffset(FD: decl);
  CharUnits chars = getContext().toCharUnitsFromBits(BitSize: (int64_t) fieldOffset);
  return getCXXABI().EmitMemberDataPointer(MPT: type, offset: chars);
}
2657
2658static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
2659 llvm::Type *baseType,
2660 const CXXRecordDecl *base);
2661
/// Build the "null" constant for a record type that is not trivially
/// zero-initializable (e.g. it contains member data pointers whose null
/// representation is nonzero under the ABI).
///
/// \param asCompleteObject whether to lay out the complete object (including
///        virtual bases) or just the base subobject.
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const RecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  // Pick the LLVM struct matching the requested layout flavor.
  llvm::StructType *structure =
      (asCompleteObject ? layout.getLLVMType()
                        : layout.getBaseSubobjectLLVMType());

  // Elements start out null; anything still null at the end is zero-filled.
  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  auto CXXR = dyn_cast<CXXRecordDecl>(Val: record);
  // Fill in all the bases.
  if (CXXR) {
    for (const auto &I : CXXR->bases()) {
      if (I.isVirtual()) {
        // Ignore virtual bases; if we're laying out for a complete
        // object, we'll lay these out later.
        continue;
      }

      const auto *base = I.getType()->castAsCXXRecordDecl();
      // Ignore empty bases.
      if (isEmptyRecordForLayout(Context: CGM.getContext(), T: I.getType()) ||
          CGM.getContext()
              .getASTRecordLayout(D: base)
              .getNonVirtualSize()
              .isZero())
        continue;

      unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(RD: base);
      llvm::Type *baseType = structure->getElementType(N: fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Fill in all the fields.
  for (const auto *Field : record->fields()) {
    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!Field->isBitField() &&
        !isEmptyFieldForLayout(Context: CGM.getContext(), FD: Field)) {
      unsigned fieldIndex = layout.getLLVMFieldNo(FD: Field);
      elements[fieldIndex] = CGM.EmitNullConstant(T: Field->getType());
    }

    // For unions, stop after the first named field.
    if (record->isUnion()) {
      if (Field->getIdentifier())
        break;
      // An anonymous-struct member with a named data member also counts as
      // the union's active "first named" field.
      if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
        if (FieldRD->findFirstNamedDataMember())
          break;
    }
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (CXXR && asCompleteObject) {
    for (const auto &I : CXXR->vbases()) {
      const auto *base = I.getType()->castAsCXXRecordDecl();
      // Ignore empty bases.
      if (isEmptyRecordForLayout(Context: CGM.getContext(), T: I.getType()))
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      llvm::Type *baseType = structure->getElementType(N: fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(Ty: structure->getElementType(N: i));
  }

  return llvm::ConstantStruct::get(T: structure, V: elements);
}
2744
2745/// Emit the null constant for a base subobject.
2746static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
2747 llvm::Type *baseType,
2748 const CXXRecordDecl *base) {
2749 const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
2750
2751 // Just zero out bases that don't have any pointer to data members.
2752 if (baseLayout.isZeroInitializableAsBase())
2753 return llvm::Constant::getNullValue(Ty: baseType);
2754
2755 // Otherwise, we can just use its null constant.
2756 return EmitNullConstant(CGM, record: base, /*asCompleteObject=*/false);
2757}
2758
2759llvm::Constant *ConstantEmitter::emitNullForMemory(CodeGenModule &CGM,
2760 QualType T) {
2761 return emitForMemory(CGM, C: CGM.EmitNullConstant(T), destType: T);
2762}
2763
/// Emit the constant representing a null/zero-initialized value of type T.
/// For most types this is an all-zero constant, but member data pointers
/// (and records containing them) may have a nonzero null representation.
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  // Pointers get the target's null-pointer value for their address space.
  if (T->getAs<PointerType>())
    return getNullPointer(
        T: cast<llvm::PointerType>(Val: getTypes().ConvertTypeForMem(T)), QT: T);

  // Fast path: most types are zero-initializable with an all-zero constant.
  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(Ty: getTypes().ConvertTypeForMem(T));

  // Non-zero-initializable array: replicate the element's null constant.
  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
    llvm::ArrayType *ATy =
        cast<llvm::ArrayType>(Val: getTypes().ConvertTypeForMem(T));

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element =
        ConstantEmitter::emitNullForMemory(CGM&: *this, T: ElementTy);
    unsigned NumElements = CAT->getZExtSize();
    SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
    return llvm::ConstantArray::get(T: ATy, V: Array);
  }

  // Non-zero-initializable record: build the member-by-member null constant.
  if (const auto *RD = T->getAsRecordDecl())
    return ::EmitNullConstant(CGM&: *this, record: RD,
                              /*asCompleteObject=*/true);

  // The only remaining non-zero-initializable case is a member data pointer,
  // whose null representation is ABI-defined.
  assert(T->isMemberDataPointerType() &&
         "Should only see pointers to data members here!");

  return getCXXABI().EmitNullMemberPointer(MPT: T->castAs<MemberPointerType>());
}
2794
2795llvm::Constant *
2796CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
2797 return ::EmitNullConstant(CGM&: *this, record: Record, asCompleteObject: false);
2798}
2799