1//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Constant Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCXXABI.h"
15#include "CGObjCRuntime.h"
16#include "CGRecordLayout.h"
17#include "CodeGenFunction.h"
18#include "CodeGenModule.h"
19#include "ConstantEmitter.h"
20#include "TargetInfo.h"
21#include "clang/AST/APValue.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/NSAPI.h"
25#include "clang/AST/RecordLayout.h"
26#include "clang/AST/StmtVisitor.h"
27#include "clang/Basic/Builtins.h"
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/Sequence.h"
30#include "llvm/Analysis/ConstantFolding.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/GlobalVariable.h"
35#include "llvm/Support/SipHash.h"
36#include <optional>
37using namespace clang;
38using namespace CodeGen;
39
40//===----------------------------------------------------------------------===//
41// ConstantAggregateBuilder
42//===----------------------------------------------------------------------===//
43
44namespace {
45class ConstExprEmitter;
46
47llvm::Constant *getPadding(const CodeGenModule &CGM, CharUnits PadSize) {
48 llvm::Type *Ty = CGM.CharTy;
49 if (PadSize > CharUnits::One())
50 Ty = llvm::ArrayType::get(ElementType: Ty, NumElements: PadSize.getQuantity());
51 if (CGM.shouldZeroInitPadding()) {
52 return llvm::Constant::getNullValue(Ty);
53 }
54 return llvm::UndefValue::get(T: Ty);
55}
56
57struct ConstantAggregateBuilderUtils {
58 CodeGenModule &CGM;
59
60 ConstantAggregateBuilderUtils(CodeGenModule &CGM) : CGM(CGM) {}
61
62 CharUnits getAlignment(const llvm::Constant *C) const {
63 return CharUnits::fromQuantity(
64 Quantity: CGM.getDataLayout().getABITypeAlign(Ty: C->getType()));
65 }
66
67 CharUnits getSize(llvm::Type *Ty) const {
68 return CharUnits::fromQuantity(Quantity: CGM.getDataLayout().getTypeAllocSize(Ty));
69 }
70
71 CharUnits getSize(const llvm::Constant *C) const {
72 return getSize(Ty: C->getType());
73 }
74
75 llvm::Constant *getPadding(CharUnits PadSize) const {
76 return ::getPadding(CGM, PadSize);
77 }
78
79 llvm::Constant *getZeroes(CharUnits ZeroSize) const {
80 llvm::Type *Ty = llvm::ArrayType::get(ElementType: CGM.CharTy, NumElements: ZeroSize.getQuantity());
81 return llvm::ConstantAggregateZero::get(Ty);
82 }
83};
84
/// Incremental builder for an llvm::Constant* holding a struct or array
/// constant.
class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils {
  /// The elements of the constant. These two arrays must have the same size;
  /// Offsets[i] describes the offset of Elems[i] within the constant. The
  /// elements are kept in increasing offset order, and we ensure that there
  /// is no overlap: Offsets[i+1] >= Offsets[i] + getSize(Elems[i]).
  ///
  /// This may contain explicit padding elements (in order to create a
  /// natural layout), but need not. Gaps between elements are implicitly
  /// considered to be filled with undef.
  llvm::SmallVector<llvm::Constant*, 32> Elems;
  llvm::SmallVector<CharUnits, 32> Offsets;

  /// The size of the constant (the maximum end offset of any added element).
  /// May be larger than the end of Elems.back() if we split the last element
  /// and removed some trailing undefs.
  CharUnits Size = CharUnits::Zero();

  /// This is true only if laying out Elems in order as the elements of a
  /// non-packed LLVM struct will give the correct layout.
  bool NaturalLayout = true;

  /// Decompose the element at \p Index into smaller constants so that an
  /// element boundary can exist at (or near) \p Hint. Returns false if the
  /// element cannot be decomposed.
  bool split(size_t Index, CharUnits Hint);
  /// Find (splitting elements as needed) the element index corresponding to
  /// offset \p Pos. Returns std::nullopt if an overlapping element cannot
  /// be split.
  std::optional<size_t> splitAt(CharUnits Pos);

  /// Build a single constant covering [StartOffset, StartOffset + Size)
  /// from the given elements, matching \p DesiredTy where possible.
  static llvm::Constant *buildFrom(CodeGenModule &CGM,
                                   ArrayRef<llvm::Constant *> Elems,
                                   ArrayRef<CharUnits> Offsets,
                                   CharUnits StartOffset, CharUnits Size,
                                   bool NaturalLayout, llvm::Type *DesiredTy,
                                   bool AllowOversized);

public:
  ConstantAggregateBuilder(CodeGenModule &CGM)
      : ConstantAggregateBuilderUtils(CGM) {}

  /// Update or overwrite the value starting at \p Offset with \c C.
  ///
  /// \param AllowOverwrite If \c true, this constant might overwrite (part of)
  ///        a constant that has already been added. This flag is only used to
  ///        detect bugs.
  bool add(llvm::Constant *C, CharUnits Offset, bool AllowOverwrite);

  /// Update or overwrite the bits starting at \p OffsetInBits with \p Bits.
  bool addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite);

  /// Attempt to condense the value starting at \p Offset to a constant of type
  /// \p DesiredTy.
  void condense(CharUnits Offset, llvm::Type *DesiredTy);

  /// Produce a constant representing the entire accumulated value, ideally of
  /// the specified type. If \p AllowOversized, the constant might be larger
  /// than implied by \p DesiredTy (eg, if there is a flexible array member).
  /// Otherwise, the constant will be of exactly the same size as \p DesiredTy
  /// even if we can't represent it as that type.
  llvm::Constant *build(llvm::Type *DesiredTy, bool AllowOversized) const {
    return buildFrom(CGM, Elems, Offsets, CharUnits::Zero(), Size,
                     NaturalLayout, DesiredTy, AllowOversized);
  }
};
146
/// Replace the elements of \p C in the index range [BeginOff, EndOff) with
/// the values in \p Vals. The container may grow or shrink as a result.
template <typename Container, typename Range = std::initializer_list<
                                  typename Container::value_type>>
static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) {
  assert(BeginOff <= EndOff && "invalid replacement range");
  // Erase the old slice, then splice the replacement values in its place.
  auto InsertPos = C.erase(C.begin() + BeginOff, C.begin() + EndOff);
  C.insert(InsertPos, Vals.begin(), Vals.end());
}
153
bool ConstantAggregateBuilder::add(llvm::Constant *C, CharUnits Offset,
                                   bool AllowOverwrite) {
  // Common case: appending to a layout.
  if (Offset >= Size) {
    CharUnits Align = getAlignment(C);
    CharUnits AlignedSize = Size.alignTo(Align);
    if (AlignedSize > Offset || Offset.alignTo(Align) != Offset)
      // Natural placement of C would not land at Offset; we'll need an
      // explicit (possibly packed) layout when building the final constant.
      NaturalLayout = false;
    else if (AlignedSize < Offset) {
      // Natural alignment leaves a gap before Offset; fill it with explicit
      // padding so the element list can still form a natural layout.
      Elems.push_back(getPadding(Offset - Size));
      Offsets.push_back(Size);
    }
    Elems.push_back(C);
    Offsets.push_back(Offset);
    Size = Offset + getSize(C);
    return true;
  }

  // Uncommon case: constant overlaps what we've already created.
  // Split existing elements at both boundaries of C, then replace the
  // covered range wholesale.
  std::optional<size_t> FirstElemToReplace = splitAt(Offset);
  if (!FirstElemToReplace)
    return false;

  CharUnits CSize = getSize(C);
  std::optional<size_t> LastElemToReplace = splitAt(Offset + CSize);
  if (!LastElemToReplace)
    return false;

  assert((FirstElemToReplace == LastElemToReplace || AllowOverwrite) &&
         "unexpectedly overwriting field");

  replace(Elems, *FirstElemToReplace, *LastElemToReplace, {C});
  replace(Offsets, *FirstElemToReplace, *LastElemToReplace, {Offset});
  Size = std::max(Size, Offset + CSize);
  NaturalLayout = false;
  return true;
}
191
bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
                                       bool AllowOverwrite) {
  const ASTContext &Context = CGM.getContext();
  const uint64_t CharWidth = CGM.getContext().getCharWidth();

  // Offset of where we want the first bit to go within the bits of the
  // current char.
  unsigned OffsetWithinChar = OffsetInBits % CharWidth;

  // We split bit-fields up into individual bytes. Walk over the bytes and
  // update them.
  for (CharUnits OffsetInChars =
           Context.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar);
       /**/; ++OffsetInChars) {
    // Number of bits we want to fill in this char.
    unsigned WantedBits =
        std::min((uint64_t)Bits.getBitWidth(), CharWidth - OffsetWithinChar);

    // Get a char containing the bits we want in the right places. The other
    // bits have unspecified values.
    llvm::APInt BitsThisChar = Bits;
    if (BitsThisChar.getBitWidth() < CharWidth)
      BitsThisChar = BitsThisChar.zext(CharWidth);
    if (CGM.getDataLayout().isBigEndian()) {
      // Figure out how much to shift by. We may need to left-shift if we have
      // less than one byte of Bits left.
      int Shift = Bits.getBitWidth() - CharWidth + OffsetWithinChar;
      if (Shift > 0)
        BitsThisChar.lshrInPlace(Shift);
      else if (Shift < 0)
        BitsThisChar = BitsThisChar.shl(-Shift);
    } else {
      // Little endian: the low bits of Bits land in this char, shifted up
      // past any bits of the char already consumed.
      BitsThisChar = BitsThisChar.shl(OffsetWithinChar);
    }
    if (BitsThisChar.getBitWidth() > CharWidth)
      BitsThisChar = BitsThisChar.trunc(CharWidth);

    if (WantedBits == CharWidth) {
      // Got a full byte: just add it directly.
      add(llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar),
          OffsetInChars, AllowOverwrite);
    } else {
      // Partial byte: update the existing integer if there is one. If we
      // can't split out a 1-CharUnit range to update, then we can't add
      // these bits and fail the entire constant emission.
      std::optional<size_t> FirstElemToUpdate = splitAt(OffsetInChars);
      if (!FirstElemToUpdate)
        return false;
      std::optional<size_t> LastElemToUpdate =
          splitAt(OffsetInChars + CharUnits::One());
      if (!LastElemToUpdate)
        return false;
      assert(*LastElemToUpdate - *FirstElemToUpdate < 2 &&
             "should have at most one element covering one byte");

      // Figure out which bits we want and discard the rest.
      llvm::APInt UpdateMask(CharWidth, 0);
      if (CGM.getDataLayout().isBigEndian())
        UpdateMask.setBits(CharWidth - OffsetWithinChar - WantedBits,
                           CharWidth - OffsetWithinChar);
      else
        UpdateMask.setBits(OffsetWithinChar, OffsetWithinChar + WantedBits);
      BitsThisChar &= UpdateMask;

      if (*FirstElemToUpdate == *LastElemToUpdate ||
          Elems[*FirstElemToUpdate]->isNullValue() ||
          isa<llvm::UndefValue>(Elems[*FirstElemToUpdate])) {
        // All existing bits are either zero or undef.
        add(llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar),
            OffsetInChars, /*AllowOverwrite*/ true);
      } else {
        llvm::Constant *&ToUpdate = Elems[*FirstElemToUpdate];
        // In order to perform a partial update, we need the existing bitwise
        // value, which we can only extract for a constant int.
        auto *CI = dyn_cast<llvm::ConstantInt>(ToUpdate);
        if (!CI)
          return false;
        // Because this is a 1-CharUnit range, the constant occupying it must
        // be exactly one CharUnit wide.
        assert(CI->getBitWidth() == CharWidth && "splitAt failed");
        assert((!(CI->getValue() & UpdateMask) || AllowOverwrite) &&
               "unexpectedly overwriting bitfield");
        // Merge the new bits with the untouched bits of the existing value.
        BitsThisChar |= (CI->getValue() & ~UpdateMask);
        ToUpdate = llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar);
      }
    }

    // Stop if we've added all the bits.
    if (WantedBits == Bits.getBitWidth())
      break;

    // Remove the consumed bits from Bits.
    if (!CGM.getDataLayout().isBigEndian())
      Bits.lshrInPlace(WantedBits);
    Bits = Bits.trunc(Bits.getBitWidth() - WantedBits);

    // The remaining bits go at the start of the following bytes.
    OffsetWithinChar = 0;
  }

  return true;
}
294
/// Returns a position within Elems and Offsets such that all elements
/// before the returned index end before Pos and all elements at or after
/// the returned index begin at or after Pos. Splits elements as necessary
/// to ensure this. Returns std::nullopt if we find something we can't split.
std::optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
  // Past the high-water mark: every existing element ends before Pos.
  if (Pos >= Size)
    return Offsets.size();

  while (true) {
    // Offsets is kept sorted, so binary-search for the first element that
    // starts strictly after Pos.
    auto FirstAfterPos = llvm::upper_bound(Offsets, Pos);
    if (FirstAfterPos == Offsets.begin())
      return 0;

    // If we already have an element starting at Pos, we're done.
    size_t LastAtOrBeforePosIndex = FirstAfterPos - Offsets.begin() - 1;
    if (Offsets[LastAtOrBeforePosIndex] == Pos)
      return LastAtOrBeforePosIndex;

    // We found an element starting before Pos. Check for overlap.
    if (Offsets[LastAtOrBeforePosIndex] +
        getSize(Elems[LastAtOrBeforePosIndex]) <= Pos)
      return LastAtOrBeforePosIndex + 1;

    // Try to decompose it into smaller constants, then loop to re-search
    // the updated element list.
    if (!split(LastAtOrBeforePosIndex, Pos))
      return std::nullopt;
  }
}
323
/// Split the constant at index Index, if possible. Return true if we did.
/// Hint indicates the location at which we'd like to split, but may be
/// ignored.
bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
  // Any split invalidates the natural-layout guarantee.
  NaturalLayout = false;
  llvm::Constant *C = Elems[Index];
  CharUnits Offset = Offsets[Index];

  if (auto *CA = dyn_cast<llvm::ConstantAggregate>(C)) {
    // Expand the sequence into its contained elements.
    // FIXME: This assumes vector elements are byte-sized.
    replace(Elems, Index, Index + 1,
            llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
                            [&](unsigned Op) { return CA->getOperand(Op); }));
    if (isa<llvm::ArrayType>(CA->getType()) ||
        isa<llvm::VectorType>(CA->getType())) {
      // Array or vector: elements are uniformly sized, so their offsets form
      // a simple stride from Offset.
      llvm::Type *ElemTy =
          llvm::GetElementPtrInst::getTypeAtIndex(CA->getType(), (uint64_t)0);
      CharUnits ElemSize = getSize(ElemTy);
      replace(
          Offsets, Index, Index + 1,
          llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
                          [&](unsigned Op) { return Offset + Op * ElemSize; }));
    } else {
      // Must be a struct: take each member's offset from the struct layout.
      auto *ST = cast<llvm::StructType>(CA->getType());
      const llvm::StructLayout *Layout =
          CGM.getDataLayout().getStructLayout(ST);
      replace(Offsets, Index, Index + 1,
              llvm::map_range(
                  llvm::seq(0u, CA->getNumOperands()), [&](unsigned Op) {
                    return Offset + CharUnits::fromQuantity(
                                        Layout->getElementOffset(Op));
                  }));
    }
    return true;
  }

  if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(C)) {
    // Expand the sequence into its contained elements.
    // FIXME: This assumes vector elements are byte-sized.
    // FIXME: If possible, split into two ConstantDataSequentials at Hint.
    CharUnits ElemSize = getSize(CDS->getElementType());
    replace(Elems, Index, Index + 1,
            llvm::map_range(llvm::seq(uint64_t(0u), CDS->getNumElements()),
                            [&](uint64_t Elem) {
                              return CDS->getElementAsConstant(Elem);
                            }));
    replace(Offsets, Index, Index + 1,
            llvm::map_range(
                llvm::seq(uint64_t(0u), CDS->getNumElements()),
                [&](uint64_t Elem) { return Offset + Elem * ElemSize; }));
    return true;
  }

  if (isa<llvm::ConstantAggregateZero>(C)) {
    // Split into two zeros at the hinted offset.
    CharUnits ElemSize = getSize(C);
    assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
    replace(Elems, Index, Index + 1,
            {getZeroes(Hint - Offset), getZeroes(Offset + ElemSize - Hint)});
    replace(Offsets, Index, Index + 1, {Offset, Hint});
    return true;
  }

  if (isa<llvm::UndefValue>(C)) {
    // Drop undef; it doesn't contribute to the final layout.
    replace(Elems, Index, Index + 1, {});
    replace(Offsets, Index, Index + 1, {});
    return true;
  }

  // FIXME: We could split a ConstantInt if the need ever arose.
  // We don't need to do this to handle bit-fields because we always eagerly
  // split them into 1-byte chunks.

  return false;
}
403
/// Emit an array constant of \p ArrayBound elements, using \p Filler for any
/// trailing elements not present in \p Elements. (Forward declaration; the
/// definition is not in this part of the file.)
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
                  llvm::Type *CommonElementType, uint64_t ArrayBound,
                  SmallVectorImpl<llvm::Constant *> &Elements,
                  llvm::Constant *Filler);
409
/// Build a single constant covering [StartOffset, StartOffset + Size) from
/// the given element/offset lists, matching DesiredTy where possible.
llvm::Constant *ConstantAggregateBuilder::buildFrom(
    CodeGenModule &CGM, ArrayRef<llvm::Constant *> Elems,
    ArrayRef<CharUnits> Offsets, CharUnits StartOffset, CharUnits Size,
    bool NaturalLayout, llvm::Type *DesiredTy, bool AllowOversized) {
  ConstantAggregateBuilderUtils Utils(CGM);

  if (Elems.empty())
    return llvm::UndefValue::get(DesiredTy);

  // Offset of element I relative to the start of the constant being built.
  auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; };

  // If we want an array type, see if all the elements are the same type and
  // appropriately spaced.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(DesiredTy)) {
    assert(!AllowOversized && "oversized array emission not supported");

    bool CanEmitArray = true;
    llvm::Type *CommonType = Elems[0]->getType();
    llvm::Constant *Filler = llvm::Constant::getNullValue(CommonType);
    CharUnits ElemSize = Utils.getSize(ATy->getElementType());
    SmallVector<llvm::Constant*, 32> ArrayElements;
    for (size_t I = 0; I != Elems.size(); ++I) {
      // Skip zeroes; we'll use a zero value as our array filler.
      if (Elems[I]->isNullValue())
        continue;

      // All remaining elements must be the same type.
      if (Elems[I]->getType() != CommonType ||
          !Offset(I).isMultipleOf(ElemSize)) {
        CanEmitArray = false;
        break;
      }
      // Fill any skipped (zero) positions with the filler, then place this
      // element at its index.
      ArrayElements.resize(Offset(I) / ElemSize + 1, Filler);
      ArrayElements.back() = Elems[I];
    }

    if (CanEmitArray) {
      return EmitArrayConstant(CGM, ATy, CommonType, ATy->getNumElements(),
                               ArrayElements, Filler);
    }

    // Can't emit as an array, carry on to emit as a struct.
  }

  // The size of the constant we plan to generate. This is usually just
  // the size of the initialized type, but in AllowOversized mode (i.e.
  // flexible array init), it can be larger.
  CharUnits DesiredSize = Utils.getSize(DesiredTy);
  if (Size > DesiredSize) {
    assert(AllowOversized && "Elems are oversized");
    DesiredSize = Size;
  }

  // The natural alignment of an unpacked LLVM struct with the given elements.
  CharUnits Align = CharUnits::One();
  for (llvm::Constant *C : Elems)
    Align = std::max(Align, Utils.getAlignment(C));

  // The natural size of an unpacked LLVM struct with the given elements.
  CharUnits AlignedSize = Size.alignTo(Align);

  bool Packed = false;
  ArrayRef<llvm::Constant*> UnpackedElems = Elems;
  llvm::SmallVector<llvm::Constant*, 32> UnpackedElemStorage;
  if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) {
    // The natural layout would be too big; force use of a packed layout.
    NaturalLayout = false;
    Packed = true;
  } else if (DesiredSize > AlignedSize) {
    // The natural layout would be too small. Add padding to fix it. (This
    // is ignored if we choose a packed layout.)
    UnpackedElemStorage.assign(Elems.begin(), Elems.end());
    UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size));
    UnpackedElems = UnpackedElemStorage;
  }

  // If we don't have a natural layout, insert padding as necessary.
  // As we go, double-check to see if we can actually just emit Elems
  // as a non-packed struct and do so opportunistically if possible.
  llvm::SmallVector<llvm::Constant*, 32> PackedElems;
  if (!NaturalLayout) {
    CharUnits SizeSoFar = CharUnits::Zero();
    for (size_t I = 0; I != Elems.size(); ++I) {
      CharUnits Align = Utils.getAlignment(Elems[I]);
      CharUnits NaturalOffset = SizeSoFar.alignTo(Align);
      CharUnits DesiredOffset = Offset(I);
      assert(DesiredOffset >= SizeSoFar && "elements out of order");

      // If the aligned (natural) placement of this element doesn't match
      // its required offset, only a packed struct can represent the layout.
      if (DesiredOffset != NaturalOffset)
        Packed = true;
      if (DesiredOffset != SizeSoFar)
        PackedElems.push_back(Utils.getPadding(DesiredOffset - SizeSoFar));
      PackedElems.push_back(Elems[I]);
      SizeSoFar = DesiredOffset + Utils.getSize(Elems[I]);
    }
    // If we're using the packed layout, pad it out to the desired size if
    // necessary.
    if (Packed) {
      assert(SizeSoFar <= DesiredSize &&
             "requested size is too small for contents");
      if (SizeSoFar < DesiredSize)
        PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar));
    }
  }

  llvm::StructType *STy = llvm::ConstantStruct::getTypeForElements(
      CGM.getLLVMContext(), Packed ? PackedElems : UnpackedElems, Packed);

  // Pick the type to use. If the type is layout identical to the desired
  // type then use it, otherwise use whatever the builder produced for us.
  if (llvm::StructType *DesiredSTy = dyn_cast<llvm::StructType>(DesiredTy)) {
    if (DesiredSTy->isLayoutIdentical(STy))
      STy = DesiredSTy;
  }

  return llvm::ConstantStruct::get(STy, Packed ? PackedElems : UnpackedElems);
}
527
/// Collapse all elements covering [Offset, Offset + sizeof(DesiredTy)) back
/// into a single constant, ideally of type DesiredTy, after a nested update
/// may have split them apart.
void ConstantAggregateBuilder::condense(CharUnits Offset,
                                        llvm::Type *DesiredTy) {
  CharUnits Size = getSize(DesiredTy);

  std::optional<size_t> FirstElemToReplace = splitAt(Offset);
  if (!FirstElemToReplace)
    return;
  size_t First = *FirstElemToReplace;

  std::optional<size_t> LastElemToReplace = splitAt(Offset + Size);
  if (!LastElemToReplace)
    return;
  size_t Last = *LastElemToReplace;

  size_t Length = Last - First;
  // Nothing explicit covers this range (it is implicit undef); done.
  if (Length == 0)
    return;

  if (Length == 1 && Offsets[First] == Offset &&
      getSize(Elems[First]) == Size) {
    // Re-wrap single element structs if necessary. Otherwise, leave any single
    // element constant of the right size alone even if it has the wrong type.
    auto *STy = dyn_cast<llvm::StructType>(DesiredTy);
    if (STy && STy->getNumElements() == 1 &&
        STy->getElementType(0) == Elems[First]->getType())
      Elems[First] = llvm::ConstantStruct::get(STy, Elems[First]);
    return;
  }

  // Build one replacement constant for the covered slice and splice it into
  // the element and offset lists.
  llvm::Constant *Replacement = buildFrom(
      CGM, ArrayRef(Elems).slice(First, Length),
      ArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy),
      /*known to have natural layout=*/false, DesiredTy, false);
  replace(Elems, First, Last, {Replacement});
  replace(Offsets, First, Last, {Offset});
}
564
565//===----------------------------------------------------------------------===//
566// ConstStructBuilder
567//===----------------------------------------------------------------------===//
568
/// Builds the constant initializer for a struct/union/C++ class, delegating
/// the byte-level bookkeeping to a ConstantAggregateBuilder.
class ConstStructBuilder {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  ConstantAggregateBuilder &Builder;
  // Offset of this record within the overall constant being built (nonzero
  // when emitting a nested record or a designated-initializer update).
  CharUnits StartOffset;

public:
  /// Emit a constant for the initializer list \p ILE of record type
  /// \p StructTy.
  static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
                                     const InitListExpr *ILE,
                                     QualType StructTy);
  /// Emit a constant for the already-evaluated value \p Value of type
  /// \p ValTy.
  static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
                                     const APValue &Value, QualType ValTy);
  /// Overwrite parts of an in-progress constant from a designated-init
  /// update list.
  static bool UpdateStruct(ConstantEmitter &Emitter,
                           ConstantAggregateBuilder &Const, CharUnits Offset,
                           const InitListExpr *Updater);

private:
  ConstStructBuilder(ConstantEmitter &Emitter,
                     ConstantAggregateBuilder &Builder, CharUnits StartOffset)
      : CGM(Emitter.CGM), Emitter(Emitter), Builder(Builder),
        StartOffset(StartOffset) {}

  // Append a non-bitfield field's constant; FieldOffset is in bits from the
  // start of this record.
  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr, bool AllowOverwrite = false);

  // Append a constant at a char offset from the start of this record.
  bool AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst,
                   bool AllowOverwrite = false);

  // Append a bitfield's value, adjusted to its declared bit width.
  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::Constant *InitExpr, bool AllowOverwrite = false);

  bool Build(const InitListExpr *ILE, bool AllowOverwrite);
  bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
             const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
  // Zero-initialize padding around a field when the module requests
  // zero-initialized padding (see CGM.shouldZeroInitPadding()).
  bool DoZeroInitPadding(const ASTRecordLayout &Layout, unsigned FieldNo,
                         const FieldDecl &Field, bool AllowOverwrite,
                         CharUnits &SizeSoFar, bool &ZeroFieldSize);
  // Zero-initialize any trailing padding after the last field.
  bool DoZeroInitPadding(const ASTRecordLayout &Layout, bool AllowOverwrite,
                         CharUnits SizeSoFar);
  llvm::Constant *Finalize(QualType Ty);
};
610
611bool ConstStructBuilder::AppendField(
612 const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitCst,
613 bool AllowOverwrite) {
614 const ASTContext &Context = CGM.getContext();
615
616 CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(BitSize: FieldOffset);
617
618 return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite);
619}
620
621bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
622 llvm::Constant *InitCst,
623 bool AllowOverwrite) {
624 return Builder.add(C: InitCst, Offset: StartOffset + FieldOffsetInChars, AllowOverwrite);
625}
626
/// Append the value \p C to the bitfield \p Field, truncating or extending
/// it to the bitfield's declared width and handing the bits to the
/// aggregate builder.
bool ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset, llvm::Constant *C,
                                        bool AllowOverwrite) {

  llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
  if (!CI) {
    // Constants for long _BitInt types are sometimes split into individual
    // bytes. Try to fold these back into an integer constant. If that doesn't
    // work out, then we are trying to initialize a bitfield with a non-trivial
    // constant, this must require run-time code.
    llvm::Type *LoadType =
        CGM.getTypes().convertTypeForLoadStore(Field->getType(), C->getType());
    llvm::Constant *FoldedConstant = llvm::ConstantFoldLoadFromConst(
        C, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
    CI = dyn_cast_if_present<llvm::ConstantInt>(FoldedConstant);
    if (!CI)
      return false;
  }

  const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (Info.Size > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(Info.Size);

  // Truncate the size of FieldValue to the bit field size.
  if (Info.Size < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(Info.Size);

  // Bitfield offsets are tracked in bits: convert the record's start offset
  // to bits and add the field's bit offset within the record.
  return Builder.addBits(FieldValue,
                         CGM.getContext().toBits(StartOffset) + FieldOffset,
                         AllowOverwrite);
}
666
/// Apply the designated-initializer update list \p Updater to the constant
/// being accumulated in \p Const, starting at \p Offset. Record types are
/// delegated to ConstStructBuilder::UpdateStruct; constant arrays are walked
/// element by element. Returns false for any other type or when an element
/// can't be emitted as a constant.
static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
                                      ConstantAggregateBuilder &Const,
                                      CharUnits Offset, QualType Type,
                                      const InitListExpr *Updater) {
  if (Type->isRecordType())
    return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater);

  auto CAT = Emitter.CGM.getContext().getAsConstantArrayType(Type);
  if (!CAT)
    return false;
  QualType ElemType = CAT->getElementType();
  CharUnits ElemSize = Emitter.CGM.getContext().getTypeSizeInChars(ElemType);
  llvm::Type *ElemTy = Emitter.CGM.getTypes().ConvertTypeForMem(ElemType);

  // Emit the array filler, if any (a NoInitExpr filler means "leave the old
  // value", so no filler constant is needed).
  llvm::Constant *FillC = nullptr;
  if (const Expr *Filler = Updater->getArrayFiller()) {
    if (!isa<NoInitExpr>(Filler)) {
      FillC = Emitter.tryEmitAbstractForMemory(Filler, ElemType);
      if (!FillC)
        return false;
    }
  }

  // With a filler, every element of the array must be visited; otherwise
  // only the explicitly listed inits.
  unsigned NumElementsToUpdate =
      FillC ? CAT->getZExtSize() : Updater->getNumInits();
  for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
    const Expr *Init = nullptr;
    if (I < Updater->getNumInits())
      Init = Updater->getInit(I);

    if (!Init && FillC) {
      // No explicit init for this element: write the filler.
      if (!Const.add(FillC, Offset, true))
        return false;
    } else if (!Init || isa<NoInitExpr>(Init)) {
      // NoInitExpr (or no init and no filler): keep the existing value.
      continue;
    } else if (const auto *ChildILE = dyn_cast<InitListExpr>(Init)) {
      // Nested update list: recurse into this element.
      if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, ElemType,
                                     ChildILE))
        return false;
      // Attempt to reduce the array element to a single constant if necessary.
      Const.condense(Offset, ElemTy);
    } else {
      llvm::Constant *Val = Emitter.tryEmitPrivateForMemory(Init, ElemType);
      if (!Const.add(Val, Offset, true))
        return false;
    }
  }

  return true;
}
717
/// Build the constant for an explicit initializer list. Returns false when
/// some part cannot be represented as a constant, in which case the caller
/// falls back to runtime initialization.
bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
  auto *RD = ILE->getType()->castAsRecordDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  // FieldNo is pre-incremented at the top of the loop below, so it starts
  // at -1 (wrapping is intentional for an unsigned counter).
  unsigned FieldNo = -1;
  unsigned ElementNo = 0;

  // Bail out if we have base classes. We could support these, but they only
  // arise in C++1z where we will have already constant folded most interesting
  // cases. FIXME: There are still a few more cases we can handle this way.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    if (CXXRD->getNumBases())
      return false;

  const bool ZeroInitPadding = CGM.shouldZeroInitPadding();
  bool ZeroFieldSize = false;
  CharUnits SizeSoFar = CharUnits::Zero();

  for (FieldDecl *Field : RD->fields()) {
    ++FieldNo;

    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() &&
        !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
      continue;

    // Don't emit anonymous bitfields.
    if (Field->isUnnamedBitField())
      continue;

    // Get the initializer. A struct can include fields without initializers,
    // we just use explicit null values for them.
    const Expr *Init = nullptr;
    if (ElementNo < ILE->getNumInits())
      Init = ILE->getInit(ElementNo++);
    if (isa_and_nonnull<NoInitExpr>(Init)) {
      // NoInitExpr: leave the field's value alone, but still zero the
      // surrounding padding if the module requests it.
      if (ZeroInitPadding &&
          !DoZeroInitPadding(Layout, FieldNo, *Field, AllowOverwrite, SizeSoFar,
                             ZeroFieldSize))
        return false;
      continue;
    }

    // Zero-sized fields are not emitted, but their initializers may still
    // prevent emission of this struct as a constant.
    if (isEmptyFieldForLayout(CGM.getContext(), Field)) {
      if (Init && Init->HasSideEffects(CGM.getContext()))
        return false;
      continue;
    }

    if (ZeroInitPadding &&
        !DoZeroInitPadding(Layout, FieldNo, *Field, AllowOverwrite, SizeSoFar,
                           ZeroFieldSize))
      return false;

    // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
    // represents additional overwriting of our current constant value, and not
    // a new constant to emit independently.
    if (AllowOverwrite &&
        (Field->getType()->isArrayType() || Field->getType()->isRecordType())) {
      if (auto *SubILE = dyn_cast<InitListExpr>(Init)) {
        CharUnits Offset = CGM.getContext().toCharUnitsFromBits(
            Layout.getFieldOffset(FieldNo));
        if (!EmitDesignatedInitUpdater(Emitter, Builder, StartOffset + Offset,
                                       Field->getType(), SubILE))
          return false;
        // If we split apart the field's value, try to collapse it down to a
        // single value now.
        Builder.condense(StartOffset + Offset,
                         CGM.getTypes().ConvertTypeForMem(Field->getType()));
        continue;
      }
    }

    llvm::Constant *EltInit =
        Init ? Emitter.tryEmitPrivateForMemory(Init, Field->getType())
             : Emitter.emitNullForMemory(Field->getType());
    if (!EltInit)
      return false;

    if (ZeroInitPadding && ZeroFieldSize)
      SizeSoFar += CharUnits::fromQuantity(
          CGM.getDataLayout().getTypeAllocSize(EltInit->getType()));

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit,
                       AllowOverwrite))
        return false;
      // After emitting a non-empty field with [[no_unique_address]], we may
      // need to overwrite its tail padding.
      if (Field->hasAttr<NoUniqueAddressAttr>())
        AllowOverwrite = true;
    } else {
      // Otherwise we have a bitfield.
      if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), EltInit,
                          AllowOverwrite))
        return false;
    }
  }

  // Zero-fill any padding that remains after the final field.
  if (ZeroInitPadding && !DoZeroInitPadding(Layout, AllowOverwrite, SizeSoFar))
    return false;

  return true;
}
825
826namespace {
827struct BaseInfo {
828 BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
829 : Decl(Decl), Offset(Offset), Index(Index) {
830 }
831
832 const CXXRecordDecl *Decl;
833 CharUnits Offset;
834 unsigned Index;
835
836 bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
837};
838}
839
/// Emit the fields of a record from an already-evaluated APValue, appending
/// each field's constant at its ABI offset. Returns false if any part of the
/// value cannot be emitted as a constant (e.g. an unemittable signed pointer
/// or a non-trivially-copyable PFP field during abstract emission).
///
/// For C++ classes this also emits the vtable pointer (if the class has its
/// own vfptr) and recurses into non-virtual bases in address order.
bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
                               bool IsPrimaryBase,
                               const CXXRecordDecl *VTableClass,
                               CharUnits Offset) {
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D: RD);

  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD)) {
    // Add a vtable pointer, if we need one and it hasn't already been added.
    if (Layout.hasOwnVFPtr()) {
      llvm::Constant *VTableAddressPoint =
          CGM.getCXXABI().getVTableAddressPoint(Base: BaseSubobject(CD, Offset),
                                                VTableClass);
      // Under pointer authentication, the vtable pointer must be emitted as a
      // signed constant; bail out if that is not representable here.
      if (auto Authentication = CGM.getVTablePointerAuthentication(thisClass: CD)) {
        VTableAddressPoint = Emitter.tryEmitConstantSignedPointer(
            Ptr: VTableAddressPoint, Auth: *Authentication);
        if (!VTableAddressPoint)
          return false;
      }
      if (!AppendBytes(FieldOffsetInChars: Offset, InitCst: VTableAddressPoint))
        return false;
    }

    // Accumulate and sort bases, in order to visit them in address order, which
    // may not be the same as declaration order.
    SmallVector<BaseInfo, 8> Bases;
    Bases.reserve(N: CD->getNumBases());
    unsigned BaseNo = 0;
    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
         BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
      assert(!Base->isVirtual() && "should not have virtual bases here");
      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
      CharUnits BaseOffset = Layout.getBaseClassOffset(Base: BD);
      Bases.push_back(Elt: BaseInfo(BD, BaseOffset, BaseNo));
    }
    // stable_sort keeps declaration order for bases at equal offsets.
    llvm::stable_sort(Range&: Bases);

    for (const BaseInfo &Base : Bases) {
      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
      if (!Build(Val: Val.getStructBase(i: Base.Index), RD: Base.Decl, IsPrimaryBase,
                 VTableClass, Offset: Offset + Base.Offset))
        return false;
    }
  }

  unsigned FieldNo = 0;
  // Field offsets below are relative to RD; convert Offset once so they can
  // be rebased to the complete object in bits.
  uint64_t OffsetBits = CGM.getContext().toBits(CharSize: Offset);
  const bool ZeroInitPadding = CGM.shouldZeroInitPadding();
  bool ZeroFieldSize = false;
  CharUnits SizeSoFar = CharUnits::Zero();

  bool AllowOverwrite = false;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && !declaresSameEntity(D1: Val.getUnionField(), D2: *Field))
      continue;

    // Don't emit anonymous bitfields or zero-sized fields.
    if (Field->isUnnamedBitField() ||
        isEmptyFieldForLayout(Context: CGM.getContext(), FD: *Field))
      continue;

    // Emit the value of the initializer.
    const APValue &FieldValue =
      RD->isUnion() ? Val.getUnionValue() : Val.getStructField(i: FieldNo);
    llvm::Constant *EltInit =
      Emitter.tryEmitPrivateForMemory(value: FieldValue, T: Field->getType());
    if (!EltInit)
      return false;

    // Pointer field protection: wrap the pointer constant in a ptrauth
    // signature whose discriminator depends on the field (and, for
    // non-trivially-copyable cases, on the object's address).
    if (CGM.getContext().isPFPField(Field: *Field)) {
      llvm::ConstantInt *Disc;
      llvm::Constant *AddrDisc;
      if (CGM.getContext().arePFPFieldsTriviallyCopyable(RD)) {
        // Address-independent signing: discriminate by a stable hash of the
        // field's name only.
        uint64_t FieldSignature =
            llvm::getPointerAuthStableSipHash(S: CGM.getPFPFieldName(FD: *Field));
        Disc = llvm::ConstantInt::get(Ty: CGM.Int64Ty, V: FieldSignature);
        AddrDisc = llvm::ConstantPointerNull::get(T: CGM.VoidPtrTy);
      } else if (Emitter.isAbstract()) {
        // isAbstract means that we don't know the global's address. Since we
        // can only form a pointer without knowing the address if the fields are
        // trivially copyable, we need to return false otherwise.
        return false;
      } else {
        Disc = llvm::ConstantInt::get(Ty: CGM.Int64Ty,
                                      V: -(Layout.getFieldOffset(FieldNo) / 8));
        AddrDisc = Emitter.getCurrentAddrPrivate();
      }
      EltInit = llvm::ConstantPtrAuth::get(
          Ptr: EltInit, Key: llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 2), Disc, AddrDisc,
          DeactivationSymbol: CGM.getPFPDeactivationSymbol(FD: *Field));
      // Address-discriminated constants need the final global address patched
      // in once it is known.
      if (!CGM.getContext().arePFPFieldsTriviallyCopyable(RD))
        Emitter.registerCurrentAddrPrivate(signal: EltInit,
                                           placeholder: cast<llvm::GlobalValue>(Val: AddrDisc));
    }

    if (ZeroInitPadding) {
      if (!DoZeroInitPadding(Layout, FieldNo, Field: **Field, AllowOverwrite,
                             SizeSoFar, ZeroFieldSize))
        return false;
      // A zero-sized field contributes no layout size; account for the bytes
      // the emitted constant actually occupies instead.
      if (ZeroFieldSize)
        SizeSoFar += CharUnits::fromQuantity(
            Quantity: CGM.getDataLayout().getTypeAllocSize(Ty: EltInit->getType()));
    }

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(Field: *Field, FieldOffset: Layout.getFieldOffset(FieldNo) + OffsetBits,
                       InitCst: EltInit, AllowOverwrite))
        return false;
      // After emitting a non-empty field with [[no_unique_address]], we may
      // need to overwrite its tail padding.
      if (Field->hasAttr<NoUniqueAddressAttr>())
        AllowOverwrite = true;
    } else {
      // Otherwise we have a bitfield.
      if (!AppendBitField(Field: *Field, FieldOffset: Layout.getFieldOffset(FieldNo) + OffsetBits,
                          C: EltInit, AllowOverwrite))
        return false;
    }
  }
  // Zero-fill any tail padding after the last emitted field.
  if (ZeroInitPadding && !DoZeroInitPadding(Layout, AllowOverwrite, SizeSoFar))
    return false;

  return true;
}
966
967bool ConstStructBuilder::DoZeroInitPadding(
968 const ASTRecordLayout &Layout, unsigned FieldNo, const FieldDecl &Field,
969 bool AllowOverwrite, CharUnits &SizeSoFar, bool &ZeroFieldSize) {
970 uint64_t StartBitOffset = Layout.getFieldOffset(FieldNo);
971 CharUnits StartOffset = CGM.getContext().toCharUnitsFromBits(BitSize: StartBitOffset);
972 if (SizeSoFar < StartOffset)
973 if (!AppendBytes(FieldOffsetInChars: SizeSoFar, InitCst: getPadding(CGM, PadSize: StartOffset - SizeSoFar),
974 AllowOverwrite))
975 return false;
976
977 if (!Field.isBitField()) {
978 CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(T: Field.getType());
979 SizeSoFar = StartOffset + FieldSize;
980 ZeroFieldSize = FieldSize.isZero();
981 } else {
982 const CGRecordLayout &RL =
983 CGM.getTypes().getCGRecordLayout(Field.getParent());
984 const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD: &Field);
985 uint64_t EndBitOffset = StartBitOffset + Info.Size;
986 SizeSoFar = CGM.getContext().toCharUnitsFromBits(BitSize: EndBitOffset);
987 if (EndBitOffset % CGM.getContext().getCharWidth() != 0) {
988 SizeSoFar++;
989 }
990 ZeroFieldSize = Info.Size == 0;
991 }
992 return true;
993}
994
995bool ConstStructBuilder::DoZeroInitPadding(const ASTRecordLayout &Layout,
996 bool AllowOverwrite,
997 CharUnits SizeSoFar) {
998 CharUnits TotalSize = Layout.getSize();
999 if (SizeSoFar < TotalSize)
1000 if (!AppendBytes(FieldOffsetInChars: SizeSoFar, InitCst: getPadding(CGM, PadSize: TotalSize - SizeSoFar),
1001 AllowOverwrite))
1002 return false;
1003 SizeSoFar = TotalSize;
1004 return true;
1005}
1006
1007llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
1008 Type = Type.getNonReferenceType();
1009 auto *RD = Type->castAsRecordDecl();
1010 llvm::Type *ValTy = CGM.getTypes().ConvertType(T: Type);
1011 return Builder.build(DesiredTy: ValTy, AllowOversized: RD->hasFlexibleArrayMember());
1012}
1013
1014llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
1015 const InitListExpr *ILE,
1016 QualType ValTy) {
1017 ConstantAggregateBuilder Const(Emitter.CGM);
1018 ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
1019
1020 if (!Builder.Build(ILE, /*AllowOverwrite*/false))
1021 return nullptr;
1022
1023 return Builder.Finalize(Type: ValTy);
1024}
1025
1026llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
1027 const APValue &Val,
1028 QualType ValTy) {
1029 ConstantAggregateBuilder Const(Emitter.CGM);
1030 ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
1031
1032 const auto *RD = ValTy->castAsRecordDecl();
1033 const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD);
1034 if (!Builder.Build(Val, RD, IsPrimaryBase: false, VTableClass: CD, Offset: CharUnits::Zero()))
1035 return nullptr;
1036
1037 return Builder.Finalize(Type: ValTy);
1038}
1039
1040bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
1041 ConstantAggregateBuilder &Const,
1042 CharUnits Offset,
1043 const InitListExpr *Updater) {
1044 return ConstStructBuilder(Emitter, Const, Offset)
1045 .Build(ILE: Updater, /*AllowOverwrite*/ true);
1046}
1047
1048//===----------------------------------------------------------------------===//
1049// ConstExprEmitter
1050//===----------------------------------------------------------------------===//
1051
1052static ConstantAddress
1053tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter,
1054 const CompoundLiteralExpr *E) {
1055 CodeGenModule &CGM = emitter.CGM;
1056 CharUnits Align = CGM.getContext().getTypeAlignInChars(T: E->getType());
1057 if (llvm::GlobalVariable *Addr =
1058 CGM.getAddrOfConstantCompoundLiteralIfEmitted(E))
1059 return ConstantAddress(Addr, Addr->getValueType(), Align);
1060
1061 LangAS addressSpace = E->getType().getAddressSpace();
1062 llvm::Constant *C = emitter.tryEmitForInitializer(E: E->getInitializer(),
1063 destAddrSpace: addressSpace, destType: E->getType());
1064 if (!C) {
1065 assert(!E->isFileScope() &&
1066 "file-scope compound literal did not have constant initializer!");
1067 return ConstantAddress::invalid();
1068 }
1069
1070 auto GV = new llvm::GlobalVariable(
1071 CGM.getModule(), C->getType(),
1072 E->getType().isConstantStorage(Ctx: CGM.getContext(), ExcludeCtor: true, ExcludeDtor: false),
1073 llvm::GlobalValue::InternalLinkage, C, ".compoundliteral", nullptr,
1074 llvm::GlobalVariable::NotThreadLocal,
1075 CGM.getContext().getTargetAddressSpace(AS: addressSpace));
1076 emitter.finalize(global: GV);
1077 GV->setAlignment(Align.getAsAlign());
1078 CGM.setAddrOfConstantCompoundLiteral(CLE: E, GV);
1079 return ConstantAddress(GV, GV->getValueType(), Align);
1080}
1081
/// Build a constant for an array of ArrayBound elements from an explicit
/// prefix (Elements) plus a Filler for the remainder. Chooses the most
/// compact representation: zeroinitializer when everything is zero, a plain
/// array when all element types agree, a {data, zeroinitializer} struct when
/// there is a long zero tail, or a packed struct of mixed element types.
/// Note: Elements is modified in place.
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
                  llvm::Type *CommonElementType, uint64_t ArrayBound,
                  SmallVectorImpl<llvm::Constant *> &Elements,
                  llvm::Constant *Filler) {
  // Figure out how long the initial prefix of non-zero elements is.
  uint64_t NonzeroLength = ArrayBound;
  if (Elements.size() < NonzeroLength && Filler->isNullValue())
    NonzeroLength = Elements.size();
  if (NonzeroLength == Elements.size()) {
    // Trim trailing zero elements off the explicit prefix as well.
    while (NonzeroLength > 0 && Elements[NonzeroLength - 1]->isNullValue())
      --NonzeroLength;
  }

  // Everything is zero: the whole array collapses to a zeroinitializer.
  if (NonzeroLength == 0)
    return llvm::ConstantAggregateZero::get(Ty: DesiredType);

  // Add a zeroinitializer array filler if we have lots of trailing zeroes.
  uint64_t TrailingZeroes = ArrayBound - NonzeroLength;
  if (TrailingZeroes >= 8) {
    assert(Elements.size() >= NonzeroLength &&
           "missing initializer for non-zero element");

    // If all the elements had the same type up to the trailing zeroes, emit a
    // struct of two arrays (the nonzero data and the zeroinitializer).
    if (CommonElementType && NonzeroLength >= 8) {
      llvm::Constant *Initial = llvm::ConstantArray::get(
          T: llvm::ArrayType::get(ElementType: CommonElementType, NumElements: NonzeroLength),
          V: ArrayRef(Elements).take_front(N: NonzeroLength));
      Elements.resize(N: 2);
      Elements[0] = Initial;
    } else {
      // Keep the individual nonzero elements; reserve one slot for the filler.
      Elements.resize(N: NonzeroLength + 1);
    }

    auto *FillerType =
        CommonElementType ? CommonElementType : DesiredType->getElementType();
    FillerType = llvm::ArrayType::get(ElementType: FillerType, NumElements: TrailingZeroes);
    Elements.back() = llvm::ConstantAggregateZero::get(Ty: FillerType);
    // The result is now heterogeneous, so force the packed-struct path below.
    CommonElementType = nullptr;
  } else if (Elements.size() != ArrayBound) {
    // Otherwise pad to the right size with the filler if necessary.
    Elements.resize(N: ArrayBound, NV: Filler);
    if (Filler->getType() != CommonElementType)
      CommonElementType = nullptr;
  }

  // If all elements have the same type, just emit an array constant.
  if (CommonElementType)
    return llvm::ConstantArray::get(
        T: llvm::ArrayType::get(ElementType: CommonElementType, NumElements: ArrayBound), V: Elements);

  // We have mixed types. Use a packed struct.
  llvm::SmallVector<llvm::Type *, 16> Types;
  Types.reserve(N: Elements.size());
  for (llvm::Constant *Elt : Elements)
    Types.push_back(Elt: Elt->getType());
  llvm::StructType *SType =
      llvm::StructType::get(Context&: CGM.getLLVMContext(), Elements: Types, isPacked: true);
  return llvm::ConstantStruct::get(T: SType, V: Elements);
}
1143
1144// This class only needs to handle arrays, structs and unions. Outside C++11
1145// mode, we don't currently constant fold those types. All other types are
1146// handled by constant folding.
1147//
1148// Constant folding is currently missing support for a few features supported
1149// here: CK_ToUnion, CK_ReinterpretMemberPointer, and DesignatedInitUpdateExpr.
class ConstExprEmitter
    : public ConstStmtVisitor<ConstExprEmitter, llvm::Constant *, QualType> {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(ConstantEmitter &emitter)
      : CGM(emitter.CGM), Emitter(emitter), VMContext(CGM.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // Default case: any statement kind not handled below is not a constant we
  // know how to emit here; let the caller fall back to APValue evaluation.
  llvm::Constant *VisitStmt(const Stmt *S, QualType T) { return nullptr; }

  // Prefer the cached APValue result; otherwise try the subexpression.
  llvm::Constant *VisitConstantExpr(const ConstantExpr *CE, QualType T) {
    if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
      return Result;
    return Visit(S: CE->getSubExpr(), P: T);
  }

  llvm::Constant *VisitParenExpr(const ParenExpr *PE, QualType T) {
    return Visit(S: PE->getSubExpr(), P: T);
  }

  llvm::Constant *
  VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *PE,
                                    QualType T) {
    return Visit(S: PE->getReplacement(), P: T);
  }

  llvm::Constant *VisitGenericSelectionExpr(const GenericSelectionExpr *GE,
                                            QualType T) {
    return Visit(S: GE->getResultExpr(), P: T);
  }

  llvm::Constant *VisitChooseExpr(const ChooseExpr *CE, QualType T) {
    return Visit(S: CE->getChosenSubExpr(), P: T);
  }

  llvm::Constant *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E,
                                           QualType T) {
    return Visit(S: E->getInitializer(), P: T);
  }

  // Emit an integer-to-integer conversion of a constant subexpression by
  // sign- or zero-extending/truncating its bits to the destination width.
  // Returns null if the subexpression is not an emittable integer constant.
  llvm::Constant *ProduceIntToIntCast(const Expr *E, QualType DestType) {
    QualType FromType = E->getType();
    // See also HandleIntToIntCast in ExprConstant.cpp
    if (FromType->isIntegerType())
      if (llvm::Constant *C = Visit(S: E, P: FromType))
        if (auto *CI = dyn_cast<llvm::ConstantInt>(Val: C)) {
          unsigned SrcWidth = CGM.getContext().getIntWidth(T: FromType);
          unsigned DstWidth = CGM.getContext().getIntWidth(T: DestType);
          if (DstWidth == SrcWidth)
            return CI;
          llvm::APInt A = FromType->isSignedIntegerType()
                              ? CI->getValue().sextOrTrunc(width: DstWidth)
                              : CI->getValue().zextOrTrunc(width: DstWidth);
          return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: A);
        }
    return nullptr;
  }

  // Dispatch on the cast kind; only the handful of kinds listed below are
  // emitted here — the rest either have dedicated constant-folding support
  // in Evaluate() or can never appear in a constant initializer.
  llvm::Constant *VisitCastExpr(const CastExpr *E, QualType destType) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Val: E))
      CGM.EmitExplicitCastExprType(E: ECE, CGF: Emitter.CGF);
    const Expr *subExpr = E->getSubExpr();

    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");

      auto field = E->getTargetUnionField();

      auto C = Emitter.tryEmitPrivateForMemory(E: subExpr, T: field->getType());
      if (!C) return nullptr;

      auto destTy = ConvertType(T: destType);
      if (C->getType() == destTy) return C;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size.
      SmallVector<llvm::Constant*, 2> Elts;
      SmallVector<llvm::Type*, 2> Types;
      Elts.push_back(Elt: C);
      Types.push_back(Elt: C->getType());
      unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(Ty: C->getType());
      unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(Ty: destTy);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        llvm::Constant *Padding =
            getPadding(CGM, PadSize: CharUnits::fromQuantity(Quantity: NumPadBytes));
        Elts.push_back(Elt: Padding);
        Types.push_back(Elt: Padding->getType());
      }

      llvm::StructType *STy = llvm::StructType::get(Context&: VMContext, Elements: Types, isPacked: false);
      return llvm::ConstantStruct::get(T: STy, V: Elts);
    }

    case CK_AddressSpaceConversion: {
      llvm::Constant *C = Emitter.tryEmitPrivate(E: subExpr, T: subExpr->getType());
      if (!C)
        return nullptr;
      llvm::Type *destTy = ConvertType(T: E->getType());
      return CGM.performAddrSpaceCast(Src: C, DestTy: destTy);
    }

    case CK_LValueToRValue: {
      // We don't really support doing lvalue-to-rvalue conversions here; any
      // interesting conversions should be done in Evaluate().  But as a
      // special case, allow compound literals to support the gcc extension
      // allowing "struct x {int x;} x = (struct x) {};".
      if (const auto *E =
              dyn_cast<CompoundLiteralExpr>(Val: subExpr->IgnoreParens()))
        return Visit(S: E->getInitializer(), P: destType);
      return nullptr;
    }

    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_ConstructorConversion:
      return Visit(S: subExpr, P: destType);

    case CK_ArrayToPointerDecay:
      if (const auto *S = dyn_cast<StringLiteral>(Val: subExpr))
        return CGM.GetAddrOfConstantStringFromLiteral(S).getPointer();
      return nullptr;
    case CK_NullToPointer:
      // The subexpression must still be emittable (it may have side effects);
      // the result is simply the null value of the destination type.
      if (Visit(S: subExpr, P: destType))
        return CGM.EmitNullConstant(T: destType);
      return nullptr;

    case CK_IntToOCLSampler:
      llvm_unreachable("global sampler variables are not generated");

    case CK_IntegralCast:
      return ProduceIntToIntCast(E: subExpr, DestType: destType);

    case CK_Dependent: llvm_unreachable("saw dependent cast!");

    case CK_BuiltinFnToFnPtr:
      llvm_unreachable("builtin functions are handled elsewhere");

    case CK_ReinterpretMemberPointer:
    case CK_DerivedToBaseMemberPointer:
    case CK_BaseToDerivedMemberPointer: {
      auto C = Emitter.tryEmitPrivate(E: subExpr, T: subExpr->getType());
      if (!C) return nullptr;
      return CGM.getCXXABI().EmitMemberPointerConversion(E, Src: C);
    }

    // These will never be supported.
    case CK_ObjCObjectLValueCast:
    case CK_ARCProduceObject:
    case CK_ARCConsumeObject:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCExtendBlockObject:
    case CK_CopyAndAutoreleaseBlockObject:
      return nullptr;

    // These don't need to be handled here because Evaluate knows how to
    // evaluate them in the cases where they can be folded.
    case CK_BitCast:
    case CK_ToVoid:
    case CK_Dynamic:
    case CK_LValueBitCast:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_UserDefinedConversion:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
    case CK_FunctionToPointerDecay:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_MemberPointerToBoolean:
    case CK_VectorSplat:
    case CK_FloatingRealToComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToIntegralComplex:
    case CK_IntegralRealToComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToFloatingComplex:
    case CK_PointerToIntegral:
    case CK_PointerToBoolean:
    case CK_BooleanToSignedIntegral:
    case CK_IntegralToPointer:
    case CK_IntegralToBoolean:
    case CK_IntegralToFloating:
    case CK_FloatingToIntegral:
    case CK_FloatingToBoolean:
    case CK_FloatingCast:
    case CK_FloatingToFixedPoint:
    case CK_FixedPointToFloating:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToIntegral:
    case CK_IntegralToFixedPoint:
    case CK_ZeroToOCLOpaqueType:
    case CK_MatrixCast:
    case CK_HLSLVectorTruncation:
    case CK_HLSLMatrixTruncation:
    case CK_HLSLArrayRValue:
    case CK_HLSLElementwiseCast:
    case CK_HLSLAggregateSplatCast:
      return nullptr;
    }
    llvm_unreachable("Invalid CastKind");
  }

  llvm::Constant *VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *DIE,
                                          QualType T) {
    // No need for a DefaultInitExprScope: we don't handle 'this' in a
    // constant expression.
    return Visit(S: DIE->getExpr(), P: T);
  }

  llvm::Constant *VisitExprWithCleanups(const ExprWithCleanups *E, QualType T) {
    return Visit(S: E->getSubExpr(), P: T);
  }

  llvm::Constant *VisitIntegerLiteral(const IntegerLiteral *I, QualType T) {
    return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: I->getValue());
  }

  // Wrap an integer Value in an APValue of DestType, converting to floating
  // point (with the expression's rounding mode) when the destination is a
  // float type. Used for #embed elements initializing non-integer arrays.
  static APValue withDestType(ASTContext &Ctx, const Expr *E, QualType SrcType,
                              QualType DestType, const llvm::APSInt &Value) {
    if (!Ctx.hasSameType(T1: SrcType, T2: DestType)) {
      if (DestType->isFloatingType()) {
        llvm::APFloat Result =
            llvm::APFloat(Ctx.getFloatTypeSemantics(T: DestType), 1);
        llvm::RoundingMode RM =
            E->getFPFeaturesInEffect(LO: Ctx.getLangOpts()).getRoundingMode();
        if (RM == llvm::RoundingMode::Dynamic)
          RM = llvm::RoundingMode::NearestTiesToEven;
        Result.convertFromAPInt(Input: Value, IsSigned: Value.isSigned(), RM);
        return APValue(Result);
      }
    }
    return APValue(Value);
  }

  // Emit a constant-bound array initializer: each explicit element (expanding
  // any #embed data inline), then the array filler for the remainder.
  llvm::Constant *EmitArrayInitialization(const InitListExpr *ILE, QualType T) {
    auto *CAT = CGM.getContext().getAsConstantArrayType(T: ILE->getType());
    assert(CAT && "can't emit array init for non-constant-bound array");
    uint64_t NumInitElements = ILE->getNumInits();
    const uint64_t NumElements = CAT->getZExtSize();
    // Each EmbedExpr counts as one init but expands to many elements; grow
    // the count accordingly, clamped to the array bound.
    for (const auto *Init : ILE->inits()) {
      if (const auto *Embed =
              dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) {
        NumInitElements += Embed->getDataElementCount() - 1;
        if (NumInitElements > NumElements) {
          NumInitElements = NumElements;
          break;
        }
      }
    }

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    uint64_t NumInitableElts = std::min<uint64_t>(a: NumInitElements, b: NumElements);

    QualType EltType = CAT->getElementType();

    // Initialize remaining array elements.
    llvm::Constant *fillC = nullptr;
    if (const Expr *filler = ILE->getArrayFiller()) {
      fillC = Emitter.tryEmitAbstractForMemory(E: filler, T: EltType);
      if (!fillC)
        return nullptr;
    }

    // Copy initializer elements. With a zero filler only the explicit prefix
    // (plus one filler slot) is ever materialized, so reserve just that.
    SmallVector<llvm::Constant *, 16> Elts;
    if (fillC && fillC->isNullValue())
      Elts.reserve(N: NumInitableElts + 1);
    else
      Elts.reserve(N: NumElements);

    // Track whether all emitted elements share one LLVM type; if not,
    // EmitArrayConstant falls back to a packed struct.
    llvm::Type *CommonElementType = nullptr;
    auto Emit = [&](const Expr *Init, unsigned ArrayIndex) {
      llvm::Constant *C = nullptr;
      C = Emitter.tryEmitPrivateForMemory(E: Init, T: EltType);
      if (!C)
        return false;
      if (ArrayIndex == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(Elt: C);
      return true;
    };

    unsigned ArrayIndex = 0;
    QualType DestTy = CAT->getElementType();
    for (unsigned i = 0; i < ILE->getNumInits(); ++i) {
      const Expr *Init = ILE->getInit(Init: i);
      if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) {
        // Expand the embedded data: one constant per code unit, converted to
        // the destination element type.
        StringLiteral *SL = EmbedS->getDataStringLiteral();
        llvm::APSInt Value(CGM.getContext().getTypeSize(T: DestTy),
                           DestTy->isUnsignedIntegerType());
        llvm::Constant *C;
        for (unsigned I = EmbedS->getStartingElementPos(),
                      N = EmbedS->getDataElementCount();
             I != EmbedS->getStartingElementPos() + N; ++I) {
          Value = SL->getCodeUnit(i: I);
          if (DestTy->isIntegerType()) {
            C = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Value);
          } else {
            C = Emitter.tryEmitPrivateForMemory(
                value: withDestType(Ctx&: CGM.getContext(), E: Init, SrcType: EmbedS->getType(), DestType: DestTy,
                             Value),
                T: EltType);
          }
          if (!C)
            return nullptr;
          Elts.push_back(Elt: C);
          ArrayIndex++;
        }
        if ((ArrayIndex - EmbedS->getDataElementCount()) == 0)
          CommonElementType = C->getType();
        else if (C->getType() != CommonElementType)
          CommonElementType = nullptr;
      } else {
        if (!Emit(Init, ArrayIndex))
          return nullptr;
        ArrayIndex++;
      }
    }

    llvm::ArrayType *Desired =
        cast<llvm::ArrayType>(Val: CGM.getTypes().ConvertType(T: ILE->getType()));
    return EmitArrayConstant(CGM, DesiredType: Desired, CommonElementType, ArrayBound: NumElements, Elements&: Elts,
                             Filler: fillC);
  }

  llvm::Constant *EmitRecordInitialization(const InitListExpr *ILE,
                                           QualType T) {
    return ConstStructBuilder::BuildStruct(Emitter, ILE, ValTy: T);
  }

  llvm::Constant *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E,
                                             QualType T) {
    return CGM.EmitNullConstant(T);
  }

  llvm::Constant *VisitInitListExpr(const InitListExpr *ILE, QualType T) {
    if (ILE->isTransparent())
      return Visit(S: ILE->getInit(Init: 0), P: T);

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE, T);

    if (ILE->getType()->isRecordType())
      return EmitRecordInitialization(ILE, T);

    return nullptr;
  }

  // Emit the base constant, then overwrite the designated pieces in place
  // and rebuild the aggregate.
  llvm::Constant *
  VisitDesignatedInitUpdateExpr(const DesignatedInitUpdateExpr *E,
                                QualType destType) {
    auto C = Visit(S: E->getBase(), P: destType);
    if (!C)
      return nullptr;

    ConstantAggregateBuilder Const(CGM);
    Const.add(C, Offset: CharUnits::Zero(), AllowOverwrite: false);

    if (!EmitDesignatedInitUpdater(Emitter, Const, Offset: CharUnits::Zero(), Type: destType,
                                   Updater: E->getUpdater()))
      return nullptr;

    llvm::Type *ValTy = CGM.getTypes().ConvertType(T: destType);
    bool HasFlexibleArray = false;
    if (const auto *RD = destType->getAsRecordDecl())
      HasFlexibleArray = RD->hasFlexibleArrayMember();
    return Const.build(DesiredTy: ValTy, AllowOversized: HasFlexibleArray);
  }

  // Only trivial constructors can be emitted as constants: a trivial default
  // constructor yields a null constant, and a trivial copy/move of a
  // materialized temporary is emitted as the temporary's value.
  llvm::Constant *VisitCXXConstructExpr(const CXXConstructExpr *E,
                                        QualType Ty) {
    if (!E->getConstructor()->isTrivial())
      return nullptr;

    // Only default and copy/move constructors can be trivial.
    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
             "trivial ctor has argument but isn't a copy/move ctor");

      const Expr *Arg = E->getArg(Arg: 0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      // Look through the temporary; it's just converting the value to an
      // lvalue to pass it to the constructor.
      if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Val: Arg))
        return Visit(S: MTE->getSubExpr(), P: Ty);
      // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
      return nullptr;
    }

    return CGM.EmitNullConstant(T: Ty);
  }

  llvm::Constant *VisitStringLiteral(const StringLiteral *E, QualType T) {
    // This is a string literal initializing an array in an initializer.
    return CGM.GetConstantArrayFromStringLiteral(E);
  }

  llvm::Constant *VisitObjCEncodeExpr(const ObjCEncodeExpr *E, QualType T) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(T: E->getEncodedType(), S&: Str);
    const ConstantArrayType *CAT = CGM.getContext().getAsConstantArrayType(T);
    assert(CAT && "String data not of constant array type!");

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(n: CAT->getZExtSize(), c: '\0');
    return llvm::ConstantDataArray::getString(Context&: VMContext, Initializer: Str, AddNull: false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E, QualType T) {
    return Visit(S: E->getSubExpr(), P: T);
  }

  llvm::Constant *VisitUnaryMinus(const UnaryOperator *U, QualType T) {
    if (llvm::Constant *C = Visit(S: U->getSubExpr(), P: T))
      if (auto *CI = dyn_cast<llvm::ConstantInt>(Val: C))
        return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: -CI->getValue());
    return nullptr;
  }

  llvm::Constant *VisitPackIndexingExpr(const PackIndexingExpr *E, QualType T) {
    return Visit(S: E->getSelectedExpr(), P: T);
  }

  // Utility methods
  llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }
};
1607
1608} // end anonymous namespace.
1609
1610llvm::Constant *ConstantEmitter::validateAndPopAbstract(llvm::Constant *C,
1611 AbstractState saved) {
1612 Abstract = saved.OldValue;
1613
1614 assert(saved.OldPlaceholdersSize == PlaceholderAddresses.size() &&
1615 "created a placeholder while doing an abstract emission?");
1616
1617 // No validation necessary for now.
1618 // No cleanup to do for now.
1619 return C;
1620}
1621
1622llvm::Constant *
1623ConstantEmitter::tryEmitAbstractForInitializer(const VarDecl &D) {
1624 auto state = pushAbstract();
1625 auto C = tryEmitPrivateForVarInit(D);
1626 return validateAndPopAbstract(C, saved: state);
1627}
1628
1629llvm::Constant *
1630ConstantEmitter::tryEmitAbstract(const Expr *E, QualType destType) {
1631 auto state = pushAbstract();
1632 auto C = tryEmitPrivate(E, T: destType);
1633 return validateAndPopAbstract(C, saved: state);
1634}
1635
1636llvm::Constant *
1637ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
1638 auto state = pushAbstract();
1639 auto C = tryEmitPrivate(value, T: destType);
1640 return validateAndPopAbstract(C, saved: state);
1641}
1642
1643llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
1644 if (!CE->hasAPValueResult())
1645 return nullptr;
1646
1647 QualType RetType = CE->getType();
1648 if (CE->isGLValue())
1649 RetType = CGM.getContext().getLValueReferenceType(T: RetType);
1650
1651 return tryEmitAbstract(value: CE->getAPValueResult(), destType: RetType);
1652}
1653
1654llvm::Constant *
1655ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
1656 auto state = pushAbstract();
1657 auto C = tryEmitPrivate(E, T: destType);
1658 C = validateAndPopAbstract(C, saved: state);
1659 if (!C) {
1660 CGM.Error(loc: E->getExprLoc(),
1661 error: "internal error: could not emit constant value \"abstractly\"");
1662 C = CGM.EmitNullConstant(T: destType);
1663 }
1664 return C;
1665}
1666
1667llvm::Constant *
1668ConstantEmitter::emitAbstract(SourceLocation loc, const APValue &value,
1669 QualType destType,
1670 bool EnablePtrAuthFunctionTypeDiscrimination) {
1671 auto state = pushAbstract();
1672 auto C =
1673 tryEmitPrivate(value, T: destType, EnablePtrAuthFunctionTypeDiscrimination);
1674 C = validateAndPopAbstract(C, saved: state);
1675 if (!C) {
1676 CGM.Error(loc,
1677 error: "internal error: could not emit constant value \"abstractly\"");
1678 C = CGM.EmitNullConstant(T: destType);
1679 }
1680 return C;
1681}
1682
// Try to emit a variable's initializer non-abstractly, i.e. for a concrete
// global whose own address the initializer may reference via placeholders.
// Returns null (after marking the emitter failed) if emission isn't possible.
llvm::Constant *ConstantEmitter::tryEmitForInitializer(const VarDecl &D) {
  initializeNonAbstract(destAS: D.getType().getAddressSpace());
  llvm::Constant *Init = tryEmitPrivateForVarInit(D);

  // If a placeholder address was needed for a TLS variable, implying that the
  // initializer's value depends on its address, then the object may not be
  // initialized in .tdata because the initializer will be memcpy'd to the
  // thread's TLS. Instead the initialization must be done in code.
  if (!PlaceholderAddresses.empty() && D.getTLSKind() != VarDecl::TLS_None) {
    // Drop the placeholder globals; they will never be finalized.
    for (auto [_, GV] : PlaceholderAddresses)
      GV->eraseFromParent();
    PlaceholderAddresses.clear();
    Init = nullptr;
  }

  return markIfFailed(init: Init);
}
1700
1701llvm::Constant *ConstantEmitter::tryEmitForInitializer(const Expr *E,
1702 LangAS destAddrSpace,
1703 QualType destType) {
1704 initializeNonAbstract(destAS: destAddrSpace);
1705 return markIfFailed(init: tryEmitPrivateForMemory(E, T: destType));
1706}
1707
1708llvm::Constant *ConstantEmitter::emitForInitializer(const APValue &value,
1709 LangAS destAddrSpace,
1710 QualType destType) {
1711 initializeNonAbstract(destAS: destAddrSpace);
1712 auto C = tryEmitPrivateForMemory(value, T: destType);
1713 assert(C && "couldn't emit constant value non-abstractly?");
1714 return C;
1715}
1716
1717llvm::GlobalValue *ConstantEmitter::getCurrentAddrPrivate() {
1718 assert(!Abstract && "cannot get current address for abstract constant");
1719
1720
1721
1722 // Make an obviously ill-formed global that should blow up compilation
1723 // if it survives.
1724 auto global = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, true,
1725 llvm::GlobalValue::PrivateLinkage,
1726 /*init*/ nullptr,
1727 /*name*/ "",
1728 /*before*/ nullptr,
1729 llvm::GlobalVariable::NotThreadLocal,
1730 CGM.getContext().getTargetAddressSpace(AS: DestAddressSpace));
1731
1732 PlaceholderAddresses.push_back(Elt: std::make_pair(x: nullptr, y&: global));
1733
1734 return global;
1735}
1736
// Pair the placeholder created by the matching getCurrentAddrPrivate() call
// with its 'signal' constant. During finalize(), occurrences of 'signal' in
// the initializer locate where 'placeholder' must be replaced with the real
// address (see ReplacePlaceholders).
void ConstantEmitter::registerCurrentAddrPrivate(llvm::Constant *signal,
                                           llvm::GlobalValue *placeholder) {
  assert(!PlaceholderAddresses.empty());
  assert(PlaceholderAddresses.back().first == nullptr);
  assert(PlaceholderAddresses.back().second == placeholder);
  PlaceholderAddresses.back().first = signal;
}
1744
namespace {
  /// Rewrites a just-emitted global initializer, replacing every placeholder
  /// global (registered via ConstantEmitter::getCurrentAddrPrivate) with an
  /// inbounds GEP to the corresponding position inside the real global.
  struct ReplacePlaceholders {
    CodeGenModule &CGM;

    /// The base address of the global.
    llvm::Constant *Base;
    llvm::Type *BaseValueTy = nullptr;

    /// The placeholder addresses that were registered during emission.
    llvm::DenseMap<llvm::Constant*, llvm::GlobalVariable*> PlaceholderAddresses;

    /// The locations of the placeholder signals.
    llvm::DenseMap<llvm::GlobalVariable*, llvm::Constant*> Locations;

    /// The current index stack. We use a simple unsigned stack because
    /// we assume that placeholders will be relatively sparse in the
    /// initializer, but we cache the index values we find just in case.
    llvm::SmallVector<unsigned, 8> Indices;
    llvm::SmallVector<llvm::Constant*, 8> IndexValues;

    ReplacePlaceholders(CodeGenModule &CGM, llvm::Constant *base,
                        ArrayRef<std::pair<llvm::Constant*,
                                           llvm::GlobalVariable*>> addresses)
        : CGM(CGM), Base(base),
          PlaceholderAddresses(addresses.begin(), addresses.end()) {
    }

    /// Walk 'init', record a replacement location for every placeholder,
    /// then RAUW each placeholder with its computed GEP.
    void replaceInInitializer(llvm::Constant *init) {
      // Remember the type of the top-most initializer.
      BaseValueTy = init->getType();

      // Initialize the stack.
      Indices.push_back(Elt: 0);
      IndexValues.push_back(Elt: nullptr);

      // Recurse into the initializer.
      findLocations(init);

      // Check invariants.
      assert(IndexValues.size() == Indices.size() && "mismatch");
      assert(Indices.size() == 1 && "didn't pop all indices");

      // Do the replacement; this basically invalidates 'init'.
      assert(Locations.size() == PlaceholderAddresses.size() &&
             "missed a placeholder?");

      // We're iterating over a hashtable, so this would be a source of
      // non-determinism in compiler output *except* that we're just
      // messing around with llvm::Constant structures, which never itself
      // does anything that should be visible in compiler output.
      for (auto &entry : Locations) {
        assert(entry.first->getName() == "" && "not a placeholder!");
        entry.first->replaceAllUsesWith(V: entry.second);
        entry.first->eraseFromParent();
      }
    }

  private:
    /// Depth-first search for placeholder signals, maintaining the current
    /// aggregate index path in Indices/IndexValues.
    void findLocations(llvm::Constant *init) {
      // Recurse into aggregates.
      if (auto agg = dyn_cast<llvm::ConstantAggregate>(Val: init)) {
        for (unsigned i = 0, e = agg->getNumOperands(); i != e; ++i) {
          Indices.push_back(Elt: i);
          IndexValues.push_back(Elt: nullptr);

          findLocations(init: agg->getOperand(i_nocapture: i));

          IndexValues.pop_back();
          Indices.pop_back();
        }
        return;
      }

      // Otherwise, check for registered constants.
      while (true) {
        auto it = PlaceholderAddresses.find(Val: init);
        if (it != PlaceholderAddresses.end()) {
          setLocation(it->second);
          break;
        }

        // Look through bitcasts or other expressions.
        if (auto expr = dyn_cast<llvm::ConstantExpr>(Val: init)) {
          init = expr->getOperand(i_nocapture: 0);
        } else {
          break;
        }
      }
    }

    /// Record the GEP for the current index path as the replacement for
    /// 'placeholder'.
    void setLocation(llvm::GlobalVariable *placeholder) {
      assert(!Locations.contains(placeholder) &&
             "already found location for placeholder!");

      // Lazily fill in IndexValues with the values from Indices.
      // We do this in reverse because we should always have a strict
      // prefix of indices from the start.
      assert(Indices.size() == IndexValues.size());
      for (size_t i = Indices.size() - 1; i != size_t(-1); --i) {
        if (IndexValues[i]) {
#ifndef NDEBUG
          for (size_t j = 0; j != i + 1; ++j) {
            assert(IndexValues[j] &&
                   isa<llvm::ConstantInt>(IndexValues[j]) &&
                   cast<llvm::ConstantInt>(IndexValues[j])->getZExtValue()
                     == Indices[j]);
          }
#endif
          break;
        }

        IndexValues[i] = llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: Indices[i]);
      }

      llvm::Constant *location = llvm::ConstantExpr::getInBoundsGetElementPtr(
          Ty: BaseValueTy, C: Base, IdxList: IndexValues);

      Locations.insert(KV: {placeholder, location});
    }
  };
}
1866
// Finish a non-abstract emission: now that 'global' exists, replace any
// placeholder self-references in its initializer with real GEPs into it.
void ConstantEmitter::finalize(llvm::GlobalVariable *global) {
  assert(InitializedNonAbstract &&
         "finalizing emitter that was used for abstract emission?");
  assert(!Finalized && "finalizing emitter multiple times");
  assert(global->getInitializer());

  // Note that we might also be Failed.
  Finalized = true;

  if (!PlaceholderAddresses.empty()) {
    ReplacePlaceholders(CGM, global, PlaceholderAddresses)
      .replaceInInitializer(init: global->getInitializer());
    PlaceholderAddresses.clear(); // satisfy the assert in the destructor
  }
}
1882
// Destructor only checks invariants: a non-abstract emission must have been
// finalized (or failed), and no placeholder globals may be left dangling.
ConstantEmitter::~ConstantEmitter() {
  assert((!InitializedNonAbstract || Finalized || Failed) &&
         "not finalized after being initialized for non-abstract emission");
  assert(PlaceholderAddresses.empty() && "unhandled placeholders");
}
1888
1889static QualType getNonMemoryType(CodeGenModule &CGM, QualType type) {
1890 if (auto AT = type->getAs<AtomicType>()) {
1891 return CGM.getContext().getQualifiedType(T: AT->getValueType(),
1892 Qs: type.getQualifiers());
1893 }
1894 return type;
1895}
1896
// Core entry point for emitting a variable's initializer: fast-path trivial
// default construction, then the syntactic ConstExprEmitter, then full
// constant evaluation of the initializer. Returns null on failure.
llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
  // Make a quick check if variable can be default NULL initialized
  // and avoid going through rest of code which may do, for c++11,
  // initialization of memory to all NULLs.
  if (!D.hasLocalStorage()) {
    QualType Ty = CGM.getContext().getBaseElementType(QT: D.getType());
    if (Ty->isRecordType())
      if (const CXXConstructExpr *E =
          dyn_cast_or_null<CXXConstructExpr>(Val: D.getInit())) {
        const CXXConstructorDecl *CD = E->getConstructor();
        // A trivial default constructor leaves the object zero-initialized.
        if (CD->isTrivial() && CD->isDefaultConstructor())
          return CGM.EmitNullConstant(T: D.getType());
      }
  }
  InConstantContext = D.hasConstantInitialization();

  QualType destType = D.getType();
  const Expr *E = D.getInit();
  assert(E && "No initializer to emit");

  // First try the syntactic fast path (doesn't apply to references, whose
  // initializers denote an address rather than a value).
  if (!destType->isReferenceType()) {
    QualType nonMemoryDestType = getNonMemoryType(CGM, type: destType);
    if (llvm::Constant *C = ConstExprEmitter(*this).Visit(S: E, P: nonMemoryDestType))
      return emitForMemory(C, T: destType);
  }

  // Try to emit the initializer.  Note that this can allow some things that
  // are not allowed by tryEmitPrivateForMemory alone.
  if (APValue *value = D.evaluateValue()) {
    assert(!value->allowConstexprUnknown() &&
           "Constexpr unknown values are not allowed in CodeGen");
    return tryEmitPrivateForMemory(value: *value, T: destType);
  }

  return nullptr;
}
1933
1934llvm::Constant *
1935ConstantEmitter::tryEmitAbstractForMemory(const Expr *E, QualType destType) {
1936 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1937 auto C = tryEmitAbstract(E, destType: nonMemoryDestType);
1938 return (C ? emitForMemory(C, T: destType) : nullptr);
1939}
1940
1941llvm::Constant *
1942ConstantEmitter::tryEmitAbstractForMemory(const APValue &value,
1943 QualType destType) {
1944 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1945 auto C = tryEmitAbstract(value, destType: nonMemoryDestType);
1946 return (C ? emitForMemory(C, T: destType) : nullptr);
1947}
1948
1949llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const Expr *E,
1950 QualType destType) {
1951 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1952 llvm::Constant *C = tryEmitPrivate(E, T: nonMemoryDestType);
1953 return (C ? emitForMemory(C, T: destType) : nullptr);
1954}
1955
1956llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const APValue &value,
1957 QualType destType) {
1958 auto nonMemoryDestType = getNonMemoryType(CGM, type: destType);
1959 auto C = tryEmitPrivate(value, T: nonMemoryDestType);
1960 return (C ? emitForMemory(C, T: destType) : nullptr);
1961}
1962
/// Try to emit a constant signed pointer, given a raw pointer and the
/// destination ptrauth qualifier.
///
/// This can fail if the qualifier needs address discrimination and the
/// emitter is in an abstract mode.
llvm::Constant *
ConstantEmitter::tryEmitConstantSignedPointer(llvm::Constant *UnsignedPointer,
                                              PointerAuthQualifier Schema) {
  assert(Schema && "applying trivial ptrauth schema");

  // Key "none" means no signing is performed.
  if (Schema.hasKeyNone())
    return UnsignedPointer;

  unsigned Key = Schema.getKey();

  // Create an address placeholder if we're using address discrimination.
  llvm::GlobalValue *StorageAddress = nullptr;
  if (Schema.isAddressDiscriminated()) {
    // We can't do this if the emitter is in an abstract state.
    if (isAbstract())
      return nullptr;

    StorageAddress = getCurrentAddrPrivate();
  }

  llvm::ConstantInt *Discriminator =
      llvm::ConstantInt::get(Ty: CGM.IntPtrTy, V: Schema.getExtraDiscriminator());

  llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
      Pointer: UnsignedPointer, Key, StorageAddress, OtherDiscriminator: Discriminator);

  // Pair the placeholder with the signed pointer so finalize() can patch in
  // the real storage address later.
  if (Schema.isAddressDiscriminated())
    registerCurrentAddrPrivate(signal: SignedPointer, placeholder: StorageAddress);

  return SignedPointer;
}
1999
// Convert an emitted constant to the in-memory representation of 'destType':
// add _Atomic tail padding, widen i1 bools (and unpacked HLSL bool vectors),
// and extend _BitInt constants to their load/store or byte-array form.
llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
                                               llvm::Constant *C,
                                               QualType destType) {
  // For an _Atomic-qualified constant, we may need to add tail padding.
  if (auto AT = destType->getAs<AtomicType>()) {
    QualType destValueType = AT->getValueType();
    C = emitForMemory(CGM, C, destType: destValueType);

    uint64_t innerSize = CGM.getContext().getTypeSize(T: destValueType);
    uint64_t outerSize = CGM.getContext().getTypeSize(T: destType);
    if (innerSize == outerSize)
      return C;

    assert(innerSize < outerSize && "emitted over-large constant for atomic");
    // Pad out to the atomic size with a zeroed byte array.
    llvm::Constant *elts[] = {
      C,
      llvm::ConstantAggregateZero::get(
        Ty: llvm::ArrayType::get(ElementType: CGM.Int8Ty, NumElements: (outerSize - innerSize) / 8))
    };
    return llvm::ConstantStruct::getAnon(V: elts);
  }

  // Zero-extend bool.
  // In HLSL bool vectors are stored in memory as a vector of i32
  if ((C->getType()->isIntegerTy(Bitwidth: 1) && !destType->isBitIntType()) ||
      (destType->isExtVectorBoolType() &&
       !destType->isPackedVectorBoolType(ctx: CGM.getContext()))) {
    llvm::Type *boolTy = CGM.getTypes().ConvertTypeForMem(T: destType);
    llvm::Constant *Res = llvm::ConstantFoldCastOperand(
        Opcode: llvm::Instruction::ZExt, C, DestTy: boolTy, DL: CGM.getDataLayout());
    assert(Res && "Constant folding must succeed");
    return Res;
  }

  if (destType->isBitIntType()) {
    llvm::Type *MemTy = CGM.getTypes().ConvertTypeForMem(T: destType);
    if (C->getType() != MemTy) {
      ConstantAggregateBuilder Builder(CGM);
      llvm::Type *LoadStoreTy =
          CGM.getTypes().convertTypeForLoadStore(T: destType);
      // ptrtoint/inttoptr should not involve _BitInt in constant expressions,
      // so casting to ConstantInt is safe here.
      auto *CI = cast<llvm::ConstantInt>(Val: C);
      // Sign- or zero-extend to the load/store width per the source type.
      llvm::Constant *Res = llvm::ConstantFoldCastOperand(
          Opcode: destType->isSignedIntegerOrEnumerationType()
              ? llvm::Instruction::SExt
              : llvm::Instruction::ZExt,
          C: CI, DestTy: LoadStoreTy, DL: CGM.getDataLayout());
      if (CGM.getTypes().typeRequiresSplitIntoByteArray(ASTTy: destType,
                                                        LLVMTy: C->getType())) {
        // Long _BitInt has array of bytes as in-memory type.
        // So, split constant into individual bytes.
        llvm::APInt Value = cast<llvm::ConstantInt>(Val: Res)->getValue();
        Builder.addBits(Bits: Value, /*OffsetInBits=*/0, /*AllowOverwrite=*/false);
        return Builder.build(DesiredTy: MemTy, /*AllowOversized*/ false);
      }
      return Res;
    }
  }

  return C;
}
2062
2063llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
2064 QualType destType) {
2065 assert(!destType->isVoidType() && "can't emit a void constant");
2066
2067 if (!destType->isReferenceType())
2068 if (llvm::Constant *C = ConstExprEmitter(*this).Visit(S: E, P: destType))
2069 return C;
2070
2071 Expr::EvalResult Result;
2072
2073 bool Success = false;
2074
2075 if (destType->isReferenceType())
2076 Success = E->EvaluateAsLValue(Result, Ctx: CGM.getContext());
2077 else
2078 Success = E->EvaluateAsRValue(Result, Ctx: CGM.getContext(), InConstantContext);
2079
2080 if (Success && !Result.HasSideEffects)
2081 return tryEmitPrivate(value: Result.Val, T: destType);
2082
2083 return nullptr;
2084}
2085
2086llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T, QualType QT) {
2087 return getTargetCodeGenInfo().getNullPointer(CGM: *this, T, QT);
2088}
2089
2090namespace {
2091/// A struct which can be used to peephole certain kinds of finalization
2092/// that normally happen during l-value emission.
struct ConstantLValue {
  // The emitted constant; null indicates emission failure.
  llvm::Constant *Value;
  // True if the APValue's offset has already been folded into Value.
  bool HasOffsetApplied;
  // True if Value has already been signed per the destination's ptrauth
  // qualifier.
  bool HasDestPointerAuth;

  /*implicit*/ ConstantLValue(llvm::Constant *value,
                              bool hasOffsetApplied = false,
                              bool hasDestPointerAuth = false)
      : Value(value), HasOffsetApplied(hasOffsetApplied),
        HasDestPointerAuth(hasDestPointerAuth) {}

  /*implicit*/ ConstantLValue(ConstantAddress address)
      : ConstantLValue(address.getPointer()) {}
};
2107
2108/// A helper class for emitting constant l-values.
class ConstantLValueEmitter : public ConstStmtVisitor<ConstantLValueEmitter,
                                                      ConstantLValue> {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  // The l-value APValue being emitted; its base and offset drive everything.
  const APValue &Value;
  QualType DestType;
  bool EnablePtrAuthFunctionTypeDiscrimination;

  // Befriend StmtVisitorBase so that we don't have to expose Visit*.
  friend StmtVisitorBase;

public:
  ConstantLValueEmitter(ConstantEmitter &emitter, const APValue &value,
                        QualType destType,
                        bool EnablePtrAuthFunctionTypeDiscrimination = true)
    : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType),
      EnablePtrAuthFunctionTypeDiscrimination(
          EnablePtrAuthFunctionTypeDiscrimination) {}

  // Main entry point: emit Value as a constant of DestType, or null.
  llvm::Constant *tryEmit();

private:
  llvm::Constant *tryEmitAbsolute(llvm::Type *destTy);
  ConstantLValue tryEmitBase(const APValue::LValueBase &base);

  // Fallback: statements with no specific handler cannot be emitted.
  ConstantLValue VisitStmt(const Stmt *S) { return nullptr; }
  ConstantLValue VisitConstantExpr(const ConstantExpr *E);
  ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
  ConstantLValue VisitStringLiteral(const StringLiteral *E);
  ConstantLValue VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
  ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
  ConstantLValue VisitObjCStringLiteral(const ObjCStringLiteral *E);
  llvm::Constant *VisitObjCCollectionElement(const Expr *E);
  ConstantLValue VisitObjCArrayLiteral(const ObjCArrayLiteral *E);
  ConstantLValue VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
  ConstantLValue VisitPredefinedExpr(const PredefinedExpr *E);
  ConstantLValue VisitAddrLabelExpr(const AddrLabelExpr *E);
  ConstantLValue VisitCallExpr(const CallExpr *E);
  ConstantLValue VisitBlockExpr(const BlockExpr *E);
  ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E);
  ConstantLValue VisitMaterializeTemporaryExpr(
                                         const MaterializeTemporaryExpr *E);

  // Helpers for __builtin_ptrauth_sign_constant.
  ConstantLValue emitPointerAuthSignConstant(const CallExpr *E);
  llvm::Constant *emitPointerAuthPointer(const Expr *E);
  unsigned emitPointerAuthKey(const Expr *E);
  std::pair<llvm::Constant *, llvm::ConstantInt *>
  emitPointerAuthDiscriminator(const Expr *E);

  bool hasNonZeroOffset() const {
    return !Value.getLValueOffset().isZero();
  }

  /// Return the value offset.
  llvm::Constant *getOffset() {
    return llvm::ConstantInt::get(Ty: CGM.Int64Ty,
                                  V: Value.getLValueOffset().getQuantity());
  }

  /// Apply the value offset to the given constant.
  llvm::Constant *applyOffset(llvm::Constant *C) {
    if (!hasNonZeroOffset())
      return C;

    return llvm::ConstantExpr::getPtrAdd(Ptr: C, Offset: getOffset());
  }
};
2176
2177}
2178
// Emit the l-value APValue as a constant: emit the base, fold in the offset,
// apply any ptrauth signing required by the destination type, and cast to
// the destination's pointer or integer representation.
llvm::Constant *ConstantLValueEmitter::tryEmit() {
  const APValue::LValueBase &base = Value.getLValueBase();

  // The destination type should be a pointer or reference
  // type, but it might also be a cast thereof.
  //
  // FIXME: the chain of casts required should be reflected in the APValue.
  // We need this in order to correctly handle things like a ptrtoint of a
  // non-zero null pointer and addrspace casts that aren't trivially
  // represented in LLVM IR.
  auto destTy = CGM.getTypes().ConvertTypeForMem(T: DestType);
  assert(isa<llvm::IntegerType>(destTy) || isa<llvm::PointerType>(destTy));

  // If there's no base at all, this is a null or absolute pointer,
  // possibly cast back to an integer type.
  if (!base) {
    return tryEmitAbsolute(destTy);
  }

  // Otherwise, try to emit the base.
  ConstantLValue result = tryEmitBase(base);

  // If that failed, we're done.
  llvm::Constant *value = result.Value;
  if (!value) return nullptr;

  // Apply the offset if necessary and not already done.
  if (!result.HasOffsetApplied) {
    value = applyOffset(C: value);
  }

  // Apply pointer-auth signing from the destination type.
  if (PointerAuthQualifier PointerAuth = DestType.getPointerAuth();
      PointerAuth && !result.HasDestPointerAuth) {
    value = Emitter.tryEmitConstantSignedPointer(UnsignedPointer: value, Schema: PointerAuth);
    if (!value)
      return nullptr;
  }

  // Convert to the appropriate type; this could be an lvalue for
  // an integer.  FIXME: performAddrSpaceCast
  if (isa<llvm::PointerType>(Val: destTy))
    return llvm::ConstantExpr::getPointerCast(C: value, Ty: destTy);

  return llvm::ConstantExpr::getPtrToInt(C: value, Ty: destTy);
}
2225
2226/// Try to emit an absolute l-value, such as a null pointer or an integer
2227/// bitcast to pointer type.
/// Try to emit an absolute l-value, such as a null pointer or an integer
/// bitcast to pointer type.
llvm::Constant *
ConstantLValueEmitter::tryEmitAbsolute(llvm::Type *destTy) {
  // If we're producing a pointer, this is easy.
  auto destPtrTy = cast<llvm::PointerType>(Val: destTy);
  if (Value.isNullPointer()) {
    // FIXME: integer offsets from non-zero null pointers.
    return CGM.getNullPointer(T: destPtrTy, QT: DestType);
  }

  // Convert the integer to a pointer-sized integer before converting it
  // to a pointer.
  // FIXME: signedness depends on the original integer type.
  auto intptrTy = CGM.getDataLayout().getIntPtrType(destPtrTy);
  llvm::Constant *C;
  // The offset holds the absolute address value in this case.
  C = llvm::ConstantFoldIntegerCast(C: getOffset(), DestTy: intptrTy, /*isSigned*/ IsSigned: false,
                                    DL: CGM.getDataLayout());
  assert(C && "Must have folded, as Offset is a ConstantInt");
  C = llvm::ConstantExpr::getIntToPtr(C, Ty: destPtrTy);
  return C;
}
2248
// Emit the address of the l-value's base: a declaration (function, global
// variable, GUID, template parameter object, ...), a typeid result, or an
// arbitrary l-value expression.
ConstantLValue
ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
  // Handle values.
  if (const ValueDecl *D = base.dyn_cast<const ValueDecl*>()) {
    // The constant always points to the canonical declaration. We want to look
    // at properties of the most recent declaration at the point of emission.
    D = cast<ValueDecl>(Val: D->getMostRecentDecl());

    if (D->hasAttr<WeakRefAttr>())
      return CGM.GetWeakRefReference(VD: D).getPointer();

    // Sign a function pointer, preferring the destination's explicit ptrauth
    // qualifier over function-type discrimination.
    auto PtrAuthSign = [&](llvm::Constant *C) {
      if (PointerAuthQualifier PointerAuth = DestType.getPointerAuth()) {
        C = applyOffset(C);
        C = Emitter.tryEmitConstantSignedPointer(UnsignedPointer: C, Schema: PointerAuth);
        return ConstantLValue(C, /*applied offset*/ true, /*signed*/ true);
      }

      CGPointerAuthInfo AuthInfo;

      if (EnablePtrAuthFunctionTypeDiscrimination)
        AuthInfo = CGM.getFunctionPointerAuthInfo(T: DestType);

      if (AuthInfo) {
        // A non-zero offset into a signed function pointer cannot be
        // represented; give up.
        if (hasNonZeroOffset())
          return ConstantLValue(nullptr);

        C = applyOffset(C);
        C = CGM.getConstantSignedPointer(
            Pointer: C, Key: AuthInfo.getKey(), StorageAddress: nullptr,
            OtherDiscriminator: cast_or_null<llvm::ConstantInt>(Val: AuthInfo.getDiscriminator()));
        return ConstantLValue(C, /*applied offset*/ true, /*signed*/ true);
      }

      return ConstantLValue(C);
    };

    if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      llvm::Constant *C = CGM.getRawFunctionPointer(GD: FD);
      if (FD->getType()->isCFIUncheckedCalleeFunctionType())
        C = llvm::NoCFIValue::get(GV: cast<llvm::GlobalValue>(Val: C));
      return PtrAuthSign(C);
    }

    if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
      // We can never refer to a variable with local storage.
      if (!VD->hasLocalStorage()) {
        if (VD->isFileVarDecl() || VD->hasExternalStorage())
          return CGM.GetAddrOfGlobalVar(D: VD);

        // Function-local static: make sure its global exists.
        if (VD->isLocalVarDecl()) {
          return CGM.getOrCreateStaticVarDecl(
              D: *VD, Linkage: CGM.getLLVMLinkageVarDefinition(VD));
        }
      }
    }

    if (const auto *GD = dyn_cast<MSGuidDecl>(Val: D))
      return CGM.GetAddrOfMSGuidDecl(GD);

    if (const auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(Val: D))
      return CGM.GetAddrOfUnnamedGlobalConstantDecl(GCD);

    if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: D))
      return CGM.GetAddrOfTemplateParamObject(TPO);

    return nullptr;
  }

  // Handle typeid(T).
  if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>())
    return CGM.GetAddrOfRTTIDescriptor(Ty: QualType(TI.getType(), 0));

  // Otherwise, it must be an expression.
  return Visit(S: base.get<const Expr*>());
}
2325
2326ConstantLValue
2327ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
2328 if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE: E))
2329 return Result;
2330 return Visit(S: E->getSubExpr());
2331}
2332
// Emit the address of a file-scope compound literal. A fresh emitter is used
// so the literal's own placeholder/finalization state stays independent of
// the enclosing emission.
ConstantLValue
ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
  ConstantEmitter CompoundLiteralEmitter(CGM, Emitter.CGF);
  CompoundLiteralEmitter.setInConstantContext(Emitter.isInConstantContext());
  return tryEmitGlobalCompoundLiteral(emitter&: CompoundLiteralEmitter, E);
}
2339
// A string literal's address is its (possibly uniqued) global constant.
ConstantLValue
ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *E) {
  return CGM.GetAddrOfConstantStringFromLiteral(S: E);
}
2344
// @encode used as an l-value: the address of the encoding string constant.
ConstantLValue
ConstantLValueEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
  return CGM.GetAddrOfConstantStringFromObjCEncode(E);
}
2349
2350static ConstantLValue emitConstantObjCStringLiteral(const StringLiteral *S,
2351 QualType T,
2352 CodeGenModule &CGM) {
2353 auto C = CGM.getObjCRuntime().GenerateConstantString(S);
2354 return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T));
2355}
2356
// @"..." literal: delegate to the shared constant-NSString helper.
ConstantLValue
ConstantLValueEmitter::VisitObjCStringLiteral(const ObjCStringLiteral *E) {
  return emitConstantObjCStringLiteral(S: E->getString(), T: E->getType(), CGM);
}
2361
// Emit a constant boxed expression (@42, @YES, @"str", ...) by evaluating
// the boxed subexpression and asking the ObjC runtime for the matching
// constant object.
ConstantLValue
ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  ASTContext &Context = CGM.getContext();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const Expr *SubExpr = E->getSubExpr();
  const QualType &Ty = SubExpr->IgnoreParens()->getType();

  assert(SubExpr->isEvaluatable(Context) &&
         "Non const NSNumber is being emitted as a constant");

  if (const auto *SL = dyn_cast<StringLiteral>(Val: SubExpr->IgnoreParenCasts()))
    return emitConstantObjCStringLiteral(S: SL, T: E->getType(), CGM);

  // Note `@YES` `@NO` need to be handled explicitly
  // to meet existing plist encoding / decoding expectations
  const bool IsBoolType =
      (Ty->isBooleanType() || NSAPI(Context).isObjCBOOLType(T: Ty));
  bool BoolValue = false;
  if (IsBoolType && SubExpr->EvaluateAsBooleanCondition(Result&: BoolValue, Ctx: Context)) {
    ConstantAddress C = Runtime.GenerateConstantNumber(Value: BoolValue, Ty);
    return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: E->getType()));
  }

  // Integer-valued boxes (@42, @'c', ...).
  Expr::EvalResult IntResult{};
  if (SubExpr->EvaluateAsInt(Result&: IntResult, Ctx: Context)) {
    ConstantAddress C =
        Runtime.GenerateConstantNumber(Value: IntResult.Val.getInt(), Ty);
    return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: E->getType()));
  }

  // Floating-point boxes (@1.5, ...).
  llvm::APFloat FloatValue(0.0);
  if (SubExpr->EvaluateAsFloat(Result&: FloatValue, Ctx: Context)) {
    ConstantAddress C = Runtime.GenerateConstantNumber(Value: FloatValue, Ty);
    return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: E->getType()));
  }

  llvm_unreachable("SubExpr is expected to be evaluated as a numeric type");
}
2400
// Emit one element of a constant ObjC collection literal. Elements are
// always wrapped in a CK_BitCast to 'id', so emit the operand and bitcast.
llvm::Constant *
ConstantLValueEmitter::VisitObjCCollectionElement(const Expr *E) {
  auto CE = cast<CastExpr>(Val: E);
  const Expr *Elm = CE->getSubExpr();
  QualType DestTy = CE->getType();

  assert(CE->getCastKind() == CK_BitCast &&
         "Expected a CK_BitCast type for valid items in constant objc "
         "collection literals");

  llvm::Type *DstTy = CGM.getTypes().ConvertType(T: DestTy);
  ConstantLValue LV = Visit(S: Elm);
  llvm::Constant *ConstVal = cast<llvm::Constant>(Val: LV.Value);
  llvm::Constant *Val = llvm::ConstantExpr::getBitCast(C: ConstVal, Ty: DstTy);
  return Val;
}
2417
2418ConstantLValue
2419ConstantLValueEmitter::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) {
2420 SmallVector<llvm::Constant *, 16> ObjectExpressions;
2421 uint64_t NumElements = E->getNumElements();
2422 ObjectExpressions.reserve(N: NumElements);
2423
2424 for (uint64_t i = 0; i < NumElements; i++) {
2425 llvm::Constant *Val = VisitObjCCollectionElement(E: E->getElement(Index: i));
2426 ObjectExpressions.push_back(Elt: Val);
2427 }
2428 ConstantAddress C =
2429 CGM.getObjCRuntime().GenerateConstantArray(Objects: ObjectExpressions);
2430 return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: E->getType()));
2431}
2432
// Emit a constant @{...} literal: emit every key/value pair, then hand them
// to the ObjC runtime and retype the resulting address.
ConstantLValue ConstantLValueEmitter::VisitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  SmallVector<std::pair<llvm::Constant *, llvm::Constant *>, 16> KeysAndObjects;
  uint64_t NumElements = E->getNumElements();
  KeysAndObjects.reserve(N: NumElements);

  for (uint64_t i = 0; i < NumElements; i++) {
    llvm::Constant *Key =
        VisitObjCCollectionElement(E: E->getKeyValueElement(Index: i).Key);
    llvm::Constant *Val =
        VisitObjCCollectionElement(E: E->getKeyValueElement(Index: i).Value);
    KeysAndObjects.push_back(Elt: {Key, Val});
  }
  ConstantAddress C =
      CGM.getObjCRuntime().GenerateConstantDictionary(E, KeysAndObjects);
  return C.withElementType(ElemTy: CGM.getTypes().ConvertTypeForMem(T: E->getType()));
}
2450
// __func__ and friends: the address of the predefined name's string constant.
ConstantLValue
ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
  return CGM.GetAddrOfConstantStringFromLiteral(S: E->getFunctionName());
}
2455
// GNU &&label: only meaningful inside a function, so a CodeGenFunction must
// be present to provide the label's block address.
ConstantLValue
ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) {
  assert(Emitter.CGF && "Invalid address of label expression outside function");
  llvm::Constant *Ptr = Emitter.CGF->GetAddrOfLabel(L: E->getLabel());
  return Ptr;
}
2462
2463ConstantLValue
2464ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
2465 unsigned builtin = E->getBuiltinCallee();
2466 if (builtin == Builtin::BI__builtin_function_start)
2467 return CGM.GetFunctionStart(
2468 Decl: E->getArg(Arg: 0)->getAsBuiltinConstantDeclRef(Context: CGM.getContext()));
2469
2470 if (builtin == Builtin::BI__builtin_ptrauth_sign_constant)
2471 return emitPointerAuthSignConstant(E);
2472
2473 if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
2474 builtin != Builtin::BI__builtin___NSStringMakeConstantString)
2475 return nullptr;
2476
2477 const auto *Literal = cast<StringLiteral>(Val: E->getArg(Arg: 0)->IgnoreParenCasts());
2478 if (builtin == Builtin::BI__builtin___NSStringMakeConstantString) {
2479 return CGM.getObjCRuntime().GenerateConstantString(Literal);
2480 } else {
2481 // FIXME: need to deal with UCN conversion issues.
2482 return CGM.GetAddrOfConstantCFString(Literal);
2483 }
2484}
2485
2486ConstantLValue
2487ConstantLValueEmitter::emitPointerAuthSignConstant(const CallExpr *E) {
2488 llvm::Constant *UnsignedPointer = emitPointerAuthPointer(E: E->getArg(Arg: 0));
2489 unsigned Key = emitPointerAuthKey(E: E->getArg(Arg: 1));
2490 auto [StorageAddress, OtherDiscriminator] =
2491 emitPointerAuthDiscriminator(E: E->getArg(Arg: 2));
2492
2493 llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
2494 Pointer: UnsignedPointer, Key, StorageAddress, OtherDiscriminator);
2495 return SignedPointer;
2496}
2497
2498llvm::Constant *ConstantLValueEmitter::emitPointerAuthPointer(const Expr *E) {
2499 Expr::EvalResult Result;
2500 bool Succeeded = E->EvaluateAsRValue(Result, Ctx: CGM.getContext());
2501 assert(Succeeded);
2502 (void)Succeeded;
2503
2504 // The assertions here are all checked by Sema.
2505 assert(Result.Val.isLValue());
2506 if (isa<FunctionDecl>(Val: Result.Val.getLValueBase().get<const ValueDecl *>()))
2507 assert(Result.Val.getLValueOffset().isZero());
2508 return ConstantEmitter(CGM, Emitter.CGF)
2509 .emitAbstract(loc: E->getExprLoc(), value: Result.Val, destType: E->getType(), EnablePtrAuthFunctionTypeDiscrimination: false);
2510}
2511
2512unsigned ConstantLValueEmitter::emitPointerAuthKey(const Expr *E) {
2513 return E->EvaluateKnownConstInt(Ctx: CGM.getContext()).getZExtValue();
2514}
2515
2516std::pair<llvm::Constant *, llvm::ConstantInt *>
2517ConstantLValueEmitter::emitPointerAuthDiscriminator(const Expr *E) {
2518 E = E->IgnoreParens();
2519
2520 if (const auto *Call = dyn_cast<CallExpr>(Val: E)) {
2521 if (Call->getBuiltinCallee() ==
2522 Builtin::BI__builtin_ptrauth_blend_discriminator) {
2523 llvm::Constant *Pointer = ConstantEmitter(CGM).emitAbstract(
2524 E: Call->getArg(Arg: 0), destType: Call->getArg(Arg: 0)->getType());
2525 auto *Extra = cast<llvm::ConstantInt>(Val: ConstantEmitter(CGM).emitAbstract(
2526 E: Call->getArg(Arg: 1), destType: Call->getArg(Arg: 1)->getType()));
2527 return {Pointer, Extra};
2528 }
2529 }
2530
2531 llvm::Constant *Result = ConstantEmitter(CGM).emitAbstract(E, destType: E->getType());
2532 if (Result->getType()->isPointerTy())
2533 return {Result, nullptr};
2534 return {nullptr, cast<llvm::ConstantInt>(Val: Result)};
2535}
2536
2537ConstantLValue
2538ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) {
2539 StringRef functionName;
2540 if (auto CGF = Emitter.CGF)
2541 functionName = CGF->CurFn->getName();
2542 else
2543 functionName = "global";
2544
2545 return CGM.GetAddrOfGlobalBlock(BE: E, Name: functionName);
2546}
2547
2548ConstantLValue
2549ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
2550 QualType T;
2551 if (E->isTypeOperand())
2552 T = E->getTypeOperand(Context: CGM.getContext());
2553 else
2554 T = E->getExprOperand()->getType();
2555 return CGM.GetAddrOfRTTIDescriptor(Ty: T);
2556}
2557
2558ConstantLValue
2559ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
2560 const MaterializeTemporaryExpr *E) {
2561 assert(E->getStorageDuration() == SD_Static);
2562 const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
2563 return CGM.GetAddrOfGlobalTemporary(E, Inner);
2564}
2565
/// Lower an evaluated APValue of type \p DestType to an LLVM constant,
/// dispatching on the APValue's kind. Returns null when the value cannot be
/// represented as a constant (e.g. integers under pointer authentication, or
/// unemittable lvalues).
llvm::Constant *
ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType,
                                bool EnablePtrAuthFunctionTypeDiscrimination) {
  switch (Value.getKind()) {
  case APValue::None:
  case APValue::Indeterminate:
    // Out-of-lifetime and indeterminate values can be modeled as 'undef'.
    return llvm::UndefValue::get(T: CGM.getTypes().ConvertType(T: DestType));
  case APValue::LValue:
    // Addresses (globals, string literals, labels, ...) have their own
    // dedicated emitter.
    return ConstantLValueEmitter(*this, Value, DestType,
                                 EnablePtrAuthFunctionTypeDiscrimination)
        .tryEmit();
  case APValue::Int:
    // An address-discriminated ptrauth-qualified integer cannot be emitted
    // statically; bail out unless it is a non-authenticated null.
    if (PointerAuthQualifier PointerAuth = DestType.getPointerAuth();
        PointerAuth &&
        (PointerAuth.authenticatesNullValues() || Value.getInt() != 0))
      return nullptr;
    return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Value.getInt());
  case APValue::FixedPoint:
    // Fixed-point values are represented by their underlying integer bits.
    return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                  V: Value.getFixedPoint().getValue());
  case APValue::ComplexInt: {
    // A complex integer is a two-element struct { real, imag }.
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                        V: Value.getComplexIntReal());
    Complex[1] = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                        V: Value.getComplexIntImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy =
        llvm::StructType::get(elt1: Complex[0]->getType(), elts: Complex[1]->getType());
    return llvm::ConstantStruct::get(T: STy, V: Complex);
  }
  case APValue::Float: {
    const llvm::APFloat &Init = Value.getFloat();
    // Targets without native half support that use conversion intrinsics
    // store 'half' as a raw i16 bit pattern instead of an FP constant.
    if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() &&
        !CGM.getContext().getLangOpts().NativeHalfType &&
        CGM.getContext().getTargetInfo().useFP16ConversionIntrinsics())
      return llvm::ConstantInt::get(Context&: CGM.getLLVMContext(),
                                    V: Init.bitcastToAPInt());
    else
      return llvm::ConstantFP::get(Context&: CGM.getLLVMContext(), V: Init);
  }
  case APValue::ComplexFloat: {
    // A complex float is a two-element struct { real, imag }.
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantFP::get(Context&: CGM.getLLVMContext(),
                                       V: Value.getComplexFloatReal());
    Complex[1] = llvm::ConstantFP::get(Context&: CGM.getLLVMContext(),
                                       V: Value.getComplexFloatImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy =
        llvm::StructType::get(elt1: Complex[0]->getType(), elts: Complex[1]->getType());
    return llvm::ConstantStruct::get(T: STy, V: Complex);
  }
  case APValue::Vector: {
    // Emit each element; indeterminate elements become undef of the vector's
    // element type.
    unsigned NumElts = Value.getVectorLength();
    SmallVector<llvm::Constant *, 4> Inits(NumElts);

    for (unsigned I = 0; I != NumElts; ++I) {
      const APValue &Elt = Value.getVectorElt(I);
      if (Elt.isInt())
        Inits[I] = llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Elt.getInt());
      else if (Elt.isFloat())
        Inits[I] = llvm::ConstantFP::get(Context&: CGM.getLLVMContext(), V: Elt.getFloat());
      else if (Elt.isIndeterminate())
        Inits[I] = llvm::UndefValue::get(T: CGM.getTypes().ConvertType(
            T: DestType->castAs<VectorType>()->getElementType()));
      else
        llvm_unreachable("unsupported vector element type");
    }
    return llvm::ConstantVector::get(V: Inits);
  }
  case APValue::Matrix: {
    // Matrices are flattened into a single vector constant; the element at
    // (Row, Col) lands at the index chosen by the configured memory layout.
    const auto *MT = DestType->castAs<ConstantMatrixType>();
    unsigned NumRows = Value.getMatrixNumRows();
    unsigned NumCols = Value.getMatrixNumColumns();
    unsigned NumElts = NumRows * NumCols;
    SmallVector<llvm::Constant *, 16> Inits(NumElts);

    bool IsRowMajor = CGM.getLangOpts().getDefaultMatrixMemoryLayout() ==
                      LangOptions::MatrixMemoryLayout::MatrixRowMajor;

    for (unsigned Row = 0; Row != NumRows; ++Row) {
      for (unsigned Col = 0; Col != NumCols; ++Col) {
        const APValue &Elt = Value.getMatrixElt(Row, Col);
        unsigned Idx = MT->getFlattenedIndex(Row, Column: Col, IsRowMajor);
        if (Elt.isInt())
          Inits[Idx] =
              llvm::ConstantInt::get(Context&: CGM.getLLVMContext(), V: Elt.getInt());
        else if (Elt.isFloat())
          Inits[Idx] =
              llvm::ConstantFP::get(Context&: CGM.getLLVMContext(), V: Elt.getFloat());
        else if (Elt.isIndeterminate())
          Inits[Idx] = llvm::PoisonValue::get(
              T: CGM.getTypes().ConvertType(T: MT->getElementType()));
        else
          llvm_unreachable("unsupported matrix element type");
      }
    }
    return llvm::ConstantVector::get(V: Inits);
  }
  case APValue::AddrLabelDiff: {
    // &&a - &&b: emit both label addresses, subtract as integers.
    const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
    const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
    llvm::Constant *LHS = tryEmitPrivate(E: LHSExpr, destType: LHSExpr->getType());
    llvm::Constant *RHS = tryEmitPrivate(E: RHSExpr, destType: RHSExpr->getType());
    if (!LHS || !RHS) return nullptr;

    // Compute difference
    llvm::Type *ResultType = CGM.getTypes().ConvertType(T: DestType);
    LHS = llvm::ConstantExpr::getPtrToInt(C: LHS, Ty: CGM.IntPtrTy);
    RHS = llvm::ConstantExpr::getPtrToInt(C: RHS, Ty: CGM.IntPtrTy);
    llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(C1: LHS, C2: RHS);

    // LLVM is a bit sensitive about the exact format of the
    // address-of-label difference; make sure to truncate after
    // the subtraction.
    return llvm::ConstantExpr::getTruncOrBitCast(C: AddrLabelDiff, Ty: ResultType);
  }
  case APValue::Struct:
  case APValue::Union:
    // Aggregates are handled by the struct builder (bitfields, padding, ...).
    return ConstStructBuilder::BuildStruct(Emitter&: *this, Val: Value, ValTy: DestType);
  case APValue::Array: {
    const ArrayType *ArrayTy = CGM.getContext().getAsArrayType(T: DestType);
    unsigned NumElements = Value.getArraySize();
    unsigned NumInitElts = Value.getArrayInitializedElts();

    // Emit array filler, if there is one.
    llvm::Constant *Filler = nullptr;
    if (Value.hasArrayFiller()) {
      Filler = tryEmitAbstractForMemory(value: Value.getArrayFiller(),
                                        destType: ArrayTy->getElementType());
      if (!Filler)
        return nullptr;
    }

    // Emit initializer elements. A null filler can be represented implicitly,
    // so only reserve one extra slot for it; otherwise reserve the full size.
    SmallVector<llvm::Constant*, 16> Elts;
    if (Filler && Filler->isNullValue())
      Elts.reserve(N: NumInitElts + 1);
    else
      Elts.reserve(N: NumElements);

    // Track whether all elements share one LLVM type; if not, the array must
    // be emitted as a (packed) struct instead.
    llvm::Type *CommonElementType = nullptr;
    for (unsigned I = 0; I < NumInitElts; ++I) {
      llvm::Constant *C = tryEmitPrivateForMemory(
          value: Value.getArrayInitializedElt(I), destType: ArrayTy->getElementType());
      if (!C) return nullptr;

      if (I == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(Elt: C);
    }

    llvm::ArrayType *Desired =
        cast<llvm::ArrayType>(Val: CGM.getTypes().ConvertType(T: DestType));

    // Fix the type of incomplete arrays if the initializer isn't empty.
    if (DestType->isIncompleteArrayType() && !Elts.empty())
      Desired = llvm::ArrayType::get(ElementType: Desired->getElementType(), NumElements: Elts.size());

    return EmitArrayConstant(CGM, DesiredType: Desired, CommonElementType, ArrayBound: NumElements, Elements&: Elts,
                             Filler);
  }
  case APValue::MemberPointer:
    // Member pointer representation is ABI-specific.
    return CGM.getCXXABI().EmitMemberPointer(MP: Value, MPT: DestType);
  }
  llvm_unreachable("Unknown APValue kind");
}
2740
2741llvm::GlobalVariable *CodeGenModule::getAddrOfConstantCompoundLiteralIfEmitted(
2742 const CompoundLiteralExpr *E) {
2743 return EmittedCompoundLiterals.lookup(Val: E);
2744}
2745
2746void CodeGenModule::setAddrOfConstantCompoundLiteral(
2747 const CompoundLiteralExpr *CLE, llvm::GlobalVariable *GV) {
2748 bool Ok = EmittedCompoundLiterals.insert(KV: std::make_pair(x&: CLE, y&: GV)).second;
2749 (void)Ok;
2750 assert(Ok && "CLE has already been emitted!");
2751}
2752
2753ConstantAddress
2754CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
2755 assert(E->isFileScope() && "not a file-scope compound literal expr");
2756 ConstantEmitter emitter(*this);
2757 return tryEmitGlobalCompoundLiteral(emitter, E);
2758}
2759
2760llvm::Constant *
2761CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
2762 // Member pointer constants always have a very particular form.
2763 const MemberPointerType *type = cast<MemberPointerType>(Val: uo->getType());
2764 const ValueDecl *decl = cast<DeclRefExpr>(Val: uo->getSubExpr())->getDecl();
2765
2766 // A member function pointer.
2767 if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(Val: decl))
2768 return getCXXABI().EmitMemberFunctionPointer(MD: method);
2769
2770 // Otherwise, a member data pointer.
2771 getContext().recordMemberDataPointerEvaluation(VD: decl);
2772 uint64_t fieldOffset = getContext().getFieldOffset(FD: decl);
2773 CharUnits chars = getContext().toCharUnitsFromBits(BitSize: (int64_t) fieldOffset);
2774 return getCXXABI().EmitMemberDataPointer(MPT: type, offset: chars);
2775}
2776
2777static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
2778 llvm::Type *baseType,
2779 const CXXRecordDecl *base);
2780
/// Build an explicit null constant for \p record, which is needed when the
/// record is not trivially zero-initializable (e.g. it contains pointers to
/// data members with a nonzero null representation). When \p asCompleteObject
/// is false, the base-subobject layout (without virtual bases) is used.
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const RecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  llvm::StructType *structure =
      (asCompleteObject ? layout.getLLVMType()
                        : layout.getBaseSubobjectLLVMType());

  // Elements left null here are zero-filled at the end.
  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  auto CXXR = dyn_cast<CXXRecordDecl>(Val: record);
  // Fill in all the bases.
  if (CXXR) {
    for (const auto &I : CXXR->bases()) {
      if (I.isVirtual()) {
        // Ignore virtual bases; if we're laying out for a complete
        // object, we'll lay these out later.
        continue;
      }

      const auto *base = I.getType()->castAsCXXRecordDecl();
      // Ignore empty bases.
      if (isEmptyRecordForLayout(Context: CGM.getContext(), T: I.getType()) ||
          CGM.getContext()
              .getASTRecordLayout(D: base)
              .getNonVirtualSize()
              .isZero())
        continue;

      unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(RD: base);
      llvm::Type *baseType = structure->getElementType(N: fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Fill in all the fields.
  for (const auto *Field : record->fields()) {
    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!Field->isBitField() &&
        !isEmptyFieldForLayout(Context: CGM.getContext(), FD: Field)) {
      unsigned fieldIndex = layout.getLLVMFieldNo(FD: Field);
      elements[fieldIndex] = CGM.EmitNullConstant(T: Field->getType());
    }

    // For unions, stop after the first named field.
    if (record->isUnion()) {
      if (Field->getIdentifier())
        break;
      // An anonymous-struct member with a named data member also terminates
      // the union's active-member search.
      if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
        if (FieldRD->findFirstNamedDataMember())
          break;
    }
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (CXXR && asCompleteObject) {
    for (const auto &I : CXXR->vbases()) {
      const auto *base = I.getType()->castAsCXXRecordDecl();
      // Ignore empty bases.
      if (isEmptyRecordForLayout(Context: CGM.getContext(), T: I.getType()))
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      llvm::Type *baseType = structure->getElementType(N: fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(Ty: structure->getElementType(N: i));
  }

  return llvm::ConstantStruct::get(T: structure, V: elements);
}
2863
2864/// Emit the null constant for a base subobject.
2865static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
2866 llvm::Type *baseType,
2867 const CXXRecordDecl *base) {
2868 const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
2869
2870 // Just zero out bases that don't have any pointer to data members.
2871 if (baseLayout.isZeroInitializableAsBase())
2872 return llvm::Constant::getNullValue(Ty: baseType);
2873
2874 // Otherwise, we can just use its null constant.
2875 return EmitNullConstant(CGM, record: base, /*asCompleteObject=*/false);
2876}
2877
2878llvm::Constant *ConstantEmitter::emitNullForMemory(CodeGenModule &CGM,
2879 QualType T) {
2880 return emitForMemory(CGM, C: CGM.EmitNullConstant(T), destType: T);
2881}
2882
2883llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
2884 if (T->getAs<PointerType>())
2885 return getNullPointer(
2886 T: cast<llvm::PointerType>(Val: getTypes().ConvertTypeForMem(T)), QT: T);
2887
2888 if (getTypes().isZeroInitializable(T))
2889 return llvm::Constant::getNullValue(Ty: getTypes().ConvertTypeForMem(T));
2890
2891 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
2892 llvm::ArrayType *ATy =
2893 cast<llvm::ArrayType>(Val: getTypes().ConvertTypeForMem(T));
2894
2895 QualType ElementTy = CAT->getElementType();
2896
2897 llvm::Constant *Element =
2898 ConstantEmitter::emitNullForMemory(CGM&: *this, T: ElementTy);
2899 unsigned NumElements = CAT->getZExtSize();
2900 SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
2901 return llvm::ConstantArray::get(T: ATy, V: Array);
2902 }
2903
2904 if (const auto *RD = T->getAsRecordDecl())
2905 return ::EmitNullConstant(CGM&: *this, record: RD,
2906 /*asCompleteObject=*/true);
2907
2908 assert(T->isMemberDataPointerType() &&
2909 "Should only see pointers to data members here!");
2910
2911 return getCXXABI().EmitNullMemberPointer(MPT: T->castAs<MemberPointerType>());
2912}
2913
2914llvm::Constant *
2915CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
2916 return ::EmitNullConstant(CGM&: *this, record: Record, asCompleteObject: false);
2917}
2918