//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Expr constant evaluator.
//
// Constant expression evaluation produces four main results:
//
// * A success/failure flag indicating whether constant folding was successful.
//   This is the 'bool' return value used by most of the code in this file. A
//   'false' return value indicates that constant folding has failed, and any
//   appropriate diagnostic has already been produced.
//
// * An evaluated result, valid only if constant folding has not failed.
//
// * A flag indicating if evaluation encountered (unevaluated) side-effects.
//   These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
//   where it is possible to determine the evaluated result regardless.
//
// * A set of notes indicating why the evaluation was not a constant expression
//   (under the C++11 / C++1y rules only, at the moment), or, if folding failed
//   too, why the expression could not be folded.
//
// If we are checking for a potential constant expression, failure to constant
// fold a potential constant sub-expression will be indicated by a 'false'
// return value (the expression could not be folded) and no diagnostic (the
// expression is not necessarily non-constant).
//
//===----------------------------------------------------------------------===//

#include "ByteCode/Context.h"
#include "ByteCode/Frame.h"
#include "ByteCode/State.h"
#include "ExprConstShared.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/Expr.h"
#include "clang/AST/InferAlloc.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OptionalDiagnostic.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/SipHash.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
#include <limits>
#include <optional>

#define DEBUG_TYPE "exprconstant"

using namespace clang;
using llvm::APFixedPoint;
using llvm::APFloat;
using llvm::APInt;
using llvm::APSInt;
using llvm::FixedPointSemantics;

82namespace {
83 struct LValue;
84 class CallStackFrame;
85 class EvalInfo;
86
87 using SourceLocExprScopeGuard =
88 CurrentSourceLocExprScope::SourceLocExprScopeGuard;
89
90 static QualType getType(APValue::LValueBase B) {
91 return B.getType();
92 }
93
94 /// Get an LValue path entry, which is known to not be an array index, as a
95 /// field declaration.
96 static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
97 return dyn_cast_or_null<FieldDecl>(Val: E.getAsBaseOrMember().getPointer());
98 }
99 /// Get an LValue path entry, which is known to not be an array index, as a
100 /// base class declaration.
101 static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
102 return dyn_cast_or_null<CXXRecordDecl>(Val: E.getAsBaseOrMember().getPointer());
103 }
104 /// Determine whether this LValue path entry for a base class names a virtual
105 /// base class.
106 static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
107 return E.getAsBaseOrMember().getInt();
108 }
109
110 /// Given an expression, determine the type used to store the result of
111 /// evaluating that expression.
112 static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
113 if (E->isPRValue())
114 return E->getType();
115 return Ctx.getLValueReferenceType(T: E->getType());
116 }
117
118 /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
119 /// This will look through a single cast.
120 ///
121 /// Returns null if we couldn't unwrap a function with alloc_size.
122 static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
123 if (!E->getType()->isPointerType())
124 return nullptr;
125
126 E = E->IgnoreParens();
127 // If we're doing a variable assignment from e.g. malloc(N), there will
128 // probably be a cast of some kind. In exotic cases, we might also see a
129 // top-level ExprWithCleanups. Ignore them either way.
130 if (const auto *FE = dyn_cast<FullExpr>(Val: E))
131 E = FE->getSubExpr()->IgnoreParens();
132
133 if (const auto *Cast = dyn_cast<CastExpr>(Val: E))
134 E = Cast->getSubExpr()->IgnoreParens();
135
136 if (const auto *CE = dyn_cast<CallExpr>(Val: E))
137 return CE->getCalleeAllocSizeAttr() ? CE : nullptr;
138 return nullptr;
139 }
140
141 /// Determines whether or not the given Base contains a call to a function
142 /// with the alloc_size attribute.
143 static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
144 const auto *E = Base.dyn_cast<const Expr *>();
145 return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
146 }
147
148 /// Determines whether the given kind of constant expression is only ever
149 /// used for name mangling. If so, it's permitted to reference things that we
150 /// can't generate code for (in particular, dllimported functions).
151 static bool isForManglingOnly(ConstantExprKind Kind) {
152 switch (Kind) {
153 case ConstantExprKind::Normal:
154 case ConstantExprKind::ClassTemplateArgument:
155 case ConstantExprKind::ImmediateInvocation:
156 // Note that non-type template arguments of class type are emitted as
157 // template parameter objects.
158 return false;
159
160 case ConstantExprKind::NonClassTemplateArgument:
161 return true;
162 }
163 llvm_unreachable("unknown ConstantExprKind");
164 }
165
166 static bool isTemplateArgument(ConstantExprKind Kind) {
167 switch (Kind) {
168 case ConstantExprKind::Normal:
169 case ConstantExprKind::ImmediateInvocation:
170 return false;
171
172 case ConstantExprKind::ClassTemplateArgument:
173 case ConstantExprKind::NonClassTemplateArgument:
174 return true;
175 }
176 llvm_unreachable("unknown ConstantExprKind");
177 }
178
179 /// The bound to claim that an array of unknown bound has.
180 /// The value in MostDerivedArraySize is undefined in this case. So, set it
181 /// to an arbitrary value that's likely to loudly break things if it's used.
182 static const uint64_t AssumedSizeForUnsizedArray =
183 std::numeric_limits<uint64_t>::max() / 2;

  /// Find the path length and type of the most-derived subobject in the given
  /// path, and find the size of the containing array, if any.
189 static unsigned
190 findMostDerivedSubobject(const ASTContext &Ctx, APValue::LValueBase Base,
191 ArrayRef<APValue::LValuePathEntry> Path,
192 uint64_t &ArraySize, QualType &Type, bool &IsArray,
193 bool &FirstEntryIsUnsizedArray) {
194 // This only accepts LValueBases from APValues, and APValues don't support
195 // arrays that lack size info.
196 assert(!isBaseAnAllocSizeCall(Base) &&
197 "Unsized arrays shouldn't appear here");
198 unsigned MostDerivedLength = 0;
199 // The type of Base is a reference type if the base is a constexpr-unknown
200 // variable. In that case, look through the reference type.
201 Type = getType(B: Base).getNonReferenceType();
202
203 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
204 if (Type->isArrayType()) {
205 const ArrayType *AT = Ctx.getAsArrayType(T: Type);
206 Type = AT->getElementType();
207 MostDerivedLength = I + 1;
208 IsArray = true;
209
210 if (auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
211 ArraySize = CAT->getZExtSize();
212 } else {
213 assert(I == 0 && "unexpected unsized array designator");
214 FirstEntryIsUnsizedArray = true;
215 ArraySize = AssumedSizeForUnsizedArray;
216 }
217 } else if (Type->isAnyComplexType()) {
218 const ComplexType *CT = Type->castAs<ComplexType>();
219 Type = CT->getElementType();
220 ArraySize = 2;
221 MostDerivedLength = I + 1;
222 IsArray = true;
223 } else if (const auto *VT = Type->getAs<VectorType>()) {
224 Type = VT->getElementType();
225 ArraySize = VT->getNumElements();
226 MostDerivedLength = I + 1;
227 IsArray = true;
228 } else if (const FieldDecl *FD = getAsField(E: Path[I])) {
229 Type = FD->getType();
230 ArraySize = 0;
231 MostDerivedLength = I + 1;
232 IsArray = false;
233 } else {
234 // Path[I] describes a base class.
235 ArraySize = 0;
236 IsArray = false;
237 }
238 }
239 return MostDerivedLength;
240 }
241
242 /// A path from a glvalue to a subobject of that glvalue.
243 struct SubobjectDesignator {
244 /// True if the subobject was named in a manner not supported by C++11. Such
245 /// lvalues can still be folded, but they are not core constant expressions
246 /// and we cannot perform lvalue-to-rvalue conversions on them.
247 LLVM_PREFERRED_TYPE(bool)
248 unsigned Invalid : 1;
249
250 /// Is this a pointer one past the end of an object?
251 LLVM_PREFERRED_TYPE(bool)
252 unsigned IsOnePastTheEnd : 1;
253
254 /// Indicator of whether the first entry is an unsized array.
255 LLVM_PREFERRED_TYPE(bool)
256 unsigned FirstEntryIsAnUnsizedArray : 1;
257
258 /// Indicator of whether the most-derived object is an array element.
259 LLVM_PREFERRED_TYPE(bool)
260 unsigned MostDerivedIsArrayElement : 1;
261
262 /// The length of the path to the most-derived object of which this is a
263 /// subobject.
264 unsigned MostDerivedPathLength : 28;
265
266 /// The size of the array of which the most-derived object is an element.
267 /// This will always be 0 if the most-derived object is not an array
268 /// element. 0 is not an indicator of whether or not the most-derived object
269 /// is an array, however, because 0-length arrays are allowed.
270 ///
271 /// If the current array is an unsized array, the value of this is
272 /// undefined.
273 uint64_t MostDerivedArraySize;
274 /// The type of the most derived object referred to by this address.
275 QualType MostDerivedType;
276
277 typedef APValue::LValuePathEntry PathEntry;
278
279 /// The entries on the path from the glvalue to the designated subobject.
280 SmallVector<PathEntry, 8> Entries;
281
282 SubobjectDesignator() : Invalid(true) {}
283
284 explicit SubobjectDesignator(QualType T)
285 : Invalid(false), IsOnePastTheEnd(false),
286 FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
287 MostDerivedPathLength(0), MostDerivedArraySize(0),
288 MostDerivedType(T.isNull() ? QualType() : T.getNonReferenceType()) {}
289
290 SubobjectDesignator(const ASTContext &Ctx, const APValue &V)
291 : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
292 FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
293 MostDerivedPathLength(0), MostDerivedArraySize(0) {
294 assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
295 if (!Invalid) {
296 IsOnePastTheEnd = V.isLValueOnePastTheEnd();
297 llvm::append_range(C&: Entries, R: V.getLValuePath());
298 if (V.getLValueBase()) {
299 bool IsArray = false;
300 bool FirstIsUnsizedArray = false;
301 MostDerivedPathLength = findMostDerivedSubobject(
302 Ctx, Base: V.getLValueBase(), Path: V.getLValuePath(), ArraySize&: MostDerivedArraySize,
303 Type&: MostDerivedType, IsArray, FirstEntryIsUnsizedArray&: FirstIsUnsizedArray);
304 MostDerivedIsArrayElement = IsArray;
305 FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
306 }
307 }
308 }
309
310 void truncate(ASTContext &Ctx, APValue::LValueBase Base,
311 unsigned NewLength) {
312 if (Invalid)
313 return;
314
315 assert(Base && "cannot truncate path for null pointer");
316 assert(NewLength <= Entries.size() && "not a truncation");
317
318 if (NewLength == Entries.size())
319 return;
320 Entries.resize(N: NewLength);
321
322 bool IsArray = false;
323 bool FirstIsUnsizedArray = false;
324 MostDerivedPathLength = findMostDerivedSubobject(
325 Ctx, Base, Path: Entries, ArraySize&: MostDerivedArraySize, Type&: MostDerivedType, IsArray,
326 FirstEntryIsUnsizedArray&: FirstIsUnsizedArray);
327 MostDerivedIsArrayElement = IsArray;
328 FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
329 }
330
331 void setInvalid() {
332 Invalid = true;
333 Entries.clear();
334 }
335
336 /// Determine whether the most derived subobject is an array without a
337 /// known bound.
338 bool isMostDerivedAnUnsizedArray() const {
339 assert(!Invalid && "Calling this makes no sense on invalid designators");
340 return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
341 }
342
343 /// Determine what the most derived array's size is. Results in an assertion
344 /// failure if the most derived array lacks a size.
345 uint64_t getMostDerivedArraySize() const {
346 assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
347 return MostDerivedArraySize;
348 }
349
350 /// Determine whether this is a one-past-the-end pointer.
351 bool isOnePastTheEnd() const {
352 assert(!Invalid);
353 if (IsOnePastTheEnd)
354 return true;
355 if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
356 Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
357 MostDerivedArraySize)
358 return true;
359 return false;
360 }
361
362 /// Get the range of valid index adjustments in the form
363 /// {maximum value that can be subtracted from this pointer,
364 /// maximum value that can be added to this pointer}
365 std::pair<uint64_t, uint64_t> validIndexAdjustments() {
366 if (Invalid || isMostDerivedAnUnsizedArray())
367 return {0, 0};
368
369 // [expr.add]p4: For the purposes of these operators, a pointer to a
370 // nonarray object behaves the same as a pointer to the first element of
371 // an array of length one with the type of the object as its element type.
372 bool IsArray = MostDerivedPathLength == Entries.size() &&
373 MostDerivedIsArrayElement;
374 uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
375 : (uint64_t)IsOnePastTheEnd;
376 uint64_t ArraySize =
377 IsArray ? getMostDerivedArraySize() : (uint64_t)1;
378 return {ArrayIndex, ArraySize - ArrayIndex};
379 }
380
381 /// Check that this refers to a valid subobject.
382 bool isValidSubobject() const {
383 if (Invalid)
384 return false;
385 return !isOnePastTheEnd();
386 }
387 /// Check that this refers to a valid subobject, and if not, produce a
388 /// relevant diagnostic and set the designator as invalid.
389 bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
390
391 /// Get the type of the designated object.
392 QualType getType(ASTContext &Ctx) const {
393 assert(!Invalid && "invalid designator has no subobject type");
394 return MostDerivedPathLength == Entries.size()
395 ? MostDerivedType
396 : Ctx.getCanonicalTagType(TD: getAsBaseClass(E: Entries.back()));
397 }
398
399 /// Update this designator to refer to the first element within this array.
400 void addArrayUnchecked(const ConstantArrayType *CAT) {
401 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: 0));
402
403 // This is a most-derived object.
404 MostDerivedType = CAT->getElementType();
405 MostDerivedIsArrayElement = true;
406 MostDerivedArraySize = CAT->getZExtSize();
407 MostDerivedPathLength = Entries.size();
408 }
409 /// Update this designator to refer to the first element within the array of
410 /// elements of type T. This is an array of unknown size.
411 void addUnsizedArrayUnchecked(QualType ElemTy) {
412 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: 0));
413
414 MostDerivedType = ElemTy;
415 MostDerivedIsArrayElement = true;
416 // The value in MostDerivedArraySize is undefined in this case. So, set it
417 // to an arbitrary value that's likely to loudly break things if it's
418 // used.
419 MostDerivedArraySize = AssumedSizeForUnsizedArray;
420 MostDerivedPathLength = Entries.size();
421 }
422 /// Update this designator to refer to the given base or member of this
423 /// object.
424 void addDeclUnchecked(const Decl *D, bool Virtual = false) {
425 Entries.push_back(Elt: APValue::BaseOrMemberType(D, Virtual));
426
427 // If this isn't a base class, it's a new most-derived object.
428 if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D)) {
429 MostDerivedType = FD->getType();
430 MostDerivedIsArrayElement = false;
431 MostDerivedArraySize = 0;
432 MostDerivedPathLength = Entries.size();
433 }
434 }
435 /// Update this designator to refer to the given complex component.
436 void addComplexUnchecked(QualType EltTy, bool Imag) {
437 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: Imag));
438
439 // This is technically a most-derived object, though in practice this
440 // is unlikely to matter.
441 MostDerivedType = EltTy;
442 MostDerivedIsArrayElement = true;
443 MostDerivedArraySize = 2;
444 MostDerivedPathLength = Entries.size();
445 }
446
447 void addVectorElementUnchecked(QualType EltTy, uint64_t Size,
448 uint64_t Idx) {
449 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: Idx));
450 MostDerivedType = EltTy;
451 MostDerivedPathLength = Entries.size();
452 MostDerivedArraySize = 0;
453 MostDerivedIsArrayElement = false;
454 }
455
456 void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
457 void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
458 const APSInt &N);
459 /// Add N to the address of this subobject.
460 void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N, const LValue &LV);
461 };
462
  /// A scope at the end of which an object can need to be destroyed.
  /// Ordered from innermost to outermost; Cleanup::isDestroyedAtEndOf relies
  /// on this ordering by comparing enumerator values.
  enum class ScopeKind {
    Block,          // a compound statement / block scope
    FullExpression, // the end of a full-expression
    Call            // the end of the enclosing function call
  };
469
470 /// A reference to a particular call and its arguments.
471 struct CallRef {
472 CallRef() : OrigCallee(), CallIndex(0), Version() {}
473 CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version)
474 : OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {}
475
476 explicit operator bool() const { return OrigCallee; }
477
478 /// Get the parameter that the caller initialized, corresponding to the
479 /// given parameter in the callee.
480 const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const {
481 return OrigCallee ? OrigCallee->getParamDecl(i: PVD->getFunctionScopeIndex())
482 : PVD;
483 }
484
485 /// The callee at the point where the arguments were evaluated. This might
486 /// be different from the actual callee (a different redeclaration, or a
487 /// virtual override), but this function's parameters are the ones that
488 /// appear in the parameter map.
489 const FunctionDecl *OrigCallee;
490 /// The call index of the frame that holds the argument values.
491 unsigned CallIndex;
492 /// The version of the parameters corresponding to this call.
493 unsigned Version;
494 };
495
496 /// A stack frame in the constexpr call stack.
497 class CallStackFrame : public interp::Frame {
498 public:
499 EvalInfo &Info;
500
501 /// Parent - The caller of this stack frame.
502 CallStackFrame *Caller;
503
504 /// Callee - The function which was called.
505 const FunctionDecl *Callee;
506
507 /// This - The binding for the this pointer in this call, if any.
508 const LValue *This;
509
510 /// CallExpr - The syntactical structure of member function calls
511 const Expr *CallExpr;
512
513 /// Information on how to find the arguments to this call. Our arguments
514 /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
515 /// key and this value as the version.
516 CallRef Arguments;
517
518 /// Source location information about the default argument or default
519 /// initializer expression we're evaluating, if any.
520 CurrentSourceLocExprScope CurSourceLocExprScope;
521
522 // Note that we intentionally use std::map here so that references to
523 // values are stable.
524 typedef std::pair<const void *, unsigned> MapKeyTy;
525 typedef std::map<MapKeyTy, APValue> MapTy;
526 /// Temporaries - Temporary lvalues materialized within this stack frame.
527 MapTy Temporaries;
528
529 /// CallRange - The source range of the call expression for this call.
530 SourceRange CallRange;
531
532 /// Index - The call index of this call.
533 unsigned Index;
534
535 /// The stack of integers for tracking version numbers for temporaries.
536 SmallVector<unsigned, 2> TempVersionStack = {1};
537 unsigned CurTempVersion = TempVersionStack.back();
538
539 unsigned getTempVersion() const { return TempVersionStack.back(); }
540
541 void pushTempVersion() {
542 TempVersionStack.push_back(Elt: ++CurTempVersion);
543 }
544
545 void popTempVersion() {
546 TempVersionStack.pop_back();
547 }
548
549 CallRef createCall(const FunctionDecl *Callee) {
550 return {Callee, Index, ++CurTempVersion};
551 }
552
553 // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
554 // on the overall stack usage of deeply-recursing constexpr evaluations.
555 // (We should cache this map rather than recomputing it repeatedly.)
556 // But let's try this and see how it goes; we can look into caching the map
557 // as a later change.
558
559 /// LambdaCaptureFields - Mapping from captured variables/this to
560 /// corresponding data members in the closure class.
561 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
562 FieldDecl *LambdaThisCaptureField = nullptr;
563
564 CallStackFrame(EvalInfo &Info, SourceRange CallRange,
565 const FunctionDecl *Callee, const LValue *This,
566 const Expr *CallExpr, CallRef Arguments);
567 ~CallStackFrame();
568
569 // Return the temporary for Key whose version number is Version.
570 APValue *getTemporary(const void *Key, unsigned Version) {
571 MapKeyTy KV(Key, Version);
572 auto LB = Temporaries.lower_bound(x: KV);
573 if (LB != Temporaries.end() && LB->first == KV)
574 return &LB->second;
575 return nullptr;
576 }
577
578 // Return the current temporary for Key in the map.
579 APValue *getCurrentTemporary(const void *Key) {
580 auto UB = Temporaries.upper_bound(x: MapKeyTy(Key, UINT_MAX));
581 if (UB != Temporaries.begin() && std::prev(x: UB)->first.first == Key)
582 return &std::prev(x: UB)->second;
583 return nullptr;
584 }
585
586 // Return the version number of the current temporary for Key.
587 unsigned getCurrentTemporaryVersion(const void *Key) const {
588 auto UB = Temporaries.upper_bound(x: MapKeyTy(Key, UINT_MAX));
589 if (UB != Temporaries.begin() && std::prev(x: UB)->first.first == Key)
590 return std::prev(x: UB)->first.second;
591 return 0;
592 }
593
594 /// Allocate storage for an object of type T in this stack frame.
595 /// Populates LV with a handle to the created object. Key identifies
596 /// the temporary within the stack frame, and must not be reused without
597 /// bumping the temporary version number.
598 template<typename KeyT>
599 APValue &createTemporary(const KeyT *Key, QualType T,
600 ScopeKind Scope, LValue &LV);
601
602 /// Allocate storage for a parameter of a function call made in this frame.
603 APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
604
605 void describe(llvm::raw_ostream &OS) const override;
606
607 Frame *getCaller() const override { return Caller; }
608 SourceRange getCallRange() const override { return CallRange; }
609 const FunctionDecl *getCallee() const override { return Callee; }
610
611 bool isStdFunction() const {
612 for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
613 if (DC->isStdNamespace())
614 return true;
615 return false;
616 }
617
618 /// Whether we're in a context where [[msvc::constexpr]] evaluation is
619 /// permitted. See MSConstexprDocs for description of permitted contexts.
620 bool CanEvalMSConstexpr = false;
621
622 private:
623 APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
624 ScopeKind Scope);
625 };
626
627 /// Temporarily override 'this'.
628 class ThisOverrideRAII {
629 public:
630 ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
631 : Frame(Frame), OldThis(Frame.This) {
632 if (Enable)
633 Frame.This = NewThis;
634 }
635 ~ThisOverrideRAII() {
636 Frame.This = OldThis;
637 }
638 private:
639 CallStackFrame &Frame;
640 const LValue *OldThis;
641 };
642
643 // A shorthand time trace scope struct, prints source range, for example
644 // {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}}
645 class ExprTimeTraceScope {
646 public:
647 ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name)
648 : TimeScope(Name, [E, &Ctx] {
649 return E->getSourceRange().printToString(SM: Ctx.getSourceManager());
650 }) {}
651
652 private:
653 llvm::TimeTraceScope TimeScope;
654 };
655
656 /// RAII object used to change the current ability of
657 /// [[msvc::constexpr]] evaulation.
658 struct MSConstexprContextRAII {
659 CallStackFrame &Frame;
660 bool OldValue;
661 explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value)
662 : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) {
663 Frame.CanEvalMSConstexpr = Value;
664 }
665
666 ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; }
667 };
668}
669
670static bool HandleDestruction(EvalInfo &Info, const Expr *E,
671 const LValue &This, QualType ThisType);
672static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
673 APValue::LValueBase LVBase, APValue &Value,
674 QualType T);
675
676namespace {
677 /// A cleanup, and a flag indicating whether it is lifetime-extended.
678 class Cleanup {
679 llvm::PointerIntPair<APValue*, 2, ScopeKind> Value;
680 APValue::LValueBase Base;
681 QualType T;
682
683 public:
684 Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
685 ScopeKind Scope)
686 : Value(Val, Scope), Base(Base), T(T) {}
687
688 /// Determine whether this cleanup should be performed at the end of the
689 /// given kind of scope.
690 bool isDestroyedAtEndOf(ScopeKind K) const {
691 return (int)Value.getInt() >= (int)K;
692 }
693 bool endLifetime(EvalInfo &Info, bool RunDestructors) {
694 if (RunDestructors) {
695 SourceLocation Loc;
696 if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
697 Loc = VD->getLocation();
698 else if (const Expr *E = Base.dyn_cast<const Expr*>())
699 Loc = E->getExprLoc();
700 return HandleDestruction(Info, Loc, LVBase: Base, Value&: *Value.getPointer(), T);
701 }
702 *Value.getPointer() = APValue();
703 return true;
704 }
705
706 bool hasSideEffect() {
707 return T.isDestructedType();
708 }
709 };
710
711 /// A reference to an object whose construction we are currently evaluating.
712 struct ObjectUnderConstruction {
713 APValue::LValueBase Base;
714 ArrayRef<APValue::LValuePathEntry> Path;
715 friend bool operator==(const ObjectUnderConstruction &LHS,
716 const ObjectUnderConstruction &RHS) {
717 return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
718 }
719 friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
720 return llvm::hash_combine(args: Obj.Base, args: Obj.Path);
721 }
722 };
  /// The phase of construction or destruction an ObjectUnderConstruction is
  /// currently in, used to decide which members/bases are alive.
  enum class ConstructionPhase {
    None,
    Bases,          // constructing base class subobjects
    AfterBases,     // bases done; constructing fields
    AfterFields,    // construction complete (within the ctor body)
    Destroying,     // destructor running, fields still alive
    DestroyingBases // fields destroyed; destroying bases
  };
731}
732
733namespace llvm {
734template<> struct DenseMapInfo<ObjectUnderConstruction> {
735 using Base = DenseMapInfo<APValue::LValueBase>;
736 static ObjectUnderConstruction getEmptyKey() {
737 return {.Base: Base::getEmptyKey(), .Path: {}}; }
738 static ObjectUnderConstruction getTombstoneKey() {
739 return {.Base: Base::getTombstoneKey(), .Path: {}};
740 }
741 static unsigned getHashValue(const ObjectUnderConstruction &Object) {
742 return hash_value(Obj: Object);
743 }
744 static bool isEqual(const ObjectUnderConstruction &LHS,
745 const ObjectUnderConstruction &RHS) {
746 return LHS == RHS;
747 }
748};
749}
750
751namespace {
752 /// A dynamically-allocated heap object.
753 struct DynAlloc {
754 /// The value of this heap-allocated object.
755 APValue Value;
756 /// The allocating expression; used for diagnostics. Either a CXXNewExpr
757 /// or a CallExpr (the latter is for direct calls to operator new inside
758 /// std::allocator<T>::allocate).
759 const Expr *AllocExpr = nullptr;
760
761 enum Kind {
762 New,
763 ArrayNew,
764 StdAllocator
765 };
766
767 /// Get the kind of the allocation. This must match between allocation
768 /// and deallocation.
769 Kind getKind() const {
770 if (auto *NE = dyn_cast<CXXNewExpr>(Val: AllocExpr))
771 return NE->isArray() ? ArrayNew : New;
772 assert(isa<CallExpr>(AllocExpr));
773 return StdAllocator;
774 }
775 };
776
777 struct DynAllocOrder {
778 bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
779 return L.getIndex() < R.getIndex();
780 }
781 };
782
783 /// EvalInfo - This is a private struct used by the evaluator to capture
784 /// information about a subexpression as it is folded. It retains information
785 /// about the AST context, but also maintains information about the folded
786 /// expression.
787 ///
788 /// If an expression could be evaluated, it is still possible it is not a C
789 /// "integer constant expression" or constant expression. If not, this struct
790 /// captures information about how and why not.
791 ///
792 /// One bit of information passed *into* the request for constant folding
793 /// indicates whether the subexpression is "evaluated" or not according to C
794 /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
795 /// evaluate the expression regardless of what the RHS is, but C only allows
796 /// certain things in certain situations.
class EvalInfo final : public interp::State {
public:
  /// CurrentCall - The top of the constexpr call stack.
  CallStackFrame *CurrentCall;

  /// CallStackDepth - The number of calls in the call stack right now.
  unsigned CallStackDepth;

  /// NextCallIndex - The next call index to assign.
  unsigned NextCallIndex;

  /// StepsLeft - The remaining number of evaluation steps we're permitted
  /// to perform. This is essentially a limit for the number of statements
  /// we will evaluate.
  unsigned StepsLeft;

  /// Enable the experimental new constant interpreter. If an expression is
  /// not supported by the interpreter, an error is triggered.
  bool EnableNewConstInterp;

  /// BottomFrame - The frame in which evaluation started. This must be
  /// initialized after CurrentCall and CallStackDepth.
  CallStackFrame BottomFrame;

  /// A stack of values whose lifetimes end at the end of some surrounding
  /// evaluation frame.
  llvm::SmallVector<Cleanup, 16> CleanupStack;

  /// EvaluatingDecl - This is the declaration whose initializer is being
  /// evaluated, if any.
  APValue::LValueBase EvaluatingDecl;

  enum class EvaluatingDeclKind {
    None,
    /// We're evaluating the construction of EvaluatingDecl.
    Ctor,
    /// We're evaluating the destruction of EvaluatingDecl.
    Dtor,
  };
  EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;

  /// EvaluatingDeclValue - This is the value being constructed for the
  /// declaration whose initializer is being evaluated, if any.
  APValue *EvaluatingDeclValue;

  /// Stack of loops and 'switch' statements which we're currently
  /// breaking/continuing; null entries are used to mark unlabeled
  /// break/continue.
  SmallVector<const Stmt *> BreakContinueStack;

  /// Set of objects that are currently being constructed.
  llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
      ObjectsUnderConstruction;

  /// Current heap allocations, along with the location where each was
  /// allocated. We use std::map here because we need stable addresses
  /// for the stored APValues.
  std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;

  /// The number of heap allocations performed so far in this evaluation.
  unsigned NumHeapAllocs = 0;

  /// RAII object that registers an object as under construction for the
  /// duration of a constructor call, and tracks which phase of
  /// construction (bases vs. fields) it is currently in.
  struct EvaluatingConstructorRAII {
    EvalInfo &EI;
    ObjectUnderConstruction Object;
    // Whether we inserted a fresh entry (false if the object was already
    // registered, in which case we must not erase it on destruction).
    bool DidInsert;
    EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
                              bool HasBases)
        : EI(EI), Object(Object) {
      DidInsert =
          EI.ObjectsUnderConstruction
              .insert(KV: {Object, HasBases ? ConstructionPhase::Bases
                                           : ConstructionPhase::AfterBases})
              .second;
    }
    void finishedConstructingBases() {
      EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
    }
    void finishedConstructingFields() {
      EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
    }
    ~EvaluatingConstructorRAII() {
      if (DidInsert) EI.ObjectsUnderConstruction.erase(Val: Object);
    }
  };

  /// RAII object that registers an object as being destroyed for the
  /// duration of a destructor call.
  struct EvaluatingDestructorRAII {
    EvalInfo &EI;
    ObjectUnderConstruction Object;
    // See EvaluatingConstructorRAII::DidInsert.
    bool DidInsert;
    EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
        : EI(EI), Object(Object) {
      DidInsert = EI.ObjectsUnderConstruction
                      .insert(KV: {Object, ConstructionPhase::Destroying})
                      .second;
    }
    void startedDestroyingBases() {
      EI.ObjectsUnderConstruction[Object] =
          ConstructionPhase::DestroyingBases;
    }
    ~EvaluatingDestructorRAII() {
      if (DidInsert)
        EI.ObjectsUnderConstruction.erase(Val: Object);
    }
  };

  /// Determine whether the object designated by (Base, Path) is currently
  /// being constructed or destroyed, and if so, which phase it is in.
  /// Returns a default-constructed ConstructionPhase if it is not.
  ConstructionPhase
  isEvaluatingCtorDtor(APValue::LValueBase Base,
                       ArrayRef<APValue::LValuePathEntry> Path) {
    return ObjectsUnderConstruction.lookup(Val: {.Base: Base, .Path: Path});
  }

  /// If we're currently speculatively evaluating, the outermost call stack
  /// depth at which we can mutate state, otherwise 0.
  unsigned SpeculativeEvaluationDepth = 0;

  /// The current array initialization index, if we're performing array
  /// initialization.
  uint64_t ArrayInitIndex = -1;

  EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
      : State(const_cast<ASTContext &>(C), S), CurrentCall(nullptr),
        CallStackDepth(0), NextCallIndex(1),
        StepsLeft(C.getLangOpts().ConstexprStepLimit),
        EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
        BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr,
                    /*This=*/nullptr,
                    /*CallExpr=*/nullptr, CallRef()),
        EvaluatingDecl((const ValueDecl *)nullptr),
        EvaluatingDeclValue(nullptr) {
    EvalMode = Mode;
  }

  ~EvalInfo() {
    discardCleanups();
  }

  /// Record that we are evaluating the initializer (construction) or the
  /// destruction of the given declaration, storing the result into Value.
  void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
                         EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
    EvaluatingDecl = Base;
    IsEvaluatingDecl = EDK;
    EvaluatingDeclValue = &Value;
  }

  /// Check that we are permitted to perform another constexpr call, and
  /// that its depth does not exceed -fconstexpr-depth. Diagnoses and
  /// returns false otherwise.
  bool CheckCallLimit(SourceLocation Loc) {
    // Don't perform any constexpr calls (other than the call we're checking)
    // when checking a potential constant expression.
    if (checkingPotentialConstantExpression() && CallStackDepth > 1)
      return false;
    if (NextCallIndex == 0) {
      // NextCallIndex has wrapped around.
      FFDiag(Loc, DiagId: diag::note_constexpr_call_limit_exceeded);
      return false;
    }
    if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
      return true;
    FFDiag(Loc, DiagId: diag::note_constexpr_depth_limit_exceeded)
      << getLangOpts().ConstexprCallDepth;
    return false;
  }

  /// Check that an array of ElemCount elements (whose size expression has
  /// the given bit width) can be represented and initialized within our
  /// resource limits. Diagnoses (if Diag is set) and returns false if not.
  bool CheckArraySize(SourceLocation Loc, unsigned BitWidth,
                      uint64_t ElemCount, bool Diag) {
    // FIXME: GH63562
    // APValue stores array extents as unsigned,
    // so anything that is greater that unsigned would overflow when
    // constructing the array, we catch this here.
    if (BitWidth > ConstantArrayType::getMaxSizeBits(Context: Ctx) ||
        ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) {
      if (Diag)
        FFDiag(Loc, DiagId: diag::note_constexpr_new_too_large) << ElemCount;
      return false;
    }

    // FIXME: GH63562
    // Arrays allocate an APValue per element.
    // We use the number of constexpr steps as a proxy for the maximum size
    // of arrays to avoid exhausting the system resources, as initialization
    // of each element is likely to take some number of steps anyway.
    uint64_t Limit = getLangOpts().ConstexprStepLimit;
    if (Limit != 0 && ElemCount > Limit) {
      if (Diag)
        FFDiag(Loc, DiagId: diag::note_constexpr_new_exceeds_limits)
            << ElemCount << Limit;
      return false;
    }
    return true;
  }

  /// Find the call frame with the given index, along with its depth in the
  /// call stack. Returns {nullptr, 0} if no frame with that index is live.
  std::pair<CallStackFrame *, unsigned>
    getCallFrameAndDepth(unsigned CallIndex) {
    assert(CallIndex && "no call index in getCallFrameAndDepth");
    // We will eventually hit BottomFrame, which has Index 1, so Frame can't
    // be null in this loop.
    unsigned Depth = CallStackDepth;
    CallStackFrame *Frame = CurrentCall;
    while (Frame->Index > CallIndex) {
      Frame = Frame->Caller;
      --Depth;
    }
    if (Frame->Index == CallIndex)
      return {Frame, Depth};
    return {nullptr, 0};
  }

  /// Account for one evaluation step (roughly, one statement). Diagnoses
  /// and returns false once the -fconstexpr-steps budget is exhausted;
  /// a limit of 0 means "unlimited".
  bool nextStep(const Stmt *S) {
    if (getLangOpts().ConstexprStepLimit == 0)
      return true;

    if (!StepsLeft) {
      FFDiag(Loc: S->getBeginLoc(), DiagId: diag::note_constexpr_step_limit_exceeded);
      return false;
    }
    --StepsLeft;
    return true;
  }

  /// Allocate storage for a dynamic (heap) allocation of type T performed
  /// by expression E, and point LV at it. Defined out of line.
  APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);

  /// Look up a still-live dynamic allocation; returns std::nullopt if the
  /// allocation has already been deleted (or never existed).
  std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) {
    std::optional<DynAlloc *> Result;
    auto It = HeapAllocs.find(x: DA);
    if (It != HeapAllocs.end())
      Result = &It->second;
    return Result;
  }

  /// Get the allocated storage for the given parameter of the given call.
  APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) {
    CallStackFrame *Frame = getCallFrameAndDepth(CallIndex: Call.CallIndex).first;
    return Frame ? Frame->getTemporary(Key: Call.getOrigParam(PVD), Version: Call.Version)
                 : nullptr;
  }

  /// Information about a stack frame for std::allocator<T>::[de]allocate.
  struct StdAllocatorCaller {
    unsigned FrameIndex;
    QualType ElemType;
    const Expr *Call;
    // A FrameIndex of 0 means "no such caller was found".
    explicit operator bool() const { return FrameIndex != 0; };
  };

  /// Walk the call stack looking for a call to std::allocator<T>::FnName;
  /// used to permit new/delete-like behavior inside std::allocator during
  /// constant evaluation.
  StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
    for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
         Call = Call->Caller) {
      const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Val: Call->Callee);
      if (!MD)
        continue;
      const IdentifierInfo *FnII = MD->getIdentifier();
      if (!FnII || !FnII->isStr(Str: FnName))
        continue;

      const auto *CTSD =
          dyn_cast<ClassTemplateSpecializationDecl>(Val: MD->getParent());
      if (!CTSD)
        continue;

      const IdentifierInfo *ClassII = CTSD->getIdentifier();
      const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
      if (CTSD->isInStdNamespace() && ClassII &&
          ClassII->isStr(Str: "allocator") && TAL.size() >= 1 &&
          TAL[0].getKind() == TemplateArgument::Type)
        return {.FrameIndex: Call->Index, .ElemType: TAL[0].getAsType(), .Call: Call->CallExpr};
    }

    return {};
  }

  void performLifetimeExtension() {
    // Disable the cleanups for lifetime-extended temporaries.
    llvm::erase_if(C&: CleanupStack, P: [](Cleanup &C) {
      return !C.isDestroyedAtEndOf(K: ScopeKind::FullExpression);
    });
  }

  /// Throw away any remaining cleanups at the end of evaluation. If any
  /// cleanups would have had a side-effect, note that as an unmodeled
  /// side-effect and return false. Otherwise, return true.
  bool discardCleanups() {
    for (Cleanup &C : CleanupStack) {
      if (C.hasSideEffect() && !noteSideEffect()) {
        CleanupStack.clear();
        return false;
      }
    }
    CleanupStack.clear();
    return true;
  }

private:
  // interp::State virtual interface, shared with the bytecode interpreter.
  const interp::Frame *getCurrentFrame() override { return CurrentCall; }
  const interp::Frame *getBottomFrame() const override { return &BottomFrame; }

  unsigned getCallStackDepth() override { return CallStackDepth; }
  bool stepsLeft() const override { return StepsLeft > 0; }

public:
  /// Notes that we failed to evaluate an expression that other expressions
  /// directly depend on, and determine if we should keep evaluating. This
  /// should only be called if we actually intend to keep evaluating.
  ///
  /// Call noteSideEffect() instead if we may be able to ignore the value that
  /// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
  ///
  /// (Foo(), 1)      // use noteSideEffect
  /// (Foo() || true) // use noteSideEffect
  /// Foo() + 1       // use noteFailure
  [[nodiscard]] bool noteFailure() {
    // Failure when evaluating some expression often means there is some
    // subexpression whose evaluation was skipped. Therefore, (because we
    // don't track whether we skipped an expression when unwinding after an
    // evaluation failure) every evaluation failure that bubbles up from a
    // subexpression implies that a side-effect has potentially happened. We
    // skip setting the HasSideEffects flag to true until we decide to
    // continue evaluating after that point, which happens here.
    bool KeepGoing = keepEvaluatingAfterFailure();
    EvalStatus.HasSideEffects |= KeepGoing;
    return KeepGoing;
  }

  /// RAII object that establishes a fresh ArrayInitIndex (starting at 0)
  /// for the duration of an ArrayInitLoopExpr, restoring the enclosing
  /// index (if any) on exit. Converts to a mutable reference to the
  /// current index so callers can increment it.
  class ArrayInitLoopIndex {
    EvalInfo &Info;
    uint64_t OuterIndex;

  public:
    ArrayInitLoopIndex(EvalInfo &Info)
        : Info(Info), OuterIndex(Info.ArrayInitIndex) {
      Info.ArrayInitIndex = 0;
    }
    ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }

    operator uint64_t&() { return Info.ArrayInitIndex; }
  };
};
1131
/// Object used to treat all foldable expressions as constant expressions.
///
/// While enabled, switches the evaluator into ConstantFold mode. On
/// destruction, if folding produced diagnostics where previously there were
/// none (and no side-effects occurred), the diagnostics are discarded so
/// that a successful fold is not reported as a failure.
struct FoldConstant {
  EvalInfo &Info;
  bool Enabled;
  // True if the diagnostic buffer existed and was empty (with no
  // side-effects) when we started; only then may we clear it later.
  bool HadNoPriorDiags;
  EvaluationMode OldMode;

  explicit FoldConstant(EvalInfo &Info, bool Enabled)
    : Info(Info),
      Enabled(Enabled),
      HadNoPriorDiags(Info.EvalStatus.Diag &&
                      Info.EvalStatus.Diag->empty() &&
                      !Info.EvalStatus.HasSideEffects),
      OldMode(Info.EvalMode) {
    if (Enabled)
      Info.EvalMode = EvaluationMode::ConstantFold;
  }
  /// Keep any diagnostics produced during folding instead of dropping them.
  void keepDiagnostics() { Enabled = false; }
  ~FoldConstant() {
    // Note: HadNoPriorDiags implies EvalStatus.Diag is non-null, so the
    // dereference below is safe.
    if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
        !Info.EvalStatus.HasSideEffects)
      Info.EvalStatus.Diag->clear();
    Info.EvalMode = OldMode;
  }
};
1157
/// RAII object used to set the current evaluation mode to ignore
/// side-effects.
struct IgnoreSideEffectsRAII {
  EvalInfo &Info;
  // Mode to restore when this object goes out of scope.
  EvaluationMode OldMode;
  explicit IgnoreSideEffectsRAII(EvalInfo &Info)
      : Info(Info), OldMode(Info.EvalMode) {
    Info.EvalMode = EvaluationMode::IgnoreSideEffects;
  }

  ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
};
1170
/// RAII object used to optionally suppress diagnostics and side-effects from
/// a speculative evaluation.
///
/// Saves the current EvalStatus and speculative-evaluation depth on entry
/// and restores them on destruction. Move-only: moving transfers the
/// responsibility for restoration to the destination object.
class SpeculativeEvaluationRAII {
  // Null when this object is inactive (default-constructed or moved-from),
  // in which case destruction restores nothing.
  EvalInfo *Info = nullptr;
  Expr::EvalStatus OldStatus;
  unsigned OldSpeculativeEvaluationDepth = 0;

  // Take over Other's saved state and deactivate Other so only one of the
  // two objects performs the restore.
  void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
    Info = Other.Info;
    OldStatus = Other.OldStatus;
    OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
    Other.Info = nullptr;
  }

  void maybeRestoreState() {
    if (!Info)
      return;

    Info->EvalStatus = OldStatus;
    Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
  }

public:
  SpeculativeEvaluationRAII() = default;

  /// Begin a speculative evaluation: diagnostics are redirected into
  /// NewDiag (or discarded if null), and the speculative depth is set to
  /// just below the current call so state outside it cannot be mutated.
  SpeculativeEvaluationRAII(
      EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
      : Info(&Info), OldStatus(Info.EvalStatus),
        OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
    Info.EvalStatus.Diag = NewDiag;
    Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
  }

  SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
  SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
    moveFromAndCancel(Other: std::move(Other));
  }

  SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
    maybeRestoreState();
    moveFromAndCancel(Other: std::move(Other));
    return *this;
  }

  ~SpeculativeEvaluationRAII() { maybeRestoreState(); }
};
1217
/// RAII object wrapping a full-expression or block scope, and handling
/// the ending of the lifetime of temporaries created within it.
///
/// Which cleanups run at scope exit is determined by the Kind template
/// parameter (block, full-expression, or call scope); cleanups belonging to
/// an enclosing scope kind are retained on the stack.
template<ScopeKind Kind>
class ScopeRAII {
  EvalInfo &Info;
  // Cleanup-stack size at scope entry; everything pushed above this belongs
  // to this scope. Set to UINT_MAX once destroy() has run so the destructor
  // does not run cleanups twice.
  unsigned OldStackSize;
public:
  ScopeRAII(EvalInfo &Info)
      : Info(Info), OldStackSize(Info.CleanupStack.size()) {
    // Push a new temporary version. This is needed to distinguish between
    // temporaries created in different iterations of a loop.
    Info.CurrentCall->pushTempVersion();
  }
  /// Explicitly end this scope, optionally running destructors for the
  /// temporaries created within it. Returns false on evaluation failure.
  bool destroy(bool RunDestructors = true) {
    bool OK = cleanup(Info, RunDestructors, OldStackSize);
    OldStackSize = std::numeric_limits<unsigned>::max();
    return OK;
  }
  ~ScopeRAII() {
    // If destroy() was never called, discard the scope's cleanups without
    // running destructors (we are unwinding after a failure).
    if (OldStackSize != std::numeric_limits<unsigned>::max())
      destroy(RunDestructors: false);
    // Body moved to a static method to encourage the compiler to inline away
    // instances of this class.
    Info.CurrentCall->popTempVersion();
  }
private:
  static bool cleanup(EvalInfo &Info, bool RunDestructors,
                      unsigned OldStackSize) {
    assert(OldStackSize <= Info.CleanupStack.size() &&
           "running cleanups out of order?");

    // Run all cleanups for a block scope, and non-lifetime-extended cleanups
    // for a full-expression scope.
    bool Success = true;
    for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
      if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(K: Kind)) {
        if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
          Success = false;
          break;
        }
      }
    }

    // Compact any retained cleanups.
    auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
    if (Kind != ScopeKind::Block)
      NewEnd =
          std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) {
            return C.isDestroyedAtEndOf(K: Kind);
          });
    Info.CleanupStack.erase(CS: NewEnd, CE: Info.CleanupStack.end());
    return Success;
  }
};
typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII;
typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII;
typedef ScopeRAII<ScopeKind::Call> CallScopeRAII;
1275}
1276
1277bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
1278 CheckSubobjectKind CSK) {
1279 if (Invalid)
1280 return false;
1281 if (isOnePastTheEnd()) {
1282 Info.CCEDiag(E, DiagId: diag::note_constexpr_past_end_subobject)
1283 << CSK;
1284 setInvalid();
1285 return false;
1286 }
1287 // Note, we do not diagnose if isMostDerivedAnUnsizedArray(), because there
1288 // must actually be at least one array element; even a VLA cannot have a
1289 // bound of zero. And if our index is nonzero, we already had a CCEDiag.
1290 return true;
1291}
1292
/// Note that pointer arithmetic was performed on a pointer into an array of
/// unknown bound, which we cannot bounds-check.
void SubobjectDesignator::diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info,
                                                                const Expr *E) {
  Info.CCEDiag(E, DiagId: diag::note_constexpr_unsized_array_indexed);
  // Do not set the designator as invalid: we can represent this situation,
  // and correct handling of __builtin_object_size requires us to do so.
}
1299
1300void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
1301 const Expr *E,
1302 const APSInt &N) {
1303 // If we're complaining, we must be able to statically determine the size of
1304 // the most derived array.
1305 if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
1306 Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index)
1307 << N << /*array*/ 0
1308 << static_cast<unsigned>(getMostDerivedArraySize());
1309 else
1310 Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index)
1311 << N << /*non-array*/ 1;
1312 setInvalid();
1313}
1314
/// Push a new frame onto the constexpr call stack: links to the previous
/// frame, assigns a fresh call index, and makes this the current call.
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceRange CallRange,
                               const FunctionDecl *Callee, const LValue *This,
                               const Expr *CallExpr, CallRef Call)
    : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
      CallExpr(CallExpr), Arguments(Call), CallRange(CallRange),
      Index(Info.NextCallIndex++) {
  Info.CurrentCall = this;
  ++Info.CallStackDepth;
}
1324
/// Pop this frame off the constexpr call stack. Frames must be destroyed in
/// strict LIFO order.
CallStackFrame::~CallStackFrame() {
  assert(Info.CurrentCall == this && "calls retired out of order");
  --Info.CallStackDepth;
  Info.CurrentCall = Caller;
}
1330
1331static bool isRead(AccessKinds AK) {
1332 return AK == AK_Read || AK == AK_ReadObjectRepresentation ||
1333 AK == AK_IsWithinLifetime || AK == AK_Dereference;
1334}
1335
1336static bool isModification(AccessKinds AK) {
1337 switch (AK) {
1338 case AK_Read:
1339 case AK_ReadObjectRepresentation:
1340 case AK_MemberCall:
1341 case AK_DynamicCast:
1342 case AK_TypeId:
1343 case AK_IsWithinLifetime:
1344 case AK_Dereference:
1345 return false;
1346 case AK_Assign:
1347 case AK_Increment:
1348 case AK_Decrement:
1349 case AK_Construct:
1350 case AK_Destroy:
1351 return true;
1352 }
1353 llvm_unreachable("unknown access kind");
1354}
1355
1356static bool isAnyAccess(AccessKinds AK) {
1357 return isRead(AK) || isModification(AK);
1358}
1359
1360/// Is this an access per the C++ definition?
1361static bool isFormalAccess(AccessKinds AK) {
1362 return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy &&
1363 AK != AK_IsWithinLifetime && AK != AK_Dereference;
1364}
1365
1366/// Is this kind of access valid on an indeterminate object value?
1367static bool isValidIndeterminateAccess(AccessKinds AK) {
1368 switch (AK) {
1369 case AK_Read:
1370 case AK_Increment:
1371 case AK_Decrement:
1372 case AK_Dereference:
1373 // These need the object's value.
1374 return false;
1375
1376 case AK_IsWithinLifetime:
1377 case AK_ReadObjectRepresentation:
1378 case AK_Assign:
1379 case AK_Construct:
1380 case AK_Destroy:
1381 // Construction and destruction don't need the value.
1382 return true;
1383
1384 case AK_MemberCall:
1385 case AK_DynamicCast:
1386 case AK_TypeId:
1387 // These aren't really meaningful on scalars.
1388 return true;
1389 }
1390 llvm_unreachable("unknown access kind");
1391}
1392
1393namespace {
  /// A partially-evaluated _Complex value: either a pair of APSInts or a
  /// pair of APFloats, discriminated by IsInt. Mirrors the complex-int /
  /// complex-float forms of APValue.
  struct ComplexValue {
  private:
    // Discriminator: true for integer components, false for floating-point.
    // Uninitialized until makeComplexInt()/makeComplexFloat() is called.
    bool IsInt;

  public:
    APSInt IntReal, IntImag;
    APFloat FloatReal, FloatImag;

    // APFloat has no default constructor; Bogus() gives a placeholder
    // semantics that is overwritten before use.
    ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}

    void makeComplexFloat() { IsInt = false; }
    bool isComplexFloat() const { return !IsInt; }
    APFloat &getComplexFloatReal() { return FloatReal; }
    APFloat &getComplexFloatImag() { return FloatImag; }

    void makeComplexInt() { IsInt = true; }
    bool isComplexInt() const { return IsInt; }
    APSInt &getComplexIntReal() { return IntReal; }
    APSInt &getComplexIntImag() { return IntImag; }

    /// Copy this value into an APValue of the corresponding complex kind.
    void moveInto(APValue &v) const {
      if (isComplexFloat())
        v = APValue(FloatReal, FloatImag);
      else
        v = APValue(IntReal, IntImag);
    }
    /// Initialize this value from a complex APValue.
    void setFrom(const APValue &v) {
      assert(v.isComplexFloat() || v.isComplexInt());
      if (v.isComplexFloat()) {
        makeComplexFloat();
        FloatReal = v.getComplexFloatReal();
        FloatImag = v.getComplexFloatImag();
      } else {
        makeComplexInt();
        IntReal = v.getComplexIntReal();
        IntImag = v.getComplexIntImag();
      }
    }
  };
1433
  /// An lvalue being evaluated: a base object plus a byte offset and a
  /// designator describing the path to the designated subobject.
  struct LValue {
    APValue::LValueBase Base;
    CharUnits Offset;
    SubobjectDesignator Designator;
    bool IsNullPtr : 1;
    // True if Base is one of the limited invalid-base forms we tolerate
    // (see set()); such lvalues cannot be stored into an APValue.
    bool InvalidBase : 1;
    // P2280R4 track if we have an unknown reference or pointer.
    bool AllowConstexprUnknown = false;

    const APValue::LValueBase getLValueBase() const { return Base; }
    bool allowConstexprUnknown() const { return AllowConstexprUnknown; }
    CharUnits &getLValueOffset() { return Offset; }
    const CharUnits &getLValueOffset() const { return Offset; }
    SubobjectDesignator &getLValueDesignator() { return Designator; }
    const SubobjectDesignator &getLValueDesignator() const { return Designator;}
    bool isNullPointer() const { return IsNullPtr;}

    unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
    unsigned getLValueVersion() const { return Base.getVersion(); }

    /// Copy this lvalue into an APValue (with or without an lvalue path,
    /// depending on whether the designator is still valid).
    void moveInto(APValue &V) const {
      if (Designator.Invalid)
        V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
      else {
        assert(!InvalidBase && "APValues can't handle invalid LValue bases");
        V = APValue(Base, Offset, Designator.Entries,
                    Designator.IsOnePastTheEnd, IsNullPtr);
      }
      if (AllowConstexprUnknown)
        V.setConstexprUnknown();
    }
    /// Initialize this lvalue from an lvalue APValue.
    void setFrom(const ASTContext &Ctx, const APValue &V) {
      assert(V.isLValue() && "Setting LValue from a non-LValue?");
      Base = V.getLValueBase();
      Offset = V.getLValueOffset();
      InvalidBase = false;
      Designator = SubobjectDesignator(Ctx, V);
      IsNullPtr = V.isNullPointer();
      AllowConstexprUnknown = V.allowConstexprUnknown();
    }

    /// Point this lvalue at the start of base object B, resetting the
    /// offset and designator.
    void set(APValue::LValueBase B, bool BInvalid = false) {
#ifndef NDEBUG
      // We only allow a few types of invalid bases. Enforce that here.
      if (BInvalid) {
        const auto *E = B.get<const Expr *>();
        assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
               "Unexpected type of invalid base");
      }
#endif

      Base = B;
      Offset = CharUnits::fromQuantity(Quantity: 0);
      InvalidBase = BInvalid;
      Designator = SubobjectDesignator(getType(B));
      IsNullPtr = false;
      AllowConstexprUnknown = false;
    }

    /// Make this a null pointer of the given pointer type, using the
    /// target's representation of the null pointer value as the offset.
    void setNull(ASTContext &Ctx, QualType PointerTy) {
      Base = (const ValueDecl *)nullptr;
      Offset =
          CharUnits::fromQuantity(Quantity: Ctx.getTargetNullPointerValue(QT: PointerTy));
      InvalidBase = false;
      Designator = SubobjectDesignator(PointerTy->getPointeeType());
      IsNullPtr = true;
      AllowConstexprUnknown = false;
    }

    void setInvalid(APValue::LValueBase B, unsigned I = 0) {
      set(B, BInvalid: true);
    }

    /// Render this lvalue as a string for diagnostics.
    std::string toString(ASTContext &Ctx, QualType T) const {
      APValue Printable;
      moveInto(V&: Printable);
      return Printable.getAsString(Ctx, Ty: T);
    }

  private:
    // Check that this LValue is not based on a null pointer. If it is, produce
    // a diagnostic and mark the designator as invalid.
    template <typename GenDiagType>
    bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
      if (Designator.Invalid)
        return false;
      if (IsNullPtr) {
        GenDiag();
        Designator.setInvalid();
        return false;
      }
      return true;
    }

  public:
    /// Check for a null base, diagnosing a null subobject access of the
    /// given kind on failure.
    bool checkNullPointer(EvalInfo &Info, const Expr *E,
                          CheckSubobjectKind CSK) {
      return checkNullPointerDiagnosingWith(GenDiag: [&Info, E, CSK] {
        Info.CCEDiag(E, DiagId: diag::note_constexpr_null_subobject) << CSK;
      });
    }

    /// Like checkNullPointer, but diagnoses as an invalid access (used when
    /// folding), distinguishing plain dereferences from other access kinds.
    bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
                                       AccessKinds AK) {
      return checkNullPointerDiagnosingWith(GenDiag: [&Info, E, AK] {
        if (AK == AccessKinds::AK_Dereference)
          Info.FFDiag(E, DiagId: diag::note_constexpr_dereferencing_null);
        else
          Info.FFDiag(E, DiagId: diag::note_constexpr_access_null) << AK;
      });
    }

    // Check this LValue refers to an object. If not, set the designator to be
    // invalid and emit a diagnostic.
    bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
      // Array-to-pointer decay is permitted even on a null pointer.
      return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
             Designator.checkSubobject(Info, E, CSK);
    }

    /// Descend into a base class or field of the designated object.
    void addDecl(EvalInfo &Info, const Expr *E,
                 const Decl *D, bool Virtual = false) {
      if (checkSubobject(Info, E, CSK: isa<FieldDecl>(Val: D) ? CSK_Field : CSK_Base))
        Designator.addDeclUnchecked(D, Virtual);
    }
    /// Descend into an array of unknown bound; only supported as the very
    /// first designator entry.
    void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
      if (!Designator.Entries.empty()) {
        Info.CCEDiag(E, DiagId: diag::note_constexpr_unsupported_unsized_array);
        Designator.setInvalid();
        return;
      }
      if (checkSubobject(Info, E, CSK: CSK_ArrayToPointer)) {
        assert(getType(Base).getNonReferenceType()->isPointerType() ||
               getType(Base).getNonReferenceType()->isArrayType());
        Designator.FirstEntryIsAnUnsizedArray = true;
        Designator.addUnsizedArrayUnchecked(ElemTy);
      }
    }
    /// Descend into element 0 of a constant-size array.
    void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
      if (checkSubobject(Info, E, CSK: CSK_ArrayToPointer))
        Designator.addArrayUnchecked(CAT);
    }
    /// Descend into the real or imaginary part of a _Complex value.
    void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
      if (checkSubobject(Info, E, CSK: Imag ? CSK_Imag : CSK_Real))
        Designator.addComplexUnchecked(EltTy, Imag);
    }
    /// Descend into element Idx of a vector of the given size.
    void addVectorElement(EvalInfo &Info, const Expr *E, QualType EltTy,
                          uint64_t Size, uint64_t Idx) {
      if (checkSubobject(Info, E, CSK: CSK_VectorElement))
        Designator.addVectorElementUnchecked(EltTy, Size, Idx);
    }
    void clearIsNullPointer() {
      IsNullPtr = false;
    }
    /// Advance this lvalue by Index elements of the given size, adjusting
    /// both the byte offset and the designator's array index.
    void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
                              const APSInt &Index, CharUnits ElementSize) {
      // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
      // but we're not required to diagnose it and it's valid in C++.)
      if (!Index)
        return;

      // Compute the new offset in the appropriate width, wrapping at 64 bits.
      // FIXME: When compiling for a 32-bit target, we should use 32-bit
      // offsets.
      uint64_t Offset64 = Offset.getQuantity();
      uint64_t ElemSize64 = ElementSize.getQuantity();
      uint64_t Index64 = Index.extOrTrunc(width: 64).getZExtValue();
      Offset = CharUnits::fromQuantity(Quantity: Offset64 + ElemSize64 * Index64);

      if (checkNullPointer(Info, E, CSK: CSK_ArrayIndex))
        Designator.adjustIndex(Info, E, N: Index, LV: *this);
      clearIsNullPointer();
    }
    /// Advance the byte offset by N; a nonzero adjustment means the result
    /// can no longer be a null pointer.
    void adjustOffset(CharUnits N) {
      Offset += N;
      if (N.getQuantity())
        clearIsNullPointer();
    }
  };
1612
  /// A member pointer value being evaluated: the referenced declaration plus
  /// the base/derived path along which the pointer has been cast.
  struct MemberPtr {
    MemberPtr() {}
    explicit MemberPtr(const ValueDecl *Decl)
        : DeclAndIsDerivedMember(Decl, false) {}

    /// The member or (direct or indirect) field referred to by this member
    /// pointer, or 0 if this is a null member pointer.
    const ValueDecl *getDecl() const {
      return DeclAndIsDerivedMember.getPointer();
    }
    /// Is this actually a member of some type derived from the relevant class?
    bool isDerivedMember() const {
      return DeclAndIsDerivedMember.getInt();
    }
    /// Get the class which the declaration actually lives in.
    const CXXRecordDecl *getContainingRecord() const {
      return cast<CXXRecordDecl>(
          Val: DeclAndIsDerivedMember.getPointer()->getDeclContext());
    }

    /// Copy this member pointer into an APValue.
    void moveInto(APValue &V) const {
      V = APValue(getDecl(), isDerivedMember(), Path);
    }
    /// Initialize this member pointer from a member-pointer APValue.
    void setFrom(const APValue &V) {
      assert(V.isMemberPointer());
      DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
      DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
      Path.clear();
      llvm::append_range(C&: Path, R: V.getMemberPointerPath());
    }

    /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
    /// whether the member is a member of some class derived from the class type
    /// of the member pointer.
    llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
    /// Path - The path of base/derived classes from the member declaration's
    /// class (exclusive) to the class type of the member pointer (inclusive).
    SmallVector<const CXXRecordDecl*, 4> Path;

    /// Perform a cast towards the class of the Decl (either up or down the
    /// hierarchy). Pops the last path entry after checking that it matches
    /// the expected class; returns false if the cast is undefined.
    bool castBack(const CXXRecordDecl *Class) {
      assert(!Path.empty());
      const CXXRecordDecl *Expected;
      if (Path.size() >= 2)
        Expected = Path[Path.size() - 2];
      else
        Expected = getContainingRecord();
      if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
        // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
        // if B does not contain the original member and is not a base or
        // derived class of the class containing the original member, the result
        // of the cast is undefined.
        // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
        // (D::*). We consider that to be a language defect.
        return false;
      }
      Path.pop_back();
      return true;
    }
    /// Perform a base-to-derived member pointer cast.
    bool castToDerived(const CXXRecordDecl *Derived) {
      if (!getDecl())
        return true;
      if (!isDerivedMember()) {
        Path.push_back(Elt: Derived);
        return true;
      }
      if (!castBack(Class: Derived))
        return false;
      // Once the path is empty we're back at the member's own class, so the
      // "derived member" flag no longer applies.
      if (Path.empty())
        DeclAndIsDerivedMember.setInt(false);
      return true;
    }
    /// Perform a derived-to-base member pointer cast.
    bool castToBase(const CXXRecordDecl *Base) {
      if (!getDecl())
        return true;
      if (Path.empty())
        DeclAndIsDerivedMember.setInt(true);
      if (isDerivedMember()) {
        Path.push_back(Elt: Base);
        return true;
      }
      return castBack(Class: Base);
    }
  };
1700
1701 /// Compare two member pointers, which are assumed to be of the same type.
1702 static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
1703 if (!LHS.getDecl() || !RHS.getDecl())
1704 return !LHS.getDecl() && !RHS.getDecl();
1705 if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
1706 return false;
1707 return LHS.Path == RHS.Path;
1708 }
1709}
1710
/// Advance the designator's innermost array index by N, diagnosing and
/// invalidating the designator if the result would be out of bounds.
void SubobjectDesignator::adjustIndex(EvalInfo &Info, const Expr *E, APSInt N,
                                      const LValue &LV) {
  if (Invalid || !N)
    return;
  uint64_t TruncatedN = N.extOrTrunc(width: 64).getZExtValue();
  if (isMostDerivedAnUnsizedArray()) {
    diagnoseUnsizedArrayPointerArithmetic(Info, E);
    // Can't verify -- trust that the user is doing the right thing (or if
    // not, trust that the caller will catch the bad behavior).
    // FIXME: Should we reject if this overflows, at least?
    Entries.back() =
        PathEntry::ArrayIndex(Index: Entries.back().getAsArrayIndex() + TruncatedN);
    return;
  }

  // [expr.add]p4: For the purposes of these operators, a pointer to a
  // nonarray object behaves the same as a pointer to the first element of
  // an array of length one with the type of the object as its element type.
  bool IsArray =
      MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement;
  uint64_t ArrayIndex =
      IsArray ? Entries.back().getAsArrayIndex() : (uint64_t)IsOnePastTheEnd;
  uint64_t ArraySize = IsArray ? getMostDerivedArraySize() : (uint64_t)1;

  // Out of bounds: an index below the start of the array or past the
  // one-past-the-end position invalidates the designator.
  if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
    if (!Info.checkingPotentialConstantExpression() ||
        !LV.AllowConstexprUnknown) {
      // Calculate the actual index in a wide enough type, so we can include
      // it in the note.
      N = N.extend(width: std::max<unsigned>(a: N.getBitWidth() + 1, b: 65));
      (llvm::APInt &)N += ArrayIndex;
      assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
      diagnosePointerArithmetic(Info, E, N);
    }
    setInvalid();
    return;
  }

  ArrayIndex += TruncatedN;
  assert(ArrayIndex <= ArraySize &&
         "bounds check succeeded for out-of-bounds index");

  // For a non-array object, the only representable positions are the object
  // itself (index 0) and one past it.
  if (IsArray)
    Entries.back() = PathEntry::ArrayIndex(Index: ArrayIndex);
  else
    IsOnePastTheEnd = (ArrayIndex != 0);
}
1758
1759static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
1760static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
1761 const LValue &This, const Expr *E,
1762 bool AllowNonLiteralTypes = false);
1763static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
1764 bool InvalidBaseOK = false);
1765static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
1766 bool InvalidBaseOK = false);
1767static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
1768 EvalInfo &Info);
1769static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
1770static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
1771static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
1772 EvalInfo &Info);
1773static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
1774static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
1775static bool EvaluateMatrix(const Expr *E, APValue &Result, EvalInfo &Info);
1776static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
1777 EvalInfo &Info);
1778static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
1779static std::optional<uint64_t>
1780EvaluateBuiltinStrLen(const Expr *E, EvalInfo &Info,
1781 std::string *StringResult = nullptr);
1782
1783/// Evaluate an integer or fixed point expression into an APResult.
1784static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
1785 EvalInfo &Info);
1786
1787/// Evaluate only a fixed point expression into an APResult.
1788static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
1789 EvalInfo &Info);
1790
1791//===----------------------------------------------------------------------===//
1792// Misc utilities
1793//===----------------------------------------------------------------------===//
1794
1795/// Negate an APSInt in place, converting it to a signed form if necessary, and
1796/// preserving its value (by extending by up to one bit as needed).
1797static void negateAsSigned(APSInt &Int) {
1798 if (Int.isUnsigned() || Int.isMinSignedValue()) {
1799 Int = Int.extend(width: Int.getBitWidth() + 1);
1800 Int.setIsSigned(true);
1801 }
1802 Int = -Int;
1803}
1804
/// Create storage in this frame for a temporary identified by Key, of type
/// T, destroyed at the end of the given scope. LV is set to refer to the new
/// temporary. Each creation gets a fresh version number, so repeated
/// creations with the same Key (e.g. across loop iterations) are distinct.
template<typename KeyT>
APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
                                         ScopeKind Scope, LValue &LV) {
  unsigned Version = getTempVersion();
  APValue::LValueBase Base(Key, Index, Version);
  LV.set(B: Base);
  return createLocal(Base, Key, T, Scope);
}
1813
/// Allocate storage for a parameter of a function call made in this frame.
/// LV is set to designate the parameter's storage; the returned APValue is
/// the (initially absent) slot the argument value will be stored into.
APValue &CallStackFrame::createParam(CallRef Args, const ParmVarDecl *PVD,
                                     LValue &LV) {
  assert(Args.CallIndex == Index && "creating parameter in wrong frame");
  APValue::LValueBase Base(PVD, Index, Args.Version);
  LV.set(B: Base);
  // We always destroy parameters at the end of the call, even if we'd allow
  // them to live to the end of the full-expression at runtime, in order to
  // give portable results and match other compilers.
  return createLocal(Base, Key: PVD, T: PVD->getType(), Scope: ScopeKind::Call);
}
1825
/// Allocate storage in this frame for a local whose lvalue base has already
/// been formed, and (normally) register a cleanup so it is destroyed when
/// its scope ends. Returns the (initially absent) value slot.
APValue &CallStackFrame::createLocal(APValue::LValueBase Base, const void *Key,
                                     QualType T, ScopeKind Scope) {
  assert(Base.getCallIndex() == Index && "lvalue for wrong frame");
  unsigned Version = Base.getVersion();
  APValue &Result = Temporaries[MapKeyTy(Key, Version)];
  assert(Result.isAbsent() && "local created multiple times");

  // If we're creating a local immediately in the operand of a speculative
  // evaluation, don't register a cleanup to be run outside the speculative
  // evaluation context, since we won't actually be able to initialize this
  // object.
  if (Index <= Info.SpeculativeEvaluationDepth) {
    // The destructor we would have run counts as a skipped side effect.
    if (T.isDestructedType())
      Info.noteSideEffect();
  } else {
    Info.CleanupStack.push_back(Elt: Cleanup(&Result, Base, T, Scope));
  }
  return Result;
}
1845
1846APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
1847 if (NumHeapAllocs > DynamicAllocLValue::getMaxIndex()) {
1848 FFDiag(E, DiagId: diag::note_constexpr_heap_alloc_limit_exceeded);
1849 return nullptr;
1850 }
1851
1852 DynamicAllocLValue DA(NumHeapAllocs++);
1853 LV.set(B: APValue::LValueBase::getDynamicAlloc(LV: DA, Type: T));
1854 auto Result = HeapAllocs.emplace(args: std::piecewise_construct,
1855 args: std::forward_as_tuple(args&: DA), args: std::tuple<>());
1856 assert(Result.second && "reused a heap alloc index?");
1857 Result.first->second.AllocExpr = E;
1858 return &Result.first->second.Value;
1859}
1860
/// Produce a string describing the given constexpr call, in the form
/// "object.name(arg1, arg2, ...)" for member calls or "name(...)" otherwise.
void CallStackFrame::describe(raw_ostream &Out) const {
  bool IsMemberCall = false;
  bool ExplicitInstanceParam = false;
  if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: Callee)) {
    // Constructors and static member functions are printed like free
    // functions; other member functions get the "object." prefix.
    IsMemberCall = !isa<CXXConstructorDecl>(Val: MD) && !MD->isStatic();
    ExplicitInstanceParam = MD->isExplicitObjectMemberFunction();
  }

  if (!IsMemberCall)
    Callee->getNameForDiagnostic(OS&: Out, Policy: Info.Ctx.getPrintingPolicy(),
                                 /*Qualified=*/false);

  if (This && IsMemberCall) {
    if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(Val: CallExpr)) {
      // Print the implicit object expression as written, then "->" or "."
      // depending on whether it was a pointer.
      const Expr *Object = MCE->getImplicitObjectArgument();
      Object->printPretty(OS&: Out, /*Helper=*/nullptr, Policy: Info.Ctx.getPrintingPolicy(),
                          /*Indentation=*/0);
      if (Object->getType()->isPointerType())
        Out << "->";
      else
        Out << ".";
    } else if (const auto *OCE =
                   dyn_cast_if_present<CXXOperatorCallExpr>(Val: CallExpr)) {
      // For an operator call, the object expression is the first argument.
      OCE->getArg(Arg: 0)->printPretty(OS&: Out, /*Helper=*/nullptr,
                                   Policy: Info.Ctx.getPrintingPolicy(),
                                   /*Indentation=*/0);
      Out << ".";
    } else {
      // No usable call expression; fall back to printing the evaluated
      // value of the 'this' object itself.
      APValue Val;
      This->moveInto(V&: Val);
      Val.printPretty(
          OS&: Out, Ctx: Info.Ctx,
          Ty: Info.Ctx.getLValueReferenceType(T: This->Designator.MostDerivedType));
      Out << ".";
    }
    Callee->getNameForDiagnostic(OS&: Out, Policy: Info.Ctx.getPrintingPolicy(),
                                 /*Qualified=*/false);
  }

  Out << '(';

  // Print each argument value; the explicit object parameter (if any) is
  // skipped, since the object was printed before the function name.
  llvm::ListSeparator Comma;
  for (const ParmVarDecl *Param :
       Callee->parameters().slice(N: ExplicitInstanceParam)) {
    Out << Comma;
    const APValue *V = Info.getParamSlot(Call: Arguments, PVD: Param);
    if (V)
      V->printPretty(OS&: Out, Ctx: Info.Ctx, Ty: Param->getType());
    else
      // The argument's value is unavailable (e.g. its slot has been reused).
      Out << "<...>";
  }

  Out << ')';
}
1916
1917/// Evaluate an expression to see if it had side-effects, and discard its
1918/// result.
1919/// \return \c true if the caller should keep evaluating.
1920static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
1921 assert(!E->isValueDependent());
1922 APValue Scratch;
1923 if (!Evaluate(Result&: Scratch, Info, E))
1924 // We don't need the value, but we might have skipped a side effect here.
1925 return Info.noteSideEffect();
1926 return true;
1927}
1928
1929/// Should this call expression be treated as forming an opaque constant?
1930static bool IsOpaqueConstantCall(const CallExpr *E) {
1931 unsigned Builtin = E->getBuiltinCallee();
1932 return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
1933 Builtin == Builtin::BI__builtin___NSStringMakeConstantString ||
1934 Builtin == Builtin::BI__builtin_ptrauth_sign_constant ||
1935 Builtin == Builtin::BI__builtin_function_start);
1936}
1937
1938static bool IsOpaqueConstantCall(const LValue &LVal) {
1939 const auto *BaseExpr =
1940 llvm::dyn_cast_if_present<CallExpr>(Val: LVal.Base.dyn_cast<const Expr *>());
1941 return BaseExpr && IsOpaqueConstantCall(E: BaseExpr);
1942}
1943
/// Determine whether this lvalue base is a permitted base for an address
/// constant expression: a null base, or an entity with static storage
/// duration (or one of the listed extensions).
static bool IsGlobalLValue(APValue::LValueBase B) {
  // C++11 [expr.const]p3 An address constant expression is a prvalue core
  // constant expression of pointer type that evaluates to...

  // ... a null pointer value, or a prvalue core constant expression of type
  // std::nullptr_t.
  if (!B)
    return true;

  if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
    // ... the address of an object with static storage duration,
    if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
      return VD->hasGlobalStorage();
    if (isa<TemplateParamObjectDecl>(Val: D))
      return true;
    // ... the address of a function,
    // ... the address of a GUID [MS extension],
    // ... the address of an unnamed global constant
    return isa<FunctionDecl, MSGuidDecl, UnnamedGlobalConstantDecl>(Val: D);
  }

  // typeid(T) objects and dynamic allocations count as globals here.
  if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
    return true;

  // Otherwise the base is an expression; only certain expression kinds can
  // denote storage that outlives the evaluation.
  const Expr *E = B.get<const Expr*>();
  switch (E->getStmtClass()) {
  default:
    return false;
  case Expr::CompoundLiteralExprClass: {
    // Only a file-scope compound literal has static storage duration.
    const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(Val: E);
    return CLE->isFileScope() && CLE->isLValue();
  }
  case Expr::MaterializeTemporaryExprClass:
    // A materialized temporary might have been lifetime-extended to static
    // storage duration.
    return cast<MaterializeTemporaryExpr>(Val: E)->getStorageDuration() == SD_Static;
  // A string literal has static storage duration.
  case Expr::StringLiteralClass:
  case Expr::PredefinedExprClass:
  case Expr::ObjCStringLiteralClass:
  case Expr::ObjCEncodeExprClass:
    return true;
  case Expr::ObjCBoxedExprClass:
    return cast<ObjCBoxedExpr>(Val: E)->isExpressibleAsConstantInitializer();
  case Expr::CallExprClass:
    return IsOpaqueConstantCall(E: cast<CallExpr>(Val: E));
  // For GCC compatibility, &&label has static storage duration.
  case Expr::AddrLabelExprClass:
    return true;
  // A Block literal expression may be used as the initialization value for
  // Block variables at global or local static scope.
  case Expr::BlockExprClass:
    return !cast<BlockExpr>(Val: E)->getBlockDecl()->hasCaptures();
  // The APValue generated from a __builtin_source_location will be emitted as a
  // literal.
  case Expr::SourceLocExprClass:
    return true;
  case Expr::ImplicitValueInitExprClass:
    // FIXME:
    // We can never form an lvalue with an implicit value initialization as its
    // base through expression evaluation, so these only appear in one case: the
    // implicit variable declaration we invent when checking whether a constexpr
    // constructor can produce a constant expression. We must assume that such
    // an expression might be a global lvalue.
    return true;
  }
}
2011
/// Return the declaration forming the base of this lvalue, or null if its
/// base is not a declaration (e.g. a temporary, a string literal, or a
/// dynamic allocation).
static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
  return LVal.Base.dyn_cast<const ValueDecl*>();
}
2015
// Information about an LValueBase that is some kind of string.
struct LValueBaseString {
  // Owning storage, used only when the string has to be computed on demand
  // (the @encode case in GetLValueBaseAsString).
  std::string ObjCEncodeStorage;
  // The string's bytes; may refer into ObjCEncodeStorage or into the AST's
  // string literal data.
  StringRef Bytes;
  // Width of a single character of the string, in bytes.
  int CharWidth;
};
2022
2023// Gets the lvalue base of LVal as a string.
2024static bool GetLValueBaseAsString(const EvalInfo &Info, const LValue &LVal,
2025 LValueBaseString &AsString) {
2026 const auto *BaseExpr = LVal.Base.dyn_cast<const Expr *>();
2027 if (!BaseExpr)
2028 return false;
2029
2030 // For ObjCEncodeExpr, we need to compute and store the string.
2031 if (const auto *EE = dyn_cast<ObjCEncodeExpr>(Val: BaseExpr)) {
2032 Info.Ctx.getObjCEncodingForType(T: EE->getEncodedType(),
2033 S&: AsString.ObjCEncodeStorage);
2034 AsString.Bytes = AsString.ObjCEncodeStorage;
2035 AsString.CharWidth = 1;
2036 return true;
2037 }
2038
2039 // Otherwise, we have a StringLiteral.
2040 const auto *Lit = dyn_cast<StringLiteral>(Val: BaseExpr);
2041 if (const auto *PE = dyn_cast<PredefinedExpr>(Val: BaseExpr))
2042 Lit = PE->getFunctionName();
2043
2044 if (!Lit)
2045 return false;
2046
2047 AsString.Bytes = Lit->getBytes();
2048 AsString.CharWidth = Lit->getCharByteWidth();
2049 return true;
2050}
2051
// Determine whether two string literals potentially overlap. This will be the
// case if they agree on the values of all the bytes on the overlapping region
// between them.
//
// The overlapping region is the portion of the two string literals that must
// overlap in memory if the pointers actually point to the same address at
// runtime. For example, if LHS is "abcdef" + 3 and RHS is "cdef\0gh" + 1 then
// the overlapping region is "cdef\0", which in this case does agree, so the
// strings are potentially overlapping. Conversely, for "foobar" + 3 versus
// "bazbar" + 3, the overlapping region contains all of both strings, so they
// are not potentially overlapping, even though they agree from the given
// addresses onwards.
//
// See open core issue CWG2765 which is discussing the desired rule here.
static bool ArePotentiallyOverlappingStringLiterals(const EvalInfo &Info,
                                                    const LValue &LHS,
                                                    const LValue &RHS) {
  LValueBaseString LHSString, RHSString;
  // Only string-like bases can overlap in this sense.
  if (!GetLValueBaseAsString(Info, LVal: LHS, AsString&: LHSString) ||
      !GetLValueBaseAsString(Info, LVal: RHS, AsString&: RHSString))
    return false;

  // This is the byte offset to the location of the first character of LHS
  // within RHS. We don't need to look at the characters of one string that
  // would appear before the start of the other string if they were merged.
  CharUnits Offset = RHS.Offset - LHS.Offset;
  if (Offset.isNegative()) {
    // LHS starts earlier; drop its bytes that precede RHS's start.
    if (LHSString.Bytes.size() < (size_t)-Offset.getQuantity())
      return false;
    LHSString.Bytes = LHSString.Bytes.drop_front(N: -Offset.getQuantity());
  } else {
    // RHS starts earlier (or at the same place); drop its leading bytes.
    if (RHSString.Bytes.size() < (size_t)Offset.getQuantity())
      return false;
    RHSString.Bytes = RHSString.Bytes.drop_front(N: Offset.getQuantity());
  }

  bool LHSIsLonger = LHSString.Bytes.size() > RHSString.Bytes.size();
  StringRef Longer = LHSIsLonger ? LHSString.Bytes : RHSString.Bytes;
  StringRef Shorter = LHSIsLonger ? RHSString.Bytes : LHSString.Bytes;
  int ShorterCharWidth = (LHSIsLonger ? RHSString : LHSString).CharWidth;

  // The null terminator isn't included in the string data, so check for it
  // manually. If the longer string doesn't have a null terminator where the
  // shorter string ends, they aren't potentially overlapping.
  for (int NullByte : llvm::seq(Size: ShorterCharWidth)) {
    if (Shorter.size() + NullByte >= Longer.size())
      break;
    if (Longer[Shorter.size() + NullByte])
      return false;
  }

  // Otherwise, they're potentially overlapping if and only if the overlapping
  // region is the same.
  return Shorter == Longer.take_front(N: Shorter.size());
}
2107
2108static bool IsWeakLValue(const LValue &Value) {
2109 const ValueDecl *Decl = GetLValueBaseDecl(LVal: Value);
2110 return Decl && Decl->isWeak();
2111}
2112
2113static bool isZeroSized(const LValue &Value) {
2114 const ValueDecl *Decl = GetLValueBaseDecl(LVal: Value);
2115 if (isa_and_nonnull<VarDecl>(Val: Decl)) {
2116 QualType Ty = Decl->getType();
2117 if (Ty->isArrayType())
2118 return Ty->isIncompleteType() ||
2119 Decl->getASTContext().getTypeSize(T: Ty) == 0;
2120 }
2121 return false;
2122}
2123
2124static bool HasSameBase(const LValue &A, const LValue &B) {
2125 if (!A.getLValueBase())
2126 return !B.getLValueBase();
2127 if (!B.getLValueBase())
2128 return false;
2129
2130 if (A.getLValueBase().getOpaqueValue() !=
2131 B.getLValueBase().getOpaqueValue())
2132 return false;
2133
2134 return A.getLValueCallIndex() == B.getLValueCallIndex() &&
2135 A.getLValueVersion() == B.getLValueVersion();
2136}
2137
/// Emit a note pointing at where the given lvalue base was declared or
/// created: the declaration for a variable or parameter, the expression for
/// a temporary, or the new-expression for a dynamic allocation.
static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
  assert(Base && "no location for a null lvalue");
  const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();

  // For a parameter, find the corresponding call stack frame (if it still
  // exists), and point at the parameter of the function definition we actually
  // invoked.
  if (auto *PVD = dyn_cast_or_null<ParmVarDecl>(Val: VD)) {
    unsigned Idx = PVD->getFunctionScopeIndex();
    for (CallStackFrame *F = Info.CurrentCall; F; F = F->Caller) {
      if (F->Arguments.CallIndex == Base.getCallIndex() &&
          F->Arguments.Version == Base.getVersion() && F->Callee &&
          Idx < F->Callee->getNumParams()) {
        VD = F->Callee->getParamDecl(i: Idx);
        break;
      }
    }
  }

  if (VD)
    Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
  else if (const Expr *E = Base.dyn_cast<const Expr*>())
    Info.Note(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_temporary_here);
  else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
    // FIXME: Produce a note for dangling pointers too.
    if (std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA))
      Info.Note(Loc: (*Alloc)->AllocExpr->getExprLoc(),
                DiagId: diag::note_constexpr_dynamic_alloc_here);
  }

  // We have no information to show for a typeid(T) object.
}
2170
/// The kind of checking CheckEvaluationResult performs: whether the value
/// must be a full constant expression, or merely fully initialized.
enum class CheckEvaluationResultKind {
  ConstantExpression,
  FullyInitialized,
};
2175
/// Materialized temporaries that we've already checked to determine if they're
/// initialized by a constant expression.
2178using CheckedTemporaries =
2179 llvm::SmallPtrSet<const MaterializeTemporaryExpr *, 8>;
2180
2181static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
2182 EvalInfo &Info, SourceLocation DiagLoc,
2183 QualType Type, const APValue &Value,
2184 ConstantExprKind Kind,
2185 const FieldDecl *SubobjectDecl,
2186 CheckedTemporaries &CheckedTemps);
2187
2188/// Check that this reference or pointer core constant expression is a valid
2189/// value for an address or reference constant expression. Return true if we
2190/// can fold this expression, whether or not it's a constant expression.
2191static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
2192 QualType Type, const LValue &LVal,
2193 ConstantExprKind Kind,
2194 CheckedTemporaries &CheckedTemps) {
2195 bool IsReferenceType = Type->isReferenceType();
2196
2197 APValue::LValueBase Base = LVal.getLValueBase();
2198 const SubobjectDesignator &Designator = LVal.getLValueDesignator();
2199
2200 const Expr *BaseE = Base.dyn_cast<const Expr *>();
2201 const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl*>();
2202
2203 // Additional restrictions apply in a template argument. We only enforce the
2204 // C++20 restrictions here; additional syntactic and semantic restrictions
2205 // are applied elsewhere.
2206 if (isTemplateArgument(Kind)) {
2207 int InvalidBaseKind = -1;
2208 StringRef Ident;
2209 if (Base.is<TypeInfoLValue>())
2210 InvalidBaseKind = 0;
2211 else if (isa_and_nonnull<StringLiteral>(Val: BaseE))
2212 InvalidBaseKind = 1;
2213 else if (isa_and_nonnull<MaterializeTemporaryExpr>(Val: BaseE) ||
2214 isa_and_nonnull<LifetimeExtendedTemporaryDecl>(Val: BaseVD))
2215 InvalidBaseKind = 2;
2216 else if (auto *PE = dyn_cast_or_null<PredefinedExpr>(Val: BaseE)) {
2217 InvalidBaseKind = 3;
2218 Ident = PE->getIdentKindName();
2219 }
2220
2221 if (InvalidBaseKind != -1) {
2222 Info.FFDiag(Loc, DiagId: diag::note_constexpr_invalid_template_arg)
2223 << IsReferenceType << !Designator.Entries.empty() << InvalidBaseKind
2224 << Ident;
2225 return false;
2226 }
2227 }
2228
2229 if (auto *FD = dyn_cast_or_null<FunctionDecl>(Val: BaseVD);
2230 FD && FD->isImmediateFunction()) {
2231 Info.FFDiag(Loc, DiagId: diag::note_consteval_address_accessible)
2232 << !Type->isAnyPointerType();
2233 Info.Note(Loc: FD->getLocation(), DiagId: diag::note_declared_at);
2234 return false;
2235 }
2236
2237 // Check that the object is a global. Note that the fake 'this' object we
2238 // manufacture when checking potential constant expressions is conservatively
2239 // assumed to be global here.
2240 if (!IsGlobalLValue(B: Base)) {
2241 if (Info.getLangOpts().CPlusPlus11) {
2242 Info.FFDiag(Loc, DiagId: diag::note_constexpr_non_global, ExtraNotes: 1)
2243 << IsReferenceType << !Designator.Entries.empty() << !!BaseVD
2244 << BaseVD;
2245 auto *VarD = dyn_cast_or_null<VarDecl>(Val: BaseVD);
2246 if (VarD && VarD->isConstexpr()) {
2247 // Non-static local constexpr variables have unintuitive semantics:
2248 // constexpr int a = 1;
2249 // constexpr const int *p = &a;
2250 // ... is invalid because the address of 'a' is not constant. Suggest
2251 // adding a 'static' in this case.
2252 Info.Note(Loc: VarD->getLocation(), DiagId: diag::note_constexpr_not_static)
2253 << VarD
2254 << FixItHint::CreateInsertion(InsertionLoc: VarD->getBeginLoc(), Code: "static ");
2255 } else {
2256 NoteLValueLocation(Info, Base);
2257 }
2258 } else {
2259 Info.FFDiag(Loc);
2260 }
2261 // Don't allow references to temporaries to escape.
2262 return false;
2263 }
2264 assert((Info.checkingPotentialConstantExpression() ||
2265 LVal.getLValueCallIndex() == 0) &&
2266 "have call index for global lvalue");
2267
2268 if (LVal.allowConstexprUnknown()) {
2269 if (BaseVD) {
2270 Info.FFDiag(Loc, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << BaseVD;
2271 NoteLValueLocation(Info, Base);
2272 } else {
2273 Info.FFDiag(Loc);
2274 }
2275 return false;
2276 }
2277
2278 if (Base.is<DynamicAllocLValue>()) {
2279 Info.FFDiag(Loc, DiagId: diag::note_constexpr_dynamic_alloc)
2280 << IsReferenceType << !Designator.Entries.empty();
2281 NoteLValueLocation(Info, Base);
2282 return false;
2283 }
2284
2285 if (BaseVD) {
2286 if (const VarDecl *Var = dyn_cast<const VarDecl>(Val: BaseVD)) {
2287 // Check if this is a thread-local variable.
2288 if (Var->getTLSKind())
2289 // FIXME: Diagnostic!
2290 return false;
2291
2292 // A dllimport variable never acts like a constant, unless we're
2293 // evaluating a value for use only in name mangling, and unless it's a
2294 // static local. For the latter case, we'd still need to evaluate the
2295 // constant expression in case we're inside a (inlined) function.
2296 if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>() &&
2297 !Var->isStaticLocal())
2298 return false;
2299
2300 // In CUDA/HIP device compilation, only device side variables have
2301 // constant addresses.
2302 if (Info.getLangOpts().CUDA && Info.getLangOpts().CUDAIsDevice &&
2303 Info.Ctx.CUDAConstantEvalCtx.NoWrongSidedVars) {
2304 if ((!Var->hasAttr<CUDADeviceAttr>() &&
2305 !Var->hasAttr<CUDAConstantAttr>() &&
2306 !Var->getType()->isCUDADeviceBuiltinSurfaceType() &&
2307 !Var->getType()->isCUDADeviceBuiltinTextureType()) ||
2308 Var->hasAttr<HIPManagedAttr>())
2309 return false;
2310 }
2311 }
2312 if (const auto *FD = dyn_cast<const FunctionDecl>(Val: BaseVD)) {
2313 // __declspec(dllimport) must be handled very carefully:
2314 // We must never initialize an expression with the thunk in C++.
2315 // Doing otherwise would allow the same id-expression to yield
2316 // different addresses for the same function in different translation
2317 // units. However, this means that we must dynamically initialize the
2318 // expression with the contents of the import address table at runtime.
2319 //
2320 // The C language has no notion of ODR; furthermore, it has no notion of
2321 // dynamic initialization. This means that we are permitted to
2322 // perform initialization with the address of the thunk.
2323 if (Info.getLangOpts().CPlusPlus && !isForManglingOnly(Kind) &&
2324 FD->hasAttr<DLLImportAttr>())
2325 // FIXME: Diagnostic!
2326 return false;
2327 }
2328 } else if (const auto *MTE =
2329 dyn_cast_or_null<MaterializeTemporaryExpr>(Val: BaseE)) {
2330 if (CheckedTemps.insert(Ptr: MTE).second) {
2331 QualType TempType = getType(B: Base);
2332 if (TempType.isDestructedType()) {
2333 Info.FFDiag(Loc: MTE->getExprLoc(),
2334 DiagId: diag::note_constexpr_unsupported_temporary_nontrivial_dtor)
2335 << TempType;
2336 return false;
2337 }
2338
2339 APValue *V = MTE->getOrCreateValue(MayCreate: false);
2340 assert(V && "evasluation result refers to uninitialised temporary");
2341 if (!CheckEvaluationResult(CERK: CheckEvaluationResultKind::ConstantExpression,
2342 Info, DiagLoc: MTE->getExprLoc(), Type: TempType, Value: *V, Kind,
2343 /*SubobjectDecl=*/nullptr, CheckedTemps))
2344 return false;
2345 }
2346 }
2347
2348 // Allow address constant expressions to be past-the-end pointers. This is
2349 // an extension: the standard requires them to point to an object.
2350 if (!IsReferenceType)
2351 return true;
2352
2353 // A reference constant expression must refer to an object.
2354 if (!Base) {
2355 // FIXME: diagnostic
2356 Info.CCEDiag(Loc);
2357 return true;
2358 }
2359
2360 // Does this refer one past the end of some object?
2361 if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
2362 Info.FFDiag(Loc, DiagId: diag::note_constexpr_past_end, ExtraNotes: 1)
2363 << !Designator.Entries.empty() << !!BaseVD << BaseVD;
2364 NoteLValueLocation(Info, Base);
2365 }
2366
2367 return true;
2368}
2369
2370/// Member pointers are constant expressions unless they point to a
2371/// non-virtual dllimport member function.
2372static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
2373 SourceLocation Loc,
2374 QualType Type,
2375 const APValue &Value,
2376 ConstantExprKind Kind) {
2377 const ValueDecl *Member = Value.getMemberPointerDecl();
2378 const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Val: Member);
2379 if (!FD)
2380 return true;
2381 if (FD->isImmediateFunction()) {
2382 Info.FFDiag(Loc, DiagId: diag::note_consteval_address_accessible) << /*pointer*/ 0;
2383 Info.Note(Loc: FD->getLocation(), DiagId: diag::note_declared_at);
2384 return false;
2385 }
2386 return isForManglingOnly(Kind) || FD->isVirtual() ||
2387 !FD->hasAttr<DLLImportAttr>();
2388}
2389
2390/// Check that this core constant expression is of literal type, and if not,
2391/// produce an appropriate diagnostic.
2392static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
2393 const LValue *This = nullptr) {
2394 // The restriction to literal types does not exist in C++23 anymore.
2395 if (Info.getLangOpts().CPlusPlus23)
2396 return true;
2397
2398 if (!E->isPRValue() || E->getType()->isLiteralType(Ctx: Info.Ctx))
2399 return true;
2400
2401 // C++1y: A constant initializer for an object o [...] may also invoke
2402 // constexpr constructors for o and its subobjects even if those objects
2403 // are of non-literal class types.
2404 //
2405 // C++11 missed this detail for aggregates, so classes like this:
2406 // struct foo_t { union { int i; volatile int j; } u; };
2407 // are not (obviously) initializable like so:
2408 // __attribute__((__require_constant_initialization__))
2409 // static const foo_t x = {{0}};
2410 // because "i" is a subobject with non-literal initialization (due to the
2411 // volatile member of the union). See:
2412 // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677
2413 // Therefore, we use the C++1y behavior.
2414 if (This && Info.EvaluatingDecl == This->getLValueBase())
2415 return true;
2416
2417 // Prvalue constant expressions must be of literal types.
2418 if (Info.getLangOpts().CPlusPlus11)
2419 Info.FFDiag(E, DiagId: diag::note_constexpr_nonliteral)
2420 << E->getType();
2421 else
2422 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
2423 return false;
2424}
2425
/// Recursively check an evaluated value: it must be fully initialized
/// (including all subobjects of arrays, unions, and classes), and -- when
/// checking a full constant expression -- any lvalues and member pointers it
/// contains must themselves be valid constant-expression values.
static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
                                  EvalInfo &Info, SourceLocation DiagLoc,
                                  QualType Type, const APValue &Value,
                                  ConstantExprKind Kind,
                                  const FieldDecl *SubobjectDecl,
                                  CheckedTemporaries &CheckedTemps) {
  if (!Value.hasValue()) {
    // Uninitialized (sub)object; name the field if we know which one.
    if (SubobjectDecl) {
      Info.FFDiag(Loc: DiagLoc, DiagId: diag::note_constexpr_uninitialized)
          << /*(name)*/ 1 << SubobjectDecl;
      Info.Note(Loc: SubobjectDecl->getLocation(),
                DiagId: diag::note_constexpr_subobject_declared_here);
    } else {
      Info.FFDiag(Loc: DiagLoc, DiagId: diag::note_constexpr_uninitialized)
          << /*of type*/ 0 << Type;
    }
    return false;
  }

  // We allow _Atomic(T) to be initialized from anything that T can be
  // initialized from.
  if (const AtomicType *AT = Type->getAs<AtomicType>())
    Type = AT->getValueType();

  // Core issue 1454: For a literal constant expression of array or class type,
  // each subobject of its value shall have been initialized by a constant
  // expression.
  if (Value.isArray()) {
    QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
    // Check each explicitly-initialized element, then the filler (the value
    // shared by all remaining elements), if any.
    for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
      if (!CheckEvaluationResult(CERK, Info, DiagLoc, Type: EltTy,
                                 Value: Value.getArrayInitializedElt(I), Kind,
                                 SubobjectDecl, CheckedTemps))
        return false;
    }
    if (!Value.hasArrayFiller())
      return true;
    return CheckEvaluationResult(CERK, Info, DiagLoc, Type: EltTy,
                                 Value: Value.getArrayFiller(), Kind, SubobjectDecl,
                                 CheckedTemps);
  }
  if (Value.isUnion() && Value.getUnionField()) {
    // Only the active member of a union needs checking.
    return CheckEvaluationResult(
        CERK, Info, DiagLoc, Type: Value.getUnionField()->getType(),
        Value: Value.getUnionValue(), Kind, SubobjectDecl: Value.getUnionField(), CheckedTemps);
  }
  if (Value.isStruct()) {
    auto *RD = Type->castAsRecordDecl();
    if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      // Check each base-class subobject before the fields.
      unsigned BaseIndex = 0;
      for (const CXXBaseSpecifier &BS : CD->bases()) {
        const APValue &BaseValue = Value.getStructBase(i: BaseIndex);
        if (!BaseValue.hasValue()) {
          SourceLocation TypeBeginLoc = BS.getBaseTypeLoc();
          Info.FFDiag(Loc: TypeBeginLoc, DiagId: diag::note_constexpr_uninitialized_base)
              << BS.getType() << SourceRange(TypeBeginLoc, BS.getEndLoc());
          return false;
        }
        if (!CheckEvaluationResult(CERK, Info, DiagLoc, Type: BS.getType(), Value: BaseValue,
                                   Kind, /*SubobjectDecl=*/nullptr,
                                   CheckedTemps))
          return false;
        ++BaseIndex;
      }
    }
    for (const auto *I : RD->fields()) {
      // Unnamed bit-fields are not subobjects and need no initializer.
      if (I->isUnnamedBitField())
        continue;

      if (!CheckEvaluationResult(CERK, Info, DiagLoc, Type: I->getType(),
                                 Value: Value.getStructField(i: I->getFieldIndex()), Kind,
                                 SubobjectDecl: I, CheckedTemps))
        return false;
    }
  }

  if (Value.isLValue() &&
      CERK == CheckEvaluationResultKind::ConstantExpression) {
    LValue LVal;
    LVal.setFrom(Ctx: Info.Ctx, V: Value);
    return CheckLValueConstantExpression(Info, Loc: DiagLoc, Type, LVal, Kind,
                                         CheckedTemps);
  }

  if (Value.isMemberPointer() &&
      CERK == CheckEvaluationResultKind::ConstantExpression)
    return CheckMemberPointerConstantExpression(Info, Loc: DiagLoc, Type, Value, Kind);

  // Everything else is fine.
  return true;
}
2517
2518/// Check that this core constant expression value is a valid value for a
2519/// constant expression. If not, report an appropriate diagnostic. Does not
2520/// check that the expression is of literal type.
2521static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
2522 QualType Type, const APValue &Value,
2523 ConstantExprKind Kind) {
2524 // Nothing to check for a constant expression of type 'cv void'.
2525 if (Type->isVoidType())
2526 return true;
2527
2528 CheckedTemporaries CheckedTemps;
2529 return CheckEvaluationResult(CERK: CheckEvaluationResultKind::ConstantExpression,
2530 Info, DiagLoc, Type, Value, Kind,
2531 /*SubobjectDecl=*/nullptr, CheckedTemps);
2532}
2533
2534/// Check that this evaluated value is fully-initialized and can be loaded by
2535/// an lvalue-to-rvalue conversion.
2536static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
2537 QualType Type, const APValue &Value) {
2538 CheckedTemporaries CheckedTemps;
2539 return CheckEvaluationResult(
2540 CERK: CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
2541 Kind: ConstantExprKind::Normal, /*SubobjectDecl=*/nullptr, CheckedTemps);
2542}
2543
2544/// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
2545/// "the allocated storage is deallocated within the evaluation".
2546static bool CheckMemoryLeaks(EvalInfo &Info) {
2547 if (!Info.HeapAllocs.empty()) {
2548 // We can still fold to a constant despite a compile-time memory leak,
2549 // so long as the heap allocation isn't referenced in the result (we check
2550 // that in CheckConstantExpression).
2551 Info.CCEDiag(E: Info.HeapAllocs.begin()->second.AllocExpr,
2552 DiagId: diag::note_constexpr_memory_leak)
2553 << unsigned(Info.HeapAllocs.size() - 1);
2554 }
2555 return true;
2556}
2557
2558static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
2559 // A null base expression indicates a null pointer. These are always
2560 // evaluatable, and they are false unless the offset is zero.
2561 if (!Value.getLValueBase()) {
2562 // TODO: Should a non-null pointer with an offset of zero evaluate to true?
2563 Result = !Value.getLValueOffset().isZero();
2564 return true;
2565 }
2566
2567 // We have a non-null base. These are generally known to be true, but if it's
2568 // a weak declaration it can be null at runtime.
2569 Result = true;
2570 const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
2571 return !Decl || !Decl->isWeak();
2572}
2573
/// Perform a contextual conversion of the evaluated value \p Val to bool.
/// Returns false when the value has no determinable truth value (it is
/// absent, indeterminate, a weak member pointer, or an aggregate).
static bool HandleConversionToBool(const APValue &Val, bool &Result) {
  // TODO: This function should produce notes if it fails.
  switch (Val.getKind()) {
  case APValue::None:
  case APValue::Indeterminate:
    // No value at all; nothing to convert.
    return false;
  case APValue::Int:
    Result = Val.getInt().getBoolValue();
    return true;
  case APValue::FixedPoint:
    Result = Val.getFixedPoint().getBoolValue();
    return true;
  case APValue::Float:
    Result = !Val.getFloat().isZero();
    return true;
  case APValue::ComplexInt:
    // A complex value is true if either component is non-zero.
    Result = Val.getComplexIntReal().getBoolValue() ||
             Val.getComplexIntImag().getBoolValue();
    return true;
  case APValue::ComplexFloat:
    Result = !Val.getComplexFloatReal().isZero() ||
             !Val.getComplexFloatImag().isZero();
    return true;
  case APValue::LValue:
    return EvalPointerValueAsBool(Value: Val, Result);
  case APValue::MemberPointer:
    // A weak member may be null at runtime, so its truth value is unknown.
    if (Val.getMemberPointerDecl() && Val.getMemberPointerDecl()->isWeak()) {
      return false;
    }
    // A member pointer is true iff it points at an actual member.
    Result = Val.getMemberPointerDecl();
    return true;
  case APValue::Vector:
  case APValue::Matrix:
  case APValue::Array:
  case APValue::Struct:
  case APValue::Union:
  case APValue::AddrLabelDiff:
    // Aggregate-like values have no boolean conversion.
    return false;
  }

  llvm_unreachable("unknown APValue kind");
}
2616
2617static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
2618 EvalInfo &Info) {
2619 assert(!E->isValueDependent());
2620 assert(E->isPRValue() && "missing lvalue-to-rvalue conv in bool condition");
2621 APValue Val;
2622 if (!Evaluate(Result&: Val, Info, E))
2623 return false;
2624 return HandleConversionToBool(Val, Result);
2625}
2626
2627template<typename T>
2628static bool HandleOverflow(EvalInfo &Info, const Expr *E,
2629 const T &SrcValue, QualType DestType) {
2630 Info.CCEDiag(E, DiagId: diag::note_constexpr_overflow) << SrcValue << DestType;
2631 if (const auto *OBT = DestType->getAs<OverflowBehaviorType>();
2632 OBT && OBT->isTrapKind()) {
2633 return false;
2634 }
2635 return Info.noteUndefinedBehavior();
2636}
2637
2638static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
2639 QualType SrcType, const APFloat &Value,
2640 QualType DestType, APSInt &Result) {
2641 unsigned DestWidth = Info.Ctx.getIntWidth(T: DestType);
2642 // Determine whether we are converting to unsigned or signed.
2643 bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
2644
2645 Result = APSInt(DestWidth, !DestSigned);
2646 bool ignored;
2647 if (Value.convertToInteger(Result, RM: llvm::APFloat::rmTowardZero, IsExact: &ignored)
2648 & APFloat::opInvalidOp)
2649 return HandleOverflow(Info, E, SrcValue: Value, DestType);
2650 return true;
2651}
2652
2653/// Get rounding mode to use in evaluation of the specified expression.
2654///
2655/// If rounding mode is unknown at compile time, still try to evaluate the
2656/// expression. If the result is exact, it does not depend on rounding mode.
2657/// So return "tonearest" mode instead of "dynamic".
2658static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) {
2659 llvm::RoundingMode RM =
2660 E->getFPFeaturesInEffect(LO: Info.getLangOpts()).getRoundingMode();
2661 if (RM == llvm::RoundingMode::Dynamic)
2662 RM = llvm::RoundingMode::NearestTiesToEven;
2663 return RM;
2664}
2665
/// Check if the given evaluation result is allowed for constant evaluation.
///
/// \param St - The APFloat status flags produced by the operation.
/// \returns false if the strict floating-point semantics in effect for \p E
/// (dynamic rounding, non-ignored exceptions, or FENV access) make the
/// result unusable at compile time.
static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
                                     APFloat::opStatus St) {
  // In a constant context, assume that any dynamic rounding mode or FP
  // exception state matches the default floating-point environment.
  if (Info.InConstantContext)
    return true;

  FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.getLangOpts());
  if ((St & APFloat::opInexact) &&
      FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
    // Inexact result means that it depends on rounding mode. If the requested
    // mode is dynamic, the evaluation cannot be made in compile time.
    Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_rounding);
    return false;
  }

  // Any non-OK status under strict FP semantics (dynamic rounding, observed
  // exceptions, or FENV access) prevents compile-time evaluation.
  if ((St != APFloat::opOK) &&
      (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
       FPO.getExceptionMode() != LangOptions::FPE_Ignore ||
       FPO.getAllowFEnvAccess())) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict);
    return false;
  }

  if ((St & APFloat::opStatus::opInvalidOp) &&
      FPO.getExceptionMode() != LangOptions::FPE_Ignore) {
    // There is no usefully definable result.
    Info.FFDiag(E);
    return false;
  }

  // FIXME: if:
  // - evaluation triggered other FP exception, and
  // - exception mode is not "ignore", and
  // - the expression being evaluated is not a part of global variable
  //   initializer,
  // the evaluation probably need to be rejected.
  return true;
}
2706
2707static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
2708 QualType SrcType, QualType DestType,
2709 APFloat &Result) {
2710 assert((isa<CastExpr>(E) || isa<CompoundAssignOperator>(E) ||
2711 isa<ConvertVectorExpr>(E)) &&
2712 "HandleFloatToFloatCast has been checked with only CastExpr, "
2713 "CompoundAssignOperator and ConvertVectorExpr. Please either validate "
2714 "the new expression or address the root cause of this usage.");
2715 llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
2716 APFloat::opStatus St;
2717 APFloat Value = Result;
2718 bool ignored;
2719 St = Result.convert(ToSemantics: Info.Ctx.getFloatTypeSemantics(T: DestType), RM, losesInfo: &ignored);
2720 return checkFloatingPointResult(Info, E, St);
2721}
2722
2723static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
2724 QualType DestType, QualType SrcType,
2725 const APSInt &Value) {
2726 unsigned DestWidth = Info.Ctx.getIntWidth(T: DestType);
2727 // Figure out if this is a truncate, extend or noop cast.
2728 // If the input is signed, do a sign extend, noop, or truncate.
2729 APSInt Result = Value.extOrTrunc(width: DestWidth);
2730 Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
2731 if (DestType->isBooleanType())
2732 Result = Value.getBoolValue();
2733 return Result;
2734}
2735
2736static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
2737 const FPOptions FPO,
2738 QualType SrcType, const APSInt &Value,
2739 QualType DestType, APFloat &Result) {
2740 Result = APFloat(Info.Ctx.getFloatTypeSemantics(T: DestType), 1);
2741 llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
2742 APFloat::opStatus St = Result.convertFromAPInt(Input: Value, IsSigned: Value.isSigned(), RM);
2743 return checkFloatingPointResult(Info, E, St);
2744}
2745
2746static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
2747 APValue &Value, const FieldDecl *FD) {
2748 assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
2749
2750 if (!Value.isInt()) {
2751 // Trying to store a pointer-cast-to-integer into a bitfield.
2752 // FIXME: In this case, we should provide the diagnostic for casting
2753 // a pointer to an integer.
2754 assert(Value.isLValue() && "integral value neither int nor lvalue?");
2755 Info.FFDiag(E);
2756 return false;
2757 }
2758
2759 APSInt &Int = Value.getInt();
2760 unsigned OldBitWidth = Int.getBitWidth();
2761 unsigned NewBitWidth = FD->getBitWidthValue();
2762 if (NewBitWidth < OldBitWidth)
2763 Int = Int.trunc(width: NewBitWidth).extend(width: OldBitWidth);
2764 return true;
2765}
2766
/// Perform the given integer operation, which is known to need at most BitWidth
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
///
/// \param Op - A callable computing the operation on two APSInts.
/// \param Result - Filled in with the result truncated to LHS's width.
/// \returns false if a diagnosed overflow terminates evaluation.
template<typename Operation>
static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
                                 const APSInt &LHS, const APSInt &RHS,
                                 unsigned BitWidth, Operation Op,
                                 APSInt &Result) {
  // Unsigned arithmetic wraps by definition; no overflow check needed.
  if (LHS.isUnsigned()) {
    Result = Op(LHS, RHS);
    return true;
  }

  // Compute in a width wide enough to be exact, then truncate back. If
  // re-extending the truncated result does not reproduce the exact value,
  // the operation overflowed the original type.
  APSInt Value(Op(LHS.extend(width: BitWidth), RHS.extend(width: BitWidth)), false);
  Result = Value.trunc(width: LHS.getBitWidth());
  // Types with wrapping semantics (isWrapType) are exempt from diagnosis.
  if (Result.extend(width: BitWidth) != Value && !E->getType().isWrapType()) {
    // When only checking for UB (not folding), emit the warning directly.
    if (Info.checkingForUndefinedBehavior())
      Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                       DiagID: diag::warn_integer_constant_overflow)
          << toString(I: Result, Radix: 10, Signed: Result.isSigned(), /*formatAsCLiteral=*/false,
                      /*UpperCase=*/true, /*InsertSeparators=*/true)
          << E->getType() << E->getSourceRange();
    return HandleOverflow(Info, E, SrcValue: Value, DestType: E->getType());
  }
  return true;
}
2793
/// Perform the given binary integer operation.
///
/// \param LHS, RHS - The evaluated integer operands.
/// \param Result - Filled in with the computed value on success.
/// \returns false when the operation cannot be folded (unsupported opcode,
/// division by zero, or undefined behavior that terminates evaluation).
///
/// Note the cross-case 'goto's: a negative shift amount is folded as a shift
/// in the opposite direction (while being diagnosed as non-constant), so
/// BO_Shl can jump into the right-shift logic and vice versa.
static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
                              const APSInt &LHS, BinaryOperatorKind Opcode,
                              APSInt RHS, APSInt &Result) {
  bool HandleOverflowResult = true;
  switch (Opcode) {
  default:
    Info.FFDiag(E);
    return false;
  case BO_Mul:
    // Double the width so the product is exact, then check it fits back.
    return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() * 2,
                                Op: std::multiplies<APSInt>(), Result);
  case BO_Add:
    return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() + 1,
                                Op: std::plus<APSInt>(), Result);
  case BO_Sub:
    return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() + 1,
                                Op: std::minus<APSInt>(), Result);
  case BO_And: Result = LHS & RHS; return true;
  case BO_Xor: Result = LHS ^ RHS; return true;
  case BO_Or:  Result = LHS | RHS; return true;
  case BO_Div:
  case BO_Rem:
    if (RHS == 0) {
      Info.FFDiag(E, DiagId: diag::note_expr_divide_by_zero)
          << E->getRHS()->getSourceRange();
      return false;
    }
    // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports
    // this operation and gives the two's complement result.
    if (RHS.isNegative() && RHS.isAllOnes() && LHS.isSigned() &&
        LHS.isMinSignedValue())
      HandleOverflowResult = HandleOverflow(
          Info, E, SrcValue: -LHS.extend(width: LHS.getBitWidth() + 1), DestType: E->getType());
    Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
    return HandleOverflowResult;
  case BO_Shl: {
    if (Info.getLangOpts().OpenCL)
      // OpenCL 6.3j: shift values are effectively % word size of LHS.
      RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
                    static_cast<uint64_t>(LHS.getBitWidth() - 1)),
                    RHS.isUnsigned());
    else if (RHS.isSigned() && RHS.isNegative()) {
      // During constant-folding, a negative shift is an opposite shift. Such
      // a shift is not a constant expression.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHS;
      if (!Info.noteUndefinedBehavior())
        return false;
      RHS = -RHS;
      goto shift_right;
    }
  shift_left:
    // C++11 [expr.shift]p1: Shift width must be less than the bit width of
    // the shifted type.
    unsigned SA = (unsigned) RHS.getLimitedValue(Limit: LHS.getBitWidth()-1);
    if (SA != RHS) {
      // The shift amount was clamped, i.e. it was >= the bit width.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift)
        << RHS << E->getType() << LHS.getBitWidth();
      if (!Info.noteUndefinedBehavior())
        return false;
    } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) {
      // C++11 [expr.shift]p2: A signed left shift must have a non-negative
      // operand, and must not overflow the corresponding unsigned type.
      // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
      // E1 x 2^E2 module 2^N.
      if (LHS.isNegative()) {
        Info.CCEDiag(E, DiagId: diag::note_constexpr_lshift_of_negative) << LHS;
        if (!Info.noteUndefinedBehavior())
          return false;
      } else if (LHS.countl_zero() < SA) {
        // The shift discards set bits, i.e. overflows the unsigned type.
        Info.CCEDiag(E, DiagId: diag::note_constexpr_lshift_discards);
        if (!Info.noteUndefinedBehavior())
          return false;
      }
    }
    Result = LHS << SA;
    return true;
  }
  case BO_Shr: {
    if (Info.getLangOpts().OpenCL)
      // OpenCL 6.3j: shift values are effectively % word size of LHS.
      RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
                    static_cast<uint64_t>(LHS.getBitWidth() - 1)),
                    RHS.isUnsigned());
    else if (RHS.isSigned() && RHS.isNegative()) {
      // During constant-folding, a negative shift is an opposite shift. Such a
      // shift is not a constant expression.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHS;
      if (!Info.noteUndefinedBehavior())
        return false;
      RHS = -RHS;
      goto shift_left;
    }
  shift_right:
    // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
    // shifted type.
    unsigned SA = (unsigned) RHS.getLimitedValue(Limit: LHS.getBitWidth()-1);
    if (SA != RHS) {
      // The shift amount was clamped, i.e. it was >= the bit width.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift)
        << RHS << E->getType() << LHS.getBitWidth();
      if (!Info.noteUndefinedBehavior())
        return false;
    }

    Result = LHS >> SA;
    return true;
  }

  case BO_LT: Result = LHS < RHS; return true;
  case BO_GT: Result = LHS > RHS; return true;
  case BO_LE: Result = LHS <= RHS; return true;
  case BO_GE: Result = LHS >= RHS; return true;
  case BO_EQ: Result = LHS == RHS; return true;
  case BO_NE: Result = LHS != RHS; return true;
  case BO_Cmp:
    llvm_unreachable("BO_Cmp should be handled elsewhere");
  }
}
2912
/// Perform the given binary floating-point operation, in-place, on LHS.
///
/// Uses the rounding mode in effect for \p E. A NaN result is diagnosed as
/// undefined behavior before the operation's status flags are checked.
static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E,
                                  APFloat &LHS, BinaryOperatorKind Opcode,
                                  const APFloat &RHS) {
  llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
  APFloat::opStatus St;
  switch (Opcode) {
  default:
    // Not a foldable floating-point binary operator.
    Info.FFDiag(E);
    return false;
  case BO_Mul:
    St = LHS.multiply(RHS, RM);
    break;
  case BO_Add:
    St = LHS.add(RHS, RM);
    break;
  case BO_Sub:
    St = LHS.subtract(RHS, RM);
    break;
  case BO_Div:
    // [expr.mul]p4:
    //   If the second operand of / or % is zero the behavior is undefined.
    // Note the division is still performed; IEEE gives inf/NaN.
    if (RHS.isZero())
      Info.CCEDiag(E, DiagId: diag::note_expr_divide_by_zero);
    St = LHS.divide(RHS, RM);
    break;
  }

  // [expr.pre]p4:
  //   If during the evaluation of an expression, the result is not
  //   mathematically defined [...], the behavior is undefined.
  // FIXME: C++ rules require us to not conform to IEEE 754 here.
  if (LHS.isNaN()) {
    Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << LHS.isNaN();
    return Info.noteUndefinedBehavior();
  }

  return checkFloatingPointResult(Info, E, St);
}
2952
2953static bool handleLogicalOpForVector(const APInt &LHSValue,
2954 BinaryOperatorKind Opcode,
2955 const APInt &RHSValue, APInt &Result) {
2956 bool LHS = (LHSValue != 0);
2957 bool RHS = (RHSValue != 0);
2958
2959 if (Opcode == BO_LAnd)
2960 Result = LHS && RHS;
2961 else
2962 Result = LHS || RHS;
2963 return true;
2964}
2965static bool handleLogicalOpForVector(const APFloat &LHSValue,
2966 BinaryOperatorKind Opcode,
2967 const APFloat &RHSValue, APInt &Result) {
2968 bool LHS = !LHSValue.isZero();
2969 bool RHS = !RHSValue.isZero();
2970
2971 if (Opcode == BO_LAnd)
2972 Result = LHS && RHS;
2973 else
2974 Result = LHS || RHS;
2975 return true;
2976}
2977
2978static bool handleLogicalOpForVector(const APValue &LHSValue,
2979 BinaryOperatorKind Opcode,
2980 const APValue &RHSValue, APInt &Result) {
2981 // The result is always an int type, however operands match the first.
2982 if (LHSValue.getKind() == APValue::Int)
2983 return handleLogicalOpForVector(LHSValue: LHSValue.getInt(), Opcode,
2984 RHSValue: RHSValue.getInt(), Result);
2985 assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
2986 return handleLogicalOpForVector(LHSValue: LHSValue.getFloat(), Opcode,
2987 RHSValue: RHSValue.getFloat(), Result);
2988}
2989
2990template <typename APTy>
2991static bool
2992handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode,
2993 const APTy &RHSValue, APInt &Result) {
2994 switch (Opcode) {
2995 default:
2996 llvm_unreachable("unsupported binary operator");
2997 case BO_EQ:
2998 Result = (LHSValue == RHSValue);
2999 break;
3000 case BO_NE:
3001 Result = (LHSValue != RHSValue);
3002 break;
3003 case BO_LT:
3004 Result = (LHSValue < RHSValue);
3005 break;
3006 case BO_GT:
3007 Result = (LHSValue > RHSValue);
3008 break;
3009 case BO_LE:
3010 Result = (LHSValue <= RHSValue);
3011 break;
3012 case BO_GE:
3013 Result = (LHSValue >= RHSValue);
3014 break;
3015 }
3016
3017 // The boolean operations on these vector types use an instruction that
3018 // results in a mask of '-1' for the 'truth' value. Ensure that we negate 1
3019 // to -1 to make sure that we produce the correct value.
3020 Result.negate();
3021
3022 return true;
3023}
3024
3025static bool handleCompareOpForVector(const APValue &LHSValue,
3026 BinaryOperatorKind Opcode,
3027 const APValue &RHSValue, APInt &Result) {
3028 // The result is always an int type, however operands match the first.
3029 if (LHSValue.getKind() == APValue::Int)
3030 return handleCompareOpForVectorHelper(LHSValue: LHSValue.getInt(), Opcode,
3031 RHSValue: RHSValue.getInt(), Result);
3032 assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
3033 return handleCompareOpForVectorHelper(LHSValue: LHSValue.getFloat(), Opcode,
3034 RHSValue: RHSValue.getFloat(), Result);
3035}
3036
// Perform binary operations for vector types, in place on the LHS.
//
// Each element pair is dispatched by the element type: integer elements go
// through the logical/comparison/integer helpers; floating elements reuse
// handleFloatFloatBinOp. On success LHSValue is replaced with the result
// vector.
static bool handleVectorVectorBinOp(EvalInfo &Info, const BinaryOperator *E,
                                    BinaryOperatorKind Opcode,
                                    APValue &LHSValue,
                                    const APValue &RHSValue) {
  assert(Opcode != BO_PtrMemD && Opcode != BO_PtrMemI &&
         "Operation not supported on vector types");

  const auto *VT = E->getType()->castAs<VectorType>();
  unsigned NumElements = VT->getNumElements();
  QualType EltTy = VT->getElementType();

  // In the cases (typically C as I've observed) where we aren't evaluating
  // constexpr but are checking for cases where the LHS isn't yet evaluatable,
  // just give up.
  if (!LHSValue.isVector()) {
    assert(LHSValue.isLValue() &&
           "A vector result that isn't a vector OR uncalculated LValue");
    Info.FFDiag(E);
    return false;
  }

  assert(LHSValue.getVectorLength() == NumElements &&
         RHSValue.getVectorLength() == NumElements && "Different vector sizes");

  SmallVector<APValue, 4> ResultElements;

  for (unsigned EltNum = 0; EltNum < NumElements; ++EltNum) {
    APValue LHSElt = LHSValue.getVectorElt(I: EltNum);
    APValue RHSElt = RHSValue.getVectorElt(I: EltNum);

    if (EltTy->isIntegerType()) {
      // The result element carries the element type's width and signedness.
      APSInt EltResult{Info.Ctx.getIntWidth(T: EltTy),
                       EltTy->isUnsignedIntegerType()};
      bool Success = true;

      if (BinaryOperator::isLogicalOp(Opc: Opcode))
        Success = handleLogicalOpForVector(LHSValue: LHSElt, Opcode, RHSValue: RHSElt, Result&: EltResult);
      else if (BinaryOperator::isComparisonOp(Opc: Opcode))
        Success = handleCompareOpForVector(LHSValue: LHSElt, Opcode, RHSValue: RHSElt, Result&: EltResult);
      else
        Success = handleIntIntBinOp(Info, E, LHS: LHSElt.getInt(), Opcode,
                                    RHS: RHSElt.getInt(), Result&: EltResult);

      if (!Success) {
        Info.FFDiag(E);
        return false;
      }
      ResultElements.emplace_back(Args&: EltResult);

    } else if (EltTy->isFloatingType()) {
      assert(LHSElt.getKind() == APValue::Float &&
             RHSElt.getKind() == APValue::Float &&
             "Mismatched LHS/RHS/Result Type");
      // handleFloatFloatBinOp computes in place on a copy of the LHS element.
      APFloat LHSFloat = LHSElt.getFloat();

      if (!handleFloatFloatBinOp(Info, E, LHS&: LHSFloat, Opcode,
                                 RHS: RHSElt.getFloat())) {
        Info.FFDiag(E);
        return false;
      }

      ResultElements.emplace_back(Args&: LHSFloat);
    }
  }

  LHSValue = APValue(ResultElements.data(), ResultElements.size());
  return true;
}
3106
/// Cast an lvalue referring to a base subobject to a derived class, by
/// truncating the lvalue's path to the given length.
///
/// \param TruncatedType - The record type the truncated path designates.
/// \param TruncatedElements - The number of designator entries to keep.
/// Removes the derived-to-base offsets of the dropped path entries so the
/// lvalue's byte offset matches the shortened path.
static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
                               const RecordDecl *TruncatedType,
                               unsigned TruncatedElements) {
  SubobjectDesignator &D = Result.Designator;

  // Check we actually point to a derived class object.
  if (TruncatedElements == D.Entries.size())
    return true;
  assert(TruncatedElements >= D.MostDerivedPathLength &&
         "not casting to a derived class");
  if (!Result.checkSubobject(Info, E, CSK: CSK_Derived))
    return false;

  // Truncate the path to the subobject, and remove any derived-to-base
  // offsets. Walk the entries being dropped, subtracting each base-class
  // offset (virtual or direct) from the lvalue's byte offset.
  const RecordDecl *RD = TruncatedType;
  for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
    if (RD->isInvalidDecl()) return false;
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);
    const CXXRecordDecl *Base = getAsBaseClass(E: D.Entries[I]);
    if (isVirtualBaseClass(E: D.Entries[I]))
      Result.Offset -= Layout.getVBaseClassOffset(VBase: Base);
    else
      Result.Offset -= Layout.getBaseClassOffset(Base);
    RD = Base;
  }
  D.Entries.resize(N: TruncatedElements);
  return true;
}
3137
3138static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
3139 const CXXRecordDecl *Derived,
3140 const CXXRecordDecl *Base,
3141 const ASTRecordLayout *RL = nullptr) {
3142 if (!RL) {
3143 if (Derived->isInvalidDecl()) return false;
3144 RL = &Info.Ctx.getASTRecordLayout(D: Derived);
3145 }
3146
3147 Obj.addDecl(Info, E, D: Base, /*Virtual*/ false);
3148 Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
3149 return true;
3150}
3151
/// Update \p Obj to refer to the base class subobject named by \p Base.
/// Non-virtual bases are handled directly; a virtual base is reached by first
/// casting back to the most-derived object, since a virtual base's offset is
/// only known relative to the most-derived class's layout.
static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
                             const CXXRecordDecl *DerivedDecl,
                             const CXXBaseSpecifier *Base) {
  const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();

  if (!Base->isVirtual())
    return HandleLValueDirectBase(Info, E, Obj, Derived: DerivedDecl, Base: BaseDecl);

  SubobjectDesignator &D = Obj.Designator;
  if (D.Invalid)
    return false;

  // Extract most-derived object and corresponding type.
  // FIXME: After implementing P2280R4 it became possible to get references
  // here. We do MostDerivedType->getAsCXXRecordDecl() in several other
  // locations and if we see crashes in those locations in the future
  // it may make more sense to move this fix into Lvalue::set.
  DerivedDecl = D.MostDerivedType.getNonReferenceType()->getAsCXXRecordDecl();
  if (!CastToDerivedClass(Info, E, Result&: Obj, TruncatedType: DerivedDecl, TruncatedElements: D.MostDerivedPathLength))
    return false;

  // Find the virtual base class.
  if (DerivedDecl->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: DerivedDecl);
  Obj.addDecl(Info, E, D: BaseDecl, /*Virtual*/ true);
  Obj.getLValueOffset() += Layout.getVBaseClassOffset(VBase: BaseDecl);
  return true;
}
3180
3181static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
3182 QualType Type, LValue &Result) {
3183 for (CastExpr::path_const_iterator PathI = E->path_begin(),
3184 PathE = E->path_end();
3185 PathI != PathE; ++PathI) {
3186 if (!HandleLValueBase(Info, E, Obj&: Result, DerivedDecl: Type->getAsCXXRecordDecl(),
3187 Base: *PathI))
3188 return false;
3189 Type = (*PathI)->getType();
3190 }
3191 return true;
3192}
3193
3194/// Cast an lvalue referring to a derived class to a known base subobject.
3195static bool CastToBaseClass(EvalInfo &Info, const Expr *E, LValue &Result,
3196 const CXXRecordDecl *DerivedRD,
3197 const CXXRecordDecl *BaseRD) {
3198 CXXBasePaths Paths(/*FindAmbiguities=*/false,
3199 /*RecordPaths=*/true, /*DetectVirtual=*/false);
3200 if (!DerivedRD->isDerivedFrom(Base: BaseRD, Paths))
3201 llvm_unreachable("Class must be derived from the passed in base class!");
3202
3203 for (CXXBasePathElement &Elem : Paths.front())
3204 if (!HandleLValueBase(Info, E, Obj&: Result, DerivedDecl: Elem.Class, Base: Elem.Base))
3205 return false;
3206 return true;
3207}
3208
3209/// Update LVal to refer to the given field, which must be a member of the type
3210/// currently described by LVal.
3211static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
3212 const FieldDecl *FD,
3213 const ASTRecordLayout *RL = nullptr) {
3214 if (!RL) {
3215 if (FD->getParent()->isInvalidDecl()) return false;
3216 RL = &Info.Ctx.getASTRecordLayout(D: FD->getParent());
3217 }
3218
3219 unsigned I = FD->getFieldIndex();
3220 LVal.addDecl(Info, E, D: FD);
3221 LVal.adjustOffset(N: Info.Ctx.toCharUnitsFromBits(BitSize: RL->getFieldOffset(FieldNo: I)));
3222 return true;
3223}
3224
3225/// Update LVal to refer to the given indirect field.
3226static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
3227 LValue &LVal,
3228 const IndirectFieldDecl *IFD) {
3229 for (const auto *C : IFD->chain())
3230 if (!HandleLValueMember(Info, E, LVal, FD: cast<FieldDecl>(Val: C)))
3231 return false;
3232 return true;
3233}
3234
/// Which flavor of size to compute in HandleSizeof: the full sizeof, or the
/// data size (per getTypeInfoDataSizeInChars — presumably the size excluding
/// tail padding; confirm against ASTContext).
enum class SizeOfType {
  SizeOf,
  DataSizeOf,
};
3239
3240/// Get the size of the given type in char units.
3241static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type,
3242 CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf) {
3243 // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
3244 // extension.
3245 if (Type->isVoidType() || Type->isFunctionType()) {
3246 Size = CharUnits::One();
3247 return true;
3248 }
3249
3250 if (Type->isDependentType()) {
3251 Info.FFDiag(Loc);
3252 return false;
3253 }
3254
3255 if (!Type->isConstantSizeType()) {
3256 // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2.
3257 // FIXME: Better diagnostic.
3258 Info.FFDiag(Loc);
3259 return false;
3260 }
3261
3262 if (SOT == SizeOfType::SizeOf)
3263 Size = Info.Ctx.getTypeSizeInChars(T: Type);
3264 else
3265 Size = Info.Ctx.getTypeInfoDataSizeInChars(T: Type).Width;
3266 return true;
3267}
3268
3269/// Update a pointer value to model pointer arithmetic.
3270/// \param Info - Information about the ongoing evaluation.
3271/// \param E - The expression being evaluated, for diagnostic purposes.
3272/// \param LVal - The pointer value to be updated.
3273/// \param EltTy - The pointee type represented by LVal.
3274/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
3275static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
3276 LValue &LVal, QualType EltTy,
3277 APSInt Adjustment) {
3278 CharUnits SizeOfPointee;
3279 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfPointee))
3280 return false;
3281
3282 LVal.adjustOffsetAndIndex(Info, E, Index: Adjustment, ElementSize: SizeOfPointee);
3283 return true;
3284}
3285
3286static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
3287 LValue &LVal, QualType EltTy,
3288 int64_t Adjustment) {
3289 return HandleLValueArrayAdjustment(Info, E, LVal, EltTy,
3290 Adjustment: APSInt::get(X: Adjustment));
3291}
3292
3293/// Update an lvalue to refer to a component of a complex number.
3294/// \param Info - Information about the ongoing evaluation.
3295/// \param LVal - The lvalue to be updated.
3296/// \param EltTy - The complex number's component type.
3297/// \param Imag - False for the real component, true for the imaginary.
3298static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
3299 LValue &LVal, QualType EltTy,
3300 bool Imag) {
3301 if (Imag) {
3302 CharUnits SizeOfComponent;
3303 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfComponent))
3304 return false;
3305 LVal.Offset += SizeOfComponent;
3306 }
3307 LVal.addComplex(Info, E, EltTy, Imag);
3308 return true;
3309}
3310
3311static bool HandleLValueVectorElement(EvalInfo &Info, const Expr *E,
3312 LValue &LVal, QualType EltTy,
3313 uint64_t Size, uint64_t Idx) {
3314 if (Idx) {
3315 CharUnits SizeOfElement;
3316 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfElement))
3317 return false;
3318 LVal.Offset += SizeOfElement * Idx;
3319 }
3320 LVal.addVectorElement(Info, E, EltTy, Size, Idx);
3321 return true;
3322}
3323
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
/// \param E An expression to be used when printing diagnostics.
/// \param VD The variable whose initializer should be obtained.
/// \param Version The version of the variable within the frame.
/// \param Frame The frame in which the variable was created. Must be null
/// if this variable is not local to the evaluation.
/// \param Result Filled in with a pointer to the value of the variable.
/// On a 'true' return, \p Result may also be left null to denote a
/// "constexpr-unknown" value (C++23 [expr.const]p8 / P2280R4).
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
                                const VarDecl *VD, CallStackFrame *Frame,
                                unsigned Version, APValue *&Result) {
  // C++23 [expr.const]p8 If we have a reference type allow unknown references
  // and pointers.
  bool AllowConstexprUnknown =
      Info.getLangOpts().CPlusPlus23 && VD->getType()->isReferenceType();

  APValue::LValueBase Base(VD, Frame ? Frame->Index : 0, Version);

  // Shared epilogue: given a candidate Result, decide whether an absent or
  // empty value is an error (uninitialized reference), acceptable as a
  // constexpr-unknown value (Result reset to null), or fine as-is.
  auto CheckUninitReference = [&](bool IsLocalVariable) {
    if (!Result || (!Result->hasValue() && VD->getType()->isReferenceType())) {
      // C++23 [expr.const]p8
      // ... For such an object that is not usable in constant expressions, the
      // dynamic type of the object is constexpr-unknown. For such a reference
      // that is not usable in constant expressions, the reference is treated
      // as binding to an unspecified object of the referenced type whose
      // lifetime and that of all subobjects includes the entire constant
      // evaluation and whose dynamic type is constexpr-unknown.
      //
      // Variables that are part of the current evaluation are not
      // constexpr-unknown.
      if (!AllowConstexprUnknown || IsLocalVariable) {
        if (!Info.checkingPotentialConstantExpression())
          Info.FFDiag(E, DiagId: diag::note_constexpr_use_uninit_reference);
        return false;
      }
      // Null Result signals "constexpr-unknown" to the caller.
      Result = nullptr;
    }
    return true;
  };

  // If this is a local variable, dig out its value.
  if (Frame) {
    Result = Frame->getTemporary(Key: VD, Version);
    if (Result)
      return CheckUninitReference(/*IsLocalVariable=*/true);

    if (!isa<ParmVarDecl>(Val: VD)) {
      // Assume variables referenced within a lambda's call operator that were
      // not declared within the call operator are captures and during checking
      // of a potential constant expression, assume they are unknown constant
      // expressions.
      assert(isLambdaCallOperator(Frame->Callee) &&
             (VD->getDeclContext() != Frame->Callee || VD->isInitCapture()) &&
             "missing value for local variable");
      if (Info.checkingPotentialConstantExpression())
        return false;

      llvm_unreachable(
          "A variable in a frame should either be a local or a parameter");
    }
    // Fall through: a parameter with no frame value is handled below.
  }

  // If we're currently evaluating the initializer of this declaration, use that
  // in-flight value.
  if (Info.EvaluatingDecl == Base) {
    Result = Info.EvaluatingDeclValue;
    return CheckUninitReference(/*IsLocalVariable=*/false);
  }

  // P2280R4 struck the restriction that variable of reference type lifetime
  // should begin within the evaluation of E
  // Used to be C++20 [expr.const]p5.12.2:
  // ... its lifetime began within the evaluation of E;
  if (isa<ParmVarDecl>(Val: VD)) {
    if (AllowConstexprUnknown) {
      Result = nullptr;
      return true;
    }

    // Assume parameters of a potential constant expression are usable in
    // constant expressions.
    if (!Info.checkingPotentialConstantExpression() ||
        !Info.CurrentCall->Callee ||
        !Info.CurrentCall->Callee->Equals(DC: VD->getDeclContext())) {
      if (Info.getLangOpts().CPlusPlus11) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_function_param_value_unknown)
            << VD;
        NoteLValueLocation(Info, Base);
      } else {
        Info.FFDiag(E);
      }
    }
    return false;
  }

  // A value-dependent expression cannot be folded; bail out silently.
  if (E->isValueDependent())
    return false;

  // Dig out the initializer, and use the declaration which it's attached to.
  // FIXME: We should eventually check whether the variable has a reachable
  // initializing declaration.
  const Expr *Init = VD->getAnyInitializer(D&: VD);
  // P2280R4 struck the restriction that variable of reference type should have
  // a preceding initialization.
  // Used to be C++20 [expr.const]p5.12:
  // ... reference has a preceding initialization and either ...
  if (!Init && !AllowConstexprUnknown) {
    // Don't diagnose during potential constant expression checking; an
    // initializer might be added later.
    if (!Info.checkingPotentialConstantExpression()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_unknown, ExtraNotes: 1)
          << VD;
      NoteLValueLocation(Info, Base);
    }
    return false;
  }

  // P2280R4 struck the initialization requirement for variables of reference
  // type so we can no longer assume we have an Init.
  // Used to be C++20 [expr.const]p5.12:
  // ... reference has a preceding initialization and either ...
  if (Init && Init->isValueDependent()) {
    // The DeclRefExpr is not value-dependent, but the variable it refers to
    // has a value-dependent initializer. This should only happen in
    // constant-folding cases, where the variable is not actually of a suitable
    // type for use in a constant expression (otherwise the DeclRefExpr would
    // have been value-dependent too), so diagnose that.
    assert(!VD->mightBeUsableInConstantExpressions(Info.Ctx));
    if (!Info.checkingPotentialConstantExpression()) {
      Info.FFDiag(E, DiagId: Info.getLangOpts().CPlusPlus11
                         ? diag::note_constexpr_ltor_non_constexpr
                         : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
          << VD << VD->getType();
      NoteLValueLocation(Info, Base);
    }
    return false;
  }

  // Check that we can fold the initializer. In C++, we will have already done
  // this in the cases where it matters for conformance.
  // P2280R4 struck the initialization requirement for variables of reference
  // type so we can no longer assume we have an Init.
  // Used to be C++20 [expr.const]p5.12:
  // ... reference has a preceding initialization and either ...
  if (Init && !VD->evaluateValue() && !AllowConstexprUnknown) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << VD;
    NoteLValueLocation(Info, Base);
    return false;
  }

  // Check that the variable is actually usable in constant expressions. For a
  // const integral variable or a reference, we might have a non-constant
  // initializer that we can nonetheless evaluate the initializer for. Such
  // variables are not usable in constant expressions. In C++98, the
  // initializer also syntactically needs to be an ICE.
  //
  // FIXME: We don't diagnose cases that aren't potentially usable in constant
  // expressions here; doing so would regress diagnostics for things like
  // reading from a volatile constexpr variable.
  if ((Info.getLangOpts().CPlusPlus && !VD->hasConstantInitialization() &&
       VD->mightBeUsableInConstantExpressions(C: Info.Ctx) &&
       !AllowConstexprUnknown) ||
      ((Info.getLangOpts().CPlusPlus || Info.getLangOpts().OpenCL) &&
       !Info.getLangOpts().CPlusPlus11 && !VD->hasICEInitializer(Context: Info.Ctx))) {
    // CCEDiag: this is only an error when a constant expression is required;
    // folding may still proceed below.
    if (Init) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << VD;
      NoteLValueLocation(Info, Base);
    } else {
      Info.CCEDiag(E);
    }
  }

  // Never use the initializer of a weak variable, not even for constant
  // folding. We can't be sure that this is the definition that will be used.
  if (VD->isWeak()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_weak) << VD;
    NoteLValueLocation(Info, Base);
    return false;
  }

  // Use the cached evaluation of the initializer, if any; otherwise fall back
  // to constexpr-unknown when permitted.
  Result = VD->getEvaluatedValue();

  if (!Result && !AllowConstexprUnknown)
    return false;

  return CheckUninitReference(/*IsLocalVariable=*/false);
}
3512
3513/// Get the base index of the given base class within an APValue representing
3514/// the given derived class.
3515static unsigned getBaseIndex(const CXXRecordDecl *Derived,
3516 const CXXRecordDecl *Base) {
3517 Base = Base->getCanonicalDecl();
3518 unsigned Index = 0;
3519 for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
3520 E = Derived->bases_end(); I != E; ++I, ++Index) {
3521 if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
3522 return Index;
3523 }
3524
3525 llvm_unreachable("base class missing from derived class's bases list");
3526}
3527
/// Extract the value of a character from a string literal.
static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
                                            uint64_t Index) {
  assert(!isa<SourceLocExpr>(Lit) &&
         "SourceLocExpr should have already been converted to a StringLiteral");

  // FIXME: Support MakeStringConstant
  if (const auto *ObjCEnc = dyn_cast<ObjCEncodeExpr>(Lit)) {
    std::string Encoding;
    Info.Ctx.getObjCEncodingForType(ObjCEnc->getEncodedType(), Encoding);
    assert(Index <= Encoding.size() && "Index too large");
    // Index == size() reads the NUL terminator via c_str().
    return APSInt::getUnsigned(Encoding.c_str()[Index]);
  }

  if (const auto *PE = dyn_cast<PredefinedExpr>(Lit))
    Lit = PE->getFunctionName();
  const auto *SL = cast<StringLiteral>(Lit);
  const ConstantArrayType *ArrTy =
      Info.Ctx.getAsConstantArrayType(SL->getType());
  assert(ArrTy && "string literal isn't an array");
  QualType CharType = ArrTy->getElementType();
  assert(CharType->isIntegerType() && "unexpected character type");

  // Indices beyond the literal's code units (the implicit terminator or array
  // filler) read as zero.
  APSInt Value(Info.Ctx.getTypeSize(CharType),
               CharType->isUnsignedIntegerType());
  if (Index < SL->getLength())
    Value = SL->getCodeUnit(Index);
  return Value;
}
3556
// Expand a string literal into an array of characters.
//
// FIXME: This is inefficient; we should probably introduce something similar
// to the LLVM ConstantDataArray to make this cheaper.
static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
                                APValue &Result,
                                QualType AllocType = QualType()) {
  QualType ArrayTy = AllocType.isNull() ? S->getType() : AllocType;
  const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ArrayTy);
  assert(CAT && "string literal isn't an array");
  QualType CharType = CAT->getElementType();
  assert(CharType->isIntegerType() && "unexpected character type");

  // Materialize only the code units the literal actually provides; any
  // remaining elements are covered by a zero array filler.
  unsigned ArraySize = CAT->getZExtSize();
  unsigned NumInit = std::min(S->getLength(), ArraySize);
  Result = APValue(APValue::UninitArray(), NumInit, ArraySize);
  APSInt Value(Info.Ctx.getTypeSize(CharType),
               CharType->isUnsignedIntegerType());
  if (Result.hasArrayFiller())
    Result.getArrayFiller() = APValue(Value);
  for (unsigned I = 0; I != NumInit; ++I) {
    Value = S->getCodeUnit(I);
    Result.getArrayInitializedElt(I) = APValue(Value);
  }
}
3582
// Expand an array so that it has more than Index filled elements.
static void expandArray(APValue &Array, unsigned Index) {
  unsigned Size = Array.getArraySize();
  assert(Index < Size);

  // Grow geometrically: cover Index, at least double the materialized count,
  // never fewer than 8 elements, and never more than the array's size.
  unsigned OldElts = Array.getArrayInitializedElts();
  unsigned NewElts = std::max(Index + 1, OldElts * 2);
  NewElts = std::min(Size, std::max(NewElts, 8u));

  // Build the replacement: move the previously-materialized elements over and
  // seed each newly-materialized slot from the filler.
  APValue Expanded(APValue::UninitArray(), NewElts, Size);
  for (unsigned I = 0; I != OldElts; ++I)
    Expanded.getArrayInitializedElt(I).swap(Array.getArrayInitializedElt(I));
  for (unsigned I = OldElts; I != NewElts; ++I)
    Expanded.getArrayInitializedElt(I) = Array.getArrayFiller();
  if (Expanded.hasArrayFiller())
    Expanded.getArrayFiller() = Array.getArrayFiller();
  Array.swap(Expanded);
}
3603
3604// Expand an indeterminate vector to materialize all elements.
3605static void expandVector(APValue &Vec, unsigned NumElements) {
3606 assert(Vec.isIndeterminate());
3607 SmallVector<APValue, 4> Elts(NumElements, APValue::IndeterminateValue());
3608 Vec = APValue(Elts.data(), Elts.size());
3609}
3610
3611/// Determine whether a type would actually be read by an lvalue-to-rvalue
3612/// conversion. If it's of class type, we may assume that the copy operation
3613/// is trivial. Note that this is never true for a union type with fields
3614/// (because the copy always "reads" the active member) and always true for
3615/// a non-class type.
3616static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD);
3617static bool isReadByLvalueToRvalueConversion(QualType T) {
3618 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3619 return !RD || isReadByLvalueToRvalueConversion(RD);
3620}
3621static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) {
3622 // FIXME: A trivial copy of a union copies the object representation, even if
3623 // the union is empty.
3624 if (RD->isUnion())
3625 return !RD->field_empty();
3626 if (RD->isEmpty())
3627 return false;
3628
3629 for (auto *Field : RD->fields())
3630 if (!Field->isUnnamedBitField() &&
3631 isReadByLvalueToRvalueConversion(T: Field->getType()))
3632 return true;
3633
3634 for (auto &BaseSpec : RD->bases())
3635 if (isReadByLvalueToRvalueConversion(T: BaseSpec.getType()))
3636 return true;
3637
3638 return false;
3639}
3640
3641/// Diagnose an attempt to read from any unreadable field within the specified
3642/// type, which might be a class type.
3643static bool diagnoseMutableFields(EvalInfo &Info, const Expr *E, AccessKinds AK,
3644 QualType T) {
3645 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3646 if (!RD)
3647 return false;
3648
3649 if (!RD->hasMutableFields())
3650 return false;
3651
3652 for (auto *Field : RD->fields()) {
3653 // If we're actually going to read this field in some way, then it can't
3654 // be mutable. If we're in a union, then assigning to a mutable field
3655 // (even an empty one) can change the active member, so that's not OK.
3656 // FIXME: Add core issue number for the union case.
3657 if (Field->isMutable() &&
3658 (RD->isUnion() || isReadByLvalueToRvalueConversion(T: Field->getType()))) {
3659 Info.FFDiag(E, DiagId: diag::note_constexpr_access_mutable, ExtraNotes: 1) << AK << Field;
3660 Info.Note(Loc: Field->getLocation(), DiagId: diag::note_declared_at);
3661 return true;
3662 }
3663
3664 if (diagnoseMutableFields(Info, E, AK, T: Field->getType()))
3665 return true;
3666 }
3667
3668 for (auto &BaseSpec : RD->bases())
3669 if (diagnoseMutableFields(Info, E, AK, T: BaseSpec.getType()))
3670 return true;
3671
3672 // All mutable fields were empty, and thus not actually read.
3673 return false;
3674}
3675
3676static bool lifetimeStartedInEvaluation(EvalInfo &Info,
3677 APValue::LValueBase Base,
3678 bool MutableSubobject = false) {
3679 // A temporary or transient heap allocation we created.
3680 if (Base.getCallIndex() || Base.is<DynamicAllocLValue>())
3681 return true;
3682
3683 switch (Info.IsEvaluatingDecl) {
3684 case EvalInfo::EvaluatingDeclKind::None:
3685 return false;
3686
3687 case EvalInfo::EvaluatingDeclKind::Ctor:
3688 // The variable whose initializer we're evaluating.
3689 if (Info.EvaluatingDecl == Base)
3690 return true;
3691
3692 // A temporary lifetime-extended by the variable whose initializer we're
3693 // evaluating.
3694 if (auto *BaseE = Base.dyn_cast<const Expr *>())
3695 if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(Val: BaseE))
3696 return Info.EvaluatingDecl == BaseMTE->getExtendingDecl();
3697 return false;
3698
3699 case EvalInfo::EvaluatingDeclKind::Dtor:
3700 // C++2a [expr.const]p6:
3701 // [during constant destruction] the lifetime of a and its non-mutable
3702 // subobjects (but not its mutable subobjects) [are] considered to start
3703 // within e.
3704 if (MutableSubobject || Base != Info.EvaluatingDecl)
3705 return false;
3706 // FIXME: We can meaningfully extend this to cover non-const objects, but
3707 // we will need special handling: we should be able to access only
3708 // subobjects of such objects that are themselves declared const.
3709 QualType T = getType(B: Base);
3710 return T.isConstQualified() || T->isReferenceType();
3711 }
3712
3713 llvm_unreachable("unknown evaluating decl kind");
3714}
3715
3716static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT,
3717 SourceLocation CallLoc = {}) {
3718 return Info.CheckArraySize(
3719 Loc: CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc,
3720 BitWidth: CAT->getNumAddressingBits(Context: Info.Ctx), ElemCount: CAT->getZExtSize(),
3721 /*Diag=*/true);
3722}
3723
/// Convert the scalar constant \p Original of type \p SourceTy to \p DestTy,
/// storing the converted value in \p Result.
///
/// Handles every pairing of boolean, integer, and real floating-point types;
/// any other destination type is diagnosed as a non-constant subexpression.
/// Returns false (with a diagnostic) on failure.
static bool handleScalarCast(EvalInfo &Info, const FPOptions FPO, const Expr *E,
                             QualType SourceTy, QualType DestTy,
                             APValue const &Original, APValue &Result) {
  // boolean must be checked before integer
  // since IsIntegerType() is true for bool
  if (SourceTy->isBooleanType()) {
    if (DestTy->isBooleanType()) {
      Result = Original;
      return true;
    }
    if (DestTy->isIntegerType() || DestTy->isRealFloatingType()) {
      // Widen the bool to an integer first. For a floating destination, this
      // produces a 64-bit unsigned intermediate in Result that the next block
      // then converts to the destination float.
      bool BoolResult;
      if (!HandleConversionToBool(Val: Original, Result&: BoolResult))
        return false;
      uint64_t IntResult = BoolResult;
      QualType IntType = DestTy->isIntegerType()
                             ? DestTy
                             : Info.Ctx.getIntTypeForBitwidth(DestWidth: 64, Signed: false);
      Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: IntType));
    }
    if (DestTy->isRealFloatingType()) {
      // Second leg of the bool -> float conversion: int intermediate -> float.
      APValue Result2 = APValue(APFloat(0.0));
      if (!HandleIntToFloatCast(Info, E, FPO,
                                SrcType: Info.Ctx.getIntTypeForBitwidth(DestWidth: 64, Signed: false),
                                Value: Result.getInt(), DestType: DestTy, Result&: Result2.getFloat()))
        return false;
      Result = std::move(Result2);
    }
    // NOTE(review): if DestTy is none of bool/int/float we return true here
    // with Result untouched — presumably callers only pass scalar destination
    // types; confirm against the HLSL elementwise-cast callers.
    return true;
  }
  if (SourceTy->isIntegerType()) {
    if (DestTy->isRealFloatingType()) {
      Result = APValue(APFloat(0.0));
      return HandleIntToFloatCast(Info, E, FPO, SrcType: SourceTy, Value: Original.getInt(),
                                  DestType: DestTy, Result&: Result.getFloat());
    }
    if (DestTy->isBooleanType()) {
      bool BoolResult;
      if (!HandleConversionToBool(Val: Original, Result&: BoolResult))
        return false;
      uint64_t IntResult = BoolResult;
      Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: DestTy));
      return true;
    }
    if (DestTy->isIntegerType()) {
      Result = APValue(
          HandleIntToIntCast(Info, E, DestType: DestTy, SrcType: SourceTy, Value: Original.getInt()));
      return true;
    }
  } else if (SourceTy->isRealFloatingType()) {
    if (DestTy->isRealFloatingType()) {
      // Convert in place: seed Result with the source value, then convert.
      Result = Original;
      return HandleFloatToFloatCast(Info, E, SrcType: SourceTy, DestType: DestTy,
                                    Result&: Result.getFloat());
    }
    if (DestTy->isBooleanType()) {
      bool BoolResult;
      if (!HandleConversionToBool(Val: Original, Result&: BoolResult))
        return false;
      uint64_t IntResult = BoolResult;
      Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: DestTy));
      return true;
    }
    if (DestTy->isIntegerType()) {
      Result = APValue(APSInt());
      return HandleFloatToIntCast(Info, E, SrcType: SourceTy, Value: Original.getFloat(),
                                  DestType: DestTy, Result&: Result.getInt());
    }
  }

  // Unsupported source/destination combination.
  Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
  return false;
}
3797
// do the heavy lifting for casting to aggregate types
// because we have to deal with bitfields specially
//
// Rebuilds an aggregate value of type ResultType in Result by consuming the
// flattened scalar Elements/ElTypes (as produced by flattenAPValue) in
// declaration order. The destination type is walked iteratively with an
// explicit worklist of (slot, type, bit-field width) entries; composite
// entries push their sub-slots in reverse so they pop in source order.
// Returns false (with a diagnostic where needed) on any conversion failure.
static bool constructAggregate(EvalInfo &Info, const FPOptions FPO,
                               const Expr *E, APValue &Result,
                               QualType ResultType,
                               SmallVectorImpl<APValue> &Elements,
                               SmallVectorImpl<QualType> &ElTypes) {

  SmallVector<std::tuple<APValue *, QualType, unsigned>> WorkList = {
      {&Result, ResultType, 0}};

  // ElI indexes the next unconsumed flattened source element.
  unsigned ElI = 0;
  while (!WorkList.empty() && ElI < Elements.size()) {
    auto [Res, Type, BitWidth] = WorkList.pop_back_val();

    if (Type->isRealFloatingType()) {
      if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: Type, Original: Elements[ElI],
                            Result&: *Res))
        return false;
      ElI++;
      continue;
    }
    if (Type->isIntegerType()) {
      if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: Type, Original: Elements[ElI],
                            Result&: *Res))
        return false;
      // A non-zero BitWidth marks a bit-field destination: truncate to the
      // declared width, then sign/zero-extend back so the stored APSInt keeps
      // the field type's full width.
      if (BitWidth > 0) {
        if (!Res->isInt())
          return false;
        APSInt &Int = Res->getInt();
        unsigned OldBitWidth = Int.getBitWidth();
        unsigned NewBitWidth = BitWidth;
        if (NewBitWidth < OldBitWidth)
          Int = Int.trunc(width: NewBitWidth).extend(width: OldBitWidth);
      }
      ElI++;
      continue;
    }
    if (Type->isVectorType()) {
      // A vector consumes one flattened source element per lane.
      QualType ElTy = Type->castAs<VectorType>()->getElementType();
      unsigned NumEl = Type->castAs<VectorType>()->getNumElements();
      SmallVector<APValue> Vals(NumEl);
      for (unsigned I = 0; I < NumEl; ++I) {
        if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: ElTy, Original: Elements[ElI],
                              Result&: Vals[I]))
          return false;
        ElI++;
      }
      *Res = APValue(Vals.data(), NumEl);
      continue;
    }
    if (Type->isConstantArrayType()) {
      QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))
                          ->getElementType();
      uint64_t Size =
          cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))->getZExtSize();
      *Res = APValue(APValue::UninitArray(), Size, Size);
      // Push element slots in reverse so index 0 is filled first.
      for (int64_t I = Size - 1; I > -1; --I)
        WorkList.emplace_back(Args: &Res->getArrayInitializedElt(I), Args&: ElTy, Args: 0u);
      continue;
    }
    if (Type->isRecordType()) {
      const RecordDecl *RD = Type->getAsRecordDecl();

      unsigned NumBases = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD))
        NumBases = CXXRD->getNumBases();

      *Res = APValue(APValue::UninitStruct(), NumBases, RD->getNumFields());

      SmallVector<std::tuple<APValue *, QualType, unsigned>> ReverseList;
      // we need to traverse backwards
      // Visit the base classes.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
        // HLSL records have at most a single base class.
        if (CXXRD->getNumBases() > 0) {
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          ReverseList.emplace_back(Args: &Res->getStructBase(i: 0), Args: BS.getType(), Args: 0u);
        }
      }

      // Visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        unsigned FDBW = 0;
        // Unnamed bit-fields are padding: they take no source element.
        if (FD->isUnnamedBitField())
          continue;
        if (FD->isBitField()) {
          FDBW = FD->getBitWidthValue();
        }

        ReverseList.emplace_back(Args: &Res->getStructField(i: FD->getFieldIndex()),
                                 Args: FD->getType(), Args&: FDBW);
      }

      // Reverse so the LIFO worklist pops base first, then fields in order.
      std::reverse(first: ReverseList.begin(), last: ReverseList.end());
      llvm::append_range(C&: WorkList, R&: ReverseList);
      continue;
    }
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  return true;
}
3901
3902static bool handleElementwiseCast(EvalInfo &Info, const Expr *E,
3903 const FPOptions FPO,
3904 SmallVectorImpl<APValue> &Elements,
3905 SmallVectorImpl<QualType> &SrcTypes,
3906 SmallVectorImpl<QualType> &DestTypes,
3907 SmallVectorImpl<APValue> &Results) {
3908
3909 assert((Elements.size() == SrcTypes.size()) &&
3910 (Elements.size() == DestTypes.size()));
3911
3912 for (unsigned I = 0, ESz = Elements.size(); I < ESz; ++I) {
3913 APValue Original = Elements[I];
3914 QualType SourceTy = SrcTypes[I];
3915 QualType DestTy = DestTypes[I];
3916
3917 if (!handleScalarCast(Info, FPO, E, SourceTy, DestTy, Original, Result&: Results[I]))
3918 return false;
3919 }
3920 return true;
3921}
3922
3923static unsigned elementwiseSize(EvalInfo &Info, QualType BaseTy) {
3924
3925 SmallVector<QualType> WorkList = {BaseTy};
3926
3927 unsigned Size = 0;
3928 while (!WorkList.empty()) {
3929 QualType Type = WorkList.pop_back_val();
3930 if (Type->isRealFloatingType() || Type->isIntegerType() ||
3931 Type->isBooleanType()) {
3932 ++Size;
3933 continue;
3934 }
3935 if (Type->isVectorType()) {
3936 unsigned NumEl = Type->castAs<VectorType>()->getNumElements();
3937 Size += NumEl;
3938 continue;
3939 }
3940 if (Type->isConstantMatrixType()) {
3941 unsigned NumEl =
3942 Type->castAs<ConstantMatrixType>()->getNumElementsFlattened();
3943 Size += NumEl;
3944 continue;
3945 }
3946 if (Type->isConstantArrayType()) {
3947 QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))
3948 ->getElementType();
3949 uint64_t ArrSize =
3950 cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))->getZExtSize();
3951 for (uint64_t I = 0; I < ArrSize; ++I) {
3952 WorkList.push_back(Elt: ElTy);
3953 }
3954 continue;
3955 }
3956 if (Type->isRecordType()) {
3957 const RecordDecl *RD = Type->getAsRecordDecl();
3958
3959 // Visit the base classes.
3960 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
3961 if (CXXRD->getNumBases() > 0) {
3962 assert(CXXRD->getNumBases() == 1);
3963 const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
3964 WorkList.push_back(Elt: BS.getType());
3965 }
3966 }
3967
3968 // visit the fields.
3969 for (FieldDecl *FD : RD->fields()) {
3970 if (FD->isUnnamedBitField())
3971 continue;
3972 WorkList.push_back(Elt: FD->getType());
3973 }
3974 continue;
3975 }
3976 }
3977 return Size;
3978}
3979
3980static bool hlslAggSplatHelper(EvalInfo &Info, const Expr *E, APValue &SrcVal,
3981 QualType &SrcTy) {
3982 SrcTy = E->getType();
3983
3984 if (!Evaluate(Result&: SrcVal, Info, E))
3985 return false;
3986
3987 assert((SrcVal.isFloat() || SrcVal.isInt() ||
3988 (SrcVal.isVector() && SrcVal.getVectorLength() == 1)) &&
3989 "Not a valid HLSLAggregateSplatCast.");
3990
3991 if (SrcVal.isVector()) {
3992 assert(SrcTy->isVectorType() && "Type mismatch.");
3993 SrcTy = SrcTy->castAs<VectorType>()->getElementType();
3994 SrcVal = SrcVal.getVectorElt(I: 0);
3995 }
3996 if (SrcVal.isMatrix()) {
3997 assert(SrcTy->isConstantMatrixType() && "Type mismatch.");
3998 SrcTy = SrcTy->castAs<ConstantMatrixType>()->getElementType();
3999 SrcVal = SrcVal.getMatrixElt(Row: 0, Col: 0);
4000 }
4001 return true;
4002}
4003
/// Flatten the aggregate \p Value of type \p BaseTy into at most \p Size
/// scalar (int/float) elements, appending each element and its type to
/// \p Elements / \p Types in declaration order. Traversal uses an explicit
/// worklist; composite values push their children in reverse so they pop in
/// source order. Returns false on malformed values (with a diagnostic for
/// unsupported kinds).
static bool flattenAPValue(EvalInfo &Info, const Expr *E, APValue Value,
                           QualType BaseTy, SmallVectorImpl<APValue> &Elements,
                           SmallVectorImpl<QualType> &Types, unsigned Size) {

  SmallVector<std::pair<APValue, QualType>> WorkList = {{Value, BaseTy}};
  // Stop as soon as Size elements have been emitted; extra source elements
  // are deliberately dropped (truncating flatten).
  unsigned Populated = 0;
  while (!WorkList.empty() && Populated < Size) {
    auto [Work, Type] = WorkList.pop_back_val();

    if (Work.isFloat() || Work.isInt()) {
      Elements.push_back(Elt: Work);
      Types.push_back(Elt: Type);
      Populated++;
      continue;
    }
    if (Work.isVector()) {
      assert(Type->isVectorType() && "Type mismatch.");
      QualType ElTy = Type->castAs<VectorType>()->getElementType();
      for (unsigned I = 0; I < Work.getVectorLength() && Populated < Size;
           I++) {
        Elements.push_back(Elt: Work.getVectorElt(I));
        Types.push_back(Elt: ElTy);
        Populated++;
      }
      continue;
    }
    if (Work.isMatrix()) {
      assert(Type->isConstantMatrixType() && "Type mismatch.");
      const auto *MT = Type->castAs<ConstantMatrixType>();
      QualType ElTy = MT->getElementType();
      // Matrix elements are flattened in row-major order.
      for (unsigned Row = 0; Row < Work.getMatrixNumRows() && Populated < Size;
           Row++) {
        for (unsigned Col = 0;
             Col < Work.getMatrixNumColumns() && Populated < Size; Col++) {
          Elements.push_back(Elt: Work.getMatrixElt(Row, Col));
          Types.push_back(Elt: ElTy);
          Populated++;
        }
      }
      continue;
    }
    if (Work.isArray()) {
      assert(Type->isConstantArrayType() && "Type mismatch.");
      QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))
                          ->getElementType();
      // Push in reverse so element 0 pops (and is emitted) first.
      for (int64_t I = Work.getArraySize() - 1; I > -1; --I) {
        WorkList.emplace_back(Args&: Work.getArrayInitializedElt(I), Args&: ElTy);
      }
      continue;
    }

    if (Work.isStruct()) {
      assert(Type->isRecordType() && "Type mismatch.");

      const RecordDecl *RD = Type->getAsRecordDecl();

      SmallVector<std::pair<APValue, QualType>> ReverseList;
      // Visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        // Unnamed bit-fields are padding and produce no elements.
        if (FD->isUnnamedBitField())
          continue;
        ReverseList.emplace_back(Args&: Work.getStructField(i: FD->getFieldIndex()),
                                 Args: FD->getType());
      }

      std::reverse(first: ReverseList.begin(), last: ReverseList.end());
      llvm::append_range(C&: WorkList, R&: ReverseList);

      // Visit the base classes. Pushed after the fields so the (single) base
      // pops first, i.e. base subobject elements precede field elements.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
        if (CXXRD->getNumBases() > 0) {
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          const APValue &Base = Work.getStructBase(i: 0);

          // Can happen in error cases.
          if (!Base.isStruct())
            return false;

          WorkList.emplace_back(Args: Base, Args: BS.getType());
        }
      }
      continue;
    }
    // Any other value kind (lvalue, member pointer, ...) cannot be flattened.
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  return true;
}
4094
4095namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
struct CompleteObject {
  /// The identity of the object.
  APValue::LValueBase Base;
  /// The value of the complete object. May be null when only the identity
  /// and type are known.
  APValue *Value;
  /// The type of the complete object. A null type marks an invalid handle
  /// (see operator bool below).
  QualType Type;

  CompleteObject() : Value(nullptr) {}
  CompleteObject(APValue::LValueBase Base, APValue *Value, QualType Type)
      : Base(Base), Value(Value), Type(Type) {}

  /// Whether an access of kind \p AK to this object may touch its mutable
  /// members during constant evaluation.
  bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const {
    // If this isn't a "real" access (eg, if it's just accessing the type
    // info), allow it. We assume the type doesn't change dynamically for
    // subobjects of constexpr objects (even though we'd hit UB here if it
    // did). FIXME: Is this right?
    if (!isAnyAccess(AK))
      return true;

    // In C++14 onwards, it is permitted to read a mutable member whose
    // lifetime began within the evaluation.
    // FIXME: Should we also allow this in C++11?
    if (!Info.getLangOpts().CPlusPlus14 &&
        AK != AccessKinds::AK_IsWithinLifetime)
      return false;
    return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/true);
  }

  // A handle is valid iff it carries a type.
  explicit operator bool() const { return !Type.isNull(); }
};
4129} // end anonymous namespace
4130
4131static QualType getSubobjectType(QualType ObjType, QualType SubobjType,
4132 bool IsMutable = false) {
4133 // C++ [basic.type.qualifier]p1:
4134 // - A const object is an object of type const T or a non-mutable subobject
4135 // of a const object.
4136 if (ObjType.isConstQualified() && !IsMutable)
4137 SubobjType.addConst();
4138 // - A volatile object is an object of type const T or a subobject of a
4139 // volatile object.
4140 if (ObjType.isVolatileQualified())
4141 SubobjType.addVolatile();
4142 return SubobjType;
4143}
4144
4145/// Find the designated sub-object of an rvalue.
4146template <typename SubobjectHandler>
4147static typename SubobjectHandler::result_type
4148findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
4149 const SubobjectDesignator &Sub, SubobjectHandler &handler) {
4150 if (Sub.Invalid)
4151 // A diagnostic will have already been produced.
4152 return handler.failed();
4153 if (Sub.isOnePastTheEnd() || Sub.isMostDerivedAnUnsizedArray()) {
4154 if (Info.getLangOpts().CPlusPlus11)
4155 Info.FFDiag(E, DiagId: Sub.isOnePastTheEnd()
4156 ? diag::note_constexpr_access_past_end
4157 : diag::note_constexpr_access_unsized_array)
4158 << handler.AccessKind;
4159 else
4160 Info.FFDiag(E);
4161 return handler.failed();
4162 }
4163
4164 APValue *O = Obj.Value;
4165 QualType ObjType = Obj.Type;
4166 const FieldDecl *LastField = nullptr;
4167 const FieldDecl *VolatileField = nullptr;
4168
4169 // Walk the designator's path to find the subobject.
4170 for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
4171 // Reading an indeterminate value is undefined, but assigning over one is OK.
4172 if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
4173 (O->isIndeterminate() &&
4174 !isValidIndeterminateAccess(handler.AccessKind))) {
4175 // Object has ended lifetime.
4176 // If I is non-zero, some subobject (member or array element) of a
4177 // complete object has ended its lifetime, so this is valid for
4178 // IsWithinLifetime, resulting in false.
4179 if (I != 0 && handler.AccessKind == AK_IsWithinLifetime)
4180 return false;
4181 if (!Info.checkingPotentialConstantExpression())
4182 Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit)
4183 << handler.AccessKind << O->isIndeterminate()
4184 << E->getSourceRange();
4185 return handler.failed();
4186 }
4187
4188 // C++ [class.ctor]p5, C++ [class.dtor]p5:
4189 // const and volatile semantics are not applied on an object under
4190 // {con,de}struction.
4191 if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) &&
4192 ObjType->isRecordType() &&
4193 Info.isEvaluatingCtorDtor(
4194 Base: Obj.Base, Path: ArrayRef(Sub.Entries.begin(), Sub.Entries.begin() + I)) !=
4195 ConstructionPhase::None) {
4196 ObjType = Info.Ctx.getCanonicalType(T: ObjType);
4197 ObjType.removeLocalConst();
4198 ObjType.removeLocalVolatile();
4199 }
4200
4201 // If this is our last pass, check that the final object type is OK.
4202 if (I == N || (I == N - 1 && ObjType->isAnyComplexType())) {
4203 // Accesses to volatile objects are prohibited.
4204 if (ObjType.isVolatileQualified() && isFormalAccess(handler.AccessKind)) {
4205 if (Info.getLangOpts().CPlusPlus) {
4206 int DiagKind;
4207 SourceLocation Loc;
4208 const NamedDecl *Decl = nullptr;
4209 if (VolatileField) {
4210 DiagKind = 2;
4211 Loc = VolatileField->getLocation();
4212 Decl = VolatileField;
4213 } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) {
4214 DiagKind = 1;
4215 Loc = VD->getLocation();
4216 Decl = VD;
4217 } else {
4218 DiagKind = 0;
4219 if (auto *E = Obj.Base.dyn_cast<const Expr *>())
4220 Loc = E->getExprLoc();
4221 }
4222 Info.FFDiag(E, DiagId: diag::note_constexpr_access_volatile_obj, ExtraNotes: 1)
4223 << handler.AccessKind << DiagKind << Decl;
4224 Info.Note(Loc, DiagId: diag::note_constexpr_volatile_here) << DiagKind;
4225 } else {
4226 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
4227 }
4228 return handler.failed();
4229 }
4230
4231 // If we are reading an object of class type, there may still be more
4232 // things we need to check: if there are any mutable subobjects, we
4233 // cannot perform this read. (This only happens when performing a trivial
4234 // copy or assignment.)
4235 if (ObjType->isRecordType() &&
4236 !Obj.mayAccessMutableMembers(Info, AK: handler.AccessKind) &&
4237 diagnoseMutableFields(Info, E, handler.AccessKind, ObjType))
4238 return handler.failed();
4239 }
4240
4241 if (I == N) {
4242 if (!handler.found(*O, ObjType))
4243 return false;
4244
4245 // If we modified a bit-field, truncate it to the right width.
4246 if (isModification(handler.AccessKind) &&
4247 LastField && LastField->isBitField() &&
4248 !truncateBitfieldValue(Info, E, Value&: *O, FD: LastField))
4249 return false;
4250
4251 return true;
4252 }
4253
4254 LastField = nullptr;
4255 if (ObjType->isArrayType()) {
4256 // Next subobject is an array element.
4257 const ArrayType *AT = Info.Ctx.getAsArrayType(T: ObjType);
4258 assert((isa<ConstantArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
4259 "vla in literal type?");
4260 uint64_t Index = Sub.Entries[I].getAsArrayIndex();
4261 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT);
4262 CAT && CAT->getSize().ule(RHS: Index)) {
4263 // Note, it should not be possible to form a pointer with a valid
4264 // designator which points more than one past the end of the array.
4265 if (Info.getLangOpts().CPlusPlus11)
4266 Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end)
4267 << handler.AccessKind;
4268 else
4269 Info.FFDiag(E);
4270 return handler.failed();
4271 }
4272
4273 ObjType = AT->getElementType();
4274
4275 if (O->getArrayInitializedElts() > Index)
4276 O = &O->getArrayInitializedElt(I: Index);
4277 else if (!isRead(handler.AccessKind)) {
4278 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT);
4279 CAT && !CheckArraySize(Info, CAT, CallLoc: E->getExprLoc()))
4280 return handler.failed();
4281
4282 expandArray(Array&: *O, Index);
4283 O = &O->getArrayInitializedElt(I: Index);
4284 } else
4285 O = &O->getArrayFiller();
4286 } else if (ObjType->isAnyComplexType()) {
4287 // Next subobject is a complex number.
4288 uint64_t Index = Sub.Entries[I].getAsArrayIndex();
4289 if (Index > 1) {
4290 if (Info.getLangOpts().CPlusPlus11)
4291 Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end)
4292 << handler.AccessKind;
4293 else
4294 Info.FFDiag(E);
4295 return handler.failed();
4296 }
4297
4298 ObjType = getSubobjectType(
4299 ObjType, SubobjType: ObjType->castAs<ComplexType>()->getElementType());
4300
4301 assert(I == N - 1 && "extracting subobject of scalar?");
4302 if (O->isComplexInt()) {
4303 return handler.found(Index ? O->getComplexIntImag()
4304 : O->getComplexIntReal(), ObjType);
4305 } else {
4306 assert(O->isComplexFloat());
4307 return handler.found(Index ? O->getComplexFloatImag()
4308 : O->getComplexFloatReal(), ObjType);
4309 }
4310 } else if (const auto *VT = ObjType->getAs<VectorType>()) {
4311 uint64_t Index = Sub.Entries[I].getAsArrayIndex();
4312 unsigned NumElements = VT->getNumElements();
4313 if (Index == NumElements) {
4314 if (Info.getLangOpts().CPlusPlus11)
4315 Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end)
4316 << handler.AccessKind;
4317 else
4318 Info.FFDiag(E);
4319 return handler.failed();
4320 }
4321
4322 if (Index > NumElements) {
4323 Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index)
4324 << Index << /*array*/ 0 << NumElements;
4325 return handler.failed();
4326 }
4327
4328 ObjType = VT->getElementType();
4329 assert(I == N - 1 && "extracting subobject of scalar?");
4330
4331 if (O->isIndeterminate()) {
4332 if (isRead(handler.AccessKind)) {
4333 Info.FFDiag(E);
4334 return handler.failed();
4335 }
4336 expandVector(Vec&: *O, NumElements);
4337 }
4338 assert(O->isVector() && "unexpected object during vector element access");
4339 return handler.found(O->getVectorElt(I: Index), ObjType);
4340 } else if (const FieldDecl *Field = getAsField(E: Sub.Entries[I])) {
4341 if (Field->isMutable() &&
4342 !Obj.mayAccessMutableMembers(Info, AK: handler.AccessKind)) {
4343 Info.FFDiag(E, DiagId: diag::note_constexpr_access_mutable, ExtraNotes: 1)
4344 << handler.AccessKind << Field;
4345 Info.Note(Loc: Field->getLocation(), DiagId: diag::note_declared_at);
4346 return handler.failed();
4347 }
4348
4349 // Next subobject is a class, struct or union field.
4350 RecordDecl *RD = ObjType->castAsCanonical<RecordType>()->getDecl();
4351 if (RD->isUnion()) {
4352 const FieldDecl *UnionField = O->getUnionField();
4353 if (!UnionField ||
4354 UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
4355 if (I == N - 1 && handler.AccessKind == AK_Construct) {
4356 // Placement new onto an inactive union member makes it active.
4357 O->setUnion(Field, Value: APValue());
4358 } else {
4359 // Pointer to/into inactive union member: Not within lifetime
4360 if (handler.AccessKind == AK_IsWithinLifetime)
4361 return false;
4362 // FIXME: If O->getUnionValue() is absent, report that there's no
4363 // active union member rather than reporting the prior active union
4364 // member. We'll need to fix nullptr_t to not use APValue() as its
4365 // representation first.
4366 Info.FFDiag(E, DiagId: diag::note_constexpr_access_inactive_union_member)
4367 << handler.AccessKind << Field << !UnionField << UnionField;
4368 return handler.failed();
4369 }
4370 }
4371 O = &O->getUnionValue();
4372 } else
4373 O = &O->getStructField(i: Field->getFieldIndex());
4374
4375 ObjType = getSubobjectType(ObjType, SubobjType: Field->getType(), IsMutable: Field->isMutable());
4376 LastField = Field;
4377 if (Field->getType().isVolatileQualified())
4378 VolatileField = Field;
4379 } else {
4380 // Next subobject is a base class.
4381 const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
4382 const CXXRecordDecl *Base = getAsBaseClass(E: Sub.Entries[I]);
4383 O = &O->getStructBase(i: getBaseIndex(Derived, Base));
4384
4385 ObjType = getSubobjectType(ObjType, SubobjType: Info.Ctx.getCanonicalTagType(TD: Base));
4386 }
4387 }
4388}
4389
4390namespace {
4391struct ExtractSubobjectHandler {
4392 EvalInfo &Info;
4393 const Expr *E;
4394 APValue &Result;
4395 const AccessKinds AccessKind;
4396
4397 typedef bool result_type;
4398 bool failed() { return false; }
4399 bool found(APValue &Subobj, QualType SubobjType) {
4400 Result = Subobj;
4401 if (AccessKind == AK_ReadObjectRepresentation)
4402 return true;
4403 return CheckFullyInitialized(Info, DiagLoc: E->getExprLoc(), Type: SubobjType, Value: Result);
4404 }
4405 bool found(APSInt &Value, QualType SubobjType) {
4406 Result = APValue(Value);
4407 return true;
4408 }
4409 bool found(APFloat &Value, QualType SubobjType) {
4410 Result = APValue(Value);
4411 return true;
4412 }
4413};
4414} // end anonymous namespace
4415
4416/// Extract the designated sub-object of an rvalue.
4417static bool extractSubobject(EvalInfo &Info, const Expr *E,
4418 const CompleteObject &Obj,
4419 const SubobjectDesignator &Sub, APValue &Result,
4420 AccessKinds AK = AK_Read) {
4421 assert(AK == AK_Read || AK == AK_ReadObjectRepresentation);
4422 ExtractSubobjectHandler Handler = {.Info: Info, .E: E, .Result: Result, .AccessKind: AK};
4423 return findSubobject(Info, E, Obj, Sub, handler&: Handler);
4424}
4425
4426namespace {
4427struct ModifySubobjectHandler {
4428 EvalInfo &Info;
4429 APValue &NewVal;
4430 const Expr *E;
4431
4432 typedef bool result_type;
4433 static const AccessKinds AccessKind = AK_Assign;
4434
4435 bool checkConst(QualType QT) {
4436 // Assigning to a const object has undefined behavior.
4437 if (QT.isConstQualified()) {
4438 Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
4439 return false;
4440 }
4441 return true;
4442 }
4443
4444 bool failed() { return false; }
4445 bool found(APValue &Subobj, QualType SubobjType) {
4446 if (!checkConst(QT: SubobjType))
4447 return false;
4448 // We've been given ownership of NewVal, so just swap it in.
4449 Subobj.swap(RHS&: NewVal);
4450 return true;
4451 }
4452 bool found(APSInt &Value, QualType SubobjType) {
4453 if (!checkConst(QT: SubobjType))
4454 return false;
4455 if (!NewVal.isInt()) {
4456 // Maybe trying to write a cast pointer value into a complex?
4457 Info.FFDiag(E);
4458 return false;
4459 }
4460 Value = NewVal.getInt();
4461 return true;
4462 }
4463 bool found(APFloat &Value, QualType SubobjType) {
4464 if (!checkConst(QT: SubobjType))
4465 return false;
4466 Value = NewVal.getFloat();
4467 return true;
4468 }
4469};
4470} // end anonymous namespace
4471
4472const AccessKinds ModifySubobjectHandler::AccessKind;
4473
4474/// Update the designated sub-object of an rvalue to the given value.
4475static bool modifySubobject(EvalInfo &Info, const Expr *E,
4476 const CompleteObject &Obj,
4477 const SubobjectDesignator &Sub,
4478 APValue &NewVal) {
4479 ModifySubobjectHandler Handler = { .Info: Info, .NewVal: NewVal, .E: E };
4480 return findSubobject(Info, E, Obj, Sub, handler&: Handler);
4481}
4482
4483/// Find the position where two subobject designators diverge, or equivalently
4484/// the length of the common initial subsequence.
4485static unsigned FindDesignatorMismatch(QualType ObjType,
4486 const SubobjectDesignator &A,
4487 const SubobjectDesignator &B,
4488 bool &WasArrayIndex) {
4489 unsigned I = 0, N = std::min(a: A.Entries.size(), b: B.Entries.size());
4490 for (/**/; I != N; ++I) {
4491 if (!ObjType.isNull() &&
4492 (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
4493 // Next subobject is an array element.
4494 if (A.Entries[I].getAsArrayIndex() != B.Entries[I].getAsArrayIndex()) {
4495 WasArrayIndex = true;
4496 return I;
4497 }
4498 if (ObjType->isAnyComplexType())
4499 ObjType = ObjType->castAs<ComplexType>()->getElementType();
4500 else
4501 ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
4502 } else {
4503 if (A.Entries[I].getAsBaseOrMember() !=
4504 B.Entries[I].getAsBaseOrMember()) {
4505 WasArrayIndex = false;
4506 return I;
4507 }
4508 if (const FieldDecl *FD = getAsField(E: A.Entries[I]))
4509 // Next subobject is a field.
4510 ObjType = FD->getType();
4511 else
4512 // Next subobject is a base class.
4513 ObjType = QualType();
4514 }
4515 }
4516 WasArrayIndex = false;
4517 return I;
4518}
4519
4520/// Determine whether the given subobject designators refer to elements of the
4521/// same array object.
4522static bool AreElementsOfSameArray(QualType ObjType,
4523 const SubobjectDesignator &A,
4524 const SubobjectDesignator &B) {
4525 if (A.Entries.size() != B.Entries.size())
4526 return false;
4527
4528 bool IsArray = A.MostDerivedIsArrayElement;
4529 if (IsArray && A.MostDerivedPathLength != A.Entries.size())
4530 // A is a subobject of the array element.
4531 return false;
4532
4533 // If A (and B) designates an array element, the last entry will be the array
4534 // index. That doesn't have to match. Otherwise, we're in the 'implicit array
4535 // of length 1' case, and the entire path must match.
4536 bool WasArrayIndex;
4537 unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
4538 return CommonLength >= A.Entries.size() - IsArray;
4539}
4540
/// Find the complete object to which an LValue refers.
///
/// \param E - The expression being evaluated, used for diagnostics.
/// \param AK - The kind of access being performed (read, assign, ...).
/// \param LVal - The lvalue whose complete object we want.
/// \param LValType - The declared type of the glvalue.
/// \returns an invalid CompleteObject (after diagnosing, where appropriate)
/// if the object is not accessible in this constant evaluation; otherwise a
/// CompleteObject coupling the lvalue base with the APValue holding the
/// object's evaluated state and the type of the complete object.
static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
                                         AccessKinds AK, const LValue &LVal,
                                         QualType LValType) {
  if (LVal.InvalidBase) {
    Info.FFDiag(E);
    return CompleteObject();
  }

  // A null base means a null pointer; accessing through it is never a
  // constant expression.
  if (!LVal.Base) {
    if (AK == AccessKinds::AK_Dereference)
      Info.FFDiag(E, DiagId: diag::note_constexpr_dereferencing_null);
    else
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_null) << AK;
    return CompleteObject();
  }

  // For objects with a call index, find the frame in which they live; if no
  // such frame is active any more, diagnose the access and bail out.
  CallStackFrame *Frame = nullptr;
  unsigned Depth = 0;
  if (LVal.getLValueCallIndex()) {
    std::tie(args&: Frame, args&: Depth) =
        Info.getCallFrameAndDepth(CallIndex: LVal.getLValueCallIndex());
    if (!Frame) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit, ExtraNotes: 1)
          << AK << /*Indeterminate=*/false << E->getSourceRange();
      NoteLValueLocation(Info, Base: LVal.Base);
      return CompleteObject();
    }
  }

  bool IsAccess = isAnyAccess(AK);

  // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
  // is not a constant expression (even if the object is non-volatile). We also
  // apply this rule to C++98, in order to conform to the expected 'volatile'
  // semantics.
  if (isFormalAccess(AK) && LValType.isVolatileQualified()) {
    if (Info.getLangOpts().CPlusPlus)
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_volatile_type)
          << AK << LValType;
    else
      Info.FFDiag(E);
    return CompleteObject();
  }

  // Compute value storage location and type of base object.
  APValue *BaseVal = nullptr;
  QualType BaseType = getType(B: LVal.Base);

  if (Info.getLangOpts().CPlusPlus14 && LVal.Base == Info.EvaluatingDecl &&
      lifetimeStartedInEvaluation(Info, Base: LVal.Base)) {
    // This is the object whose initializer we're evaluating, so its lifetime
    // started in the current evaluation.
    BaseVal = Info.EvaluatingDeclValue;
  } else if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl *>()) {
    // Allow reading from a GUID declaration.
    if (auto *GD = dyn_cast<MSGuidDecl>(Val: D)) {
      if (isModification(AK)) {
        // All the remaining cases do not permit modification of the object.
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      }
      APValue &V = GD->getAsAPValue();
      if (V.isAbsent()) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_unsupported_layout)
            << GD->getType();
        return CompleteObject();
      }
      return CompleteObject(LVal.Base, &V, GD->getType());
    }

    // Allow reading the APValue from an UnnamedGlobalConstantDecl.
    if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(Val: D)) {
      if (isModification(AK)) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      }
      return CompleteObject(LVal.Base, const_cast<APValue *>(&GCD->getValue()),
                            GCD->getType());
    }

    // Allow reading from template parameter objects.
    if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: D)) {
      if (isModification(AK)) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      }
      return CompleteObject(LVal.Base, const_cast<APValue *>(&TPO->getValue()),
                            TPO->getType());
    }

    // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
    // In C++11, constexpr, non-volatile variables initialized with constant
    // expressions are constant expressions too. Inside constexpr functions,
    // parameters are constant expressions even if they're non-const.
    // In C++1y, objects local to a constant expression (those with a Frame) are
    // both readable and writable inside constant expressions.
    // In C, such things can also be folded, although they are not ICEs.
    const VarDecl *VD = dyn_cast<VarDecl>(Val: D);
    if (VD) {
      if (const VarDecl *VDef = VD->getDefinition(C&: Info.Ctx))
        VD = VDef;
    }
    if (!VD || VD->isInvalidDecl()) {
      Info.FFDiag(E);
      return CompleteObject();
    }

    bool IsConstant = BaseType.isConstant(Ctx: Info.Ctx);
    bool ConstexprVar = false;
    if (const auto *VD = dyn_cast_if_present<VarDecl>(
            Val: Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()))
      ConstexprVar = VD->isConstexpr();

    // Unless we're looking at a local variable or argument in a constexpr call,
    // the variable we're reading must be const (unless we are binding to a
    // reference).
    if (AK != clang::AK_Dereference && !Frame) {
      if (IsAccess && isa<ParmVarDecl>(Val: VD)) {
        // Access of a parameter that's not associated with a frame isn't going
        // to work out, but we can leave it to evaluateVarDeclInit to provide a
        // suitable diagnostic.
      } else if (Info.getLangOpts().CPlusPlus14 &&
                 lifetimeStartedInEvaluation(Info, Base: LVal.Base)) {
        // OK, we can read and modify an object if we're in the process of
        // evaluating its initializer, because its lifetime began in this
        // evaluation.
      } else if (isModification(AK)) {
        // All the remaining cases do not permit modification of the object.
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      } else if (VD->isConstexpr()) {
        // OK, we can read this variable.
      } else if (Info.getLangOpts().C23 && ConstexprVar) {
        // C23: reading a non-constexpr object inside a constexpr variable's
        // initializer is not allowed.
        Info.FFDiag(E);
        return CompleteObject();
      } else if (BaseType->isIntegralOrEnumerationType()) {
        if (!IsConstant) {
          if (!IsAccess)
            return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
          if (Info.getLangOpts().CPlusPlus) {
            Info.FFDiag(E, DiagId: diag::note_constexpr_ltor_non_const_int, ExtraNotes: 1) << VD;
            Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
          } else {
            Info.FFDiag(E);
          }
          return CompleteObject();
        }
      } else if (!IsAccess) {
        return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
      } else if ((IsConstant || BaseType->isReferenceType()) &&
                 Info.checkingPotentialConstantExpression() &&
                 BaseType->isLiteralType(Ctx: Info.Ctx) && !VD->hasDefinition()) {
        // This variable might end up being constexpr. Don't diagnose it yet.
      } else if (IsConstant) {
        // Keep evaluating to see what we can do. In particular, we support
        // folding of const floating-point types, in order to make static const
        // data members of such types (supported as an extension) more useful.
        if (Info.getLangOpts().CPlusPlus) {
          Info.CCEDiag(E, DiagId: Info.getLangOpts().CPlusPlus11
                          ? diag::note_constexpr_ltor_non_constexpr
                          : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
              << VD << BaseType;
          Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
        } else {
          Info.CCEDiag(E);
        }
      } else {
        // Never allow reading a non-const value.
        if (Info.getLangOpts().CPlusPlus) {
          Info.FFDiag(E, DiagId: Info.getLangOpts().CPlusPlus11
                         ? diag::note_constexpr_ltor_non_constexpr
                         : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
              << VD << BaseType;
          Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
        } else {
          Info.FFDiag(E);
        }
        return CompleteObject();
      }
    }

    // When binding to a reference, the variable does not need to be constexpr
    // or have constant initalization.
    if (AK != clang::AK_Dereference &&
        !evaluateVarDeclInit(Info, E, VD, Frame, Version: LVal.getLValueVersion(),
                             Result&: BaseVal))
      return CompleteObject();
    // If evaluateVarDeclInit sees a constexpr-unknown variable, it returns
    // a null BaseVal. Any constexpr-unknown variable seen here is an error:
    // we can't access a constexpr-unknown object.
    if (AK != clang::AK_Dereference && !BaseVal) {
      if (!Info.checkingPotentialConstantExpression()) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_access_unknown_variable, ExtraNotes: 1)
            << AK << VD;
        Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
      }
      return CompleteObject();
    }
  } else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) {
    // Object allocated during this evaluation (e.g. constexpr 'new'); a
    // missing allocation means it has already been deleted.
    std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
    if (!Alloc) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_deleted_object) << AK;
      return CompleteObject();
    }
    return CompleteObject(LVal.Base, &(*Alloc)->Value,
                          LVal.Base.getDynamicAllocType());
  }
  // When binding to a reference, the variable does not need to be
  // within its lifetime.
  else if (AK != clang::AK_Dereference) {
    const Expr *Base = LVal.Base.dyn_cast<const Expr*>();

    if (!Frame) {
      if (const MaterializeTemporaryExpr *MTE =
              dyn_cast_or_null<MaterializeTemporaryExpr>(Val: Base)) {
        assert(MTE->getStorageDuration() == SD_Static &&
               "should have a frame for a non-global materialized temporary");

        // C++20 [expr.const]p4: [DR2126]
        //   An object or reference is usable in constant expressions if it is
        //   - a temporary object of non-volatile const-qualified literal type
        //     whose lifetime is extended to that of a variable that is usable
        //     in constant expressions
        //
        // C++20 [expr.const]p5:
        //  an lvalue-to-rvalue conversion [is not allowed unless it applies to]
        //   - a non-volatile glvalue that refers to an object that is usable
        //     in constant expressions, or
        //   - a non-volatile glvalue of literal type that refers to a
        //     non-volatile object whose lifetime began within the evaluation
        //     of E;
        //
        // C++11 misses the 'began within the evaluation of e' check and
        // instead allows all temporaries, including things like:
        //   int &&r = 1;
        //   int x = ++r;
        //   constexpr int k = r;
        // Therefore we use the C++14-onwards rules in C++11 too.
        //
        // Note that temporaries whose lifetimes began while evaluating a
        // variable's constructor are not usable while evaluating the
        // corresponding destructor, not even if they're of const-qualified
        // types.
        if (!MTE->isUsableInConstantExpressions(Context: Info.Ctx) &&
            !lifetimeStartedInEvaluation(Info, Base: LVal.Base)) {
          if (!IsAccess)
            return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
          Info.FFDiag(E, DiagId: diag::note_constexpr_access_static_temporary, ExtraNotes: 1) << AK;
          Info.Note(Loc: MTE->getExprLoc(), DiagId: diag::note_constexpr_temporary_here);
          return CompleteObject();
        }

        BaseVal = MTE->getOrCreateValue(MayCreate: false);
        assert(BaseVal && "got reference to unevaluated temporary");
      } else if (const CompoundLiteralExpr *CLE =
                     dyn_cast_or_null<CompoundLiteralExpr>(Val: Base)) {
        // According to GCC info page:
        //
        // 6.28 Compound Literals
        //
        // As an optimization, G++ sometimes gives array compound literals
        // longer lifetimes: when the array either appears outside a function or
        // has a const-qualified type. If foo and its initializer had elements
        // of type char *const rather than char *, or if foo were a global
        // variable, the array would have static storage duration. But it is
        // probably safest just to avoid the use of array compound literals in
        // C++ code.
        //
        // Obey that rule by checking constness for converted array types.
        if (QualType CLETy = CLE->getType(); CLETy->isArrayType() &&
            !LValType->isArrayType() &&
            !CLETy.isConstant(Ctx: Info.Ctx)) {
          Info.FFDiag(E);
          Info.Note(Loc: CLE->getExprLoc(), DiagId: diag::note_declared_at);
          return CompleteObject();
        }

        BaseVal = &CLE->getStaticValue();
      } else {
        if (!IsAccess)
          return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
        APValue Val;
        LVal.moveInto(V&: Val);
        Info.FFDiag(E, DiagId: diag::note_constexpr_access_unreadable_object)
            << AK
            << Val.getAsString(Ctx: Info.Ctx,
                               Ty: Info.Ctx.getLValueReferenceType(T: LValType));
        NoteLValueLocation(Info, Base: LVal.Base);
        return CompleteObject();
      }
    } else if (AK != clang::AK_Dereference) {
      BaseVal = Frame->getTemporary(Key: Base, Version: LVal.Base.getVersion());
      assert(BaseVal && "missing value for temporary");
    }
  }

  // In C++14, we can't safely access any mutable state when we might be
  // evaluating after an unmodeled side effect. Parameters are modeled as state
  // in the caller, but aren't visible once the call returns, so they can be
  // modified in a speculatively-evaluated call.
  //
  // FIXME: Not all local state is mutable. Allow local constant subobjects
  // to be read here (but take care with 'mutable' fields).
  unsigned VisibleDepth = Depth;
  if (llvm::isa_and_nonnull<ParmVarDecl>(
          Val: LVal.Base.dyn_cast<const ValueDecl *>()))
    ++VisibleDepth;
  if ((Frame && Info.getLangOpts().CPlusPlus14 &&
       Info.EvalStatus.HasSideEffects) ||
      (isModification(AK) && VisibleDepth < Info.SpeculativeEvaluationDepth))
    return CompleteObject();

  return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType);
}
4856
4857/// Perform an lvalue-to-rvalue conversion on the given glvalue. This
4858/// can also be used for 'lvalue-to-lvalue' conversions for looking up the
4859/// glvalue referred to by an entity of reference type.
4860///
4861/// \param Info - Information about the ongoing evaluation.
4862/// \param Conv - The expression for which we are performing the conversion.
4863/// Used for diagnostics.
4864/// \param Type - The type of the glvalue (before stripping cv-qualifiers in the
4865/// case of a non-class type).
4866/// \param LVal - The glvalue on which we are attempting to perform this action.
4867/// \param RVal - The produced value will be placed here.
4868/// \param WantObjectRepresentation - If true, we're looking for the object
4869/// representation rather than the value, and in particular,
4870/// there is no requirement that the result be fully initialized.
4871static bool
4872handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
4873 const LValue &LVal, APValue &RVal,
4874 bool WantObjectRepresentation = false) {
4875 if (LVal.Designator.Invalid)
4876 return false;
4877
4878 // Check for special cases where there is no existing APValue to look at.
4879 const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
4880
4881 AccessKinds AK =
4882 WantObjectRepresentation ? AK_ReadObjectRepresentation : AK_Read;
4883
4884 if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) {
4885 if (isa<StringLiteral>(Val: Base) || isa<PredefinedExpr>(Val: Base)) {
4886 // Special-case character extraction so we don't have to construct an
4887 // APValue for the whole string.
4888 assert(LVal.Designator.Entries.size() <= 1 &&
4889 "Can only read characters from string literals");
4890 if (LVal.Designator.Entries.empty()) {
4891 // Fail for now for LValue to RValue conversion of an array.
4892 // (This shouldn't show up in C/C++, but it could be triggered by a
4893 // weird EvaluateAsRValue call from a tool.)
4894 Info.FFDiag(E: Conv);
4895 return false;
4896 }
4897 if (LVal.Designator.isOnePastTheEnd()) {
4898 if (Info.getLangOpts().CPlusPlus11)
4899 Info.FFDiag(E: Conv, DiagId: diag::note_constexpr_access_past_end) << AK;
4900 else
4901 Info.FFDiag(E: Conv);
4902 return false;
4903 }
4904 uint64_t CharIndex = LVal.Designator.Entries[0].getAsArrayIndex();
4905 RVal = APValue(extractStringLiteralCharacter(Info, Lit: Base, Index: CharIndex));
4906 return true;
4907 }
4908 }
4909
4910 CompleteObject Obj = findCompleteObject(Info, E: Conv, AK, LVal, LValType: Type);
4911 return Obj && extractSubobject(Info, E: Conv, Obj, Sub: LVal.Designator, Result&: RVal, AK);
4912}
4913
4914static bool hlslElementwiseCastHelper(EvalInfo &Info, const Expr *E,
4915 QualType DestTy,
4916 SmallVectorImpl<APValue> &SrcVals,
4917 SmallVectorImpl<QualType> &SrcTypes) {
4918 APValue Val;
4919 if (!Evaluate(Result&: Val, Info, E))
4920 return false;
4921
4922 // must be dealing with a record
4923 if (Val.isLValue()) {
4924 LValue LVal;
4925 LVal.setFrom(Ctx: Info.Ctx, V: Val);
4926 if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal, RVal&: Val))
4927 return false;
4928 }
4929
4930 unsigned NEls = elementwiseSize(Info, BaseTy: DestTy);
4931 // flatten the source
4932 if (!flattenAPValue(Info, E, Value: Val, BaseTy: E->getType(), Elements&: SrcVals, Types&: SrcTypes, Size: NEls))
4933 return false;
4934
4935 return true;
4936}
4937
4938/// Perform an assignment of Val to LVal. Takes ownership of Val.
4939static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
4940 QualType LValType, APValue &Val) {
4941 if (LVal.Designator.Invalid)
4942 return false;
4943
4944 if (!Info.getLangOpts().CPlusPlus14) {
4945 Info.FFDiag(E);
4946 return false;
4947 }
4948
4949 CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Assign, LVal, LValType);
4950 return Obj && modifySubobject(Info, E, Obj, Sub: LVal.Designator, NewVal&: Val);
4951}
4952
namespace {
/// Handler which applies a compound assignment (such as += or *=) to the
/// designated subobject, using an already-evaluated right-hand operand.
struct CompoundAssignSubobjectHandler {
  EvalInfo &Info;
  const CompoundAssignOperator *E;
  // The type the LHS is converted to before the operation is performed.
  QualType PromotedLHSType;
  BinaryOperatorKind Opcode;
  // The evaluated right-hand operand.
  const APValue &RHS;

  static const AccessKinds AccessKind = AK_Assign;

  typedef bool result_type;

  bool checkConst(QualType QT) {
    // Assigning to a const object has undefined behavior.
    if (QT.isConstQualified()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
      return false;
    }
    return true;
  }

  bool failed() { return false; }

  // Dispatch on the stored kind of the subobject's value.
  bool found(APValue &Subobj, QualType SubobjType) {
    switch (Subobj.getKind()) {
    case APValue::Int:
      return found(Value&: Subobj.getInt(), SubobjType);
    case APValue::Float:
      return found(Value&: Subobj.getFloat(), SubobjType);
    case APValue::ComplexInt:
    case APValue::ComplexFloat:
      // FIXME: Implement complex compound assignment.
      Info.FFDiag(E);
      return false;
    case APValue::LValue:
      return foundPointer(Subobj, SubobjType);
    case APValue::Vector:
      return foundVector(Value&: Subobj, SubobjType);
    case APValue::Indeterminate:
      // Compound assignment reads the LHS first; reading an indeterminate
      // value is not allowed.
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit)
          << /*read of=*/0 << /*uninitialized object=*/1
          << E->getLHS()->getSourceRange();
      return false;
    default:
      // FIXME: can this happen?
      Info.FFDiag(E);
      return false;
    }
  }

  // Vector op= vector, performed elementwise.
  bool foundVector(APValue &Value, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    if (!SubobjType->isVectorType()) {
      Info.FFDiag(E);
      return false;
    }
    return handleVectorVectorBinOp(Info, E, Opcode, LHSValue&: Value, RHSValue: RHS);
  }

  // Integer LHS: promote, operate, then convert back to the subobject type.
  bool found(APSInt &Value, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    if (!SubobjType->isIntegerType()) {
      // We don't support compound assignment on integer-cast-to-pointer
      // values.
      Info.FFDiag(E);
      return false;
    }

    if (RHS.isInt()) {
      APSInt LHS =
          HandleIntToIntCast(Info, E, DestType: PromotedLHSType, SrcType: SubobjType, Value);
      if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS: RHS.getInt(), Result&: LHS))
        return false;
      Value = HandleIntToIntCast(Info, E, DestType: SubobjType, SrcType: PromotedLHSType, Value: LHS);
      return true;
    } else if (RHS.isFloat()) {
      // Mixed int/float: do the arithmetic in the promoted (floating) type,
      // then convert the result back to the integer subobject type.
      const FPOptions FPO = E->getFPFeaturesInEffect(
          LO: Info.Ctx.getLangOpts());
      APFloat FValue(0.0);
      return HandleIntToFloatCast(Info, E, FPO, SrcType: SubobjType, Value,
                                  DestType: PromotedLHSType, Result&: FValue) &&
             handleFloatFloatBinOp(Info, E, LHS&: FValue, Opcode, RHS: RHS.getFloat()) &&
             HandleFloatToIntCast(Info, E, SrcType: PromotedLHSType, Value: FValue, DestType: SubobjType,
                                  Result&: Value);
    }

    Info.FFDiag(E);
    return false;
  }

  // Floating-point LHS: promote, operate, convert back.
  bool found(APFloat &Value, QualType SubobjType) {
    return checkConst(QT: SubobjType) &&
           HandleFloatToFloatCast(Info, E, SrcType: SubobjType, DestType: PromotedLHSType,
                                  Result&: Value) &&
           handleFloatFloatBinOp(Info, E, LHS&: Value, Opcode, RHS: RHS.getFloat()) &&
           HandleFloatToFloatCast(Info, E, SrcType: PromotedLHSType, DestType: SubobjType, Result&: Value);
  }

  // Pointer LHS: only ptr += int / ptr -= int are meaningful here.
  bool foundPointer(APValue &Subobj, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    QualType PointeeType;
    if (const PointerType *PT = SubobjType->getAs<PointerType>())
      PointeeType = PT->getPointeeType();

    if (PointeeType.isNull() || !RHS.isInt() ||
        (Opcode != BO_Add && Opcode != BO_Sub)) {
      Info.FFDiag(E);
      return false;
    }

    APSInt Offset = RHS.getInt();
    if (Opcode == BO_Sub)
      negateAsSigned(Int&: Offset);

    LValue LVal;
    LVal.setFrom(Ctx: Info.Ctx, V: Subobj);
    if (!HandleLValueArrayAdjustment(Info, E, LVal, EltTy: PointeeType, Adjustment: Offset))
      return false;
    LVal.moveInto(V&: Subobj);
    return true;
  }
};
} // end anonymous namespace
5079
// Out-of-line definition of the handler's static AccessKind member so that
// ODR-uses of it link (pre-C++17 style; the value lives in the class).
const AccessKinds CompoundAssignSubobjectHandler::AccessKind;
5081
/// Perform a compound assignment of LVal <op>= RVal.
///
/// \param LVal - The object being assigned to.
/// \param LValType - The declared type of the left-hand side.
/// \param PromotedLValType - The type the LHS is converted to for the
///        arithmetic (the compound assignment's computation type).
/// \param Opcode - The underlying binary operation (e.g. BO_Add for '+=').
/// \param RVal - The already-evaluated right-hand side value.
static bool handleCompoundAssignment(EvalInfo &Info,
                                     const CompoundAssignOperator *E,
                                     const LValue &LVal, QualType LValType,
                                     QualType PromotedLValType,
                                     BinaryOperatorKind Opcode,
                                     const APValue &RVal) {
  if (LVal.Designator.Invalid)
    return false;

  // Mutation during constant evaluation is only permitted under the C++14
  // (and later) constexpr rules.
  if (!Info.getLangOpts().CPlusPlus14) {
    Info.FFDiag(E);
    return false;
  }

  // Locate the complete object, then delegate the per-subobject update to
  // CompoundAssignSubobjectHandler via findSubobject.
  CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Assign, LVal, LValType);
  CompoundAssignSubobjectHandler Handler = { .Info: Info, .E: E, .PromotedLHSType: PromotedLValType, .Opcode: Opcode,
                                             .RHS: RVal };
  return Obj && findSubobject(Info, E, Obj, Sub: LVal.Designator, handler&: Handler);
}
5102
namespace {
/// Subobject handler passed to findSubobject to perform a pre/post increment
/// or decrement (selected by AccessKind) on the designated subobject.
struct IncDecSubobjectHandler {
  EvalInfo &Info;
  /// The increment/decrement expression being evaluated.
  const UnaryOperator *E;
  /// AK_Increment or AK_Decrement.
  AccessKinds AccessKind;
  /// If non-null, receives the value before modification (for the postfix
  /// forms); null for the prefix forms.
  APValue *Old;

  typedef bool result_type;

  bool checkConst(QualType QT) {
    // Assigning to a const object has undefined behavior.
    if (QT.isConstQualified()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
      return false;
    }
    return true;
  }

  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    // Stash the old value. Also clear Old, so we don't clobber it later
    // if we're post-incrementing a complex.
    if (Old) {
      *Old = Subobj;
      Old = nullptr;
    }

    // Dispatch on the representation of the subobject; complex values are
    // incremented/decremented in their real component only.
    switch (Subobj.getKind()) {
    case APValue::Int:
      return found(Value&: Subobj.getInt(), SubobjType);
    case APValue::Float:
      return found(Value&: Subobj.getFloat(), SubobjType);
    case APValue::ComplexInt:
      return found(Value&: Subobj.getComplexIntReal(),
                   SubobjType: SubobjType->castAs<ComplexType>()->getElementType()
                       .withCVRQualifiers(CVR: SubobjType.getCVRQualifiers()));
    case APValue::ComplexFloat:
      return found(Value&: Subobj.getComplexFloatReal(),
                   SubobjType: SubobjType->castAs<ComplexType>()->getElementType()
                       .withCVRQualifiers(CVR: SubobjType.getCVRQualifiers()));
    case APValue::LValue:
      return foundPointer(Subobj, SubobjType);
    default:
      // FIXME: can this happen?
      Info.FFDiag(E);
      return false;
    }
  }
  bool found(APSInt &Value, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    if (!SubobjType->isIntegerType()) {
      // We don't support increment / decrement on integer-cast-to-pointer
      // values.
      Info.FFDiag(E);
      return false;
    }

    if (Old) *Old = APValue(Value);

    // bool arithmetic promotes to int, and the conversion back to bool
    // doesn't reduce mod 2^n, so special-case it.
    if (SubobjType->isBooleanType()) {
      if (AccessKind == AK_Increment)
        Value = 1;
      else
        Value = !Value;
      return true;
    }

    bool WasNegative = Value.isNegative();
    if (AccessKind == AK_Increment) {
      ++Value;

      // Sign flip from non-negative to negative on ++ means signed overflow
      // (unless the type wraps); report the mathematical result by
      // reinterpreting the wrapped bit pattern as unsigned.
      if (!WasNegative && Value.isNegative() && E->canOverflow() &&
          !SubobjType.isWrapType()) {
        APSInt ActualValue(Value, /*IsUnsigned*/true);
        return HandleOverflow(Info, E, SrcValue: ActualValue, DestType: SubobjType);
      }
    } else {
      --Value;

      // Sign flip from negative to non-negative on -- means we wrapped past
      // the minimum; widen by one bit and set the sign bit to reconstruct
      // the mathematical (out-of-range negative) result for the diagnostic.
      if (WasNegative && !Value.isNegative() && E->canOverflow() &&
          !SubobjType.isWrapType()) {
        unsigned BitWidth = Value.getBitWidth();
        APSInt ActualValue(Value.sext(width: BitWidth + 1), /*IsUnsigned*/false);
        ActualValue.setBit(BitWidth);
        return HandleOverflow(Info, E, SrcValue: ActualValue, DestType: SubobjType);
      }
    }
    return true;
  }
  bool found(APFloat &Value, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    if (Old) *Old = APValue(Value);

    // Floating increment/decrement is an add/subtract of 1.0 under the
    // currently active rounding mode.
    APFloat One(Value.getSemantics(), 1);
    llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
    APFloat::opStatus St;
    if (AccessKind == AK_Increment)
      St = Value.add(RHS: One, RM);
    else
      St = Value.subtract(RHS: One, RM);
    return checkFloatingPointResult(Info, E, St);
  }
  bool foundPointer(APValue &Subobj, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    QualType PointeeType;
    if (const PointerType *PT = SubobjType->getAs<PointerType>())
      PointeeType = PT->getPointeeType();
    else {
      Info.FFDiag(E);
      return false;
    }

    // Pointer ++/-- is an array adjustment by +/-1 element.
    LValue LVal;
    LVal.setFrom(Ctx: Info.Ctx, V: Subobj);
    if (!HandleLValueArrayAdjustment(Info, E, LVal, EltTy: PointeeType,
                                     Adjustment: AccessKind == AK_Increment ? 1 : -1))
      return false;
    LVal.moveInto(V&: Subobj);
    return true;
  }
};
} // end anonymous namespace
5233
/// Perform an increment or decrement on LVal.
///
/// \param IsIncrement - True for '++', false for '--'.
/// \param Old - If non-null, receives the pre-modification value (used by the
///        postfix forms); may be null for the prefix forms.
static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
                         QualType LValType, bool IsIncrement, APValue *Old) {
  if (LVal.Designator.Invalid)
    return false;

  // Mutation during constant evaluation requires the C++14 constexpr rules.
  if (!Info.getLangOpts().CPlusPlus14) {
    Info.FFDiag(E);
    return false;
  }

  AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
  CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
  IncDecSubobjectHandler Handler = {.Info: Info, .E: cast<UnaryOperator>(Val: E), .AccessKind: AK, .Old: Old};
  return Obj && findSubobject(Info, E, Obj, Sub: LVal.Designator, handler&: Handler);
}
5250
/// Build an lvalue for the object argument of a member function call.
///
/// \param Object - The expression denoting the object (or pointer to it).
/// \param This - Out-parameter receiving the lvalue for the object.
static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
                                   LValue &This) {
  // A pointer prvalue: the object is whatever the pointer points to.
  if (Object->getType()->isPointerType() && Object->isPRValue())
    return EvaluatePointer(E: Object, Result&: This, Info);

  // A glvalue already designates the object directly.
  if (Object->isGLValue())
    return EvaluateLValue(E: Object, Result&: This, Info);

  // A prvalue of literal type: materialize a temporary to call on.
  if (Object->getType()->isLiteralType(Ctx: Info.Ctx))
    return EvaluateTemporary(E: Object, Result&: This, Info);

  // A record prvalue (even of non-literal type) also gets a temporary.
  if (Object->getType()->isRecordType() && Object->isPRValue())
    return EvaluateTemporary(E: Object, Result&: This, Info);

  Info.FFDiag(E: Object, DiagId: diag::note_constexpr_nonliteral) << Object->getType();
  return false;
}
5269
/// HandleMemberPointerAccess - Evaluate a member access operation and build an
/// lvalue referring to the result.
///
/// \param Info - Information about the ongoing evaluation.
/// \param LVType - The type of the expression the member pointer is applied
///        to (a class type, or a pointer to one).
/// \param LV - An lvalue referring to the base of the member pointer.
/// \param RHS - The member pointer expression.
/// \param IncludeMember - Specifies whether the member itself is included in
///        the resulting LValue subobject designator. This is not possible when
///        creating a bound member function.
/// \return The field or method declaration to which the member pointer refers,
///         or 0 if evaluation fails.
static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
                                                  QualType LVType,
                                                  LValue &LV,
                                                  const Expr *RHS,
                                                  bool IncludeMember = true) {
  MemberPtr MemPtr;
  if (!EvaluateMemberPointer(E: RHS, Result&: MemPtr, Info))
    return nullptr;

  // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
  // member value, the behavior is undefined.
  if (!MemPtr.getDecl()) {
    // FIXME: Specific diagnostic.
    Info.FFDiag(E: RHS);
    return nullptr;
  }

  if (MemPtr.isDerivedMember()) {
    // This is a member of some derived class. Truncate LV appropriately.
    // The end of the derived-to-base path for the base object must match the
    // derived-to-base path for the member pointer.
    // C++23 [expr.mptr.oper]p4:
    //   If the result of E1 is an object [...] whose most derived object does
    //   not contain the member to which E2 refers, the behavior is undefined.
    if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
        LV.Designator.Entries.size()) {
      Info.FFDiag(E: RHS);
      return nullptr;
    }
    // Verify that the trailing entries of LV's designator name the same base
    // classes as the member pointer's derived-to-base path, innermost first.
    unsigned PathLengthToMember =
        LV.Designator.Entries.size() - MemPtr.Path.size();
    for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
      const CXXRecordDecl *LVDecl = getAsBaseClass(
          E: LV.Designator.Entries[PathLengthToMember + I]);
      const CXXRecordDecl *MPDecl = MemPtr.Path[I];
      if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) {
        Info.FFDiag(E: RHS);
        return nullptr;
      }
    }
    // MemPtr.Path only contains the base classes of the class directly
    // containing the member E2. It is still necessary to check that the class
    // directly containing the member E2 lies on the derived-to-base path of E1
    // to avoid incorrectly permitting member pointer access into a sibling
    // class of the class containing the member E2. If this class would
    // correspond to the most-derived class of E1, it either isn't contained in
    // LV.Designator.Entries or the corresponding entry refers to an array
    // element instead. Therefore get the most derived class directly in this
    // case. Otherwise the previous entry should correspond to this class.
    const CXXRecordDecl *LastLVDecl =
        (PathLengthToMember > LV.Designator.MostDerivedPathLength)
            ? getAsBaseClass(E: LV.Designator.Entries[PathLengthToMember - 1])
            : LV.Designator.MostDerivedType->getAsCXXRecordDecl();
    const CXXRecordDecl *LastMPDecl = MemPtr.getContainingRecord();
    if (LastLVDecl->getCanonicalDecl() != LastMPDecl->getCanonicalDecl()) {
      Info.FFDiag(E: RHS);
      return nullptr;
    }

    // Truncate the lvalue to the appropriate derived class.
    if (!CastToDerivedClass(Info, E: RHS, Result&: LV, TruncatedType: MemPtr.getContainingRecord(),
                            TruncatedElements: PathLengthToMember))
      return nullptr;
  } else if (!MemPtr.Path.empty()) {
    // Extend the LValue path with the member pointer's path.
    LV.Designator.Entries.reserve(N: LV.Designator.Entries.size() +
                                  MemPtr.Path.size() + IncludeMember);

    // Walk down to the appropriate base class.
    if (const PointerType *PT = LVType->getAs<PointerType>())
      LVType = PT->getPointeeType();
    const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
    assert(RD && "member pointer access on non-class-type expression");
    // The first class in the path is that of the lvalue.
    for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
      const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
      if (!HandleLValueDirectBase(Info, E: RHS, Obj&: LV, Derived: RD, Base))
        return nullptr;
      RD = Base;
    }
    // Finally cast to the class containing the member.
    if (!HandleLValueDirectBase(Info, E: RHS, Obj&: LV, Derived: RD,
                                Base: MemPtr.getContainingRecord()))
      return nullptr;
  }

  // Add the member. Note that we cannot build bound member functions here.
  if (IncludeMember) {
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: MemPtr.getDecl())) {
      if (!HandleLValueMember(Info, E: RHS, LVal&: LV, FD))
        return nullptr;
    } else if (const IndirectFieldDecl *IFD =
                 dyn_cast<IndirectFieldDecl>(Val: MemPtr.getDecl())) {
      if (!HandleLValueIndirectMember(Info, E: RHS, LVal&: LV, IFD))
        return nullptr;
    } else {
      // Only fields and indirect fields can appear in the designator;
      // methods are handled by the IncludeMember == false path.
      llvm_unreachable("can't construct reference to bound member function");
    }
  }

  return MemPtr.getDecl();
}
5383
/// Overload of HandleMemberPointerAccess for a '.*' or '->*' operator:
/// evaluates the object argument (LHS), then delegates to the main overload
/// with the member pointer expression (RHS).
static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
                                                  const BinaryOperator *BO,
                                                  LValue &LV,
                                                  bool IncludeMember = true) {
  assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);

  if (!EvaluateObjectArgument(Info, Object: BO->getLHS(), This&: LV)) {
    // Even though the LHS failed, evaluate the RHS too when we're collecting
    // further diagnostics, so its notes are produced as well.
    if (Info.noteFailure()) {
      MemberPtr MemPtr;
      EvaluateMemberPointer(E: BO->getRHS(), Result&: MemPtr, Info);
    }
    return nullptr;
  }

  return HandleMemberPointerAccess(Info, LVType: BO->getLHS()->getType(), LV,
                                   RHS: BO->getRHS(), IncludeMember);
}
5401
/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
/// the provided lvalue, which currently refers to the base object.
static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
                                    LValue &Result) {
  SubobjectDesignator &D = Result.Designator;
  if (D.Invalid || !Result.checkNullPointer(Info, E, CSK: CSK_Derived))
    return false;

  // Look through a pointer type to the class type being cast to.
  QualType TargetQT = E->getType();
  if (const PointerType *PT = TargetQT->getAs<PointerType>())
    TargetQT = PT->getPointeeType();

  // Diagnose (unless we're only checking a potential constant expression on
  // a constexpr-unknown value) and fail.
  auto InvalidCast = [&]() {
    if (!Info.checkingPotentialConstantExpression() ||
        !Result.AllowConstexprUnknown) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_downcast)
          << D.MostDerivedType << TargetQT;
    }
    return false;
  };

  // Check this cast lands within the final derived-to-base subobject path.
  if (D.MostDerivedPathLength + E->path_size() > D.Entries.size())
    return InvalidCast();

  // Check the type of the final cast. We don't need to check the path,
  // since a cast can only be formed if the path is unique.
  unsigned NewEntriesSize = D.Entries.size() - E->path_size();
  const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
  const CXXRecordDecl *FinalType;
  if (NewEntriesSize == D.MostDerivedPathLength)
    FinalType = D.MostDerivedType->getAsCXXRecordDecl();
  else
    FinalType = getAsBaseClass(E: D.Entries[NewEntriesSize - 1]);
  if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl())
    return InvalidCast();

  // Truncate the lvalue to the appropriate derived class.
  return CastToDerivedClass(Info, E, Result, TruncatedType: TargetType, TruncatedElements: NewEntriesSize);
}
5442
/// Get the value to use for a default-initialized object of type T.
/// Return false if it encounters something invalid.
///
/// Recurses through classes and arrays; anything else becomes an
/// indeterminate value.
static bool handleDefaultInitValue(QualType T, APValue &Result) {
  bool Success = true;

  // If there is already a value present don't overwrite it.
  if (!Result.isAbsent())
    return true;

  if (auto *RD = T->getAsCXXRecordDecl()) {
    if (RD->isInvalidDecl()) {
      Result = APValue();
      return false;
    }
    // A default-initialized union has no active member.
    if (RD->isUnion()) {
      Result = APValue((const FieldDecl *)nullptr);
      return true;
    }
    Result =
        APValue(APValue::UninitStruct(), RD->getNumBases(), RD->getNumFields());

    // Recurse into each base class subobject...
    unsigned Index = 0;
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
                                                  End = RD->bases_end();
         I != End; ++I, ++Index)
      Success &=
          handleDefaultInitValue(T: I->getType(), Result&: Result.getStructBase(i: Index));

    // ...and each field (unnamed bit-fields are not members and get no value).
    for (const auto *I : RD->fields()) {
      if (I->isUnnamedBitField())
        continue;
      Success &= handleDefaultInitValue(
          T: I->getType(), Result&: Result.getStructField(i: I->getFieldIndex()));
    }
    return Success;
  }

  if (auto *AT =
          dyn_cast_or_null<ConstantArrayType>(Val: T->getAsArrayTypeUnsafe())) {
    // Represent the array with zero explicit elements and a filler that
    // covers every element.
    Result = APValue(APValue::UninitArray(), 0, AT->getZExtSize());
    if (Result.hasArrayFiller())
      Success &=
          handleDefaultInitValue(T: AT->getElementType(), Result&: Result.getArrayFiller());

    return Success;
  }

  // Scalars (and anything else) are left indeterminate by default-init.
  Result = APValue::IndeterminateValue();
  return true;
}
5493
namespace {
/// The outcome of evaluating a single statement; drives control flow in
/// EvaluateStmt and the loop/switch evaluation helpers.
enum EvalStmtResult {
  /// Evaluation failed.
  ESR_Failed,
  /// Hit a 'return' statement.
  ESR_Returned,
  /// Evaluation succeeded.
  ESR_Succeeded,
  /// Hit a 'continue' statement.
  ESR_Continue,
  /// Hit a 'break' statement.
  ESR_Break,
  /// Still scanning for 'case' or 'default' statement.
  ESR_CaseNotFound
};
}
/// Evaluates the initializer of a reference.
///
/// \param D - The declaration of reference type being initialized.
/// \param Init - The glvalue initializer expression.
/// \param Result - Receives the lvalue the reference binds to.
/// \param Val - Receives the same result as an APValue.
static bool EvaluateInitForDeclOfReferenceType(EvalInfo &Info,
                                               const ValueDecl *D,
                                               const Expr *Init, LValue &Result,
                                               APValue &Val) {
  assert(Init->isGLValue() && D->getType()->isReferenceType());
  // A reference is an lvalue.
  if (!EvaluateLValue(E: Init, Result, Info))
    return false;
  // [C++26][decl.ref]
  // The object designated by such a glvalue can be outside its lifetime
  // Because a null pointer value or a pointer past the end of an object
  // does not point to an object, a reference in a well-defined program cannot
  // refer to such things;
  if (!Result.Designator.Invalid && Result.Designator.isOnePastTheEnd()) {
    Info.FFDiag(E: Init, DiagId: diag::note_constexpr_access_past_end) << AK_Dereference;
    return false;
  }

  // Save the result.
  Result.moveInto(V&: Val);
  return true;
}
5533
/// Evaluate the initialization of a local variable declaration, creating a
/// block-scope temporary for it in the current call frame.
static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
  if (VD->isInvalidDecl())
    return false;
  // We don't need to evaluate the initializer for a static local.
  if (!VD->hasLocalStorage())
    return true;

  LValue Result;
  APValue &Val = Info.CurrentCall->createTemporary(Key: VD, T: VD->getType(),
                                                   Scope: ScopeKind::Block, LV&: Result);

  const Expr *InitE = VD->getInit();
  if (!InitE) {
    // No initializer: a dependent type is treated as an (unevaluated)
    // side-effect; otherwise default-initialize the variable.
    if (VD->getType()->isDependentType())
      return Info.noteSideEffect();
    return handleDefaultInitValue(T: VD->getType(), Result&: Val);
  }
  if (InitE->isValueDependent())
    return false;

  // For references to objects, check they do not designate a one-past-the-end
  // object.
  if (VD->getType()->isReferenceType()) {
    return EvaluateInitForDeclOfReferenceType(Info, D: VD, Init: InitE, Result, Val);
  } else if (!EvaluateInPlace(Result&: Val, Info, This: Result, E: InitE)) {
    // Wipe out any partially-computed value, to allow tracking that this
    // evaluation failed.
    Val = APValue();
    return false;
  }

  return true;
}
5567
5568static bool EvaluateDecompositionDeclInit(EvalInfo &Info,
5569 const DecompositionDecl *DD);
5570
/// Evaluate a declaration encountered during statement evaluation.
///
/// \param EvaluateConditionDecl - When true, also evaluate the bindings of a
///        decomposition declaration immediately (used for condition
///        variables, whose bindings are otherwise deferred).
static bool EvaluateDecl(EvalInfo &Info, const Decl *D,
                         bool EvaluateConditionDecl = false) {
  bool OK = true;
  if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
    OK &= EvaluateVarDecl(Info, VD);

  if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(Val: D);
      EvaluateConditionDecl && DD)
    OK &= EvaluateDecompositionDeclInit(Info, DD);

  return OK;
}
5583
/// Evaluate the holding variables of a decomposition declaration's bindings
/// (only bindings backed by a holding variable need evaluation).
static bool EvaluateDecompositionDeclInit(EvalInfo &Info,
                                          const DecompositionDecl *DD) {
  bool OK = true;
  for (auto *BD : DD->flat_bindings())
    if (auto *VD = BD->getHoldingVar())
      OK &= EvaluateDecl(Info, D: VD, /*EvaluateConditionDecl=*/true);

  return OK;
}
5593
5594static bool MaybeEvaluateDeferredVarDeclInit(EvalInfo &Info,
5595 const VarDecl *VD) {
5596 if (auto *DD = dyn_cast_if_present<DecompositionDecl>(Val: VD)) {
5597 if (!EvaluateDecompositionDeclInit(Info, DD))
5598 return false;
5599 }
5600 return true;
5601}
5602
5603static bool EvaluateDependentExpr(const Expr *E, EvalInfo &Info) {
5604 assert(E->isValueDependent());
5605 if (Info.noteSideEffect())
5606 return true;
5607 assert(E->containsErrors() && "valid value-dependent expression should never "
5608 "reach invalid code path.");
5609 return false;
5610}
5611
/// Evaluate a condition (either a variable declaration or an expression).
///
/// \param CondDecl - The condition variable declaration, if any.
/// \param Cond - The condition expression itself.
/// \param Result - Receives the boolean value of the condition.
static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
                         const Expr *Cond, bool &Result) {
  if (Cond->isValueDependent())
    return false;
  // The condition (including its variable's initialization and any deferred
  // decomposition bindings) is a single full-expression.
  FullExpressionRAII Scope(Info);
  if (CondDecl && !EvaluateDecl(Info, D: CondDecl))
    return false;
  if (!EvaluateAsBooleanCondition(E: Cond, Result, Info))
    return false;
  if (!MaybeEvaluateDeferredVarDeclInit(Info, VD: CondDecl))
    return false;
  return Scope.destroy();
}
5626
namespace {
/// A location where the result (returned value) of evaluating a
/// statement should be stored.
struct StmtResult {
  /// The APValue that should be filled in with the returned value.
  APValue &Value;
  /// The location containing the result, if any (used to support RVO).
  const LValue *Slot;
};

/// RAII object that pushes a new temporary version onto the current call
/// frame on construction and pops it on destruction.
struct TempVersionRAII {
  CallStackFrame &Frame;

  TempVersionRAII(CallStackFrame &Frame) : Frame(Frame) {
    Frame.pushTempVersion();
  }

  ~TempVersionRAII() {
    Frame.popTempVersion();
  }
};

}
5650
5651static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
5652 const Stmt *S,
5653 const SwitchCase *SC = nullptr);
5654
/// Helper to implement named break/continue. Returns 'true' if the evaluation
/// result should be propagated up. Otherwise, it sets the evaluation result
/// to either Continue to continue the current loop, or Succeeded to break it.
///
/// \param LoopOrSwitch - The loop or switch statement we just finished
///        evaluating the body of.
/// \param Scopes - Block scopes that must be destroyed before the result is
///        propagated to an outer statement.
static bool ShouldPropagateBreakContinue(EvalInfo &Info,
                                         const Stmt *LoopOrSwitch,
                                         ArrayRef<BlockScopeRAII *> Scopes,
                                         EvalStmtResult &ESR) {
  bool IsSwitch = isa<SwitchStmt>(Val: LoopOrSwitch);

  // For loops, map Succeeded to Continue so we don't have to check for both.
  if (!IsSwitch && ESR == ESR_Succeeded) {
    ESR = ESR_Continue;
    return false;
  }

  if (ESR != ESR_Break && ESR != ESR_Continue)
    return false;

  // Are we breaking out of or continuing this statement? A 'continue' can
  // never target a switch, only a 'break' can.
  bool CanBreakOrContinue = !IsSwitch || ESR == ESR_Break;
  // NOTE(review): a null stack entry appears to represent an unlabeled
  // break/continue, which targets the nearest enclosing statement — confirm.
  const Stmt *StackTop = Info.BreakContinueStack.back();
  if (CanBreakOrContinue && (StackTop == nullptr || StackTop == LoopOrSwitch)) {
    Info.BreakContinueStack.pop_back();
    if (ESR == ESR_Break)
      ESR = ESR_Succeeded;
    return false;
  }

  // We're not. Propagate the result up, destroying any scopes we'd be
  // jumping out of (a failed destruction fails the whole evaluation).
  for (BlockScopeRAII *S : Scopes) {
    if (!S->destroy()) {
      ESR = ESR_Failed;
      break;
    }
  }
  return true;
}
5692
/// Evaluate the body of a loop, and translate the result as appropriate.
///
/// The body runs in its own block scope; the scope is destroyed on every
/// path except failure or a still-unfound 'case' label.
static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info,
                                       const Stmt *Body,
                                       const SwitchCase *Case = nullptr) {
  BlockScopeRAII Scope(Info);

  EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Body, SC: Case);
  if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy())
    ESR = ESR_Failed;

  return ESR;
}
5705
/// Evaluate a switch statement.
static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
                                     const SwitchStmt *SS) {
  BlockScopeRAII Scope(Info);

  // Evaluate the switch condition.
  APSInt Value;
  {
    // Evaluate the init-statement (C++17 'switch (init; cond)'), if any,
    // inside the switch's scope.
    if (const Stmt *Init = SS->getInit()) {
      EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init);
      if (ESR != ESR_Succeeded) {
        if (ESR != ESR_Failed && !Scope.destroy())
          ESR = ESR_Failed;
        return ESR;
      }
    }

    FullExpressionRAII CondScope(Info);
    if (SS->getConditionVariable() &&
        !EvaluateDecl(Info, D: SS->getConditionVariable()))
      return ESR_Failed;
    if (SS->getCond()->isValueDependent()) {
      // We don't know what the value is, and which branch should jump to.
      EvaluateDependentExpr(E: SS->getCond(), Info);
      return ESR_Failed;
    }
    if (!EvaluateInteger(E: SS->getCond(), Result&: Value, Info))
      return ESR_Failed;

    // A decomposition condition variable's bindings are initialized only
    // after the condition itself has been evaluated.
    if (!MaybeEvaluateDeferredVarDeclInit(Info, VD: SS->getConditionVariable()))
      return ESR_Failed;

    if (!CondScope.destroy())
      return ESR_Failed;
  }

  // Find the switch case corresponding to the value of the condition.
  // FIXME: Cache this lookup.
  const SwitchCase *Found = nullptr;
  for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    if (isa<DefaultStmt>(Val: SC)) {
      // Remember 'default:' but keep scanning for an exact match.
      Found = SC;
      continue;
    }

    const CaseStmt *CS = cast<CaseStmt>(Val: SC);
    const Expr *LHS = CS->getLHS();
    const Expr *RHS = CS->getRHS();
    if (LHS->isValueDependent() || (RHS && RHS->isValueDependent()))
      return ESR_Failed;
    // An RHS is only present for a GNU case range ('case lo ... hi:'); a
    // plain case label is a degenerate one-value range.
    APSInt LHSValue = LHS->EvaluateKnownConstInt(Ctx: Info.Ctx);
    APSInt RHSValue = RHS ? RHS->EvaluateKnownConstInt(Ctx: Info.Ctx) : LHSValue;
    if (LHSValue <= Value && Value <= RHSValue) {
      Found = SC;
      break;
    }
  }

  // No matching case and no default: the switch body is skipped entirely.
  if (!Found)
    return Scope.destroy() ? ESR_Succeeded : ESR_Failed;

  // Search the switch body for the switch case and evaluate it from there.
  EvalStmtResult ESR = EvaluateStmt(Result, Info, S: SS->getBody(), SC: Found);
  if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy())
    return ESR_Failed;
  if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: SS, /*Scopes=*/{}, ESR))
    return ESR;

  switch (ESR) {
  case ESR_Break:
    llvm_unreachable("Should have been converted to Succeeded");
  case ESR_Succeeded:
  case ESR_Continue:
  case ESR_Failed:
  case ESR_Returned:
    return ESR;
  case ESR_CaseNotFound:
    // This can only happen if the switch case is nested within a statement
    // expression. We have no intention of supporting that.
    Info.FFDiag(Loc: Found->getBeginLoc(),
                DiagId: diag::note_constexpr_stmt_expr_unsupported);
    return ESR_Failed;
  }
  llvm_unreachable("Invalid EvalStmtResult!");
}
5792
/// Check that control flow may pass through this local variable declaration
/// in a constant expression.
static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) {
  // An expression E is a core constant expression unless the evaluation of E
  // would evaluate one of the following: [C++23] - a control flow that passes
  // through a declaration of a variable with static or thread storage duration
  // unless that variable is usable in constant expressions.
  if (VD->isLocalVarDecl() && VD->isStaticLocal() &&
      !VD->isUsableInConstantExpressions(C: Info.Ctx)) {
    // The %select distinguishes 'static' (0) from thread-storage (1) locals.
    Info.CCEDiag(Loc: VD->getLocation(), DiagId: diag::note_constexpr_static_local)
        << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD;
    return false;
  }
  return true;
}
5806
5807// Evaluate a statement.
5808static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
5809 const Stmt *S, const SwitchCase *Case) {
5810 if (!Info.nextStep(S))
5811 return ESR_Failed;
5812
5813 // If we're hunting down a 'case' or 'default' label, recurse through
5814 // substatements until we hit the label.
5815 if (Case) {
5816 switch (S->getStmtClass()) {
5817 case Stmt::CompoundStmtClass:
5818 // FIXME: Precompute which substatement of a compound statement we
5819 // would jump to, and go straight there rather than performing a
5820 // linear scan each time.
5821 case Stmt::LabelStmtClass:
5822 case Stmt::AttributedStmtClass:
5823 case Stmt::DoStmtClass:
5824 break;
5825
5826 case Stmt::CaseStmtClass:
5827 case Stmt::DefaultStmtClass:
5828 if (Case == S)
5829 Case = nullptr;
5830 break;
5831
5832 case Stmt::IfStmtClass: {
5833 // FIXME: Precompute which side of an 'if' we would jump to, and go
5834 // straight there rather than scanning both sides.
5835 const IfStmt *IS = cast<IfStmt>(Val: S);
5836
5837 // Wrap the evaluation in a block scope, in case it's a DeclStmt
5838 // preceded by our switch label.
5839 BlockScopeRAII Scope(Info);
5840
5841 // Step into the init statement in case it brings an (uninitialized)
5842 // variable into scope.
5843 if (const Stmt *Init = IS->getInit()) {
5844 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init, Case);
5845 if (ESR != ESR_CaseNotFound) {
5846 assert(ESR != ESR_Succeeded);
5847 return ESR;
5848 }
5849 }
5850
5851 // Condition variable must be initialized if it exists.
5852 // FIXME: We can skip evaluating the body if there's a condition
5853 // variable, as there can't be any case labels within it.
5854 // (The same is true for 'for' statements.)
5855
5856 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: IS->getThen(), Case);
5857 if (ESR == ESR_Failed)
5858 return ESR;
5859 if (ESR != ESR_CaseNotFound)
5860 return Scope.destroy() ? ESR : ESR_Failed;
5861 if (!IS->getElse())
5862 return ESR_CaseNotFound;
5863
5864 ESR = EvaluateStmt(Result, Info, S: IS->getElse(), Case);
5865 if (ESR == ESR_Failed)
5866 return ESR;
5867 if (ESR != ESR_CaseNotFound)
5868 return Scope.destroy() ? ESR : ESR_Failed;
5869 return ESR_CaseNotFound;
5870 }
5871
5872 case Stmt::WhileStmtClass: {
5873 EvalStmtResult ESR =
5874 EvaluateLoopBody(Result, Info, Body: cast<WhileStmt>(Val: S)->getBody(), Case);
5875 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: S, /*Scopes=*/{}, ESR))
5876 return ESR;
5877 if (ESR != ESR_Continue)
5878 return ESR;
5879 break;
5880 }
5881
5882 case Stmt::ForStmtClass: {
5883 const ForStmt *FS = cast<ForStmt>(Val: S);
5884 BlockScopeRAII Scope(Info);
5885
5886 // Step into the init statement in case it brings an (uninitialized)
5887 // variable into scope.
5888 if (const Stmt *Init = FS->getInit()) {
5889 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init, Case);
5890 if (ESR != ESR_CaseNotFound) {
5891 assert(ESR != ESR_Succeeded);
5892 return ESR;
5893 }
5894 }
5895
5896 EvalStmtResult ESR =
5897 EvaluateLoopBody(Result, Info, Body: FS->getBody(), Case);
5898 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, /*Scopes=*/{}, ESR))
5899 return ESR;
5900 if (ESR != ESR_Continue)
5901 return ESR;
5902 if (const auto *Inc = FS->getInc()) {
5903 if (Inc->isValueDependent()) {
5904 if (!EvaluateDependentExpr(E: Inc, Info))
5905 return ESR_Failed;
5906 } else {
5907 FullExpressionRAII IncScope(Info);
5908 if (!EvaluateIgnoredValue(Info, E: Inc) || !IncScope.destroy())
5909 return ESR_Failed;
5910 }
5911 }
5912 break;
5913 }
5914
5915 case Stmt::DeclStmtClass: {
5916 // Start the lifetime of any uninitialized variables we encounter. They
5917 // might be used by the selected branch of the switch.
5918 const DeclStmt *DS = cast<DeclStmt>(Val: S);
5919 for (const auto *D : DS->decls()) {
5920 if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
5921 if (!CheckLocalVariableDeclaration(Info, VD))
5922 return ESR_Failed;
5923 if (VD->hasLocalStorage() && !VD->getInit())
5924 if (!EvaluateVarDecl(Info, VD))
5925 return ESR_Failed;
5926 // FIXME: If the variable has initialization that can't be jumped
5927 // over, bail out of any immediately-surrounding compound-statement
5928 // too. There can't be any case labels here.
5929 }
5930 }
5931 return ESR_CaseNotFound;
5932 }
5933
5934 default:
5935 return ESR_CaseNotFound;
5936 }
5937 }
5938
5939 switch (S->getStmtClass()) {
5940 default:
5941 if (const Expr *E = dyn_cast<Expr>(Val: S)) {
5942 if (E->isValueDependent()) {
5943 if (!EvaluateDependentExpr(E, Info))
5944 return ESR_Failed;
5945 } else {
5946 // Don't bother evaluating beyond an expression-statement which couldn't
5947 // be evaluated.
5948 // FIXME: Do we need the FullExpressionRAII object here?
5949 // VisitExprWithCleanups should create one when necessary.
5950 FullExpressionRAII Scope(Info);
5951 if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy())
5952 return ESR_Failed;
5953 }
5954 return ESR_Succeeded;
5955 }
5956
5957 Info.FFDiag(Loc: S->getBeginLoc()) << S->getSourceRange();
5958 return ESR_Failed;
5959
5960 case Stmt::NullStmtClass:
5961 return ESR_Succeeded;
5962
5963 case Stmt::DeclStmtClass: {
5964 const DeclStmt *DS = cast<DeclStmt>(Val: S);
5965 for (const auto *D : DS->decls()) {
5966 const VarDecl *VD = dyn_cast_or_null<VarDecl>(Val: D);
5967 if (VD && !CheckLocalVariableDeclaration(Info, VD))
5968 return ESR_Failed;
5969 // Each declaration initialization is its own full-expression.
5970 FullExpressionRAII Scope(Info);
5971 if (!EvaluateDecl(Info, D, /*EvaluateConditionDecl=*/true) &&
5972 !Info.noteFailure())
5973 return ESR_Failed;
5974 if (!Scope.destroy())
5975 return ESR_Failed;
5976 }
5977 return ESR_Succeeded;
5978 }
5979
5980 case Stmt::ReturnStmtClass: {
5981 const Expr *RetExpr = cast<ReturnStmt>(Val: S)->getRetValue();
5982 FullExpressionRAII Scope(Info);
5983 if (RetExpr && RetExpr->isValueDependent()) {
5984 EvaluateDependentExpr(E: RetExpr, Info);
5985 // We know we returned, but we don't know what the value is.
5986 return ESR_Failed;
5987 }
5988 if (RetExpr &&
5989 !(Result.Slot
5990 ? EvaluateInPlace(Result&: Result.Value, Info, This: *Result.Slot, E: RetExpr)
5991 : Evaluate(Result&: Result.Value, Info, E: RetExpr)))
5992 return ESR_Failed;
5993 return Scope.destroy() ? ESR_Returned : ESR_Failed;
5994 }
5995
5996 case Stmt::CompoundStmtClass: {
5997 BlockScopeRAII Scope(Info);
5998
5999 const CompoundStmt *CS = cast<CompoundStmt>(Val: S);
6000 for (const auto *BI : CS->body()) {
6001 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: BI, Case);
6002 if (ESR == ESR_Succeeded)
6003 Case = nullptr;
6004 else if (ESR != ESR_CaseNotFound) {
6005 if (ESR != ESR_Failed && !Scope.destroy())
6006 return ESR_Failed;
6007 return ESR;
6008 }
6009 }
6010 if (Case)
6011 return ESR_CaseNotFound;
6012 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
6013 }
6014
6015 case Stmt::IfStmtClass: {
6016 const IfStmt *IS = cast<IfStmt>(Val: S);
6017
6018 // Evaluate the condition, as either a var decl or as an expression.
6019 BlockScopeRAII Scope(Info);
6020 if (const Stmt *Init = IS->getInit()) {
6021 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init);
6022 if (ESR != ESR_Succeeded) {
6023 if (ESR != ESR_Failed && !Scope.destroy())
6024 return ESR_Failed;
6025 return ESR;
6026 }
6027 }
6028 bool Cond;
6029 if (IS->isConsteval()) {
6030 Cond = IS->isNonNegatedConsteval();
6031 // If we are not in a constant context, if consteval should not evaluate
6032 // to true.
6033 if (!Info.InConstantContext)
6034 Cond = !Cond;
6035 } else if (!EvaluateCond(Info, CondDecl: IS->getConditionVariable(), Cond: IS->getCond(),
6036 Result&: Cond))
6037 return ESR_Failed;
6038
6039 if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
6040 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: SubStmt);
6041 if (ESR != ESR_Succeeded) {
6042 if (ESR != ESR_Failed && !Scope.destroy())
6043 return ESR_Failed;
6044 return ESR;
6045 }
6046 }
6047 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
6048 }
6049
6050 case Stmt::WhileStmtClass: {
6051 const WhileStmt *WS = cast<WhileStmt>(Val: S);
6052 while (true) {
6053 BlockScopeRAII Scope(Info);
6054 bool Continue;
6055 if (!EvaluateCond(Info, CondDecl: WS->getConditionVariable(), Cond: WS->getCond(),
6056 Result&: Continue))
6057 return ESR_Failed;
6058 if (!Continue)
6059 break;
6060
6061 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: WS->getBody());
6062 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: WS, Scopes: &Scope, ESR))
6063 return ESR;
6064
6065 if (ESR != ESR_Continue) {
6066 if (ESR != ESR_Failed && !Scope.destroy())
6067 return ESR_Failed;
6068 return ESR;
6069 }
6070 if (!Scope.destroy())
6071 return ESR_Failed;
6072 }
6073 return ESR_Succeeded;
6074 }
6075
6076 case Stmt::DoStmtClass: {
6077 const DoStmt *DS = cast<DoStmt>(Val: S);
6078 bool Continue;
6079 do {
6080 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: DS->getBody(), Case);
6081 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: DS, /*Scopes=*/{}, ESR))
6082 return ESR;
6083 if (ESR != ESR_Continue)
6084 return ESR;
6085 Case = nullptr;
6086
6087 if (DS->getCond()->isValueDependent()) {
6088 EvaluateDependentExpr(E: DS->getCond(), Info);
6089 // Bailout as we don't know whether to keep going or terminate the loop.
6090 return ESR_Failed;
6091 }
6092 FullExpressionRAII CondScope(Info);
6093 if (!EvaluateAsBooleanCondition(E: DS->getCond(), Result&: Continue, Info) ||
6094 !CondScope.destroy())
6095 return ESR_Failed;
6096 } while (Continue);
6097 return ESR_Succeeded;
6098 }
6099
6100 case Stmt::ForStmtClass: {
6101 const ForStmt *FS = cast<ForStmt>(Val: S);
6102 BlockScopeRAII ForScope(Info);
6103 if (FS->getInit()) {
6104 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getInit());
6105 if (ESR != ESR_Succeeded) {
6106 if (ESR != ESR_Failed && !ForScope.destroy())
6107 return ESR_Failed;
6108 return ESR;
6109 }
6110 }
6111 while (true) {
6112 BlockScopeRAII IterScope(Info);
6113 bool Continue = true;
6114 if (FS->getCond() && !EvaluateCond(Info, CondDecl: FS->getConditionVariable(),
6115 Cond: FS->getCond(), Result&: Continue))
6116 return ESR_Failed;
6117
6118 if (!Continue) {
6119 if (!IterScope.destroy())
6120 return ESR_Failed;
6121 break;
6122 }
6123
6124 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: FS->getBody());
6125 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, Scopes: {&IterScope, &ForScope}, ESR))
6126 return ESR;
6127 if (ESR != ESR_Continue) {
6128 if (ESR != ESR_Failed && (!IterScope.destroy() || !ForScope.destroy()))
6129 return ESR_Failed;
6130 return ESR;
6131 }
6132
6133 if (const auto *Inc = FS->getInc()) {
6134 if (Inc->isValueDependent()) {
6135 if (!EvaluateDependentExpr(E: Inc, Info))
6136 return ESR_Failed;
6137 } else {
6138 FullExpressionRAII IncScope(Info);
6139 if (!EvaluateIgnoredValue(Info, E: Inc) || !IncScope.destroy())
6140 return ESR_Failed;
6141 }
6142 }
6143
6144 if (!IterScope.destroy())
6145 return ESR_Failed;
6146 }
6147 return ForScope.destroy() ? ESR_Succeeded : ESR_Failed;
6148 }
6149
6150 case Stmt::CXXForRangeStmtClass: {
6151 const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(Val: S);
6152 BlockScopeRAII Scope(Info);
6153
6154 // Evaluate the init-statement if present.
6155 if (FS->getInit()) {
6156 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getInit());
6157 if (ESR != ESR_Succeeded) {
6158 if (ESR != ESR_Failed && !Scope.destroy())
6159 return ESR_Failed;
6160 return ESR;
6161 }
6162 }
6163
6164 // Initialize the __range variable.
6165 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getRangeStmt());
6166 if (ESR != ESR_Succeeded) {
6167 if (ESR != ESR_Failed && !Scope.destroy())
6168 return ESR_Failed;
6169 return ESR;
6170 }
6171
6172 // In error-recovery cases it's possible to get here even if we failed to
6173 // synthesize the __begin and __end variables.
6174 if (!FS->getBeginStmt() || !FS->getEndStmt() || !FS->getCond())
6175 return ESR_Failed;
6176
6177 // Create the __begin and __end iterators.
6178 ESR = EvaluateStmt(Result, Info, S: FS->getBeginStmt());
6179 if (ESR != ESR_Succeeded) {
6180 if (ESR != ESR_Failed && !Scope.destroy())
6181 return ESR_Failed;
6182 return ESR;
6183 }
6184 ESR = EvaluateStmt(Result, Info, S: FS->getEndStmt());
6185 if (ESR != ESR_Succeeded) {
6186 if (ESR != ESR_Failed && !Scope.destroy())
6187 return ESR_Failed;
6188 return ESR;
6189 }
6190
6191 while (true) {
6192 // Condition: __begin != __end.
6193 {
6194 if (FS->getCond()->isValueDependent()) {
6195 EvaluateDependentExpr(E: FS->getCond(), Info);
6196 // We don't know whether to keep going or terminate the loop.
6197 return ESR_Failed;
6198 }
6199 bool Continue = true;
6200 FullExpressionRAII CondExpr(Info);
6201 if (!EvaluateAsBooleanCondition(E: FS->getCond(), Result&: Continue, Info))
6202 return ESR_Failed;
6203 if (!Continue)
6204 break;
6205 }
6206
6207 // User's variable declaration, initialized by *__begin.
6208 BlockScopeRAII InnerScope(Info);
6209 ESR = EvaluateStmt(Result, Info, S: FS->getLoopVarStmt());
6210 if (ESR != ESR_Succeeded) {
6211 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy()))
6212 return ESR_Failed;
6213 return ESR;
6214 }
6215
6216 // Loop body.
6217 ESR = EvaluateLoopBody(Result, Info, Body: FS->getBody());
6218 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, Scopes: {&InnerScope, &Scope}, ESR))
6219 return ESR;
6220 if (ESR != ESR_Continue) {
6221 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy()))
6222 return ESR_Failed;
6223 return ESR;
6224 }
6225 if (FS->getInc()->isValueDependent()) {
6226 if (!EvaluateDependentExpr(E: FS->getInc(), Info))
6227 return ESR_Failed;
6228 } else {
6229 // Increment: ++__begin
6230 if (!EvaluateIgnoredValue(Info, E: FS->getInc()))
6231 return ESR_Failed;
6232 }
6233
6234 if (!InnerScope.destroy())
6235 return ESR_Failed;
6236 }
6237
6238 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
6239 }
6240
6241 case Stmt::SwitchStmtClass:
6242 return EvaluateSwitch(Result, Info, SS: cast<SwitchStmt>(Val: S));
6243
6244 case Stmt::ContinueStmtClass:
6245 case Stmt::BreakStmtClass: {
6246 auto *B = cast<LoopControlStmt>(Val: S);
6247 Info.BreakContinueStack.push_back(Elt: B->getNamedLoopOrSwitch());
6248 return isa<ContinueStmt>(Val: S) ? ESR_Continue : ESR_Break;
6249 }
6250
6251 case Stmt::LabelStmtClass:
6252 return EvaluateStmt(Result, Info, S: cast<LabelStmt>(Val: S)->getSubStmt(), Case);
6253
6254 case Stmt::AttributedStmtClass: {
6255 const auto *AS = cast<AttributedStmt>(Val: S);
6256 const auto *SS = AS->getSubStmt();
6257 MSConstexprContextRAII ConstexprContext(
6258 *Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(container: AS->getAttrs()) &&
6259 isa<ReturnStmt>(Val: SS));
6260
6261 auto LO = Info.Ctx.getLangOpts();
6262 if (LO.CXXAssumptions && !LO.MSVCCompat) {
6263 for (auto *Attr : AS->getAttrs()) {
6264 auto *AA = dyn_cast<CXXAssumeAttr>(Val: Attr);
6265 if (!AA)
6266 continue;
6267
6268 auto *Assumption = AA->getAssumption();
6269 if (Assumption->isValueDependent())
6270 return ESR_Failed;
6271
6272 if (Assumption->HasSideEffects(Ctx: Info.Ctx))
6273 continue;
6274
6275 bool Value;
6276 if (!EvaluateAsBooleanCondition(E: Assumption, Result&: Value, Info))
6277 return ESR_Failed;
6278 if (!Value) {
6279 Info.CCEDiag(Loc: Assumption->getExprLoc(),
6280 DiagId: diag::note_constexpr_assumption_failed);
6281 return ESR_Failed;
6282 }
6283 }
6284 }
6285
6286 return EvaluateStmt(Result, Info, S: SS, Case);
6287 }
6288
6289 case Stmt::CaseStmtClass:
6290 case Stmt::DefaultStmtClass:
6291 return EvaluateStmt(Result, Info, S: cast<SwitchCase>(Val: S)->getSubStmt(), Case);
6292 case Stmt::CXXTryStmtClass:
6293 // Evaluate try blocks by evaluating all sub statements.
6294 return EvaluateStmt(Result, Info, S: cast<CXXTryStmt>(Val: S)->getTryBlock(), Case);
6295 }
6296}
6297
6298/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
6299/// default constructor. If so, we'll fold it whether or not it's marked as
6300/// constexpr. If it is marked as constexpr, we will never implicitly define it,
6301/// so we need special handling.
6302static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
6303 const CXXConstructorDecl *CD,
6304 bool IsValueInitialization) {
6305 if (!CD->isTrivial() || !CD->isDefaultConstructor())
6306 return false;
6307
6308 // Value-initialization does not call a trivial default constructor, so such a
6309 // call is a core constant expression whether or not the constructor is
6310 // constexpr.
6311 if (!CD->isConstexpr() && !IsValueInitialization) {
6312 if (Info.getLangOpts().CPlusPlus11) {
6313 // FIXME: If DiagDecl is an implicitly-declared special member function,
6314 // we should be much more explicit about why it's not constexpr.
6315 Info.CCEDiag(Loc, DiagId: diag::note_constexpr_invalid_function, ExtraNotes: 1)
6316 << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
6317 Info.Note(Loc: CD->getLocation(), DiagId: diag::note_declared_at);
6318 } else {
6319 Info.CCEDiag(Loc, DiagId: diag::note_invalid_subexpr_in_const_expr);
6320 }
6321 }
6322 return true;
6323}
6324
/// CheckConstexprFunction - Check that a function can be called in a constant
/// expression.
///
/// \param Declaration the function named at the call site.
/// \param Definition the definition that would actually be invoked, or null
///        if no definition is available.
/// \param Body the body of \p Definition, if any.
/// \returns true if the call may be evaluated; otherwise emits an explanatory
///          diagnostic and returns false.
static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
                                   const FunctionDecl *Declaration,
                                   const FunctionDecl *Definition,
                                   const Stmt *Body) {
  // Potential constant expressions can contain calls to declared, but not yet
  // defined, constexpr functions.
  if (Info.checkingPotentialConstantExpression() && !Definition &&
      Declaration->isConstexpr())
    return false;

  // Bail out if the function declaration itself is invalid. We will
  // have produced a relevant diagnostic while parsing it, so just
  // note the problematic sub-expression.
  if (Declaration->isInvalidDecl()) {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  // DR1872: An instantiated virtual constexpr function can't be called in a
  // constant expression (prior to C++20). We can still constant-fold such a
  // call, so this is only a CCE diagnostic, not a hard failure.
  if (!Info.Ctx.getLangOpts().CPlusPlus20 && isa<CXXMethodDecl>(Val: Declaration) &&
      cast<CXXMethodDecl>(Val: Declaration)->isVirtual())
    Info.CCEDiag(Loc: CallLoc, DiagId: diag::note_constexpr_virtual_call);

  // An invalid definition is handled like an invalid declaration.
  if (Definition && Definition->isInvalidDecl()) {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  // Can we evaluate this function call? We need a definition with a body that
  // is either constexpr or allowed under the MSVC [[msvc::constexpr]]
  // extension in the current call context.
  if (Definition && Body &&
      (Definition->isConstexpr() || (Info.CurrentCall->CanEvalMSConstexpr &&
                                     Definition->hasAttr<MSConstexprAttr>())))
    return true;

  // From here on the call cannot be evaluated; choose the best declaration to
  // diagnose against.
  const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
  // Special note for the assert() macro, as the normal error message falsely
  // implies we cannot use an assertion during constant evaluation.
  if (CallLoc.isMacroID() && DiagDecl->getIdentifier()) {
    // FIXME: Instead of checking for an implementation-defined function,
    // check and evaluate the assert() macro.
    StringRef Name = DiagDecl->getName();
    bool AssertFailed =
        Name == "__assert_rtn" || Name == "__assert_fail" || Name == "_wassert";
    if (AssertFailed) {
      Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_assert_failed);
      return false;
    }
  }

  if (Info.getLangOpts().CPlusPlus11) {
    // If this function is not constexpr because it is an inherited
    // non-constexpr constructor, diagnose that directly.
    auto *CD = dyn_cast<CXXConstructorDecl>(Val: DiagDecl);
    if (CD && CD->isInheritingConstructor()) {
      auto *Inherited = CD->getInheritedConstructor().getConstructor();
      if (!Inherited->isConstexpr())
        DiagDecl = CD = Inherited;
    }

    // FIXME: If DiagDecl is an implicitly-declared special member function
    // or an inheriting constructor, we should be much more explicit about why
    // it's not constexpr.
    if (CD && CD->isInheritingConstructor())
      Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_invalid_inhctor, ExtraNotes: 1)
          << CD->getInheritedConstructor().getConstructor()->getParent();
    else
      Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_invalid_function, ExtraNotes: 1)
          << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
    Info.Note(Loc: DiagDecl->getLocation(), DiagId: diag::note_declared_at);
  } else {
    // Pre-C++11 there is no notion of constexpr functions at all.
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr);
  }
  return false;
}
6403
namespace {
/// Subobject handler that accepts any subobject it reaches without inspecting
/// its value. Used (via findSubobject) purely to verify that the designated
/// subobject is accessible for the given access kind when checking an
/// object's notional vptr / dynamic type.
struct CheckDynamicTypeHandler {
  // Access kind being performed; used by findSubobject for diagnostics.
  AccessKinds AccessKind;
  typedef bool result_type;
  bool failed() { return false; }
  // Reaching any subobject kind means the access is fine.
  bool found(APValue &Subobj, QualType SubobjType) { return true; }
  bool found(APSInt &Value, QualType SubobjType) { return true; }
  bool found(APFloat &Value, QualType SubobjType) { return true; }
};
} // end anonymous namespace
6414
6415/// Check that we can access the notional vptr of an object / determine its
6416/// dynamic type.
6417static bool checkDynamicType(EvalInfo &Info, const Expr *E, const LValue &This,
6418 AccessKinds AK, bool Polymorphic) {
6419 if (This.Designator.Invalid)
6420 return false;
6421
6422 CompleteObject Obj = findCompleteObject(Info, E, AK, LVal: This, LValType: QualType());
6423
6424 if (!Obj)
6425 return false;
6426
6427 if (!Obj.Value) {
6428 // The object is not usable in constant expressions, so we can't inspect
6429 // its value to see if it's in-lifetime or what the active union members
6430 // are. We can still check for a one-past-the-end lvalue.
6431 if (This.Designator.isOnePastTheEnd() ||
6432 This.Designator.isMostDerivedAnUnsizedArray()) {
6433 Info.FFDiag(E, DiagId: This.Designator.isOnePastTheEnd()
6434 ? diag::note_constexpr_access_past_end
6435 : diag::note_constexpr_access_unsized_array)
6436 << AK;
6437 return false;
6438 } else if (Polymorphic) {
6439 // Conservatively refuse to perform a polymorphic operation if we would
6440 // not be able to read a notional 'vptr' value.
6441 if (!Info.checkingPotentialConstantExpression() ||
6442 !This.AllowConstexprUnknown) {
6443 APValue Val;
6444 This.moveInto(V&: Val);
6445 QualType StarThisType =
6446 Info.Ctx.getLValueReferenceType(T: This.Designator.getType(Ctx&: Info.Ctx));
6447 Info.FFDiag(E, DiagId: diag::note_constexpr_polymorphic_unknown_dynamic_type)
6448 << AK << Val.getAsString(Ctx: Info.Ctx, Ty: StarThisType);
6449 }
6450 return false;
6451 }
6452 return true;
6453 }
6454
6455 CheckDynamicTypeHandler Handler{.AccessKind: AK};
6456 return Obj && findSubobject(Info, E, Obj, Sub: This.Designator, handler&: Handler);
6457}
6458
6459/// Check that the pointee of the 'this' pointer in a member function call is
6460/// either within its lifetime or in its period of construction or destruction.
6461static bool
6462checkNonVirtualMemberCallThisPointer(EvalInfo &Info, const Expr *E,
6463 const LValue &This,
6464 const CXXMethodDecl *NamedMember) {
6465 return checkDynamicType(
6466 Info, E, This,
6467 AK: isa<CXXDestructorDecl>(Val: NamedMember) ? AK_Destroy : AK_MemberCall, Polymorphic: false);
6468}
6469
/// The result of computing an object's dynamic type: the dynamic class and
/// where in the lvalue's subobject path that class appears.
struct DynamicType {
  /// The dynamic class type of the object.
  const CXXRecordDecl *Type;
  /// The corresponding path length in the lvalue.
  unsigned PathLength;
};
6476
6477static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator,
6478 unsigned PathLength) {
6479 assert(PathLength >= Designator.MostDerivedPathLength && PathLength <=
6480 Designator.Entries.size() && "invalid path length");
6481 return (PathLength == Designator.MostDerivedPathLength)
6482 ? Designator.MostDerivedType->getAsCXXRecordDecl()
6483 : getAsBaseClass(E: Designator.Entries[PathLength - 1]);
6484}
6485
/// Determine the dynamic type of an object.
///
/// Walks outwards from the most-derived object along the lvalue's subobject
/// path, consulting the evaluator's record of in-flight constructor and
/// destructor calls to find the innermost class whose bases are not currently
/// being constructed or destroyed; that class is the dynamic type.
///
/// \param AK the kind of polymorphic operation, used for diagnostics.
/// \returns std::nullopt (after diagnosing) if no dynamic type can be
///          determined or the operation would be undefined.
static std::optional<DynamicType> ComputeDynamicType(EvalInfo &Info,
                                                     const Expr *E,
                                                     LValue &This,
                                                     AccessKinds AK) {
  // If we don't have an lvalue denoting an object of class type, there is no
  // meaningful dynamic type. (We consider objects of non-class type to have no
  // dynamic type.)
  if (!checkDynamicType(Info, E, This, AK,
                        Polymorphic: AK != AK_TypeId || This.AllowConstexprUnknown))
    return std::nullopt;

  if (This.Designator.Invalid)
    return std::nullopt;

  // Refuse to compute a dynamic type in the presence of virtual bases. This
  // shouldn't happen other than in constant-folding situations, since literal
  // types can't have virtual bases.
  //
  // Note that consumers of DynamicType assume that the type has no virtual
  // bases, and will need modifications if this restriction is relaxed.
  const CXXRecordDecl *Class =
      This.Designator.MostDerivedType->getAsCXXRecordDecl();
  if (!Class || Class->getNumVBases()) {
    Info.FFDiag(E);
    return std::nullopt;
  }

  // FIXME: For very deep class hierarchies, it might be beneficial to use a
  // binary search here instead. But the overwhelmingly common case is that
  // we're not in the middle of a constructor, so it probably doesn't matter
  // in practice.
  ArrayRef<APValue::LValuePathEntry> Path = This.Designator.Entries;
  for (unsigned PathLength = This.Designator.MostDerivedPathLength;
       PathLength <= Path.size(); ++PathLength) {
    // Ask the evaluator which construction/destruction phase (if any) the
    // subobject denoted by this path prefix is currently in.
    switch (Info.isEvaluatingCtorDtor(Base: This.getLValueBase(),
                                      Path: Path.slice(N: 0, M: PathLength))) {
    case ConstructionPhase::Bases:
    case ConstructionPhase::DestroyingBases:
      // We're constructing or destroying a base class. This is not the dynamic
      // type; keep walking towards the base subobject.
      break;

    case ConstructionPhase::None:
    case ConstructionPhase::AfterBases:
    case ConstructionPhase::AfterFields:
    case ConstructionPhase::Destroying:
      // We've finished constructing the base classes and not yet started
      // destroying them again, so this is the dynamic type.
      return DynamicType{.Type: getBaseClassType(Designator&: This.Designator, PathLength),
                         .PathLength: PathLength};
    }
  }

  // CWG issue 1517: we're constructing a base class of the object described by
  // 'This', so that object has not yet begun its period of construction and
  // any polymorphic operation on it results in undefined behavior.
  Info.FFDiag(E);
  return std::nullopt;
}
6546
/// Perform virtual dispatch.
///
/// Given the statically-named callee \p Found and the object lvalue \p This,
/// determine the final overrider to invoke, adjust \p This to designate the
/// subobject of the overrider's class, and, if the overrider's return type
/// differs from \p Found's (covariant return), record in
/// \p CovariantAdjustmentPath the sequence of return types to convert through.
///
/// \returns the method to call, or nullptr (after diagnosing) on failure.
static const CXXMethodDecl *HandleVirtualDispatch(
    EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found,
    llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) {
  std::optional<DynamicType> DynType = ComputeDynamicType(
      Info, E, This,
      AK: isa<CXXDestructorDecl>(Val: Found) ? AK_Destroy : AK_MemberCall);
  if (!DynType)
    return nullptr;

  // Find the final overrider. It must be declared in one of the classes on the
  // path from the dynamic type to the static type.
  // FIXME: If we ever allow literal types to have virtual base classes, that
  // won't be true.
  const CXXMethodDecl *Callee = Found;
  unsigned PathLength = DynType->PathLength;
  for (/**/; PathLength <= This.Designator.Entries.size(); ++PathLength) {
    const CXXRecordDecl *Class = getBaseClassType(Designator&: This.Designator, PathLength);
    const CXXMethodDecl *Overrider =
        Found->getCorrespondingMethodDeclaredInClass(RD: Class, MayBeBase: false);
    if (Overrider) {
      Callee = Overrider;
      break;
    }
  }

  // C++2a [class.abstract]p6:
  //   the effect of making a virtual call to a pure virtual function [...] is
  //   undefined
  if (Callee->isPureVirtual()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_pure_virtual_call, ExtraNotes: 1) << Callee;
    Info.Note(Loc: Callee->getLocation(), DiagId: diag::note_declared_at);
    return nullptr;
  }

  // If necessary, walk the rest of the path to determine the sequence of
  // covariant adjustment steps to apply. Each distinct return type between
  // the overrider and the statically-found declaration becomes one step.
  if (!Info.Ctx.hasSameUnqualifiedType(T1: Callee->getReturnType(),
                                       T2: Found->getReturnType())) {
    CovariantAdjustmentPath.push_back(Elt: Callee->getReturnType());
    for (unsigned CovariantPathLength = PathLength + 1;
         CovariantPathLength != This.Designator.Entries.size();
         ++CovariantPathLength) {
      const CXXRecordDecl *NextClass =
          getBaseClassType(Designator&: This.Designator, PathLength: CovariantPathLength);
      const CXXMethodDecl *Next =
          Found->getCorrespondingMethodDeclaredInClass(RD: NextClass, MayBeBase: false);
      if (Next && !Info.Ctx.hasSameUnqualifiedType(
                      T1: Next->getReturnType(), T2: CovariantAdjustmentPath.back()))
        CovariantAdjustmentPath.push_back(Elt: Next->getReturnType());
    }
    // Ensure the path ends at the statically expected return type.
    if (!Info.Ctx.hasSameUnqualifiedType(T1: Found->getReturnType(),
                                         T2: CovariantAdjustmentPath.back()))
      CovariantAdjustmentPath.push_back(Elt: Found->getReturnType());
  }

  // Perform 'this' adjustment: truncate the lvalue path so it designates the
  // subobject of the class that declares the final overrider.
  if (!CastToDerivedClass(Info, E, Result&: This, TruncatedType: Callee->getParent(), TruncatedElements: PathLength))
    return nullptr;

  return Callee;
}
6609
6610/// Perform the adjustment from a value returned by a virtual function to
6611/// a value of the statically expected type, which may be a pointer or
6612/// reference to a base class of the returned type.
6613static bool HandleCovariantReturnAdjustment(EvalInfo &Info, const Expr *E,
6614 APValue &Result,
6615 ArrayRef<QualType> Path) {
6616 assert(Result.isLValue() &&
6617 "unexpected kind of APValue for covariant return");
6618 if (Result.isNullPointer())
6619 return true;
6620
6621 LValue LVal;
6622 LVal.setFrom(Ctx: Info.Ctx, V: Result);
6623
6624 const CXXRecordDecl *OldClass = Path[0]->getPointeeCXXRecordDecl();
6625 for (unsigned I = 1; I != Path.size(); ++I) {
6626 const CXXRecordDecl *NewClass = Path[I]->getPointeeCXXRecordDecl();
6627 assert(OldClass && NewClass && "unexpected kind of covariant return");
6628 if (OldClass != NewClass &&
6629 !CastToBaseClass(Info, E, Result&: LVal, DerivedRD: OldClass, BaseRD: NewClass))
6630 return false;
6631 OldClass = NewClass;
6632 }
6633
6634 LVal.moveInto(V&: Result);
6635 return true;
6636}
6637
6638/// Determine whether \p Base, which is known to be a direct base class of
6639/// \p Derived, is a public base class.
6640static bool isBaseClassPublic(const CXXRecordDecl *Derived,
6641 const CXXRecordDecl *Base) {
6642 for (const CXXBaseSpecifier &BaseSpec : Derived->bases()) {
6643 auto *BaseClass = BaseSpec.getType()->getAsCXXRecordDecl();
6644 if (BaseClass && declaresSameEntity(D1: BaseClass, D2: Base))
6645 return BaseSpec.getAccessSpecifier() == AS_public;
6646 }
6647 llvm_unreachable("Base is not a direct base of Derived");
6648}
6649
/// Apply the given dynamic cast operation on the provided lvalue.
///
/// This implements the hard case of dynamic_cast, requiring a "runtime check"
/// to find a suitable target subobject.
///
/// \param Ptr the operand lvalue; updated in place to designate the resulting
///        subobject (or set to null for a failed pointer cast).
/// \returns false (after diagnosing) if evaluation must fail.
static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
                              LValue &Ptr) {
  // We can't do anything with a non-symbolic pointer value.
  SubobjectDesignator &D = Ptr.Designator;
  if (D.Invalid)
    return false;

  // C++ [expr.dynamic.cast]p6:
  // If v is a null pointer value, the result is a null pointer value.
  if (Ptr.isNullPointer() && !E->isGLValue())
    return true;

  // For all the other cases, we need the pointer to point to an object within
  // its lifetime / period of construction / destruction, and we need to know
  // its dynamic type.
  std::optional<DynamicType> DynType =
      ComputeDynamicType(Info, E, This&: Ptr, AK: AK_DynamicCast);
  if (!DynType)
    return false;

  // C++ [expr.dynamic.cast]p7:
  // If T is "pointer to cv void", then the result is a pointer to the most
  // derived object
  if (E->getType()->isVoidPointerType())
    return CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: DynType->Type, TruncatedElements: DynType->PathLength);

  // C: the class type being cast to.
  const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl();
  assert(C && "dynamic_cast target is not void pointer nor class");
  CanQualType CQT = Info.Ctx.getCanonicalTagType(TD: C);

  // Produce the failure result: a null pointer for a pointer cast, or a
  // diagnostic for a reference cast. DiagKind selects the explanation:
  //   0: the dynamic type is C or derives from C (the failure was elsewhere)
  //   1: there is no path from the dynamic type to C
  //   2: C is an ambiguous base of the dynamic type
  //   3: the path to C exists but is not public
  auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
    // C++ [expr.dynamic.cast]p9:
    if (!E->isGLValue()) {
      // The value of a failed cast to pointer type is the null pointer value
      // of the required result type.
      Ptr.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
      return true;
    }

    // A failed cast to reference type throws [...] std::bad_cast.
    unsigned DiagKind;
    if (!Paths && (declaresSameEntity(D1: DynType->Type, D2: C) ||
                   DynType->Type->isDerivedFrom(Base: C)))
      DiagKind = 0;
    else if (!Paths || Paths->begin() == Paths->end())
      DiagKind = 1;
    else if (Paths->isAmbiguous(BaseType: CQT))
      DiagKind = 2;
    else {
      assert(Paths->front().Access != AS_public && "why did the cast fail?");
      DiagKind = 3;
    }
    Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_cast_to_reference_failed)
        << DiagKind << Ptr.Designator.getType(Ctx&: Info.Ctx)
        << Info.Ctx.getCanonicalTagType(TD: DynType->Type)
        << E->getType().getUnqualifiedType();
    return false;
  };

  // Runtime check, phase 1:
  // Walk from the base subobject towards the derived object looking for the
  // target type.
  for (int PathLength = Ptr.Designator.Entries.size();
       PathLength >= (int)DynType->PathLength; --PathLength) {
    const CXXRecordDecl *Class = getBaseClassType(Designator&: Ptr.Designator, PathLength);
    if (declaresSameEntity(D1: Class, D2: C))
      return CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: Class, TruncatedElements: PathLength);
    // We can only walk across public inheritance edges.
    if (PathLength > (int)DynType->PathLength &&
        !isBaseClassPublic(Derived: getBaseClassType(Designator&: Ptr.Designator, PathLength: PathLength - 1),
                           Base: Class))
      return RuntimeCheckFailed(nullptr);
  }

  // Runtime check, phase 2:
  // Search the dynamic type for an unambiguous public base of type C.
  CXXBasePaths Paths(/*FindAmbiguities=*/true,
                     /*RecordPaths=*/true, /*DetectVirtual=*/false);
  if (DynType->Type->isDerivedFrom(Base: C, Paths) && !Paths.isAmbiguous(BaseType: CQT) &&
      Paths.front().Access == AS_public) {
    // Downcast to the dynamic type...
    if (!CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: DynType->Type, TruncatedElements: DynType->PathLength))
      return false;
    // ... then upcast to the chosen base class subobject.
    for (CXXBasePathElement &Elem : Paths.front())
      if (!HandleLValueBase(Info, E, Obj&: Ptr, DerivedDecl: Elem.Class, Base: Elem.Base))
        return false;
    return true;
  }

  // Otherwise, the runtime check fails.
  return RuntimeCheckFailed(&Paths);
}
6747
namespace {
/// Subobject handler that makes \p Field the active member of a union
/// subobject (beginning its lifetime with a default-initialized value), as an
/// assignment through a union member access requires under
/// C++20 [class.union]p5.
struct StartLifetimeOfUnionMemberHandler {
  EvalInfo &Info;
  /// The assignment's LHS expression, used for diagnostics.
  const Expr *LHSExpr;
  /// The union member whose lifetime should begin.
  const FieldDecl *Field;
  /// True while the containing object is still being initialized; changing
  /// the active member during init is diagnosed below.
  bool DuringInit;
  bool Failed = false;
  static const AccessKinds AccessKind = AK_Assign;

  typedef bool result_type;
  bool failed() { return Failed; }
  bool found(APValue &Subobj, QualType SubobjType) {
    // We are supposed to perform no initialization but begin the lifetime of
    // the object. We interpret that as meaning to do what default
    // initialization of the object would do if all constructors involved were
    // trivial:
    // * All base, non-variant member, and array element subobjects' lifetimes
    // begin
    // * No variant members' lifetimes begin
    // * All scalar subobjects whose lifetimes begin have indeterminate values
    assert(SubobjType->isUnionType());
    if (declaresSameEntity(D1: Subobj.getUnionField(), D2: Field)) {
      // This union member is already active. If it's also in-lifetime, there's
      // nothing to do.
      if (Subobj.getUnionValue().hasValue())
        return true;
    } else if (DuringInit) {
      // We're currently in the process of initializing a different union
      // member. If we carried on, that initialization would attempt to
      // store to an inactive union member, resulting in undefined behavior.
      Info.FFDiag(E: LHSExpr,
                  DiagId: diag::note_constexpr_union_member_change_during_init);
      return false;
    }
    // Activate Field with the value default-initialization would give it.
    APValue Result;
    Failed = !handleDefaultInitValue(T: Field->getType(), Result);
    Subobj.setUnion(Field, Value: Result);
    return true;
  }
  bool found(APSInt &Value, QualType SubobjType) {
    llvm_unreachable("wrong value kind for union object");
  }
  bool found(APFloat &Value, QualType SubobjType) {
    llvm_unreachable("wrong value kind for union object");
  }
};
} // end anonymous namespace
6795
// Out-of-line definition for the static member: the in-class initializer
// alone does not provide a definition for ODR-uses (pre-C++17 rules).
const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind;
6797
/// Handle a builtin simple-assignment or a call to a trivial assignment
/// operator whose left-hand side might involve a union member access. If it
/// does, implicitly start the lifetime of any accessed union elements per
/// C++20 [class.union]5.
///
/// Walks the LHS expression from the outermost access inward, mirroring the
/// walk against the already-computed subobject designator in \p LHS, and
/// collects every union member access along the way; it then activates those
/// members outermost-first.
static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
                                               const Expr *LHSExpr,
                                               const LValue &LHS) {
  if (LHS.InvalidBase || LHS.Designator.Invalid)
    return false;

  // Pairs of (designator length of the union object, member being accessed),
  // collected outermost-first during the walk below.
  llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths;
  // C++ [class.union]p5:
  //   define the set S(E) of subexpressions of E as follows:
  // PathLength tracks the designator entry corresponding to the current E;
  // it is decremented in lock-step as we peel accesses off the expression.
  unsigned PathLength = LHS.Designator.Entries.size();
  for (const Expr *E = LHSExpr; E != nullptr;) {
    //   -- If E is of the form A.B, S(E) contains the elements of S(A)...
    if (auto *ME = dyn_cast<MemberExpr>(Val: E)) {
      auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl());
      // Note that we can't implicitly start the lifetime of a reference,
      // so we don't need to proceed any further if we reach one.
      if (!FD || FD->getType()->isReferenceType())
        break;

      //    ... and also contains A.B if B names a union member ...
      if (FD->getParent()->isUnion()) {
        //    ... of a non-class, non-array type, or of a class type with a
        //    trivial default constructor that is not deleted, or an array of
        //    such types.
        auto *RD =
            FD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
        if (!RD || RD->hasTrivialDefaultConstructor())
          UnionPathLengths.push_back(Elt: {PathLength - 1, FD});
      }

      E = ME->getBase();
      --PathLength;
      assert(declaresSameEntity(FD,
                                LHS.Designator.Entries[PathLength]
                                    .getAsBaseOrMember().getPointer()));

      //   -- If E is of the form A[B] and is interpreted as a built-in array
      //      subscripting operator, S(E) is [S(the array operand, if any)].
    } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: E)) {
      // Step over an ArrayToPointerDecay implicit cast.
      auto *Base = ASE->getBase()->IgnoreImplicit();
      if (!Base->getType()->isArrayType())
        break;

      E = Base;
      --PathLength;

    } else if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E)) {
      // Step over a derived-to-base conversion.
      E = ICE->getSubExpr();
      if (ICE->getCastKind() == CK_NoOp)
        continue;
      if (ICE->getCastKind() != CK_DerivedToBase &&
          ICE->getCastKind() != CK_UncheckedDerivedToBase)
        break;
      // Walk path backwards as we walk up from the base to the derived class.
      for (const CXXBaseSpecifier *Elt : llvm::reverse(C: ICE->path())) {
        if (Elt->isVirtual()) {
          // A class with virtual base classes never has a trivial default
          // constructor, so S(E) is empty in this case.
          E = nullptr;
          break;
        }

        --PathLength;
        assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(),
                                  LHS.Designator.Entries[PathLength]
                                      .getAsBaseOrMember().getPointer()));
      }

      //   -- Otherwise, S(E) is empty.
    } else {
      break;
    }
  }

  // Common case: no unions' lifetimes are started.
  if (UnionPathLengths.empty())
    return true;

  //   if modification of X [would access an inactive union member], an object
  //   of the type of X is implicitly created
  CompleteObject Obj =
      findCompleteObject(Info, E: LHSExpr, AK: AK_Assign, LVal: LHS, LValType: LHSExpr->getType());
  if (!Obj)
    return false;
  // Activate members outermost-first (the vector was built innermost-last).
  for (std::pair<unsigned, const FieldDecl *> LengthAndField :
           llvm::reverse(C&: UnionPathLengths)) {
    // Form a designator for the union object.
    SubobjectDesignator D = LHS.Designator;
    D.truncate(Ctx&: Info.Ctx, Base: LHS.Base, NewLength: LengthAndField.first);

    // Starting a member's lifetime while its union is between base and member
    // initialization is UB; the handler diagnoses that case.
    bool DuringInit = Info.isEvaluatingCtorDtor(Base: LHS.Base, Path: D.Entries) ==
                      ConstructionPhase::AfterBases;
    StartLifetimeOfUnionMemberHandler StartLifetime{
        .Info: Info, .LHSExpr: LHSExpr, .Field: LengthAndField.second, .DuringInit: DuringInit};
    if (!findSubobject(Info, E: LHSExpr, Obj, Sub: D, handler&: StartLifetime))
      return false;
  }

  return true;
}
6904
6905static bool EvaluateCallArg(const ParmVarDecl *PVD, const Expr *Arg,
6906 CallRef Call, EvalInfo &Info, bool NonNull = false,
6907 APValue **EvaluatedArg = nullptr) {
6908 LValue LV;
6909 // Create the parameter slot and register its destruction. For a vararg
6910 // argument, create a temporary.
6911 // FIXME: For calling conventions that destroy parameters in the callee,
6912 // should we consider performing destruction when the function returns
6913 // instead?
6914 APValue &V = PVD ? Info.CurrentCall->createParam(Args: Call, PVD, LV)
6915 : Info.CurrentCall->createTemporary(Key: Arg, T: Arg->getType(),
6916 Scope: ScopeKind::Call, LV);
6917 if (!EvaluateInPlace(Result&: V, Info, This: LV, E: Arg))
6918 return false;
6919
6920 // Passing a null pointer to an __attribute__((nonnull)) parameter results in
6921 // undefined behavior, so is non-constant.
6922 if (NonNull && V.isLValue() && V.isNullPointer()) {
6923 Info.CCEDiag(E: Arg, DiagId: diag::note_non_null_attribute_failed);
6924 return false;
6925 }
6926
6927 if (EvaluatedArg)
6928 *EvaluatedArg = &V;
6929
6930 return true;
6931}
6932
6933/// Evaluate the arguments to a function call.
6934static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call,
6935 EvalInfo &Info, const FunctionDecl *Callee,
6936 bool RightToLeft = false,
6937 LValue *ObjectArg = nullptr) {
6938 bool Success = true;
6939 llvm::SmallBitVector ForbiddenNullArgs;
6940 if (Callee->hasAttr<NonNullAttr>()) {
6941 ForbiddenNullArgs.resize(N: Args.size());
6942 for (const auto *Attr : Callee->specific_attrs<NonNullAttr>()) {
6943 if (!Attr->args_size()) {
6944 ForbiddenNullArgs.set();
6945 break;
6946 } else
6947 for (auto Idx : Attr->args()) {
6948 unsigned ASTIdx = Idx.getASTIndex();
6949 if (ASTIdx >= Args.size())
6950 continue;
6951 ForbiddenNullArgs[ASTIdx] = true;
6952 }
6953 }
6954 }
6955 for (unsigned I = 0; I < Args.size(); I++) {
6956 unsigned Idx = RightToLeft ? Args.size() - I - 1 : I;
6957 const ParmVarDecl *PVD =
6958 Idx < Callee->getNumParams() ? Callee->getParamDecl(i: Idx) : nullptr;
6959 bool NonNull = !ForbiddenNullArgs.empty() && ForbiddenNullArgs[Idx];
6960 APValue *That = nullptr;
6961 if (!EvaluateCallArg(PVD, Arg: Args[Idx], Call, Info, NonNull, EvaluatedArg: &That)) {
6962 // If we're checking for a potential constant expression, evaluate all
6963 // initializers even if some of them fail.
6964 if (!Info.noteFailure())
6965 return false;
6966 Success = false;
6967 }
6968 if (PVD && PVD->isExplicitObjectParameter() && That && That->isLValue())
6969 ObjectArg->setFrom(Ctx: Info.Ctx, V: *That);
6970 }
6971 return Success;
6972}
6973
6974/// Perform a trivial copy from Param, which is the parameter of a copy or move
6975/// constructor or assignment operator.
6976static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param,
6977 const Expr *E, APValue &Result,
6978 bool CopyObjectRepresentation) {
6979 // Find the reference argument.
6980 CallStackFrame *Frame = Info.CurrentCall;
6981 APValue *RefValue = Info.getParamSlot(Call: Frame->Arguments, PVD: Param);
6982 if (!RefValue) {
6983 Info.FFDiag(E);
6984 return false;
6985 }
6986
6987 // Copy out the contents of the RHS object.
6988 LValue RefLValue;
6989 RefLValue.setFrom(Ctx: Info.Ctx, V: *RefValue);
6990 return handleLValueToRValueConversion(
6991 Info, Conv: E, Type: Param->getType().getNonReferenceType(), LVal: RefLValue, RVal&: Result,
6992 WantObjectRepresentation: CopyObjectRepresentation);
6993}
6994
6995/// Evaluate a function call.
6996static bool HandleFunctionCall(SourceLocation CallLoc,
6997 const FunctionDecl *Callee,
6998 const LValue *ObjectArg, const Expr *E,
6999 ArrayRef<const Expr *> Args, CallRef Call,
7000 const Stmt *Body, EvalInfo &Info,
7001 APValue &Result, const LValue *ResultSlot) {
7002 if (!Info.CheckCallLimit(Loc: CallLoc))
7003 return false;
7004
7005 CallStackFrame Frame(Info, E->getSourceRange(), Callee, ObjectArg, E, Call);
7006
7007 // For a trivial copy or move assignment, perform an APValue copy. This is
7008 // essential for unions, where the operations performed by the assignment
7009 // operator cannot be represented as statements.
7010 //
7011 // Skip this for non-union classes with no fields; in that case, the defaulted
7012 // copy/move does not actually read the object.
7013 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: Callee);
7014 if (MD && MD->isDefaulted() &&
7015 (MD->getParent()->isUnion() ||
7016 (MD->isTrivial() &&
7017 isReadByLvalueToRvalueConversion(RD: MD->getParent())))) {
7018 unsigned ExplicitOffset = MD->isExplicitObjectMemberFunction() ? 1 : 0;
7019 assert(ObjectArg &&
7020 (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
7021 APValue RHSValue;
7022 if (!handleTrivialCopy(Info, Param: MD->getParamDecl(i: 0), E: Args[0], Result&: RHSValue,
7023 CopyObjectRepresentation: MD->getParent()->isUnion()))
7024 return false;
7025
7026 LValue Obj;
7027 if (!handleAssignment(Info, E: Args[ExplicitOffset], LVal: *ObjectArg,
7028 LValType: MD->getFunctionObjectParameterReferenceType(),
7029 Val&: RHSValue))
7030 return false;
7031 ObjectArg->moveInto(V&: Result);
7032 return true;
7033 } else if (MD && isLambdaCallOperator(MD)) {
7034 // We're in a lambda; determine the lambda capture field maps unless we're
7035 // just constexpr checking a lambda's call operator. constexpr checking is
7036 // done before the captures have been added to the closure object (unless
7037 // we're inferring constexpr-ness), so we don't have access to them in this
7038 // case. But since we don't need the captures to constexpr check, we can
7039 // just ignore them.
7040 if (!Info.checkingPotentialConstantExpression())
7041 MD->getParent()->getCaptureFields(Captures&: Frame.LambdaCaptureFields,
7042 ThisCapture&: Frame.LambdaThisCaptureField);
7043 }
7044
7045 StmtResult Ret = {.Value: Result, .Slot: ResultSlot};
7046 EvalStmtResult ESR = EvaluateStmt(Result&: Ret, Info, S: Body);
7047 if (ESR == ESR_Succeeded) {
7048 if (Callee->getReturnType()->isVoidType())
7049 return true;
7050 Info.FFDiag(Loc: Callee->getEndLoc(), DiagId: diag::note_constexpr_no_return);
7051 }
7052 return ESR == ESR_Returned;
7053}
7054
/// Evaluate a constructor call.
///
/// \param E          The expression triggering construction (for locations).
/// \param This       The object being constructed.
/// \param Call       Frame holding the already-evaluated ctor arguments.
/// \param Definition The constructor definition to evaluate.
/// \param Result     Receives the constructed object's value.
static bool HandleConstructorCall(const Expr *E, const LValue &This,
                                  CallRef Call,
                                  const CXXConstructorDecl *Definition,
                                  EvalInfo &Info, APValue &Result) {
  SourceLocation CallLoc = E->getExprLoc();
  if (!Info.CheckCallLimit(Loc: CallLoc))
    return false;

  // Classes with virtual bases are not supported in constant evaluation.
  const CXXRecordDecl *RD = Definition->getParent();
  if (RD->getNumVBases()) {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  // Track the construction phase of this object so accesses that occur while
  // it is under construction can be validated (and so the dynamic type is
  // known to change once all bases are constructed).
  EvalInfo::EvaluatingConstructorRAII EvalObj(
      Info,
      ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries},
      RD->getNumBases());
  CallStackFrame Frame(Info, E->getSourceRange(), Definition, &This, E, Call);

  // FIXME: Creating an APValue just to hold a nonexistent return value is
  // wasteful.
  APValue RetVal;
  StmtResult Ret = {.Value: RetVal, .Slot: nullptr};

  // If it's a delegating constructor, delegate.
  if (Definition->isDelegatingConstructor()) {
    // A delegating constructor has exactly one initializer: the target call.
    CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
    if ((*I)->getInit()->isValueDependent()) {
      if (!EvaluateDependentExpr(E: (*I)->getInit(), Info))
        return false;
    } else {
      FullExpressionRAII InitScope(Info);
      if (!EvaluateInPlace(Result, Info, This, E: (*I)->getInit()) ||
          !InitScope.destroy())
        return false;
    }
    // The delegating constructor's own body still runs afterwards.
    return EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) != ESR_Failed;
  }

  // For a trivial copy or move constructor, perform an APValue copy. This is
  // essential for unions (or classes with anonymous union members), where the
  // operations performed by the constructor cannot be represented by
  // ctor-initializers.
  //
  // Skip this for empty non-union classes; we should not perform an
  // lvalue-to-rvalue conversion on them because their copy constructor does not
  // actually read them.
  if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
      (Definition->getParent()->isUnion() ||
       (Definition->isTrivial() &&
        isReadByLvalueToRvalueConversion(RD: Definition->getParent())))) {
    return handleTrivialCopy(Info, Param: Definition->getParamDecl(i: 0), E, Result,
                             CopyObjectRepresentation: Definition->getParent()->isUnion());
  }

  // Reserve space for the struct members.
  if (!Result.hasValue()) {
    if (!RD->isUnion())
      Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
                       RD->getNumFields());
    else
      // A union starts with no active member.
      Result = APValue((const FieldDecl*)nullptr);
  }

  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

  // A scope for temporaries lifetime-extended by reference members.
  BlockScopeRAII LifetimeExtendedScope(Info);

  bool Success = true;
  unsigned BasesSeen = 0;
#ifndef NDEBUG
  CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
#endif
  // FieldIt walks the record's fields in declaration order; any field skipped
  // over without an explicit initializer is default-initialized by the
  // helper below.
  CXXRecordDecl::field_iterator FieldIt = RD->field_begin();
  auto SkipToField = [&](FieldDecl *FD, bool Indirect) {
    // We might be initializing the same field again if this is an indirect
    // field initialization.
    if (FieldIt == RD->field_end() ||
        FieldIt->getFieldIndex() > FD->getFieldIndex()) {
      assert(Indirect && "fields out of order?");
      return;
    }

    // Default-initialize any fields with no explicit initializer.
    for (; !declaresSameEntity(D1: *FieldIt, D2: FD); ++FieldIt) {
      assert(FieldIt != RD->field_end() && "missing field?");
      if (!FieldIt->isUnnamedBitField())
        Success &= handleDefaultInitValue(
            T: FieldIt->getType(),
            Result&: Result.getStructField(i: FieldIt->getFieldIndex()));
    }
    ++FieldIt;
  };
  // Evaluate each ctor-initializer in order, locating the subobject it
  // initializes ('Subobject'/'Value') before evaluating the init expression.
  for (const auto *I : Definition->inits()) {
    LValue Subobject = This;
    LValue SubobjectParent = This;
    APValue *Value = &Result;

    // Determine the subobject to initialize.
    FieldDecl *FD = nullptr;
    if (I->isBaseInitializer()) {
      QualType BaseType(I->getBaseClass(), 0);
#ifndef NDEBUG
      // Non-virtual base classes are initialized in the order in the class
      // definition. We have already checked for virtual base classes.
      assert(!BaseIt->isVirtual() && "virtual base for literal type");
      assert(Info.Ctx.hasSameUnqualifiedType(BaseIt->getType(), BaseType) &&
             "base class initializers not in expected order");
      ++BaseIt;
#endif
      if (!HandleLValueDirectBase(Info, E: I->getInit(), Obj&: Subobject, Derived: RD,
                                  Base: BaseType->getAsCXXRecordDecl(), RL: &Layout))
        return false;
      Value = &Result.getStructBase(i: BasesSeen++);
    } else if ((FD = I->getMember())) {
      if (!HandleLValueMember(Info, E: I->getInit(), LVal&: Subobject, FD, RL: &Layout))
        return false;
      if (RD->isUnion()) {
        // Initializing a union member makes it the (sole) active member.
        Result = APValue(FD);
        Value = &Result.getUnionValue();
      } else {
        SkipToField(FD, false);
        Value = &Result.getStructField(i: FD->getFieldIndex());
      }
    } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
      // Walk the indirect field decl's chain to find the object to initialize,
      // and make sure we've initialized every step along it.
      auto IndirectFieldChain = IFD->chain();
      for (auto *C : IndirectFieldChain) {
        FD = cast<FieldDecl>(Val: C);
        CXXRecordDecl *CD = cast<CXXRecordDecl>(Val: FD->getParent());
        // Switch the union field if it differs. This happens if we had
        // preceding zero-initialization, and we're now initializing a union
        // subobject other than the first.
        // FIXME: In this case, the values of the other subobjects are
        // specified, since zero-initialization sets all padding bits to zero.
        if (!Value->hasValue() ||
            (Value->isUnion() &&
             !declaresSameEntity(D1: Value->getUnionField(), D2: FD))) {
          if (CD->isUnion())
            *Value = APValue(FD);
          else
            // FIXME: This immediately starts the lifetime of all members of
            // an anonymous struct. It would be preferable to strictly start
            // member lifetime in initialization order.
            Success &= handleDefaultInitValue(T: Info.Ctx.getCanonicalTagType(TD: CD),
                                              Result&: *Value);
        }
        // Store Subobject as its parent before updating it for the last element
        // in the chain.
        if (C == IndirectFieldChain.back())
          SubobjectParent = Subobject;
        if (!HandleLValueMember(Info, E: I->getInit(), LVal&: Subobject, FD))
          return false;
        if (CD->isUnion())
          Value = &Value->getUnionValue();
        else {
          if (C == IndirectFieldChain.front() && !RD->isUnion())
            SkipToField(FD, true);
          Value = &Value->getStructField(i: FD->getFieldIndex());
        }
      }
    } else {
      llvm_unreachable("unknown base initializer kind");
    }

    // Need to override This for implicit field initializers as in this case
    // This refers to innermost anonymous struct/union containing initializer,
    // not to currently constructed class.
    const Expr *Init = I->getInit();
    if (Init->isValueDependent()) {
      if (!EvaluateDependentExpr(E: Init, Info))
        return false;
    } else {
      ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent,
                                    isa<CXXDefaultInitExpr>(Val: Init));
      FullExpressionRAII InitScope(Info);
      if (FD && FD->getType()->isReferenceType() &&
          !FD->getType()->isFunctionReferenceType()) {
        // Reference members are bound (possibly lifetime-extending a
        // temporary) rather than evaluated in place.
        LValue Result;
        if (!EvaluateInitForDeclOfReferenceType(Info, D: FD, Init, Result,
                                                Val&: *Value)) {
          if (!Info.noteFailure())
            return false;
          Success = false;
        }
      } else if (!EvaluateInPlace(Result&: *Value, Info, This: Subobject, E: Init) ||
                 (FD && FD->isBitField() &&
                  !truncateBitfieldValue(Info, E: Init, Value&: *Value, FD))) {
        // If we're checking for a potential constant expression, evaluate all
        // initializers even if some of them fail.
        if (!Info.noteFailure())
          return false;
        Success = false;
      }
    }

    // This is the point at which the dynamic type of the object becomes this
    // class type.
    if (I->isBaseInitializer() && BasesSeen == RD->getNumBases())
      EvalObj.finishedConstructingBases();
  }

  // Default-initialize any remaining fields.
  if (!RD->isUnion()) {
    for (; FieldIt != RD->field_end(); ++FieldIt) {
      if (!FieldIt->isUnnamedBitField())
        Success &= handleDefaultInitValue(
            T: FieldIt->getType(),
            Result&: Result.getStructField(i: FieldIt->getFieldIndex()));
    }
  }

  EvalObj.finishedConstructingFields();

  // Finally run the constructor body, then destroy block-scope temporaries.
  return Success &&
         EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) != ESR_Failed &&
         LifetimeExtendedScope.destroy();
}
7279
7280static bool HandleConstructorCall(const Expr *E, const LValue &This,
7281 ArrayRef<const Expr*> Args,
7282 const CXXConstructorDecl *Definition,
7283 EvalInfo &Info, APValue &Result) {
7284 CallScopeRAII CallScope(Info);
7285 CallRef Call = Info.CurrentCall->createCall(Callee: Definition);
7286 if (!EvaluateArgs(Args, Call, Info, Callee: Definition))
7287 return false;
7288
7289 return HandleConstructorCall(E, This, Call, Definition, Info, Result) &&
7290 CallScope.destroy();
7291}
7292
/// Destroy the object at \p This (holding value \p Value, of type \p T),
/// recursively destroying array elements, members, and bases in the order
/// required by the language (elements and members in reverse, then bases in
/// reverse). On success, \p Value is reset to an absent APValue, ending the
/// object's lifetime.
static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
                                  const LValue &This, APValue &Value,
                                  QualType T) {
  // Objects can only be destroyed while they're within their lifetimes.
  // FIXME: We have no representation for whether an object of type nullptr_t
  // is in its lifetime; it usually doesn't matter. Perhaps we should model it
  // as indeterminate instead?
  if (Value.isAbsent() && !T->isNullPtrType()) {
    APValue Printable;
    This.moveInto(V&: Printable);
    Info.FFDiag(Loc: CallRange.getBegin(),
                DiagId: diag::note_constexpr_destroy_out_of_lifetime)
        << Printable.getAsString(Ctx: Info.Ctx, Ty: Info.Ctx.getLValueReferenceType(T));
    return false;
  }

  // Invent an expression for location purposes.
  // FIXME: We shouldn't need to do this.
  OpaqueValueExpr LocE(CallRange.getBegin(), Info.Ctx.IntTy, VK_PRValue);

  // For arrays, destroy elements right-to-left.
  if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
    uint64_t Size = CAT->getZExtSize();
    QualType ElemT = CAT->getElementType();

    if (!CheckArraySize(Info, CAT, CallLoc: CallRange.getBegin()))
      return false;

    // Position ElemLV one past the last element; the loop below steps it
    // backwards one element at a time.
    LValue ElemLV = This;
    ElemLV.addArray(Info, E: &LocE, CAT);
    if (!HandleLValueArrayAdjustment(Info, E: &LocE, LVal&: ElemLV, EltTy: ElemT, Adjustment: Size))
      return false;

    // Ensure that we have actual array elements available to destroy; the
    // destructors might mutate the value, so we can't run them on the array
    // filler.
    if (Size && Size > Value.getArrayInitializedElts())
      expandArray(Array&: Value, Index: Value.getArraySize() - 1);

    // The size of the array might have been reduced by
    // a placement new.
    for (Size = Value.getArraySize(); Size != 0; --Size) {
      APValue &Elem = Value.getArrayInitializedElt(I: Size - 1);
      if (!HandleLValueArrayAdjustment(Info, E: &LocE, LVal&: ElemLV, EltTy: ElemT, Adjustment: -1) ||
          !HandleDestructionImpl(Info, CallRange, This: ElemLV, Value&: Elem, T: ElemT))
        return false;
    }

    // End the lifetime of this array now.
    Value = APValue();
    return true;
  }

  const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
  if (!RD) {
    // Non-class, non-array types with a nontrivial pseudo-destruction
    // semantic (e.g. ObjC lifetime-qualified types) are not supported.
    if (T.isDestructedType()) {
      Info.FFDiag(Loc: CallRange.getBegin(),
                  DiagId: diag::note_constexpr_unsupported_destruction)
          << T;
      return false;
    }

    Value = APValue();
    return true;
  }

  // Classes with virtual bases are not supported in constant evaluation.
  if (RD->getNumVBases()) {
    Info.FFDiag(Loc: CallRange.getBegin(), DiagId: diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  const CXXDestructorDecl *DD = RD->getDestructor();
  if (!DD && !RD->hasTrivialDestructor()) {
    Info.FFDiag(Loc: CallRange.getBegin());
    return false;
  }

  if (!DD || DD->isTrivial() ||
      (RD->isAnonymousStructOrUnion() && RD->isUnion())) {
    // A trivial destructor just ends the lifetime of the object. Check for
    // this case before checking for a body, because we might not bother
    // building a body for a trivial destructor. Note that it doesn't matter
    // whether the destructor is constexpr in this case; all trivial
    // destructors are constexpr.
    //
    // If an anonymous union would be destroyed, some enclosing destructor must
    // have been explicitly defined, and the anonymous union destruction should
    // have no effect.
    Value = APValue();
    return true;
  }

  if (!Info.CheckCallLimit(Loc: CallRange.getBegin()))
    return false;

  const FunctionDecl *Definition = nullptr;
  const Stmt *Body = DD->getBody(Definition);

  if (!CheckConstexprFunction(Info, CallLoc: CallRange.getBegin(), Declaration: DD, Definition, Body))
    return false;

  CallStackFrame Frame(Info, CallRange, Definition, &This, /*CallExpr=*/nullptr,
                       CallRef());

  // We're now in the period of destruction of this object.
  unsigned BasesLeft = RD->getNumBases();
  EvalInfo::EvaluatingDestructorRAII EvalObj(
      Info,
      ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries});
  if (!EvalObj.DidInsert) {
    // C++2a [class.dtor]p19:
    //   the behavior is undefined if the destructor is invoked for an object
    //   whose lifetime has ended
    // (Note that formally the lifetime ends when the period of destruction
    // begins, even though certain uses of the object remain valid until the
    // period of destruction ends.)
    Info.FFDiag(Loc: CallRange.getBegin(), DiagId: diag::note_constexpr_double_destroy);
    return false;
  }

  // FIXME: Creating an APValue just to hold a nonexistent return value is
  // wasteful.
  APValue RetVal;
  StmtResult Ret = {.Value: RetVal, .Slot: nullptr};
  if (EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) == ESR_Failed)
    return false;

  // A union destructor does not implicitly destroy its members.
  if (RD->isUnion())
    return true;

  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

  // We don't have a good way to iterate fields in reverse, so collect all the
  // fields first and then walk them backwards.
  SmallVector<FieldDecl*, 16> Fields(RD->fields());
  for (const FieldDecl *FD : llvm::reverse(C&: Fields)) {
    if (FD->isUnnamedBitField())
      continue;

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E: &LocE, LVal&: Subobject, FD, RL: &Layout))
      return false;

    APValue *SubobjectValue = &Value.getStructField(i: FD->getFieldIndex());
    if (!HandleDestructionImpl(Info, CallRange, This: Subobject, Value&: *SubobjectValue,
                               T: FD->getType()))
      return false;
  }

  if (BasesLeft != 0)
    EvalObj.startedDestroyingBases();

  // Destroy base classes in reverse order.
  for (const CXXBaseSpecifier &Base : llvm::reverse(C: RD->bases())) {
    --BasesLeft;

    QualType BaseType = Base.getType();
    LValue Subobject = This;
    if (!HandleLValueDirectBase(Info, E: &LocE, Obj&: Subobject, Derived: RD,
                                Base: BaseType->getAsCXXRecordDecl(), RL: &Layout))
      return false;

    APValue *SubobjectValue = &Value.getStructBase(i: BasesLeft);
    if (!HandleDestructionImpl(Info, CallRange, This: Subobject, Value&: *SubobjectValue,
                               T: BaseType))
      return false;
  }
  assert(BasesLeft == 0 && "NumBases was wrong?");

  // The period of destruction ends now. The object is gone.
  Value = APValue();
  return true;
}
7467
namespace {
/// Handler passed to findSubobject: runs HandleDestructionImpl on the
/// designated subobject within a complete object.
struct DestroyObjectHandler {
  EvalInfo &Info;
  const Expr *E;        // Expression providing the source range for diags.
  const LValue &This;   // The object being destroyed.
  const AccessKinds AccessKind;

  typedef bool result_type;
  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    return HandleDestructionImpl(Info, CallRange: E->getSourceRange(), This, Value&: Subobj,
                                 T: SubobjType);
  }
  // Landing on the real/imaginary component of a _Complex value: such an
  // element cannot be destroyed individually.
  bool found(APSInt &Value, QualType SubobjType) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_destroy_complex_elem);
    return false;
  }
  bool found(APFloat &Value, QualType SubobjType) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_destroy_complex_elem);
    return false;
  }
};
}
7491
7492/// Perform a destructor or pseudo-destructor call on the given object, which
7493/// might in general not be a complete object.
7494static bool HandleDestruction(EvalInfo &Info, const Expr *E,
7495 const LValue &This, QualType ThisType) {
7496 CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Destroy, LVal: This, LValType: ThisType);
7497 DestroyObjectHandler Handler = {.Info: Info, .E: E, .This: This, .AccessKind: AK_Destroy};
7498 return Obj && findSubobject(Info, E, Obj, Sub: This.Designator, handler&: Handler);
7499}
7500
7501/// Destroy and end the lifetime of the given complete object.
7502static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
7503 APValue::LValueBase LVBase, APValue &Value,
7504 QualType T) {
7505 // If we've had an unmodeled side-effect, we can't rely on mutable state
7506 // (such as the object we're about to destroy) being correct.
7507 if (Info.EvalStatus.HasSideEffects)
7508 return false;
7509
7510 LValue LV;
7511 LV.set(B: {LVBase});
7512 return HandleDestructionImpl(Info, CallRange: Loc, This: LV, Value, T);
7513}
7514
7515/// Perform a call to 'operator new' or to `__builtin_operator_new'.
7516static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
7517 LValue &Result) {
7518 if (Info.checkingPotentialConstantExpression() ||
7519 Info.SpeculativeEvaluationDepth)
7520 return false;
7521
7522 // This is permitted only within a call to std::allocator<T>::allocate.
7523 auto Caller = Info.getStdAllocatorCaller(FnName: "allocate");
7524 if (!Caller) {
7525 Info.FFDiag(Loc: E->getExprLoc(), DiagId: Info.getLangOpts().CPlusPlus20
7526 ? diag::note_constexpr_new_untyped
7527 : diag::note_constexpr_new);
7528 return false;
7529 }
7530
7531 QualType ElemType = Caller.ElemType;
7532 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
7533 Info.FFDiag(Loc: E->getExprLoc(),
7534 DiagId: diag::note_constexpr_new_not_complete_object_type)
7535 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
7536 return false;
7537 }
7538
7539 APSInt ByteSize;
7540 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: ByteSize, Info))
7541 return false;
7542 bool IsNothrow = false;
7543 for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) {
7544 EvaluateIgnoredValue(Info, E: E->getArg(Arg: I));
7545 IsNothrow |= E->getType()->isNothrowT();
7546 }
7547
7548 CharUnits ElemSize;
7549 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: ElemType, Size&: ElemSize))
7550 return false;
7551 APInt Size, Remainder;
7552 APInt ElemSizeAP(ByteSize.getBitWidth(), ElemSize.getQuantity());
7553 APInt::udivrem(LHS: ByteSize, RHS: ElemSizeAP, Quotient&: Size, Remainder);
7554 if (Remainder != 0) {
7555 // This likely indicates a bug in the implementation of 'std::allocator'.
7556 Info.FFDiag(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_operator_new_bad_size)
7557 << ByteSize << APSInt(ElemSizeAP, true) << ElemType;
7558 return false;
7559 }
7560
7561 if (!Info.CheckArraySize(Loc: E->getBeginLoc(), BitWidth: ByteSize.getActiveBits(),
7562 ElemCount: Size.getZExtValue(), /*Diag=*/!IsNothrow)) {
7563 if (IsNothrow) {
7564 Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
7565 return true;
7566 }
7567 return false;
7568 }
7569
7570 QualType AllocType = Info.Ctx.getConstantArrayType(
7571 EltTy: ElemType, ArySize: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
7572 APValue *Val = Info.createHeapAlloc(E: Caller.Call, T: AllocType, LV&: Result);
7573 *Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue());
7574 Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val&: AllocType));
7575 return true;
7576}
7577
7578static bool hasVirtualDestructor(QualType T) {
7579 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
7580 if (CXXDestructorDecl *DD = RD->getDestructor())
7581 return DD->isVirtual();
7582 return false;
7583}
7584
7585static const FunctionDecl *getVirtualOperatorDelete(QualType T) {
7586 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
7587 if (CXXDestructorDecl *DD = RD->getDestructor())
7588 return DD->isVirtual() ? DD->getOperatorDelete() : nullptr;
7589 return nullptr;
7590}
7591
7592/// Check that the given object is a suitable pointer to a heap allocation that
7593/// still exists and is of the right kind for the purpose of a deletion.
7594///
7595/// On success, returns the heap allocation to deallocate. On failure, produces
7596/// a diagnostic and returns std::nullopt.
static std::optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
                                                 const LValue &Pointer,
                                                 DynAlloc::Kind DeallocKind) {
  // Render the pointer as a string for use in diagnostics.
  auto PointerAsString = [&] {
    return Pointer.toString(Ctx&: Info.Ctx, T: Info.Ctx.VoidPtrTy);
  };

  // The pointer must refer to a dynamic allocation made during this
  // evaluation, not to a static or automatic object.
  DynamicAllocLValue DA = Pointer.Base.dyn_cast<DynamicAllocLValue>();
  if (!DA) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_delete_not_heap_alloc)
        << PointerAsString();
    if (Pointer.Base)
      NoteLValueLocation(Info, Base: Pointer.Base);
    return std::nullopt;
  }

  // The allocation must still be live; a missing entry means it was already
  // deallocated (double delete).
  std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
  if (!Alloc) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_double_delete);
    return std::nullopt;
  }

  // The deallocation form must match the allocation form (e.g. 'delete' for
  // 'new', 'delete[]' for 'new[]', allocator deallocate for allocator
  // allocate).
  if (DeallocKind != (*Alloc)->getKind()) {
    QualType AllocType = Pointer.Base.getDynamicAllocType();
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_delete_mismatch)
        << DeallocKind << (*Alloc)->getKind() << AllocType;
    NoteLValueLocation(Info, Base: Pointer.Base);
    return std::nullopt;
  }

  // The pointer must designate the complete allocated object, not some
  // subobject of it.
  bool Subobject = false;
  if (DeallocKind == DynAlloc::New) {
    // For scalar 'delete', the pointer must be to the most-derived complete
    // object and must not be a one-past-the-end pointer.
    Subobject = Pointer.Designator.MostDerivedPathLength != 0 ||
                Pointer.Designator.isOnePastTheEnd();
  } else {
    // For array-style deallocation, it must point to element 0 of the
    // allocated array.
    Subobject = Pointer.Designator.Entries.size() != 1 ||
                Pointer.Designator.Entries[0].getAsArrayIndex() != 0;
  }
  if (Subobject) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_delete_subobject)
        << PointerAsString() << Pointer.Designator.isOnePastTheEnd();
    return std::nullopt;
  }

  return Alloc;
}
7643
7644// Perform a call to 'operator delete' or '__builtin_operator_delete'.
7645static bool HandleOperatorDeleteCall(EvalInfo &Info, const CallExpr *E) {
7646 if (Info.checkingPotentialConstantExpression() ||
7647 Info.SpeculativeEvaluationDepth)
7648 return false;
7649
7650 // This is permitted only within a call to std::allocator<T>::deallocate.
7651 if (!Info.getStdAllocatorCaller(FnName: "deallocate")) {
7652 Info.FFDiag(Loc: E->getExprLoc());
7653 return true;
7654 }
7655
7656 LValue Pointer;
7657 if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: Pointer, Info))
7658 return false;
7659 for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I)
7660 EvaluateIgnoredValue(Info, E: E->getArg(Arg: I));
7661
7662 if (Pointer.Designator.Invalid)
7663 return false;
7664
7665 // Deleting a null pointer would have no effect, but it's not permitted by
7666 // std::allocator<T>::deallocate's contract.
7667 if (Pointer.isNullPointer()) {
7668 Info.CCEDiag(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_deallocate_null);
7669 return true;
7670 }
7671
7672 if (!CheckDeleteKind(Info, E, Pointer, DeallocKind: DynAlloc::StdAllocator))
7673 return false;
7674
7675 Info.HeapAllocs.erase(x: Pointer.Base.get<DynamicAllocLValue>());
7676 return true;
7677}
7678
7679//===----------------------------------------------------------------------===//
7680// Generic Evaluation
7681//===----------------------------------------------------------------------===//
7682namespace {
7683
7684class BitCastBuffer {
7685 // FIXME: We're going to need bit-level granularity when we support
7686 // bit-fields.
7687 // FIXME: Its possible under the C++ standard for 'char' to not be 8 bits, but
7688 // we don't support a host or target where that is the case. Still, we should
7689 // use a more generic type in case we ever do.
7690 SmallVector<std::optional<unsigned char>, 32> Bytes;
7691
7692 static_assert(std::numeric_limits<unsigned char>::digits >= 8,
7693 "Need at least 8 bit unsigned char");
7694
7695 bool TargetIsLittleEndian;
7696
7697public:
7698 BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
7699 : Bytes(Width.getQuantity()),
7700 TargetIsLittleEndian(TargetIsLittleEndian) {}
7701
7702 [[nodiscard]] bool readObject(CharUnits Offset, CharUnits Width,
7703 SmallVectorImpl<unsigned char> &Output) const {
7704 for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
7705 // If a byte of an integer is uninitialized, then the whole integer is
7706 // uninitialized.
7707 if (!Bytes[I.getQuantity()])
7708 return false;
7709 Output.push_back(Elt: *Bytes[I.getQuantity()]);
7710 }
7711 if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
7712 std::reverse(first: Output.begin(), last: Output.end());
7713 return true;
7714 }
7715
7716 void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
7717 if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
7718 std::reverse(first: Input.begin(), last: Input.end());
7719
7720 size_t Index = 0;
7721 for (unsigned char Byte : Input) {
7722 assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
7723 Bytes[Offset.getQuantity() + Index] = Byte;
7724 ++Index;
7725 }
7726 }
7727
7728 size_t size() { return Bytes.size(); }
7729};
7730
/// Traverse an APValue to produce a BitCastBuffer, emulating how the current
/// target would represent the value at runtime.
class APValueToBufferConverter {
  EvalInfo &Info;
  /// The byte image being built, sized for the destination type of the cast.
  BitCastBuffer Buffer;
  /// The bit_cast expression, used for diagnostic locations.
  const CastExpr *BCE;

  APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth,
                           const CastExpr *BCE)
      : Info(Info),
        Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
        BCE(BCE) {}

  // Write out Val with type Ty at the start of the buffer.
  bool visit(const APValue &Val, QualType Ty) {
    return visit(Val, Ty, Offset: CharUnits::fromQuantity(Quantity: 0));
  }

  // Write out Val with type Ty into Buffer starting at Offset.
  bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
    assert((size_t)Offset.getQuantity() <= Buffer.size());

    // As a special case, nullptr_t has an indeterminate value.
    if (Ty->isNullPtrType())
      return true;

    // Dig through Src to find the byte at SrcOffset.
    switch (Val.getKind()) {
    case APValue::Indeterminate:
    case APValue::None:
      // Nothing to write; the corresponding buffer bytes remain
      // uninitialized.
      return true;

    case APValue::Int:
      return visitInt(Val: Val.getInt(), Ty, Offset);
    case APValue::Float:
      return visitFloat(Val: Val.getFloat(), Ty, Offset);
    case APValue::Array:
      return visitArray(Val, Ty, Offset);
    case APValue::Struct:
      return visitRecord(Val, Ty, Offset);
    case APValue::Vector:
      return visitVector(Val, Ty, Offset);

    case APValue::ComplexInt:
    case APValue::ComplexFloat:
      return visitComplex(Val, Ty, Offset);
    case APValue::FixedPoint:
      // FIXME: We should support these.

    case APValue::LValue:
    case APValue::Matrix:
    case APValue::Union:
    case APValue::MemberPointer:
    case APValue::AddrLabelDiff: {
      Info.FFDiag(Loc: BCE->getBeginLoc(),
                  DiagId: diag::note_constexpr_bit_cast_unsupported_type)
          << Ty;
      return false;
    }
    }
    llvm_unreachable("Unhandled APValue::ValueKind");
  }

  /// Write out a struct/class value: each base-class subobject at its base
  /// offset, then each field at its record-layout offset.
  bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
    const RecordDecl *RD = Ty->getAsRecordDecl();
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

    // Visit the base classes.
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
        const APValue &Base = Val.getStructBase(i: I);

        // Can happen in error cases.
        if (!Base.isStruct())
          return false;

        if (!visitRecord(Val: Base, Ty: BS.getType(),
                         Offset: Layout.getBaseClassOffset(Base: BaseDecl) + Offset))
          return false;
      }
    }

    // Visit the fields.
    unsigned FieldIdx = 0;
    for (FieldDecl *FD : RD->fields()) {
      // Bit-fields are not supported in constexpr bit_cast yet.
      if (FD->isBitField()) {
        Info.FFDiag(Loc: BCE->getBeginLoc(),
                    DiagId: diag::note_constexpr_bit_cast_unsupported_bitfield);
        return false;
      }

      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldNo: FieldIdx);

      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 &&
             "only bit-fields can have sub-char alignment");
      CharUnits FieldOffset =
          Info.Ctx.toCharUnitsFromBits(BitSize: FieldOffsetBits) + Offset;
      QualType FieldTy = FD->getType();
      if (!visit(Val: Val.getStructField(i: FieldIdx), Ty: FieldTy, Offset: FieldOffset))
        return false;
      ++FieldIdx;
    }

    return true;
  }

  /// Write out a constant array: explicitly-initialized elements first, then
  /// the remaining elements using the array filler value.
  bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
    const auto *CAT =
        dyn_cast_or_null<ConstantArrayType>(Val: Ty->getAsArrayTypeUnsafe());
    if (!CAT)
      return false;

    CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(T: CAT->getElementType());
    unsigned NumInitializedElts = Val.getArrayInitializedElts();
    unsigned ArraySize = Val.getArraySize();
    // First, initialize the initialized elements.
    for (unsigned I = 0; I != NumInitializedElts; ++I) {
      const APValue &SubObj = Val.getArrayInitializedElt(I);
      if (!visit(Val: SubObj, Ty: CAT->getElementType(), Offset: Offset + I * ElemWidth))
        return false;
    }

    // Next, initialize the rest of the array using the filler.
    if (Val.hasArrayFiller()) {
      const APValue &Filler = Val.getArrayFiller();
      for (unsigned I = NumInitializedElts; I != ArraySize; ++I) {
        if (!visit(Val: Filler, Ty: CAT->getElementType(), Offset: Offset + I * ElemWidth))
          return false;
      }
    }

    return true;
  }

  /// Write out a complex value as its two elements (real then imaginary),
  /// laid out contiguously.
  bool visitComplex(const APValue &Val, QualType Ty, CharUnits Offset) {
    const ComplexType *ComplexTy = Ty->castAs<ComplexType>();
    QualType EltTy = ComplexTy->getElementType();
    CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy);
    bool IsInt = Val.isComplexInt();

    if (IsInt) {
      if (!visitInt(Val: Val.getComplexIntReal(), Ty: EltTy,
                    Offset: Offset + (0 * EltSizeChars)))
        return false;
      if (!visitInt(Val: Val.getComplexIntImag(), Ty: EltTy,
                    Offset: Offset + (1 * EltSizeChars)))
        return false;
    } else {
      if (!visitFloat(Val: Val.getComplexFloatReal(), Ty: EltTy,
                      Offset: Offset + (0 * EltSizeChars)))
        return false;
      if (!visitFloat(Val: Val.getComplexFloatImag(), Ty: EltTy,
                      Offset: Offset + (1 * EltSizeChars)))
        return false;
    }

    return true;
  }

  /// Write out a vector value, either as packed bits (bool vectors) or
  /// element by element.
  bool visitVector(const APValue &Val, QualType Ty, CharUnits Offset) {
    const VectorType *VTy = Ty->castAs<VectorType>();
    QualType EltTy = VTy->getElementType();
    unsigned NElts = VTy->getNumElements();

    if (VTy->isPackedVectorBoolType(ctx: Info.Ctx)) {
      // Special handling for OpenCL bool vectors:
      // Since these vectors are stored as packed bits, but we can't write
      // individual bits to the BitCastBuffer, we'll buffer all of the elements
      // together into an appropriately sized APInt and write them all out at
      // once. Because we don't accept vectors where NElts * EltSize isn't a
      // multiple of the char size, there will be no padding space, so we don't
      // have to worry about writing data which should have been left
      // uninitialized.
      bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();

      llvm::APInt Res = llvm::APInt::getZero(numBits: NElts);
      for (unsigned I = 0; I < NElts; ++I) {
        const llvm::APSInt &EltAsInt = Val.getVectorElt(I).getInt();
        assert(EltAsInt.isUnsigned() && EltAsInt.getBitWidth() == 1 &&
               "bool vector element must be 1-bit unsigned integer!");

        Res.insertBits(SubBits: EltAsInt, bitPosition: BigEndian ? (NElts - I - 1) : I);
      }

      SmallVector<uint8_t, 8> Bytes(NElts / 8);
      llvm::StoreIntToMemory(IntVal: Res, Dst: &*Bytes.begin(), StoreBytes: NElts / 8);
      Buffer.writeObject(Offset, Input&: Bytes);
    } else {
      // Iterate over each of the elements and write them out to the buffer at
      // the appropriate offset.
      CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy);
      for (unsigned I = 0; I < NElts; ++I) {
        if (!visit(Val: Val.getVectorElt(I), Ty: EltTy, Offset: Offset + I * EltSizeChars))
          return false;
      }
    }

    return true;
  }

  /// Write out an integer value in target byte order.
  bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
    APSInt AdjustedVal = Val;
    unsigned Width = AdjustedVal.getBitWidth();
    // 'bool' values may be held in a 1-bit APSInt; widen to the type's
    // in-memory size before storing.
    if (Ty->isBooleanType()) {
      Width = Info.Ctx.getTypeSize(T: Ty);
      AdjustedVal = AdjustedVal.extend(width: Width);
    }

    SmallVector<uint8_t, 8> Bytes(Width / 8);
    llvm::StoreIntToMemory(IntVal: AdjustedVal, Dst: &*Bytes.begin(), StoreBytes: Width / 8);
    Buffer.writeObject(Offset, Input&: Bytes);
    return true;
  }

  /// Write out a floating-point value via its bit pattern.
  bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
    APSInt AsInt(Val.bitcastToAPInt());
    return visitInt(Val: AsInt, Ty, Offset);
  }

public:
  /// Serialize Src (the operand of the bit_cast BCE) into a byte buffer
  /// sized for the cast's destination type. Returns std::nullopt on failure;
  /// a diagnostic has already been produced in that case.
  static std::optional<BitCastBuffer>
  convert(EvalInfo &Info, const APValue &Src, const CastExpr *BCE) {
    CharUnits DstSize = Info.Ctx.getTypeSizeInChars(T: BCE->getType());
    APValueToBufferConverter Converter(Info, DstSize, BCE);
    if (!Converter.visit(Val: Src, Ty: BCE->getSubExpr()->getType()))
      return std::nullopt;
    return Converter.Buffer;
  }
};
7961
/// Write a BitCastBuffer into an APValue.
class BufferToAPValueConverter {
  EvalInfo &Info;
  /// The source byte image produced by APValueToBufferConverter.
  const BitCastBuffer &Buffer;
  /// The bit_cast expression, used for diagnostic locations.
  const CastExpr *BCE;

  BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer,
                           const CastExpr *BCE)
      : Info(Info), Buffer(Buffer), BCE(BCE) {}

  // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
  // with an invalid type, so anything left is a deficiency on our part (FIXME).
  // Ideally this will be unreachable.
  std::nullopt_t unsupportedType(QualType Ty) {
    Info.FFDiag(Loc: BCE->getBeginLoc(),
                DiagId: diag::note_constexpr_bit_cast_unsupported_type)
        << Ty;
    return std::nullopt;
  }

  /// Diagnose a bit pattern that is not a valid value of the integral or
  /// enumeration type Ty.
  std::nullopt_t unrepresentableValue(QualType Ty, const APSInt &Val) {
    Info.FFDiag(Loc: BCE->getBeginLoc(),
                DiagId: diag::note_constexpr_bit_cast_unrepresentable_value)
        << Ty << toString(I: Val, /*Radix=*/10);
    return std::nullopt;
  }

  /// Read a scalar of builtin type T at Offset. EnumSugar, if non-null, is
  /// the enum type whose underlying type T is; it is used for diagnostics
  /// and for the std::byte special case.
  std::optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
                               const EnumType *EnumSugar = nullptr) {
    // nullptr_t always reads back as the target's null pointer value,
    // regardless of the buffer contents.
    if (T->isNullPtrType()) {
      uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QT: QualType(T, 0));
      return APValue((Expr *)nullptr,
                     /*Offset=*/CharUnits::fromQuantity(Quantity: NullValue),
                     APValue::NoLValuePath{}, /*IsNullPtr=*/true);
    }

    CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);

    // Work around floating point types that contain unused padding bytes. This
    // is really just `long double` on x86, which is the only fundamental type
    // with padding bytes.
    if (T->isRealFloatingType()) {
      const llvm::fltSemantics &Semantics =
          Info.Ctx.getFloatTypeSemantics(T: QualType(T, 0));
      unsigned NumBits = llvm::APFloatBase::getSizeInBits(Sem: Semantics);
      assert(NumBits % 8 == 0);
      CharUnits NumBytes = CharUnits::fromQuantity(Quantity: NumBits / 8);
      if (NumBytes != SizeOf)
        SizeOf = NumBytes;
    }

    SmallVector<uint8_t, 8> Bytes;
    if (!Buffer.readObject(Offset, Width: SizeOf, Output&: Bytes)) {
      // If this is std::byte or unsigned char, then its okay to store an
      // indeterminate value.
      bool IsStdByte = EnumSugar && EnumSugar->isStdByteType();
      bool IsUChar =
          !EnumSugar && (T->isSpecificBuiltinType(K: BuiltinType::UChar) ||
                         T->isSpecificBuiltinType(K: BuiltinType::Char_U));
      if (!IsStdByte && !IsUChar) {
        QualType DisplayType(EnumSugar ? (const Type *)EnumSugar : T, 0);
        Info.FFDiag(Loc: BCE->getExprLoc(),
                    DiagId: diag::note_constexpr_bit_cast_indet_dest)
            << DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
        return std::nullopt;
      }

      return APValue::IndeterminateValue();
    }

    APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true);
    llvm::LoadIntFromMemory(IntVal&: Val, Src: &*Bytes.begin(), LoadBytes: Bytes.size());

    if (T->isIntegralOrEnumerationType()) {
      Val.setIsSigned(T->isSignedIntegerOrEnumerationType());

      // Truncate to the value-representation width; diagnose if the dropped
      // bits carried information (i.e. the value is unrepresentable in T).
      unsigned IntWidth = Info.Ctx.getIntWidth(T: QualType(T, 0));
      if (IntWidth != Val.getBitWidth()) {
        APSInt Truncated = Val.trunc(width: IntWidth);
        if (Truncated.extend(width: Val.getBitWidth()) != Val)
          return unrepresentableValue(Ty: QualType(T, 0), Val);
        Val = Truncated;
      }

      return APValue(Val);
    }

    if (T->isRealFloatingType()) {
      const llvm::fltSemantics &Semantics =
          Info.Ctx.getFloatTypeSemantics(T: QualType(T, 0));
      return APValue(APFloat(Semantics, Val));
    }

    return unsupportedType(Ty: QualType(T, 0));
  }

  /// Read a struct/class: base-class subobjects at their base offsets, then
  /// each field at its record-layout offset.
  std::optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
    const RecordDecl *RD = RTy->getAsRecordDecl();
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

    unsigned NumBases = 0;
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD))
      NumBases = CXXRD->getNumBases();

    APValue ResultVal(APValue::UninitStruct(), NumBases, RD->getNumFields());

    // Visit the base classes.
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();

        std::optional<APValue> SubObj = visitType(
            Ty: BS.getType(), Offset: Layout.getBaseClassOffset(Base: BaseDecl) + Offset);
        if (!SubObj)
          return std::nullopt;
        ResultVal.getStructBase(i: I) = *SubObj;
      }
    }

    // Visit the fields.
    unsigned FieldIdx = 0;
    for (FieldDecl *FD : RD->fields()) {
      // FIXME: We don't currently support bit-fields. A lot of the logic for
      // this is in CodeGen, so we need to factor it around.
      if (FD->isBitField()) {
        Info.FFDiag(Loc: BCE->getBeginLoc(),
                    DiagId: diag::note_constexpr_bit_cast_unsupported_bitfield);
        return std::nullopt;
      }

      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldNo: FieldIdx);
      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0);

      CharUnits FieldOffset =
          CharUnits::fromQuantity(Quantity: FieldOffsetBits / Info.Ctx.getCharWidth()) +
          Offset;
      QualType FieldTy = FD->getType();
      std::optional<APValue> SubObj = visitType(Ty: FieldTy, Offset: FieldOffset);
      if (!SubObj)
        return std::nullopt;
      ResultVal.getStructField(i: FieldIdx) = *SubObj;
      ++FieldIdx;
    }

    return ResultVal;
  }

  /// Read an enum by reading a value of its underlying integer type.
  std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
    QualType RepresentationType =
        Ty->getDecl()->getDefinitionOrSelf()->getIntegerType();
    assert(!RepresentationType.isNull() &&
           "enum forward decl should be caught by Sema");
    const auto *AsBuiltin =
        RepresentationType.getCanonicalType()->castAs<BuiltinType>();
    // Recurse into the underlying type. Treat std::byte transparently as
    // unsigned char.
    return visit(T: AsBuiltin, Offset, /*EnumTy=*/EnumSugar: Ty);
  }

  /// Read a constant-sized array element by element.
  std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
    size_t Size = Ty->getLimitedSize();
    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(T: Ty->getElementType());

    APValue ArrayValue(APValue::UninitArray(), Size, Size);
    for (size_t I = 0; I != Size; ++I) {
      std::optional<APValue> ElementValue =
          visitType(Ty: Ty->getElementType(), Offset: Offset + I * ElementWidth);
      if (!ElementValue)
        return std::nullopt;
      ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
    }

    return ArrayValue;
  }

  /// Read a complex value as two contiguous elements (real then imaginary).
  std::optional<APValue> visit(const ComplexType *Ty, CharUnits Offset) {
    QualType ElementType = Ty->getElementType();
    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(T: ElementType);
    bool IsInt = ElementType->isIntegerType();

    std::optional<APValue> Values[2];
    for (unsigned I = 0; I != 2; ++I) {
      Values[I] = visitType(Ty: Ty->getElementType(), Offset: Offset + I * ElementWidth);
      if (!Values[I])
        return std::nullopt;
    }

    if (IsInt)
      return APValue(Values[0]->getInt(), Values[1]->getInt());
    return APValue(Values[0]->getFloat(), Values[1]->getFloat());
  }

  /// Read a vector value, either as packed bits (bool vectors) or element by
  /// element.
  std::optional<APValue> visit(const VectorType *VTy, CharUnits Offset) {
    QualType EltTy = VTy->getElementType();
    unsigned NElts = VTy->getNumElements();
    unsigned EltSize =
        VTy->isPackedVectorBoolType(ctx: Info.Ctx) ? 1 : Info.Ctx.getTypeSize(T: EltTy);

    SmallVector<APValue, 4> Elts;
    Elts.reserve(N: NElts);
    if (VTy->isPackedVectorBoolType(ctx: Info.Ctx)) {
      // Special handling for OpenCL bool vectors:
      // Since these vectors are stored as packed bits, but we can't read
      // individual bits from the BitCastBuffer, we'll buffer all of the
      // elements together into an appropriately sized APInt and write them all
      // out at once. Because we don't accept vectors where NElts * EltSize
      // isn't a multiple of the char size, there will be no padding space, so
      // we don't have to worry about reading any padding data which didn't
      // actually need to be accessed.
      bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();

      SmallVector<uint8_t, 8> Bytes;
      Bytes.reserve(N: NElts / 8);
      if (!Buffer.readObject(Offset, Width: CharUnits::fromQuantity(Quantity: NElts / 8), Output&: Bytes))
        return std::nullopt;

      APSInt SValInt(NElts, true);
      llvm::LoadIntFromMemory(IntVal&: SValInt, Src: &*Bytes.begin(), LoadBytes: Bytes.size());

      for (unsigned I = 0; I < NElts; ++I) {
        llvm::APInt Elt =
            SValInt.extractBits(numBits: 1, bitPosition: (BigEndian ? NElts - I - 1 : I) * EltSize);
        Elts.emplace_back(
            Args: APSInt(std::move(Elt), !EltTy->isSignedIntegerType()));
      }
    } else {
      // Iterate over each of the elements and read them from the buffer at
      // the appropriate offset.
      CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy);
      for (unsigned I = 0; I < NElts; ++I) {
        std::optional<APValue> EltValue =
            visitType(Ty: EltTy, Offset: Offset + I * EltSizeChars);
        if (!EltValue)
          return std::nullopt;
        Elts.push_back(Elt: std::move(*EltValue));
      }
    }

    return APValue(Elts.data(), Elts.size());
  }

  /// Fallback for any other type kind: not supported.
  std::optional<APValue> visit(const Type *Ty, CharUnits Offset) {
    return unsupportedType(Ty: QualType(Ty, 0));
  }

  /// Dispatch on the canonical type class to the appropriate visit overload.
  std::optional<APValue> visitType(QualType Ty, CharUnits Offset) {
    QualType Can = Ty.getCanonicalType();

    switch (Can->getTypeClass()) {
#define TYPE(Class, Base)                                                      \
  case Type::Class:                                                            \
    return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)                                        \
  case Type::Class:                                                            \
    llvm_unreachable("non-canonical type should be impossible!");
#define DEPENDENT_TYPE(Class, Base)                                            \
  case Type::Class:                                                            \
    llvm_unreachable(                                                          \
        "dependent types aren't supported in the constant evaluator!");
#define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base)                            \
  case Type::Class:                                                            \
    llvm_unreachable("either dependent or not canonical!");
#include "clang/AST/TypeNodes.inc"
    }
    llvm_unreachable("Unhandled Type::TypeClass");
  }

public:
  // Pull out a full value of type DstType.
  static std::optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
                                        const CastExpr *BCE) {
    BufferToAPValueConverter Converter(Info, Buffer, BCE);
    return Converter.visitType(Ty: BCE->getType(), Offset: CharUnits::fromQuantity(Quantity: 0));
  }
};
8239
/// Check whether \p Ty may legally appear (at any nesting depth) as the
/// source or destination type of a constexpr bit_cast. Diagnoses through
/// \p Info when it is non-null; a null \p Info makes this a silent query.
static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
                                                 QualType Ty, EvalInfo *Info,
                                                 const ASTContext &Ctx,
                                                 bool CheckingDest) {
  Ty = Ty.getCanonicalType();

  // Report that Ty itself is an invalid kind of type; Reason selects the
  // diagnostic wording.
  auto diag = [&](int Reason) {
    if (Info)
      Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_invalid_type)
          << CheckingDest << (Reason == 4) << Reason;
    return false;
  };
  // Add a note pointing at the invalid subobject (base class or member)
  // within Ty.
  auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
    if (Info)
      Info->Note(Loc: NoteLoc, DiagId: diag::note_constexpr_bit_cast_invalid_subtype)
          << NoteTy << Construct << Ty;
    return false;
  };

  // These categories of types can never take part in a constexpr bit_cast.
  if (Ty->isUnionType())
    return diag(0);
  if (Ty->isPointerType())
    return diag(1);
  if (Ty->isMemberPointerType())
    return diag(2);
  if (Ty.isVolatileQualified())
    return diag(3);

  // Recurse into record base classes and fields.
  if (RecordDecl *Record = Ty->getAsRecordDecl()) {
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: Record)) {
      for (CXXBaseSpecifier &BS : CXXRD->bases())
        if (!checkBitCastConstexprEligibilityType(Loc, Ty: BS.getType(), Info, Ctx,
                                                  CheckingDest))
          return note(1, BS.getType(), BS.getBeginLoc());
    }
    for (FieldDecl *FD : Record->fields()) {
      if (FD->getType()->isReferenceType())
        return diag(4);
      if (!checkBitCastConstexprEligibilityType(Loc, Ty: FD->getType(), Info, Ctx,
                                                CheckingDest))
        return note(0, FD->getType(), FD->getBeginLoc());
    }
  }

  // Recurse into the array element type.
  if (Ty->isArrayType() &&
      !checkBitCastConstexprEligibilityType(Loc, Ty: Ctx.getBaseElementType(QT: Ty),
                                            Info, Ctx, CheckingDest))
    return false;

  if (const auto *VTy = Ty->getAs<VectorType>()) {
    QualType EltTy = VTy->getElementType();
    unsigned NElts = VTy->getNumElements();
    unsigned EltSize =
        VTy->isPackedVectorBoolType(ctx: Ctx) ? 1 : Ctx.getTypeSize(T: EltTy);

    if ((NElts * EltSize) % Ctx.getCharWidth() != 0) {
      // The vector's size in bits is not a multiple of the target's byte size,
      // so its layout is unspecified. For now, we'll simply treat these cases
      // as unsupported (this should only be possible with OpenCL bool vectors
      // whose element count isn't a multiple of the byte size).
      if (Info)
        Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_invalid_vector)
            << QualType(VTy, 0) << EltSize << NElts << Ctx.getCharWidth();
      return false;
    }

    if (EltTy->isRealFloatingType() &&
        &Ctx.getFloatTypeSemantics(T: EltTy) == &APFloat::x87DoubleExtended()) {
      // The layout for x86_fp80 vectors seems to be handled very inconsistently
      // by both clang and LLVM, so for now we won't allow bit_casts involving
      // it in a constexpr context.
      if (Info)
        Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_unsupported_type)
            << EltTy;
      return false;
    }
  }

  return true;
}
8320
8321static bool checkBitCastConstexprEligibility(EvalInfo *Info,
8322 const ASTContext &Ctx,
8323 const CastExpr *BCE) {
8324 bool DestOK = checkBitCastConstexprEligibilityType(
8325 Loc: BCE->getBeginLoc(), Ty: BCE->getType(), Info, Ctx, CheckingDest: true);
8326 bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
8327 Loc: BCE->getBeginLoc(),
8328 Ty: BCE->getSubExpr()->getType(), Info, Ctx, CheckingDest: false);
8329 return SourceOK;
8330}
8331
8332static bool handleRValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
8333 const APValue &SourceRValue,
8334 const CastExpr *BCE) {
8335 assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
8336 "no host or target supports non 8-bit chars");
8337
8338 if (!checkBitCastConstexprEligibility(Info: &Info, Ctx: Info.Ctx, BCE))
8339 return false;
8340
8341 // Read out SourceValue into a char buffer.
8342 std::optional<BitCastBuffer> Buffer =
8343 APValueToBufferConverter::convert(Info, Src: SourceRValue, BCE);
8344 if (!Buffer)
8345 return false;
8346
8347 // Write out the buffer into a new APValue.
8348 std::optional<APValue> MaybeDestValue =
8349 BufferToAPValueConverter::convert(Info, Buffer&: *Buffer, BCE);
8350 if (!MaybeDestValue)
8351 return false;
8352
8353 DestValue = std::move(*MaybeDestValue);
8354 return true;
8355}
8356
8357static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
8358 APValue &SourceValue,
8359 const CastExpr *BCE) {
8360 assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
8361 "no host or target supports non 8-bit chars");
8362 assert(SourceValue.isLValue() &&
8363 "LValueToRValueBitcast requires an lvalue operand!");
8364
8365 LValue SourceLValue;
8366 APValue SourceRValue;
8367 SourceLValue.setFrom(Ctx: Info.Ctx, V: SourceValue);
8368 if (!handleLValueToRValueConversion(
8369 Info, Conv: BCE, Type: BCE->getSubExpr()->getType().withConst(), LVal: SourceLValue,
8370 RVal&: SourceRValue, /*WantObjectRepresentation=*/true))
8371 return false;
8372
8373 return handleRValueToRValueBitCast(Info, DestValue, SourceRValue, BCE);
8374}
8375
/// Common base class for the expression evaluators, built on ConstStmtVisitor
/// via CRTP. Each Visit* method evaluates one expression form and hands the
/// produced value to the derived class's Success() hook (or its
/// ZeroInitialization() hook for zero-init forms). Every Visit* method
/// returns true on success; a false return means evaluation failed and any
/// appropriate diagnostic has already been produced.
template <class Derived>
class ExprEvaluatorBase
    : public ConstStmtVisitor<Derived, bool> {
private:
  // CRTP accessors: route evaluated values into the concrete evaluator.
  Derived &getDerived() { return static_cast<Derived&>(*this); }
  bool DerivedSuccess(const APValue &V, const Expr *E) {
    return getDerived().Success(V, E);
  }
  bool DerivedZeroInitialization(const Expr *E) {
    return getDerived().ZeroInitialization(E);
  }

  // Check whether a conditional operator with a non-constant condition is a
  // potential constant expression. If neither arm is a potential constant
  // expression, then the conditional operator is not either.
  template<typename ConditionalOperator>
  void CheckPotentialConstantConditional(const ConditionalOperator *E) {
    assert(Info.checkingPotentialConstantExpression());

    // Speculatively evaluate both arms.
    SmallVector<PartialDiagnosticAt, 8> Diag;
    {
      SpeculativeEvaluationRAII Speculate(Info, &Diag);
      StmtVisitorTy::Visit(E->getFalseExpr());
      if (Diag.empty())
        return;
    }

    {
      SpeculativeEvaluationRAII Speculate(Info, &Diag);
      Diag.clear();
      StmtVisitorTy::Visit(E->getTrueExpr());
      if (Diag.empty())
        return;
    }

    Error(E, diag::note_constexpr_conditional_never_const);
  }

  // Evaluate the condition, then evaluate only the selected arm. Shared by
  // the ConditionalOperator and BinaryConditionalOperator visitors. If the
  // condition is not a constant, both arms may still be visited to gather
  // notes (or to check for potential constness).
  template<typename ConditionalOperator>
  bool HandleConditionalOperator(const ConditionalOperator *E) {
    bool BoolResult;
    if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
      if (Info.checkingPotentialConstantExpression() && Info.noteFailure()) {
        CheckPotentialConstantConditional(E);
        return false;
      }
      if (Info.noteFailure()) {
        StmtVisitorTy::Visit(E->getTrueExpr());
        StmtVisitorTy::Visit(E->getFalseExpr());
      }
      return false;
    }

    Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
    return StmtVisitorTy::Visit(EvalExpr);
  }

protected:
  EvalInfo &Info;
  typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
  typedef ExprEvaluatorBase ExprEvaluatorBaseTy;

  OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
    return Info.CCEDiag(E, DiagId: D);
  }

  // By default a subexpression with no better handling is an error; derived
  // evaluators override Success()/ZeroInitialization() for the types they
  // understand.
  bool ZeroInitialization(const Expr *E) { return Error(E); }

  bool IsConstantEvaluatedBuiltinCall(const CallExpr *E) {
    unsigned BuiltinOp = E->getBuiltinCallee();
    return BuiltinOp != 0 &&
           Info.Ctx.BuiltinInfo.isConstantEvaluated(ID: BuiltinOp);
  }

public:
  ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}

  EvalInfo &getEvalInfo() { return Info; }

  /// Report an evaluation error. This should only be called when an error is
  /// first discovered. When propagating an error, just return false.
  bool Error(const Expr *E, diag::kind D) {
    Info.FFDiag(E, DiagId: D) << E->getSourceRange();
    return false;
  }
  bool Error(const Expr *E) {
    return Error(E, diag::note_invalid_subexpr_in_const_expr);
  }

  bool VisitStmt(const Stmt *) {
    llvm_unreachable("Expression evaluator should not be called on stmts");
  }
  bool VisitExpr(const Expr *E) {
    return Error(E);
  }

  bool VisitEmbedExpr(const EmbedExpr *E) {
    // NOTE(review): only the first element of the embed data is visited here
    // — presumably callers only reach this for single-element embeds; confirm.
    const auto It = E->begin();
    return StmtVisitorTy::Visit(*It);
  }

  bool VisitPredefinedExpr(const PredefinedExpr *E) {
    return StmtVisitorTy::Visit(E->getFunctionName());
  }
  bool VisitConstantExpr(const ConstantExpr *E) {
    // Reuse a previously-computed result if one was cached on the node.
    if (E->hasAPValueResult())
      return DerivedSuccess(V: E->getAPValueResult(), E);

    return StmtVisitorTy::Visit(E->getSubExpr());
  }

  // Transparent wrappers: evaluate the relevant subexpression directly.
  bool VisitParenExpr(const ParenExpr *E)
    { return StmtVisitorTy::Visit(E->getSubExpr()); }
  bool VisitUnaryExtension(const UnaryOperator *E)
    { return StmtVisitorTy::Visit(E->getSubExpr()); }
  bool VisitUnaryPlus(const UnaryOperator *E)
    { return StmtVisitorTy::Visit(E->getSubExpr()); }
  bool VisitChooseExpr(const ChooseExpr *E)
    { return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
  bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
    { return StmtVisitorTy::Visit(E->getResultExpr()); }
  bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
    { return StmtVisitorTy::Visit(E->getReplacement()); }
  bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
    TempVersionRAII RAII(*Info.CurrentCall);
    SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
    return StmtVisitorTy::Visit(E->getExpr());
  }
  bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
    TempVersionRAII RAII(*Info.CurrentCall);
    // The initializer may not have been parsed yet, or might be erroneous.
    if (!E->getExpr())
      return Error(E);
    SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
    return StmtVisitorTy::Visit(E->getExpr());
  }

  bool VisitExprWithCleanups(const ExprWithCleanups *E) {
    // Run any pending cleanups (destructors) after the subexpression.
    FullExpressionRAII Scope(Info);
    return StmtVisitorTy::Visit(E->getSubExpr()) && Scope.destroy();
  }

  // Temporaries are registered when created, so we don't care about
  // CXXBindTemporaryExpr.
  bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) {
    return StmtVisitorTy::Visit(E->getSubExpr());
  }

  bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
    CCEDiag(E, D: diag::note_constexpr_invalid_cast)
        << diag::ConstexprInvalidCastKind::Reinterpret;
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }
  bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
    // dynamic_cast is permitted in constant expressions from C++20 onwards.
    if (!Info.Ctx.getLangOpts().CPlusPlus20)
      CCEDiag(E, D: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::Dynamic;
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }
  bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }

  bool VisitBinaryOperator(const BinaryOperator *E) {
    switch (E->getOpcode()) {
    default:
      return Error(E);

    case BO_Comma:
      // LHS is evaluated only for its side effects; result comes from RHS.
      VisitIgnoredValue(E: E->getLHS());
      return StmtVisitorTy::Visit(E->getRHS());

    case BO_PtrMemD:
    case BO_PtrMemI: {
      // Pointer-to-member access: resolve the member lvalue, then load it.
      LValue Obj;
      if (!HandleMemberPointerAccess(Info, BO: E, LV&: Obj))
        return false;
      APValue Result;
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: Obj, RVal&: Result))
        return false;
      return DerivedSuccess(V: Result, E);
    }
    }
  }

  bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E) {
    return StmtVisitorTy::Visit(E->getSemanticForm());
  }

  bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
    // Evaluate and cache the common expression. We treat it as a temporary,
    // even though it's not quite the same thing.
    LValue CommonLV;
    if (!Evaluate(Result&: Info.CurrentCall->createTemporary(
                      Key: E->getOpaqueValue(),
                      T: getStorageType(Ctx: Info.Ctx, E: E->getOpaqueValue()),
                      Scope: ScopeKind::FullExpression, LV&: CommonLV),
                  Info, E: E->getCommon()))
      return false;

    return HandleConditionalOperator(E);
  }

  bool VisitConditionalOperator(const ConditionalOperator *E) {
    bool IsBcpCall = false;
    // If the condition (ignoring parens) is a __builtin_constant_p call,
    // the result is a constant expression if it can be folded without
    // side-effects. This is an important GNU extension. See GCC PR38377
    // for discussion.
    if (const CallExpr *CallCE =
          dyn_cast<CallExpr>(Val: E->getCond()->IgnoreParenCasts()))
      if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
        IsBcpCall = true;

    // Always assume __builtin_constant_p(...) ? ... : ... is a potential
    // constant expression; we can't check whether it's potentially foldable.
    // FIXME: We should instead treat __builtin_constant_p as non-constant if
    // it would return 'false' in this mode.
    if (Info.checkingPotentialConstantExpression() && IsBcpCall)
      return false;

    FoldConstant Fold(Info, IsBcpCall);
    if (!HandleConditionalOperator(E)) {
      Fold.keepDiagnostics();
      return false;
    }

    return true;
  }

  bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
    // Prefer the cached temporary produced when the OVE was first evaluated.
    if (APValue *Value = Info.CurrentCall->getCurrentTemporary(Key: E);
        Value && !Value->isAbsent())
      return DerivedSuccess(V: *Value, E);

    const Expr *Source = E->getSourceExpr();
    if (!Source)
      return Error(E);
    if (Source == E) {
      assert(0 && "OpaqueValueExpr recursively refers to itself");
      return Error(E);
    }
    return StmtVisitorTy::Visit(Source);
  }

  bool VisitPseudoObjectExpr(const PseudoObjectExpr *E) {
    for (const Expr *SemE : E->semantics()) {
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: SemE)) {
        // FIXME: We can't handle the case where an OpaqueValueExpr is also the
        // result expression: there could be two different LValues that would
        // refer to the same object in that case, and we can't model that.
        if (SemE == E->getResultExpr())
          return Error(E);

        // Unique OVEs get evaluated if and when we encounter them when
        // emitting the rest of the semantic form, rather than eagerly.
        if (OVE->isUnique())
          continue;

        LValue LV;
        if (!Evaluate(Result&: Info.CurrentCall->createTemporary(
                          Key: OVE, T: getStorageType(Ctx: Info.Ctx, E: OVE),
                          Scope: ScopeKind::FullExpression, LV),
                      Info, E: OVE->getSourceExpr()))
          return false;
      } else if (SemE == E->getResultExpr()) {
        if (!StmtVisitorTy::Visit(SemE))
          return false;
      } else {
        if (!EvaluateIgnoredValue(Info, E: SemE))
          return false;
      }
    }
    return true;
  }

  bool VisitCallExpr(const CallExpr *E) {
    APValue Result;
    if (!handleCallExpr(E, Result, ResultSlot: nullptr))
      return false;
    return DerivedSuccess(V: Result, E);
  }

  /// Evaluate a call expression: resolve the callee (including bound member
  /// calls, function pointers, virtual dispatch, and lambda static invokers),
  /// evaluate the arguments, and execute the function body, storing the
  /// returned value in \p Result. \p ResultSlot, if non-null, designates
  /// where the result object is constructed.
  bool handleCallExpr(const CallExpr *E, APValue &Result,
                      const LValue *ResultSlot) {
    CallScopeRAII CallScope(Info);

    const Expr *Callee = E->getCallee()->IgnoreParens();
    QualType CalleeType = Callee->getType();

    const FunctionDecl *FD = nullptr;
    LValue *This = nullptr, ObjectArg;
    auto Args = ArrayRef(E->getArgs(), E->getNumArgs());
    bool HasQualifier = false;

    CallRef Call;

    // Extract function decl and 'this' pointer from the callee.
    if (CalleeType->isSpecificBuiltinType(K: BuiltinType::BoundMember)) {
      const CXXMethodDecl *Member = nullptr;
      if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: Callee)) {
        // Explicit bound member calls, such as x.f() or p->g();
        if (!EvaluateObjectArgument(Info, Object: ME->getBase(), This&: ObjectArg))
          return false;
        Member = dyn_cast<CXXMethodDecl>(Val: ME->getMemberDecl());
        if (!Member)
          return Error(Callee);
        This = &ObjectArg;
        HasQualifier = ME->hasQualifier();
      } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Val: Callee)) {
        // Indirect bound member calls ('.*' or '->*').
        const ValueDecl *D =
            HandleMemberPointerAccess(Info, BO: BE, LV&: ObjectArg, IncludeMember: false);
        if (!D)
          return false;
        Member = dyn_cast<CXXMethodDecl>(Val: D);
        if (!Member)
          return Error(Callee);
        This = &ObjectArg;
      } else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Val: Callee)) {
        if (!Info.getLangOpts().CPlusPlus20)
          Info.CCEDiag(E: PDE, DiagId: diag::note_constexpr_pseudo_destructor);
        return EvaluateObjectArgument(Info, Object: PDE->getBase(), This&: ObjectArg) &&
               HandleDestruction(Info, E: PDE, This: ObjectArg, ThisType: PDE->getDestroyedType());
      } else
        return Error(Callee);
      FD = Member;
    } else if (CalleeType->isFunctionPointerType()) {
      LValue CalleeLV;
      if (!EvaluatePointer(E: Callee, Result&: CalleeLV, Info))
        return false;

      if (!CalleeLV.getLValueOffset().isZero())
        return Error(Callee);
      if (CalleeLV.isNullPointer()) {
        Info.FFDiag(E: Callee, DiagId: diag::note_constexpr_null_callee)
            << const_cast<Expr *>(Callee);
        return false;
      }
      FD = dyn_cast_or_null<FunctionDecl>(
          Val: CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>());
      if (!FD)
        return Error(Callee);
      // Don't call function pointers which have been cast to some other type.
      // Per DR (no number yet), the caller and callee can differ in noexcept.
      if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec(
              T: CalleeType->getPointeeType(), U: FD->getType())) {
        return Error(E);
      }

      // For an (overloaded) assignment expression, evaluate the RHS before the
      // LHS.
      auto *OCE = dyn_cast<CXXOperatorCallExpr>(Val: E);
      if (OCE && OCE->isAssignmentOp()) {
        assert(Args.size() == 2 && "wrong number of arguments in assignment");
        Call = Info.CurrentCall->createCall(Callee: FD);
        bool HasThis = false;
        if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD))
          HasThis = MD->isImplicitObjectMemberFunction();
        if (!EvaluateArgs(Args: HasThis ? Args.slice(N: 1) : Args, Call, Info, Callee: FD,
                          /*RightToLeft=*/true, ObjectArg: &ObjectArg))
          return false;
      }

      // Overloaded operator calls to member functions are represented as normal
      // calls with '*this' as the first argument.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD);
      if (MD &&
          (MD->isImplicitObjectMemberFunction() || (OCE && MD->isStatic()))) {
        // FIXME: When selecting an implicit conversion for an overloaded
        // operator delete, we sometimes try to evaluate calls to conversion
        // operators without a 'this' parameter!
        if (Args.empty())
          return Error(E);

        if (!EvaluateObjectArgument(Info, Object: Args[0], This&: ObjectArg))
          return false;

        // If we are calling a static operator, the 'this' argument needs to be
        // ignored after being evaluated.
        if (MD->isInstance())
          This = &ObjectArg;

        // If this is syntactically a simple assignment using a trivial
        // assignment operator, start the lifetimes of union members as needed,
        // per C++20 [class.union]5.
        if (Info.getLangOpts().CPlusPlus20 && OCE &&
            OCE->getOperator() == OO_Equal && MD->isTrivial() &&
            !MaybeHandleUnionActiveMemberChange(Info, LHSExpr: Args[0], LHS: ObjectArg))
          return false;

        Args = Args.slice(N: 1);
      } else if (MD && MD->isLambdaStaticInvoker()) {
        // Map the static invoker for the lambda back to the call operator.
        // Conveniently, we don't have to slice out the 'this' argument (as is
        // being done for the non-static case), since a static member function
        // doesn't have an implicit argument passed in.
        const CXXRecordDecl *ClosureClass = MD->getParent();
        assert(
            ClosureClass->captures().empty() &&
            "Number of captures must be zero for conversion to function-ptr");

        const CXXMethodDecl *LambdaCallOp =
            ClosureClass->getLambdaCallOperator();

        // Set 'FD', the function that will be called below, to the call
        // operator.  If the closure object represents a generic lambda, find
        // the corresponding specialization of the call operator.

        if (ClosureClass->isGenericLambda()) {
          assert(MD->isFunctionTemplateSpecialization() &&
                 "A generic lambda's static-invoker function must be a "
                 "template specialization");
          const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
          FunctionTemplateDecl *CallOpTemplate =
              LambdaCallOp->getDescribedFunctionTemplate();
          void *InsertPos = nullptr;
          FunctionDecl *CorrespondingCallOpSpecialization =
              CallOpTemplate->findSpecialization(Args: TAL->asArray(), InsertPos);
          assert(CorrespondingCallOpSpecialization &&
                 "We must always have a function call operator specialization "
                 "that corresponds to our static invoker specialization");
          assert(isa<CXXMethodDecl>(CorrespondingCallOpSpecialization));
          FD = CorrespondingCallOpSpecialization;
        } else
          FD = LambdaCallOp;
      } else if (FD->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
        // Replaceable global allocation/deallocation functions get special
        // handling rather than a body evaluation.
        if (FD->getDeclName().isAnyOperatorNew()) {
          LValue Ptr;
          if (!HandleOperatorNewCall(Info, E, Result&: Ptr))
            return false;
          Ptr.moveInto(V&: Result);
          return CallScope.destroy();
        } else {
          return HandleOperatorDeleteCall(Info, E) && CallScope.destroy();
        }
      }
    } else
      return Error(E);

    // Evaluate the arguments now if we've not already done so.
    if (!Call) {
      Call = Info.CurrentCall->createCall(Callee: FD);
      if (!EvaluateArgs(Args, Call, Info, Callee: FD, /*RightToLeft*/ false,
                        ObjectArg: &ObjectArg))
        return false;
    }

    SmallVector<QualType, 4> CovariantAdjustmentPath;
    if (This) {
      auto *NamedMember = dyn_cast<CXXMethodDecl>(Val: FD);
      if (NamedMember && NamedMember->isVirtual() && !HasQualifier) {
        // Perform virtual dispatch, if necessary.
        FD = HandleVirtualDispatch(Info, E, This&: *This, Found: NamedMember,
                                   CovariantAdjustmentPath);
        if (!FD)
          return false;
      } else if (NamedMember && NamedMember->isImplicitObjectMemberFunction()) {
        // Check that the 'this' pointer points to an object of the right type.
        // FIXME: If this is an assignment operator call, we may need to change
        // the active union member before we check this.
        if (!checkNonVirtualMemberCallThisPointer(Info, E, This: *This, NamedMember))
          return false;
      }
    }

    // Destructor calls are different enough that they have their own codepath.
    if (auto *DD = dyn_cast<CXXDestructorDecl>(Val: FD)) {
      assert(This && "no 'this' pointer for destructor call");
      return HandleDestruction(Info, E, This: *This,
                               ThisType: Info.Ctx.getCanonicalTagType(TD: DD->getParent())) &&
             CallScope.destroy();
    }

    const FunctionDecl *Definition = nullptr;
    Stmt *Body = FD->getBody(Definition);
    SourceLocation Loc = E->getExprLoc();

    // Treat the object argument as `this` when evaluating defaulted
    // special member functions
    if (FD->hasCXXExplicitFunctionObjectParameter())
      This = &ObjectArg;

    if (!CheckConstexprFunction(Info, CallLoc: Loc, Declaration: FD, Definition, Body) ||
        !HandleFunctionCall(CallLoc: Loc, Callee: Definition, ObjectArg: This, E, Args, Call, Body, Info,
                            Result, ResultSlot))
      return false;

    if (!CovariantAdjustmentPath.empty() &&
        !HandleCovariantReturnAdjustment(Info, E, Result,
                                         Path: CovariantAdjustmentPath))
      return false;

    return CallScope.destroy();
  }

  bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return StmtVisitorTy::Visit(E->getInitializer());
  }
  bool VisitInitListExpr(const InitListExpr *E) {
    // Only the trivial forms are handled generically; aggregate-typed
    // initializer lists are handled by the type-specific evaluators.
    if (E->getNumInits() == 0)
      return DerivedZeroInitialization(E);
    if (E->getNumInits() == 1)
      return StmtVisitorTy::Visit(E->getInit(Init: 0));
    return Error(E);
  }
  bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return DerivedZeroInitialization(E);
  }
  bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return DerivedZeroInitialization(E);
  }
  bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return DerivedZeroInitialization(E);
  }

  /// A member expression where the object is a prvalue is itself a prvalue.
  bool VisitMemberExpr(const MemberExpr *E) {
    assert(!Info.Ctx.getLangOpts().CPlusPlus11 &&
           "missing temporary materialization conversion");
    assert(!E->isArrow() && "missing call to bound member function?");

    APValue Val;
    if (!Evaluate(Result&: Val, Info, E: E->getBase()))
      return false;

    QualType BaseTy = E->getBase()->getType();

    const FieldDecl *FD = dyn_cast<FieldDecl>(Val: E->getMemberDecl());
    if (!FD) return Error(E);
    assert(!FD->getType()->isReferenceType() && "prvalue reference?");
    assert(BaseTy->castAsCanonical<RecordType>()->getDecl() ==
               FD->getParent()->getCanonicalDecl() &&
           "record / field mismatch");

    // Note: there is no lvalue base here. But this case should only ever
    // happen in C or in C++98, where we cannot be evaluating a constexpr
    // constructor, which is the only case the base matters.
    CompleteObject Obj(APValue::LValueBase(), &Val, BaseTy);
    SubobjectDesignator Designator(BaseTy);
    Designator.addDeclUnchecked(D: FD);

    APValue Result;
    return extractSubobject(Info, E, Obj, Sub: Designator, Result) &&
           DerivedSuccess(V: Result, E);
  }

  bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E) {
    APValue Val;
    if (!Evaluate(Result&: Val, Info, E: E->getBase()))
      return false;

    if (Val.isVector()) {
      SmallVector<uint32_t, 4> Indices;
      E->getEncodedElementAccess(Elts&: Indices);
      if (Indices.size() == 1) {
        // Return scalar.
        return DerivedSuccess(V: Val.getVectorElt(I: Indices[0]), E);
      } else {
        // Construct new APValue vector.
        SmallVector<APValue, 4> Elts;
        for (unsigned I = 0; I < Indices.size(); ++I) {
          Elts.push_back(Elt: Val.getVectorElt(I: Indices[I]));
        }
        APValue VecResult(Elts.data(), Indices.size());
        return DerivedSuccess(V: VecResult, E);
      }
    }

    // NOTE(review): a non-vector base falls through to failure without a
    // diagnostic here — presumably diagnosed elsewhere; confirm.
    return false;
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      break;

    case CK_AtomicToNonAtomic: {
      APValue AtomicVal;
      // This does not need to be done in place even for class/array types:
      // atomic-to-non-atomic conversion implies copying the object
      // representation.
      if (!Evaluate(Result&: AtomicVal, Info, E: E->getSubExpr()))
        return false;
      return DerivedSuccess(V: AtomicVal, E);
    }

    case CK_NoOp:
    case CK_UserDefinedConversion:
      return StmtVisitorTy::Visit(E->getSubExpr());

    case CK_HLSLArrayRValue: {
      const Expr *SubExpr = E->getSubExpr();
      if (!SubExpr->isGLValue()) {
        APValue Val;
        if (!Evaluate(Result&: Val, Info, E: SubExpr))
          return false;
        return DerivedSuccess(V: Val, E);
      }

      LValue LVal;
      if (!EvaluateLValue(E: SubExpr, Result&: LVal, Info))
        return false;
      APValue RVal;
      // Note, we use the subexpression's type in order to retain cv-qualifiers.
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: SubExpr->getType(), LVal,
                                          RVal))
        return false;
      return DerivedSuccess(V: RVal, E);
    }
    case CK_LValueToRValue: {
      LValue LVal;
      if (!EvaluateLValue(E: E->getSubExpr(), Result&: LVal, Info))
        return false;
      APValue RVal;
      // Note, we use the subexpression's type in order to retain cv-qualifiers.
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getSubExpr()->getType(),
                                          LVal, RVal))
        return false;
      return DerivedSuccess(V: RVal, E);
    }
    case CK_LValueToRValueBitCast: {
      APValue DestValue, SourceValue;
      if (!Evaluate(Result&: SourceValue, Info, E: E->getSubExpr()))
        return false;
      if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, BCE: E))
        return false;
      return DerivedSuccess(V: DestValue, E);
    }

    case CK_AddressSpaceConversion: {
      APValue Value;
      if (!Evaluate(Result&: Value, Info, E: E->getSubExpr()))
        return false;
      return DerivedSuccess(V: Value, E);
    }
    }

    return Error(E);
  }

  bool VisitUnaryPostInc(const UnaryOperator *UO) {
    return VisitUnaryPostIncDec(UO);
  }
  bool VisitUnaryPostDec(const UnaryOperator *UO) {
    return VisitUnaryPostIncDec(UO);
  }
  bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
    // Increment/decrement of an lvalue is only a constant expression in
    // C++14 and later.
    if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
      return Error(UO);

    LValue LVal;
    if (!EvaluateLValue(E: UO->getSubExpr(), Result&: LVal, Info))
      return false;
    APValue RVal;
    if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(),
                      UO->isIncrementOp(), &RVal))
      return false;
    return DerivedSuccess(V: RVal, E: UO);
  }

  bool VisitStmtExpr(const StmtExpr *E) {
    // We will have checked the full-expressions inside the statement expression
    // when they were completed, and don't need to check them again now.
    llvm::SaveAndRestore NotCheckingForUB(Info.CheckingForUndefinedBehavior,
                                          false);

    const CompoundStmt *CS = E->getSubStmt();
    if (CS->body_empty())
      return true;

    BlockScopeRAII Scope(Info);
    for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
                                           BE = CS->body_end();
         /**/; ++BI) {
      if (BI + 1 == BE) {
        // The last statement provides the value of the statement expression;
        // it must itself be an expression.
        const Expr *FinalExpr = dyn_cast<Expr>(Val: *BI);
        if (!FinalExpr) {
          Info.FFDiag(Loc: (*BI)->getBeginLoc(),
                      DiagId: diag::note_constexpr_stmt_expr_unsupported);
          return false;
        }
        return this->Visit(FinalExpr) && Scope.destroy();
      }

      APValue ReturnValue;
      StmtResult Result = { .Value: ReturnValue, .Slot: nullptr };
      EvalStmtResult ESR = EvaluateStmt(Result, Info, S: *BI);
      if (ESR != ESR_Succeeded) {
        // FIXME: If the statement-expression terminated due to 'return',
        // 'break', or 'continue', it would be nice to propagate that to
        // the outer statement evaluation rather than bailing out.
        if (ESR != ESR_Failed)
          Info.FFDiag(Loc: (*BI)->getBeginLoc(),
                      DiagId: diag::note_constexpr_stmt_expr_unsupported);
        return false;
      }
    }

    llvm_unreachable("Return from function from the loop above.");
  }

  bool VisitPackIndexingExpr(const PackIndexingExpr *E) {
    return StmtVisitorTy::Visit(E->getSelectedExpr());
  }

  /// Visit a value which is evaluated, but whose value is ignored.
  void VisitIgnoredValue(const Expr *E) {
    EvaluateIgnoredValue(Info, E);
  }

  /// Potentially visit a MemberExpr's base expression.
  void VisitIgnoredBaseExpression(const Expr *E) {
    // While MSVC doesn't evaluate the base expression, it does diagnose the
    // presence of side-effecting behavior.
    if (Info.getLangOpts().MSVCCompat && !E->HasSideEffects(Ctx: Info.Ctx))
      return;
    VisitIgnoredValue(E);
  }
};
9098
9099} // namespace
9100
9101//===----------------------------------------------------------------------===//
9102// Common base class for lvalue and temporary evaluation.
9103//===----------------------------------------------------------------------===//
9104namespace {
/// Shared implementation for the lvalue and temporary evaluators: builds up
/// an LValue (base + subobject designator) in \c Result. \c InvalidBaseOK
/// permits designators rooted at bases we cannot fully model (the designator
/// is then marked invalid rather than failing evaluation).
template<class Derived>
class LValueExprEvaluatorBase
  : public ExprEvaluatorBase<Derived> {
protected:
  LValue &Result;
  bool InvalidBaseOK;
  typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
  typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;

  /// Set the result to an lvalue designating the start of object \p B.
  bool Success(APValue::LValueBase B) {
    Result.set(B);
    return true;
  }

  /// Evaluate a pointer expression, propagating this evaluator's tolerance
  /// for invalid bases.
  bool evaluatePointer(const Expr *E, LValue &Result) {
    return EvaluatePointer(E, Result, this->Info, InvalidBaseOK);
  }

public:
  LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result, bool InvalidBaseOK)
      : ExprEvaluatorBaseTy(Info), Result(Result),
        InvalidBaseOK(InvalidBaseOK) {}

  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(Ctx: this->Info.Ctx, V);
    return true;
  }

  bool VisitMemberExpr(const MemberExpr *E) {
    // Handle non-static data members.
    QualType BaseTy;
    bool EvalOK;
    if (E->isArrow()) {
      // p->m: the base is a pointer; evaluate it to get the object.
      EvalOK = evaluatePointer(E: E->getBase(), Result);
      BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
    } else if (E->getBase()->isPRValue()) {
      // x.m where x is a prvalue: materialize a temporary to take the member
      // of.
      assert(E->getBase()->getType()->isRecordType());
      EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info);
      BaseTy = E->getBase()->getType();
    } else {
      EvalOK = this->Visit(E->getBase());
      BaseTy = E->getBase()->getType();
    }
    if (!EvalOK) {
      if (!InvalidBaseOK)
        return false;
      Result.setInvalid(B: E);
      return true;
    }

    const ValueDecl *MD = E->getMemberDecl();
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: E->getMemberDecl())) {
      assert(BaseTy->castAsCanonical<RecordType>()->getDecl() ==
                 FD->getParent()->getCanonicalDecl() &&
             "record / field mismatch");
      (void)BaseTy;
      if (!HandleLValueMember(this->Info, E, Result, FD))
        return false;
    } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(Val: MD)) {
      if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
        return false;
    } else
      return this->Error(E);

    // A reference member is transparently followed: the lvalue result is the
    // referenced object, obtained by loading the reference's value.
    if (MD->getType()->isReferenceType()) {
      APValue RefValue;
      if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
                                          RefValue))
        return false;
      return Success(RefValue, E);
    }
    return true;
  }

  bool VisitBinaryOperator(const BinaryOperator *E) {
    switch (E->getOpcode()) {
    default:
      return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

    case BO_PtrMemD:
    case BO_PtrMemI:
      return HandleMemberPointerAccess(this->Info, E, Result);
    }
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
      if (!this->Visit(E->getSubExpr()))
        return false;

      // Now figure out the necessary offset to add to the base LV to get from
      // the derived class to the base class.
      return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(),
                                  Result);
    }
  }
};
9207}
9208
9209//===----------------------------------------------------------------------===//
9210// LValue Evaluation
9211//
9212// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
9213// function designators (in C), decl references to void objects (in C), and
9214// temporaries (if building with -Wno-address-of-temporary).
9215//
9216// LValue evaluation produces values comprising a base expression of one of the
9217// following types:
9218// - Declarations
9219// * VarDecl
9220// * FunctionDecl
9221// - Literals
9222// * CompoundLiteralExpr in C (and in global scope in C++)
9223// * StringLiteral
9224// * PredefinedExpr
9225// * ObjCStringLiteralExpr
9226// * ObjCEncodeExpr
9227// * AddrLabelExpr
9228// * BlockExpr
9229// * CallExpr for a MakeStringConstant builtin
9230// - typeid(T) expressions, as TypeInfoLValues
9231// - Locals and temporaries
9232// * MaterializeTemporaryExpr
9233// * Any Expr, with a CallIndex indicating the function in which the temporary
9234// was evaluated, for cases where the MaterializeTemporaryExpr is missing
9235// from the AST (FIXME).
9236// * A MaterializeTemporaryExpr that has static storage duration, with no
9237// CallIndex, for a lifetime-extended temporary.
9238// * The ConstantExpr that is currently being evaluated during evaluation of an
9239// immediate invocation.
9240// plus an offset in bytes.
9241//===----------------------------------------------------------------------===//
9242namespace {
/// Evaluator producing an LValue for glvalue expressions; see the block
/// comment above for the set of base kinds an lvalue result may carry.
class LValueExprEvaluator
  : public LValueExprEvaluatorBase<LValueExprEvaluator> {
public:
  LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) :
    LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {}

  bool VisitVarDecl(const Expr *E, const VarDecl *VD);
  bool VisitUnaryPreIncDec(const UnaryOperator *UO);

  bool VisitCallExpr(const CallExpr *E);
  bool VisitDeclRefExpr(const DeclRefExpr *E);
  // The PredefinedExpr itself is the lvalue base for __func__ and friends.
  bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(B: E); }
  bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
  bool VisitMemberExpr(const MemberExpr *E);
  bool VisitStringLiteral(const StringLiteral *E) {
    // Each evaluation gets a distinct string literal version, so that
    // distinct evaluations of the same literal compare unequal where needed.
    return Success(
        B: APValue::LValueBase(E, 0, Info.Ctx.getNextStringLiteralVersion()));
  }
  bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(B: E); }
  bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
  bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
  bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
  bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E);
  bool VisitUnaryDeref(const UnaryOperator *E);
  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);
  bool VisitUnaryPreInc(const UnaryOperator *UO) {
    return VisitUnaryPreIncDec(UO);
  }
  bool VisitUnaryPreDec(const UnaryOperator *UO) {
    return VisitUnaryPreIncDec(UO);
  }
  bool VisitBinAssign(const BinaryOperator *BO);
  bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return LValueExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_LValueBitCast:
      // Bit-casting an lvalue leaves us with a designator we can no longer
      // track precisely; keep the base but mark the designator invalid.
      this->CCEDiag(E, D: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
          << Info.Ctx.getLangOpts().CPlusPlus;
      if (!Visit(S: E->getSubExpr()))
        return false;
      Result.Designator.setInvalid();
      return true;

    case CK_BaseToDerived:
      if (!Visit(S: E->getSubExpr()))
        return false;
      return HandleBaseToDerivedCast(Info, E, Result);

    case CK_Dynamic:
      if (!Visit(S: E->getSubExpr()))
        return false;
      return HandleDynamicCast(Info, E: cast<ExplicitCastExpr>(Val: E), Ptr&: Result);
    }
  }
};
9305} // end anonymous namespace
9306
/// Get an lvalue to a field of a lambda's closure type.
///
/// \param MD the lambda member function being evaluated (its 'this' or
///        explicit object parameter supplies the closure object).
/// \param FD the field of the closure type that represents the capture.
/// \param LValueToRValueConversion if true, load through the field so that
///        'Result' designates what the captured reference (or pointer to
///        '*this') refers to, rather than the field itself.
static bool HandleLambdaCapture(EvalInfo &Info, const Expr *E, LValue &Result,
                                const CXXMethodDecl *MD, const FieldDecl *FD,
                                bool LValueToRValueConversion) {
  // Static lambda function call operators can't have captures. We already
  // diagnosed this, so bail out here.
  if (MD->isStatic()) {
    assert(Info.CurrentCall->This == nullptr &&
           "This should not be set for a static call operator");
    return false;
  }

  // Start with 'Result' referring to the complete closure object...
  if (MD->isExplicitObjectMemberFunction()) {
    // Self may be passed by reference or by value.
    const ParmVarDecl *Self = MD->getParamDecl(i: 0);
    if (Self->getType()->isReferenceType()) {
      // By-reference: the parameter slot holds an lvalue for the closure.
      APValue *RefValue = Info.getParamSlot(Call: Info.CurrentCall->Arguments, PVD: Self);
      if (!RefValue->allowConstexprUnknown() || RefValue->hasValue())
        Result.setFrom(Ctx: Info.Ctx, V: *RefValue);
    } else {
      // By-value: designate the parameter object itself, in the frame that
      // owns the argument storage.
      const ParmVarDecl *VD = Info.CurrentCall->Arguments.getOrigParam(PVD: Self);
      CallStackFrame *Frame =
          Info.getCallFrameAndDepth(CallIndex: Info.CurrentCall->Arguments.CallIndex)
              .first;
      unsigned Version = Info.CurrentCall->Arguments.Version;
      Result.set(B: {VD, Frame->Index, Version});
    }
  } else
    Result = *Info.CurrentCall->This;

  // ... then update it to refer to the field of the closure object
  // that represents the capture.
  if (!HandleLValueMember(Info, E, LVal&: Result, FD))
    return false;

  // And if the field is of reference type (or if we captured '*this' by
  // reference), update 'Result' to refer to what
  // the field refers to.
  if (LValueToRValueConversion) {
    APValue RVal;
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: FD->getType(), LVal: Result, RVal))
      return false;
    Result.setFrom(Ctx: Info.Ctx, V: RVal);
  }
  return true;
}
9354
/// Evaluate an expression as an lvalue. This can be legitimately called on
/// expressions which are not glvalues, in three cases:
/// * function designators in C,
/// * "extern void" objects, and
/// * @selector() expressions in Objective-C.
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
                           bool InvalidBaseOK) {
  assert(!E->isValueDependent());
  assert(E->isGLValue() || E->getType()->isFunctionType() ||
         E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E->IgnoreParens()));
  return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(S: E);
}
9367
// Evaluate a reference to a declaration as an lvalue. Lambda captures are
// redirected to the corresponding closure field; otherwise the lvalue
// designates the function, variable, or binding being named.
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
  const ValueDecl *D = E->getDecl();

  // If we are within a lambda's call operator, check whether the 'VD' referred
  // to within 'E' actually represents a lambda-capture that maps to a
  // data-member/field within the closure object, and if so, evaluate to the
  // field or what the field refers to.
  if (Info.CurrentCall && isLambdaCallOperator(DC: Info.CurrentCall->Callee) &&
      E->refersToEnclosingVariableOrCapture()) {
    // We don't always have a complete capture-map when checking or inferring if
    // the function call operator meets the requirements of a constexpr function
    // - but we don't need to evaluate the captures to determine constexprness
    // (dcl.constexpr C++17).
    if (Info.checkingPotentialConstantExpression())
      return false;

    if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(Val: D)) {
      const auto *MD = cast<CXXMethodDecl>(Val: Info.CurrentCall->Callee);
      // Reference captures need an extra load to reach their referent.
      return HandleLambdaCapture(Info, E, Result, MD, FD,
                                 LValueToRValueConversion: FD->getType()->isReferenceType());
    }
  }

  // Entities with stable addresses are their own lvalue base.
  if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl,
          UnnamedGlobalConstantDecl>(Val: D))
    return Success(B: cast<ValueDecl>(Val: D));
  if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
    return VisitVarDecl(E, VD);
  // A structured binding evaluates as the expression it is bound to.
  if (const BindingDecl *BD = dyn_cast<BindingDecl>(Val: D))
    return Visit(S: BD->getBinding());
  return Error(E);
}
9400
// Build an lvalue designating the variable 'VD' as named by expression 'E'.
// For local variables this locates the call frame (and temporary version)
// that owns the storage; for references it also looks through the reference
// to its referent.
bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
  CallStackFrame *Frame = nullptr;
  unsigned Version = 0;
  if (VD->hasLocalStorage()) {
    // Only if a local variable was declared in the function currently being
    // evaluated, do we expect to be able to find its value in the current
    // frame. (Otherwise it was likely declared in an enclosing context and
    // could either have a valid evaluatable value (for e.g. a constexpr
    // variable) or be ill-formed (and trigger an appropriate evaluation
    // diagnostic)).
    CallStackFrame *CurrFrame = Info.CurrentCall;
    if (CurrFrame->Callee && CurrFrame->Callee->Equals(DC: VD->getDeclContext())) {
      // Function parameters are stored in some caller's frame. (Usually the
      // immediate caller, but for an inherited constructor they may be more
      // distant.)
      if (auto *PVD = dyn_cast<ParmVarDecl>(Val: VD)) {
        if (CurrFrame->Arguments) {
          VD = CurrFrame->Arguments.getOrigParam(PVD);
          Frame =
              Info.getCallFrameAndDepth(CallIndex: CurrFrame->Arguments.CallIndex).first;
          Version = CurrFrame->Arguments.Version;
        }
      } else {
        Frame = CurrFrame;
        Version = CurrFrame->getCurrentTemporaryVersion(Key: VD);
      }
    }
  }

  if (!VD->getType()->isReferenceType()) {
    // A non-reference variable is itself the designated object.
    if (Frame) {
      Result.set(B: {VD, Frame->Index, Version});
      return true;
    }
    return Success(B: VD);
  }

  // Pre-C++11, reading a reference is not a permitted constant operation;
  // diagnose, but continue in case the caller is merely folding.
  if (!Info.getLangOpts().CPlusPlus11) {
    Info.CCEDiag(E, DiagId: diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
        << VD << VD->getType();
    Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
  }

  // Reference: evaluate its initializer to find the referent.
  APValue *V;
  if (!evaluateVarDeclInit(Info, E, VD, Frame, Version, Result&: V))
    return false;

  if (!V) {
    // No known value; treat the reference as designating an unknown but
    // otherwise valid constexpr object.
    Result.set(B: VD);
    Result.AllowConstexprUnknown = true;
    return true;
  }

  return Success(V: *V, E);
}
9456
// Evaluate a call in lvalue context. The cast/forwarding helpers
// (std::move, std::forward, std::as_const, ...) are builtin-like and simply
// yield the lvalue of their argument when declared constexpr; all other
// calls are handled generically.
bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) {
  if (!IsConstantEvaluatedBuiltinCall(E))
    return ExprEvaluatorBaseTy::VisitCallExpr(E);

  switch (E->getBuiltinCallee()) {
  default:
    return false;
  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    // Only bypass the call if the library declared it constexpr; otherwise
    // fall through to ordinary call evaluation below.
    if (cast<FunctionDecl>(Val: E->getCalleeDecl())->isConstexpr())
      return Visit(S: E->getArg(Arg: 0));
    break;
  }

  return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
9476
// Materialize a temporary object and produce an lvalue designating it (or a
// subobject of it, if the expression applies subobject adjustments such as
// derived-to-base or member access on the temporary).
bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *E) {
  // Walk through the expression to find the materialized temporary itself.
  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  const Expr *Inner =
      E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHS&: CommaLHSs, Adjustments);

  // If we passed any comma operators, evaluate their LHSs.
  for (const Expr *E : CommaLHSs)
    if (!EvaluateIgnoredValue(Info, E))
      return false;

  // A materialized temporary with static storage duration can appear within the
  // result of a constant expression evaluation, so we need to preserve its
  // value for use outside this evaluation.
  APValue *Value;
  if (E->getStorageDuration() == SD_Static) {
    if (Info.EvalMode == EvaluationMode::ConstantFold)
      return false;
    // FIXME: What about SD_Thread?
    Value = E->getOrCreateValue(MayCreate: true);
    *Value = APValue();
    Result.set(B: E);
  } else {
    // Automatic-duration temporary: its storage lives in the current frame,
    // scoped to the full-expression or enclosing block as appropriate.
    Value = &Info.CurrentCall->createTemporary(
        Key: E, T: Inner->getType(),
        Scope: E->getStorageDuration() == SD_FullExpression ? ScopeKind::FullExpression
                                                     : ScopeKind::Block,
        LV&: Result);
  }

  QualType Type = Inner->getType();

  // Materialize the temporary itself.
  if (!EvaluateInPlace(Result&: *Value, Info, This: Result, E: Inner)) {
    // Clear the partially-constructed value so later reads don't see it.
    *Value = APValue();
    return false;
  }

  // Adjust our lvalue to refer to the desired subobject, replaying the
  // recorded adjustments innermost-first (hence the reverse iteration).
  for (unsigned I = Adjustments.size(); I != 0; /**/) {
    --I;
    switch (Adjustments[I].Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      if (!HandleLValueBasePath(Info, E: Adjustments[I].DerivedToBase.BasePath,
                                Type, Result))
        return false;
      Type = Adjustments[I].DerivedToBase.BasePath->getType();
      break;

    case SubobjectAdjustment::FieldAdjustment:
      if (!HandleLValueMember(Info, E, LVal&: Result, FD: Adjustments[I].Field))
        return false;
      Type = Adjustments[I].Field->getType();
      break;

    case SubobjectAdjustment::MemberPointerAdjustment:
      if (!HandleMemberPointerAccess(Info&: this->Info, LVType: Type, LV&: Result,
                                     RHS: Adjustments[I].Ptr.RHS))
        return false;
      Type = Adjustments[I].Ptr.MPT->getPointeeType();
      break;
    }
  }

  return true;
}
9545
// Evaluate a compound literal as an lvalue: materialize its initializer into
// either persistent storage (static storage duration) or a frame temporary,
// and designate that object.
bool
LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
  assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) &&
         "lvalue compound literal in c++?");
  APValue *Lit;
  // If the CompoundLiteral has static storage, its value can be used outside
  // this expression. So evaluate it once and store it in ASTContext.
  if (E->hasStaticStorage()) {
    Lit = &E->getOrCreateStaticValue(Ctx&: Info.Ctx);
    Result.set(B: E);
    // Reset any previously evaluated state, otherwise evaluation below might
    // fail.
    // FIXME: Should we just re-use the previously evaluated value instead?
    *Lit = APValue();
  } else {
    assert(!Info.getLangOpts().CPlusPlus);
    Lit = &Info.CurrentCall->createTemporary(Key: E, T: E->getInitializer()->getType(),
                                             Scope: ScopeKind::Block, LV&: Result);
  }
  // FIXME: Evaluating in place isn't always right. We should figure out how to
  // use appropriate evaluation context here, see
  // clang/test/AST/static-compound-literals-reeval.cpp for a failure.
  if (!EvaluateInPlace(Result&: *Lit, Info, This: Result, E: E->getInitializer())) {
    // Clear the partial value so a later evaluation starts fresh.
    *Lit = APValue();
    return false;
  }
  return true;
}
9574
// Evaluate a typeid expression to an lvalue designating a std::type_info
// object. For polymorphic glvalue operands the dynamic type must be
// determined, which requires evaluating the operand.
bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
  TypeInfoLValue TypeInfo;

  if (!E->isPotentiallyEvaluated()) {
    // Unevaluated operand: the static type is the answer.
    if (E->isTypeOperand())
      TypeInfo = TypeInfoLValue(E->getTypeOperand(Context: Info.Ctx).getTypePtr());
    else
      TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr());
  } else {
    // Polymorphic typeid is only a constant expression from C++20 onwards.
    if (!Info.Ctx.getLangOpts().CPlusPlus20) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_typeid_polymorphic)
          << E->getExprOperand()->getType()
          << E->getExprOperand()->getSourceRange();
    }

    if (!Visit(S: E->getExprOperand()))
      return false;

    std::optional<DynamicType> DynType =
        ComputeDynamicType(Info, E, This&: Result, AK: AK_TypeId);
    if (!DynType)
      return false;

    TypeInfo = TypeInfoLValue(
        Info.Ctx.getCanonicalTagType(TD: DynType->Type).getTypePtr());
  }

  return Success(B: APValue::LValueBase::getTypeInfo(LV: TypeInfo, TypeInfo: E->getType()));
}
9604
9605bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
9606 return Success(B: E->getGuidDecl());
9607}
9608
// Evaluate a member access as an lvalue. Static members do not depend on the
// base object's value, so the base is evaluated only for its side effects;
// non-static data members use the shared base-class handling.
bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
  // Handle static data members.
  if (const VarDecl *VD = dyn_cast<VarDecl>(Val: E->getMemberDecl())) {
    VisitIgnoredBaseExpression(E: E->getBase());
    return VisitVarDecl(E, VD);
  }

  // Handle static member functions.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: E->getMemberDecl())) {
    if (MD->isStatic()) {
      VisitIgnoredBaseExpression(E: E->getBase());
      return Success(B: MD);
    }
  }

  // Handle non-static data members.
  return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
}
9627
// Evaluate a vector element access (e.g. v.x) as an lvalue designating that
// single element. Multi-element swizzles are not yet supported.
bool LValueExprEvaluator::VisitExtVectorElementExpr(
    const ExtVectorElementExpr *E) {
  bool Success = true;

  APValue Val;
  if (!Evaluate(Result&: Val, Info, E: E->getBase())) {
    // Keep evaluating for diagnostics if the caller wants notes.
    if (!Info.noteFailure())
      return false;
    Success = false;
  }

  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Elts&: Indices);
  // FIXME: support accessing more than one element
  if (Indices.size() > 1)
    return false;

  if (Success) {
    Result.setFrom(Ctx: Info.Ctx, V: Val);
    QualType BaseType = E->getBase()->getType();
    // For the arrow form the base is a pointer to the vector.
    if (E->isArrow())
      BaseType = BaseType->getPointeeType();
    const auto *VT = BaseType->castAs<VectorType>();
    HandleLValueVectorElement(Info, E, LVal&: Result, EltTy: VT->getElementType(),
                              Size: VT->getNumElements(), Idx: Indices[0]);
  }

  return Success;
}
9657
// Evaluate an array (or vector) subscript as an lvalue designating the
// indexed element, honoring the C++17 left-before-right evaluation order.
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // Scalable (SVE) vectors have no constant element count to index into.
  if (E->getBase()->getType()->isSveVLSBuiltinType())
    return Error(E);

  APSInt Index;
  bool Success = true;

  if (const auto *VT = E->getBase()->getType()->getAs<VectorType>()) {
    // Vector subscript: evaluate the whole vector value, then designate the
    // selected element.
    APValue Val;
    if (!Evaluate(Result&: Val, Info, E: E->getBase())) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }

    if (!EvaluateInteger(E: E->getIdx(), Result&: Index, Info)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }

    if (Success) {
      Result.setFrom(Ctx: Info.Ctx, V: Val);
      HandleLValueVectorElement(Info, E, LVal&: Result, EltTy: VT->getElementType(),
                                Size: VT->getNumElements(), Idx: Index.getZExtValue());
    }

    return Success;
  }

  // C++17's rules require us to evaluate the LHS first, regardless of which
  // side is the base.
  for (const Expr *SubExpr : {E->getLHS(), E->getRHS()}) {
    if (SubExpr == E->getBase() ? !evaluatePointer(E: SubExpr, Result)
                                : !EvaluateInteger(E: SubExpr, Result&: Index, Info)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }
  }

  return Success &&
         HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: E->getType(), Adjustment: Index);
}
9702
// Evaluate a dereference (*p) as an lvalue designating the pointee.
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
  bool Success = evaluatePointer(E: E->getSubExpr(), Result);
  // [C++26][expr.unary.op]
  // If the operand points to an object or function, the result
  // denotes that object or function; otherwise, the behavior is undefined.
  // Because &(*(type*)0) is a common pattern, we do not fail the evaluation
  // immediately.
  if (!Success || !E->getType().getNonReferenceType()->isObjectType())
    return Success;
  // Check that the pointer actually designates a complete object; if not,
  // record undefined behavior rather than failing outright (see above).
  return bool(findCompleteObject(Info, E, AK: AK_Dereference, LVal: Result,
                                 LValType: E->getType())) ||
         Info.noteUndefinedBehavior();
}
9716
// Evaluate __real__ as an lvalue: the real component of a complex glvalue,
// or the operand itself for a scalar.
bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
  if (!Visit(S: E->getSubExpr()))
    return false;
  // __real is a no-op on scalar lvalues.
  if (E->getSubExpr()->getType()->isAnyComplexType())
    HandleLValueComplexElement(Info, E, LVal&: Result, EltTy: E->getType(), Imag: false);
  return true;
}
9725
// Evaluate __imag__ as an lvalue designating the imaginary component of a
// complex glvalue. Unlike __real__, this is only valid on complex operands.
bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
  assert(E->getSubExpr()->getType()->isAnyComplexType() &&
         "lvalue __imag__ on scalar?");
  if (!Visit(S: E->getSubExpr()))
    return false;
  HandleLValueComplexElement(Info, E, LVal&: Result, EltTy: E->getType(), Imag: true);
  return true;
}
9734
// Evaluate pre-increment/pre-decrement: update the operand in place and
// yield an lvalue designating it. Mutation during constant evaluation is
// only permitted from C++14 onwards.
bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
  if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
    return Error(E: UO);

  if (!this->Visit(S: UO->getSubExpr()))
    return false;

  // Old value is not needed by any caller, hence the null 'Old' out-param.
  return handleIncDec(
      Info&: this->Info, E: UO, LVal: Result, LValType: UO->getSubExpr()->getType(),
      IsIncrement: UO->isIncrementOp(), Old: nullptr);
}
9746
// Evaluate a compound assignment (+=, -=, ...) and yield an lvalue
// designating the LHS. Mutation requires C++14 constant evaluation rules.
bool LValueExprEvaluator::VisitCompoundAssignOperator(
    const CompoundAssignOperator *CAO) {
  if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
    return Error(E: CAO);

  bool Success = true;

  // C++17 onwards require that we evaluate the RHS first.
  APValue RHS;
  if (!Evaluate(Result&: RHS, Info&: this->Info, E: CAO->getRHS())) {
    if (!Info.noteFailure())
      return false;
    Success = false;
  }

  // The overall lvalue result is the result of evaluating the LHS.
  if (!this->Visit(S: CAO->getLHS()) || !Success)
    return false;

  // Perform the read-modify-write in the (possibly promoted) computation type.
  return handleCompoundAssignment(
      Info&: this->Info, E: CAO,
      LVal: Result, LValType: CAO->getLHS()->getType(), PromotedLValType: CAO->getComputationLHSType(),
      Opcode: CAO->getOpForCompoundAssignment(Opc: CAO->getOpcode()), RVal: RHS);
}
9771
// Evaluate a simple assignment and yield an lvalue designating the LHS.
// Mutation requires C++14 constant evaluation rules.
bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
  if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
    return Error(E);

  bool Success = true;

  // C++17 onwards require that we evaluate the RHS first.
  APValue NewVal;
  if (!Evaluate(Result&: NewVal, Info&: this->Info, E: E->getRHS())) {
    if (!Info.noteFailure())
      return false;
    Success = false;
  }

  if (!this->Visit(S: E->getLHS()) || !Success)
    return false;

  // In C++20, assigning to a union member may implicitly start the lifetime
  // of (activate) that member.
  if (Info.getLangOpts().CPlusPlus20 &&
      !MaybeHandleUnionActiveMemberChange(Info, LHSExpr: E->getLHS(), LHS: Result))
    return false;

  return handleAssignment(Info&: this->Info, E, LVal: Result, LValType: E->getLHS()->getType(),
                          Val&: NewVal);
}
9796
9797//===----------------------------------------------------------------------===//
9798// Pointer Evaluation
9799//===----------------------------------------------------------------------===//
9800
/// Convenience function. LVal's base must be a call to an alloc_size
/// function.
///
/// On success, 'Result' holds the number of bytes that call allocates;
/// returns false if the allocation size could not be computed.
static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
                                            const LValue &LVal,
                                            llvm::APInt &Result) {
  assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
         "Can't get the size of a non alloc_size function");
  const auto *Base = LVal.getLValueBase().get<const Expr *>();
  const CallExpr *CE = tryUnwrapAllocSizeCall(E: Base);
  std::optional<llvm::APInt> Size =
      CE->evaluateBytesReturnedByAllocSizeCall(Ctx);
  if (!Size)
    return false;

  Result = std::move(*Size);
  return true;
}
9818
/// Attempts to evaluate the given LValueBase as the result of a call to
/// a function with the alloc_size attribute. If it was possible to do so, this
/// function will return true, make Result's Base point to said function call,
/// and mark Result's Base as invalid.
static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
                                      LValue &Result) {
  if (Base.isNull())
    return false;

  // Because we do no form of static analysis, we only support const variables.
  //
  // Additionally, we can't support parameters, nor can we support static
  // variables (in the latter case, use-before-assign isn't UB; in the former,
  // we have no clue what they'll be assigned to).
  const auto *VD =
      dyn_cast_or_null<VarDecl>(Val: Base.dyn_cast<const ValueDecl *>());
  if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified())
    return false;

  const Expr *Init = VD->getAnyInitializer();
  if (!Init || Init->getType().isNull())
    return false;

  // The initializer (modulo parens) must itself be an alloc_size call.
  const Expr *E = Init->IgnoreParens();
  if (!tryUnwrapAllocSizeCall(E))
    return false;

  // Store E instead of E unwrapped so that the type of the LValue's base is
  // what the user wanted.
  Result.setInvalid(B: E);

  // Model the allocation as an array of unknown bound of the pointee type.
  QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
  Result.addUnsizedArray(Info, E, ElemTy: Pointee);
  return true;
}
9854
9855namespace {
// Visitor that evaluates a pointer-typed prvalue, producing an LValue
// describing the object (or function) the pointer points to. 'InvalidBaseOK'
// permits bases that cannot be dereferenced (e.g. alloc_size call results)
// for callers that only need pointer identity/arithmetic.
class PointerExprEvaluator
  : public ExprEvaluatorBase<PointerExprEvaluator> {
  LValue &Result;
  bool InvalidBaseOK;

  bool Success(const Expr *E) {
    Result.set(B: E);
    return true;
  }

  // Helpers that thread InvalidBaseOK through recursive evaluations.
  bool evaluateLValue(const Expr *E, LValue &Result) {
    return EvaluateLValue(E, Result, Info, InvalidBaseOK);
  }

  bool evaluatePointer(const Expr *E, LValue &Result) {
    return EvaluatePointer(E, Result, Info, InvalidBaseOK);
  }

  bool visitNonBuiltinCallExpr(const CallExpr *E);
public:

  PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK)
      : ExprEvaluatorBaseTy(info), Result(Result),
        InvalidBaseOK(InvalidBaseOK) {}

  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(Ctx: Info.Ctx, V);
    return true;
  }
  // Zero-initializing a pointer yields the target's null pointer value.
  bool ZeroInitialization(const Expr *E) {
    Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
    return true;
  }

  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitCastExpr(const CastExpr* E);
  bool VisitUnaryAddrOf(const UnaryOperator *E);
  bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
      { return Success(E); }
  bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
    if (E->isExpressibleAsConstantInitializer())
      return Success(E);
    // Not a constant; still evaluate the operand for diagnostic notes.
    if (Info.noteFailure())
      EvaluateIgnoredValue(Info, E: E->getSubExpr());
    return Error(E);
  }
  bool VisitAddrLabelExpr(const AddrLabelExpr *E)
      { return Success(E); }
  bool VisitCallExpr(const CallExpr *E);
  bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
  // A block literal is a constant only if it captures nothing.
  bool VisitBlockExpr(const BlockExpr *E) {
    if (!E->getBlockDecl()->hasCaptures())
      return Success(E);
    return Error(E);
  }
  bool VisitCXXThisExpr(const CXXThisExpr *E) {
    auto DiagnoseInvalidUseOfThis = [&] {
      if (Info.getLangOpts().CPlusPlus11)
        Info.FFDiag(E, DiagId: diag::note_constexpr_this) << E->isImplicit();
      else
        Info.FFDiag(E);
    };

    // Can't look at 'this' when checking a potential constant expression.
    if (Info.checkingPotentialConstantExpression())
      return false;

    // For an explicit-object lambda there is no 'This' slot; the closure is
    // reached via the explicit object parameter instead.
    bool IsExplicitLambda =
        isLambdaCallWithExplicitObjectParameter(DC: Info.CurrentCall->Callee);
    if (!IsExplicitLambda) {
      if (!Info.CurrentCall->This) {
        DiagnoseInvalidUseOfThis();
        return false;
      }

      Result = *Info.CurrentCall->This;
    }

    if (isLambdaCallOperator(DC: Info.CurrentCall->Callee)) {
      // Ensure we actually have captured 'this'. If something was wrong with
      // 'this' capture, the error would have been previously reported.
      // Otherwise we can be inside of a default initialization of an object
      // declared by lambda's body, so no need to return false.
      if (!Info.CurrentCall->LambdaThisCaptureField) {
        if (IsExplicitLambda && !Info.CurrentCall->This) {
          DiagnoseInvalidUseOfThis();
          return false;
        }

        return true;
      }

      // 'this' inside the lambda body refers to the captured 'this'; reach
      // it through the closure field that stores the capture.
      const auto *MD = cast<CXXMethodDecl>(Val: Info.CurrentCall->Callee);
      return HandleLambdaCapture(
          Info, E, Result, MD, FD: Info.CurrentCall->LambdaThisCaptureField,
          LValueToRValueConversion: Info.CurrentCall->LambdaThisCaptureField->getType()->isPointerType());
    }
    return true;
  }

  bool VisitCXXNewExpr(const CXXNewExpr *E);

  bool VisitSourceLocExpr(const SourceLocExpr *E) {
    assert(!E->isIntType() && "SourceLocExpr isn't a pointer type?");
    // __builtin_source_location etc. are resolved against the enclosing
    // default-argument/default-initializer scope, if any.
    APValue LValResult = E->EvaluateInContext(
        Ctx: Info.Ctx, DefaultExpr: Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
    Result.setFrom(Ctx: Info.Ctx, V: LValResult);
    return true;
  }

  bool VisitEmbedExpr(const EmbedExpr *E) {
    llvm::report_fatal_error(reason: "Not yet implemented for ExprConstant.cpp");
    return true;
  }

  bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) {
    // Materialize the computed name as an ordinary string literal and point
    // at its first character.
    std::string ResultStr = E->ComputeName(Context&: Info.Ctx);

    QualType CharTy = Info.Ctx.CharTy.withConst();
    APInt Size(Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType()),
               ResultStr.size() + 1);
    QualType ArrayTy = Info.Ctx.getConstantArrayType(
        EltTy: CharTy, ArySize: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);

    StringLiteral *SL =
        StringLiteral::Create(Ctx: Info.Ctx, Str: ResultStr, Kind: StringLiteralKind::Ordinary,
                              /*Pascal*/ false, Ty: ArrayTy, Locs: E->getLocation());

    evaluateLValue(E: SL, Result);
    Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val&: ArrayTy));
    return true;
  }

  // FIXME: Missing: @protocol, @selector
};
9991} // end anonymous namespace
9992
/// Evaluate a pointer-typed prvalue into 'Result'. The expression must be a
/// prvalue with pointer representation (object, function, or block pointer).
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
                            bool InvalidBaseOK) {
  assert(!E->isValueDependent());
  assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
  return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(S: E);
}
9999
// Evaluate pointer arithmetic (ptr + int, ptr - int, int + ptr) by adjusting
// the pointer's array designator by the integer offset.
bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() != BO_Add &&
      E->getOpcode() != BO_Sub)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  // Normalize so PExp is the pointer operand and IExp the integer operand
  // (the integer may appear on the left for addition).
  const Expr *PExp = E->getLHS();
  const Expr *IExp = E->getRHS();
  if (IExp->getType()->isPointerType())
    std::swap(a&: PExp, b&: IExp);

  bool EvalPtrOK = evaluatePointer(E: PExp, Result);
  // On failure, keep evaluating the other operand for diagnostics if asked.
  if (!EvalPtrOK && !Info.noteFailure())
    return false;

  llvm::APSInt Offset;
  if (!EvaluateInteger(E: IExp, Result&: Offset, Info) || !EvalPtrOK)
    return false;

  if (E->getOpcode() == BO_Sub)
    negateAsSigned(Int&: Offset);

  QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
  return HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: Pointee, Adjustment: Offset);
}
10024
10025bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
10026 return evaluateLValue(E: E->getSubExpr(), Result);
10027}
10028
10029// Is the provided decl 'std::source_location::current'?
10030static bool IsDeclSourceLocationCurrent(const FunctionDecl *FD) {
10031 if (!FD)
10032 return false;
10033 const IdentifierInfo *FnII = FD->getIdentifier();
10034 if (!FnII || !FnII->isStr(Str: "current"))
10035 return false;
10036
10037 const auto *RD = dyn_cast<RecordDecl>(Val: FD->getParent());
10038 if (!RD)
10039 return false;
10040
10041 const IdentifierInfo *ClassII = RD->getIdentifier();
10042 return RD->isInStdNamespace() && ClassII && ClassII->isStr(Str: "source_location");
10043}
10044
10045bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
10046 const Expr *SubExpr = E->getSubExpr();
10047
10048 switch (E->getCastKind()) {
10049 default:
10050 break;
10051 case CK_BitCast:
10052 case CK_CPointerToObjCPointerCast:
10053 case CK_BlockPointerToObjCPointerCast:
10054 case CK_AnyPointerToBlockPointerCast:
10055 case CK_AddressSpaceConversion:
10056 if (!Visit(S: SubExpr))
10057 return false;
10058 if (E->getType()->isFunctionPointerType() ||
10059 SubExpr->getType()->isFunctionPointerType()) {
10060 // Casting between two function pointer types, or between a function
10061 // pointer and an object pointer, is always a reinterpret_cast.
10062 CCEDiag(E, D: diag::note_constexpr_invalid_cast)
10063 << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
10064 << Info.Ctx.getLangOpts().CPlusPlus;
10065 Result.Designator.setInvalid();
10066 } else if (!E->getType()->isVoidPointerType()) {
10067 // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
10068 // permitted in constant expressions in C++11. Bitcasts from cv void* are
10069 // also static_casts, but we disallow them as a resolution to DR1312.
10070 //
10071 // In some circumstances, we permit casting from void* to cv1 T*, when the
10072 // actual pointee object is actually a cv2 T.
10073 bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
10074 !Result.IsNullPtr;
10075 bool VoidPtrCastMaybeOK =
10076 Result.IsNullPtr ||
10077 (HasValidResult &&
10078 Info.Ctx.hasSimilarType(T1: Result.Designator.getType(Ctx&: Info.Ctx),
10079 T2: E->getType()->getPointeeType()));
10080 // 1. We'll allow it in std::allocator::allocate, and anything which that
10081 // calls.
10082 // 2. HACK 2022-03-28: Work around an issue with libstdc++'s
10083 // <source_location> header. Fixed in GCC 12 and later (2022-04-??).
10084 // We'll allow it in the body of std::source_location::current. GCC's
10085 // implementation had a parameter of type `void*`, and casts from
10086 // that back to `const __impl*` in its body.
10087 if (VoidPtrCastMaybeOK &&
10088 (Info.getStdAllocatorCaller(FnName: "allocate") ||
10089 IsDeclSourceLocationCurrent(FD: Info.CurrentCall->Callee) ||
10090 Info.getLangOpts().CPlusPlus26)) {
10091 // Permitted.
10092 } else {
10093 if (SubExpr->getType()->isVoidPointerType() &&
10094 Info.getLangOpts().CPlusPlus) {
10095 if (HasValidResult)
10096 CCEDiag(E, D: diag::note_constexpr_invalid_void_star_cast)
10097 << SubExpr->getType() << Info.getLangOpts().CPlusPlus26
10098 << Result.Designator.getType(Ctx&: Info.Ctx).getCanonicalType()
10099 << E->getType()->getPointeeType();
10100 else
10101 CCEDiag(E, D: diag::note_constexpr_invalid_cast)
10102 << diag::ConstexprInvalidCastKind::CastFrom
10103 << SubExpr->getType();
10104 } else
10105 CCEDiag(E, D: diag::note_constexpr_invalid_cast)
10106 << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
10107 << Info.Ctx.getLangOpts().CPlusPlus;
10108 Result.Designator.setInvalid();
10109 }
10110 }
10111 if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr)
10112 ZeroInitialization(E);
10113 return true;
10114
10115 case CK_DerivedToBase:
10116 case CK_UncheckedDerivedToBase:
10117 if (!evaluatePointer(E: E->getSubExpr(), Result))
10118 return false;
10119 if (!Result.Base && Result.Offset.isZero())
10120 return true;
10121
10122 // Now figure out the necessary offset to add to the base LV to get from
10123 // the derived class to the base class.
10124 return HandleLValueBasePath(Info, E, Type: E->getSubExpr()->getType()->
10125 castAs<PointerType>()->getPointeeType(),
10126 Result);
10127
10128 case CK_BaseToDerived:
10129 if (!Visit(S: E->getSubExpr()))
10130 return false;
10131 if (!Result.Base && Result.Offset.isZero())
10132 return true;
10133 return HandleBaseToDerivedCast(Info, E, Result);
10134
10135 case CK_Dynamic:
10136 if (!Visit(S: E->getSubExpr()))
10137 return false;
10138 return HandleDynamicCast(Info, E: cast<ExplicitCastExpr>(Val: E), Ptr&: Result);
10139
10140 case CK_NullToPointer:
10141 VisitIgnoredValue(E: E->getSubExpr());
10142 return ZeroInitialization(E);
10143
10144 case CK_IntegralToPointer: {
10145 CCEDiag(E, D: diag::note_constexpr_invalid_cast)
10146 << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
10147 << Info.Ctx.getLangOpts().CPlusPlus;
10148
10149 APValue Value;
10150 if (!EvaluateIntegerOrLValue(E: SubExpr, Result&: Value, Info))
10151 break;
10152
10153 if (Value.isInt()) {
10154 unsigned Size = Info.Ctx.getTypeSize(T: E->getType());
10155 uint64_t N = Value.getInt().extOrTrunc(width: Size).getZExtValue();
10156 if (N == Info.Ctx.getTargetNullPointerValue(QT: E->getType())) {
10157 Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
10158 } else {
10159 Result.Base = (Expr *)nullptr;
10160 Result.InvalidBase = false;
10161 Result.Offset = CharUnits::fromQuantity(Quantity: N);
10162 Result.Designator.setInvalid();
10163 Result.IsNullPtr = false;
10164 }
10165 return true;
10166 } else {
10167 // In rare instances, the value isn't an lvalue.
10168 // For example, when the value is the difference between the addresses of
10169 // two labels. We reject that as a constant expression because we can't
10170 // compute a valid offset to convert into a pointer.
10171 if (!Value.isLValue())
10172 return false;
10173
10174 // Cast is of an lvalue, no need to change value.
10175 Result.setFrom(Ctx: Info.Ctx, V: Value);
10176 return true;
10177 }
10178 }
10179
10180 case CK_ArrayToPointerDecay: {
10181 if (SubExpr->isGLValue()) {
10182 if (!evaluateLValue(E: SubExpr, Result))
10183 return false;
10184 } else {
10185 APValue &Value = Info.CurrentCall->createTemporary(
10186 Key: SubExpr, T: SubExpr->getType(), Scope: ScopeKind::FullExpression, LV&: Result);
10187 if (!EvaluateInPlace(Result&: Value, Info, This: Result, E: SubExpr))
10188 return false;
10189 }
10190 // The result is a pointer to the first element of the array.
10191 auto *AT = Info.Ctx.getAsArrayType(T: SubExpr->getType());
10192 if (auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
10193 Result.addArray(Info, E, CAT);
10194 else
10195 Result.addUnsizedArray(Info, E, ElemTy: AT->getElementType());
10196 return true;
10197 }
10198
10199 case CK_FunctionToPointerDecay:
10200 return evaluateLValue(E: SubExpr, Result);
10201
10202 case CK_LValueToRValue: {
10203 LValue LVal;
10204 if (!evaluateLValue(E: E->getSubExpr(), Result&: LVal))
10205 return false;
10206
10207 APValue RVal;
10208 // Note, we use the subexpression's type in order to retain cv-qualifiers.
10209 if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getSubExpr()->getType(),
10210 LVal, RVal))
10211 return InvalidBaseOK &&
10212 evaluateLValueAsAllocSize(Info, Base: LVal.Base, Result);
10213 return Success(V: RVal, E);
10214 }
10215 }
10216
10217 return ExprEvaluatorBaseTy::VisitCastExpr(E);
10218}
10219
10220static CharUnits GetAlignOfType(const ASTContext &Ctx, QualType T,
10221 UnaryExprOrTypeTrait ExprKind) {
10222 // C++ [expr.alignof]p3:
10223 // When alignof is applied to a reference type, the result is the
10224 // alignment of the referenced type.
10225 T = T.getNonReferenceType();
10226
10227 if (T.getQualifiers().hasUnaligned())
10228 return CharUnits::One();
10229
10230 const bool AlignOfReturnsPreferred =
10231 Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
10232
10233 // __alignof is defined to return the preferred alignment.
10234 // Before 8, clang returned the preferred alignment for alignof and _Alignof
10235 // as well.
10236 if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
10237 return Ctx.toCharUnitsFromBits(BitSize: Ctx.getPreferredTypeAlign(T: T.getTypePtr()));
10238 // alignof and _Alignof are defined to return the ABI alignment.
10239 else if (ExprKind == UETT_AlignOf)
10240 return Ctx.getTypeAlignInChars(T: T.getTypePtr());
10241 else
10242 llvm_unreachable("GetAlignOfType on a non-alignment ExprKind");
10243}
10244
10245CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E,
10246 UnaryExprOrTypeTrait ExprKind) {
10247 E = E->IgnoreParens();
10248
10249 // The kinds of expressions that we have special-case logic here for
10250 // should be kept up to date with the special checks for those
10251 // expressions in Sema.
10252
10253 // alignof decl is always accepted, even if it doesn't make sense: we default
10254 // to 1 in those cases.
10255 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E))
10256 return Ctx.getDeclAlign(D: DRE->getDecl(),
10257 /*RefAsPointee*/ ForAlignof: true);
10258
10259 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: E))
10260 return Ctx.getDeclAlign(D: ME->getMemberDecl(),
10261 /*RefAsPointee*/ ForAlignof: true);
10262
10263 return GetAlignOfType(Ctx, T: E->getType(), ExprKind);
10264}
10265
10266static CharUnits getBaseAlignment(EvalInfo &Info, const LValue &Value) {
10267 if (const auto *VD = Value.Base.dyn_cast<const ValueDecl *>())
10268 return Info.Ctx.getDeclAlign(D: VD);
10269 if (const auto *E = Value.Base.dyn_cast<const Expr *>())
10270 return GetAlignOfExpr(Ctx: Info.Ctx, E, ExprKind: UETT_AlignOf);
10271 return GetAlignOfType(Ctx: Info.Ctx, T: Value.Base.getTypeInfoType(), ExprKind: UETT_AlignOf);
10272}
10273
10274/// Evaluate the value of the alignment argument to __builtin_align_{up,down},
10275/// __builtin_is_aligned and __builtin_assume_aligned.
10276static bool getAlignmentArgument(const Expr *E, QualType ForType,
10277 EvalInfo &Info, APSInt &Alignment) {
10278 if (!EvaluateInteger(E, Result&: Alignment, Info))
10279 return false;
10280 if (Alignment < 0 || !Alignment.isPowerOf2()) {
10281 Info.FFDiag(E, DiagId: diag::note_constexpr_invalid_alignment) << Alignment;
10282 return false;
10283 }
10284 unsigned SrcWidth = Info.Ctx.getIntWidth(T: ForType);
10285 APSInt MaxValue(APInt::getOneBitSet(numBits: SrcWidth, BitNo: SrcWidth - 1));
10286 if (APSInt::compareValues(I1: Alignment, I2: MaxValue) > 0) {
10287 Info.FFDiag(E, DiagId: diag::note_constexpr_alignment_too_big)
10288 << MaxValue << ForType << Alignment;
10289 return false;
10290 }
10291 // Ensure both alignment and source value have the same bit width so that we
10292 // don't assert when computing the resulting value.
10293 APSInt ExtAlignment =
10294 APSInt(Alignment.zextOrTrunc(width: SrcWidth), /*isUnsigned=*/true);
10295 assert(APSInt::compareValues(Alignment, ExtAlignment) == 0 &&
10296 "Alignment should not be changed by ext/trunc");
10297 Alignment = ExtAlignment;
10298 assert(Alignment.getBitWidth() == SrcWidth);
10299 return true;
10300}
10301
10302// To be clear: this happily visits unsupported builtins. Better name welcomed.
10303bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
10304 if (ExprEvaluatorBaseTy::VisitCallExpr(E))
10305 return true;
10306
10307 if (!(InvalidBaseOK && E->getCalleeAllocSizeAttr()))
10308 return false;
10309
10310 Result.setInvalid(B: E);
10311 QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType();
10312 Result.addUnsizedArray(Info, E, ElemTy: PointeeTy);
10313 return true;
10314}
10315
10316bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
10317 if (!IsConstantEvaluatedBuiltinCall(E))
10318 return visitNonBuiltinCallExpr(E);
10319 return VisitBuiltinCallExpr(E, BuiltinOp: E->getBuiltinCallee());
10320}
10321
10322// Determine if T is a character type for which we guarantee that
10323// sizeof(T) == 1.
10324static bool isOneByteCharacterType(QualType T) {
10325 return T->isCharType() || T->isChar8Type();
10326}
10327
/// Evaluate a call to a builtin that produces a pointer result. Handles
/// addressof, the alignment builtins, __builtin_operator_new/launder, the
/// str/mem/wmem "chr" searching family, and the mem/wmem copy family.
/// Unhandled builtins fall through to the default case and fail.
bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
                                                unsigned BuiltinOp) {
  if (IsOpaqueConstantCall(E))
    return Success(E);

  switch (BuiltinOp) {
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    return evaluateLValue(E: E->getArg(Arg: 0), Result);
  case Builtin::BI__builtin_assume_aligned: {
    // We need to be very careful here because: if the pointer does not have the
    // asserted alignment, then the behavior is undefined, and undefined
    // behavior is non-constant.
    if (!evaluatePointer(E: E->getArg(Arg: 0), Result))
      return false;

    LValue OffsetResult(Result);
    APSInt Alignment;
    if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: E->getArg(Arg: 0)->getType(), Info,
                              Alignment))
      return false;
    CharUnits Align = CharUnits::fromQuantity(Quantity: Alignment.getZExtValue());

    if (E->getNumArgs() > 2) {
      APSInt Offset;
      if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Offset, Info))
        return false;

      // The optional third argument is subtracted before the alignment
      // check: assume_aligned(p, a, o) asserts that p - o is a-aligned.
      int64_t AdditionalOffset = -Offset.getZExtValue();
      OffsetResult.Offset += CharUnits::fromQuantity(Quantity: AdditionalOffset);
    }

    // If there is a base object, then it must have the correct alignment.
    if (OffsetResult.Base) {
      CharUnits BaseAlignment = getBaseAlignment(Info, Value: OffsetResult);

      if (BaseAlignment < Align) {
        Result.Designator.setInvalid();
        CCEDiag(E: E->getArg(Arg: 0), D: diag::note_constexpr_baa_insufficient_alignment)
            << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
        return false;
      }
    }

    // The offset must also have the correct alignment.
    if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) {
      Result.Designator.setInvalid();

      // Pick the diagnostic based on whether we know the base object (then
      // the offset within it is misaligned) or only a raw pointer value.
      (OffsetResult.Base
           ? CCEDiag(E: E->getArg(Arg: 0),
                     D: diag::note_constexpr_baa_insufficient_alignment)
                 << 1
           : CCEDiag(E: E->getArg(Arg: 0),
                     D: diag::note_constexpr_baa_value_insufficient_alignment))
          << OffsetResult.Offset.getQuantity() << Align.getQuantity();
      return false;
    }

    return true;
  }
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down: {
    if (!evaluatePointer(E: E->getArg(Arg: 0), Result))
      return false;
    APSInt Alignment;
    if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: E->getArg(Arg: 0)->getType(), Info,
                              Alignment))
      return false;
    CharUnits BaseAlignment = getBaseAlignment(Info, Value: Result);
    CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(offset: Result.Offset);
    // For align_up/align_down, we can return the same value if the alignment
    // is known to be greater or equal to the requested value.
    if (PtrAlign.getQuantity() >= Alignment)
      return true;

    // The alignment could be greater than the minimum at run-time, so we cannot
    // infer much about the resulting pointer value. One case is possible:
    // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
    // can infer the correct index if the requested alignment is smaller than
    // the base alignment so we can perform the computation on the offset.
    if (BaseAlignment.getQuantity() >= Alignment) {
      assert(Alignment.getBitWidth() <= 64 &&
             "Cannot handle > 64-bit address-space");
      uint64_t Alignment64 = Alignment.getZExtValue();
      CharUnits NewOffset = CharUnits::fromQuantity(
          Quantity: BuiltinOp == Builtin::BI__builtin_align_down
              ? llvm::alignDown(Value: Result.Offset.getQuantity(), Align: Alignment64)
              : llvm::alignTo(Value: Result.Offset.getQuantity(), Align: Alignment64));
      Result.adjustOffset(N: NewOffset - Result.Offset);
      // TODO: diagnose out-of-bounds values/only allow for arrays?
      return true;
    }
    // Otherwise, we cannot constant-evaluate the result.
    Info.FFDiag(E: E->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_adjust)
        << Alignment;
    return false;
  }
  case Builtin::BI__builtin_operator_new:
    return HandleOperatorNewCall(Info, E, Result);
  case Builtin::BI__builtin_launder:
    // launder is a no-op for constant evaluation: the pointer value passes
    // through unchanged.
    return evaluatePointer(E: E->getArg(Arg: 0), Result);
  case Builtin::BIstrchr:
  case Builtin::BIwcschr:
  case Builtin::BImemchr:
  case Builtin::BIwmemchr:
    // Calling the library functions (rather than the __builtin_ forms) is
    // not a constant expression; diagnose but continue folding.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_strchr:
  case Builtin::BI__builtin_wcschr:
  case Builtin::BI__builtin_memchr:
  case Builtin::BI__builtin_char_memchr:
  case Builtin::BI__builtin_wmemchr: {
    if (!Visit(S: E->getArg(Arg: 0)))
      return false;
    APSInt Desired;
    if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: Desired, Info))
      return false;
    // The strchr/wcschr forms have no explicit length; treat it as
    // unbounded and rely on StopAtNull to terminate the scan.
    uint64_t MaxLength = uint64_t(-1);
    if (BuiltinOp != Builtin::BIstrchr &&
        BuiltinOp != Builtin::BIwcschr &&
        BuiltinOp != Builtin::BI__builtin_strchr &&
        BuiltinOp != Builtin::BI__builtin_wcschr) {
      APSInt N;
      if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info))
        return false;
      MaxLength = N.getZExtValue();
    }
    // We cannot find the value if there are no candidates to match against.
    if (MaxLength == 0u)
      return ZeroInitialization(E);
    if (!Result.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) ||
        Result.Designator.Invalid)
      return false;
    QualType CharTy = Result.Designator.getType(Ctx&: Info.Ctx);
    bool IsRawByte = BuiltinOp == Builtin::BImemchr ||
                     BuiltinOp == Builtin::BI__builtin_memchr;
    assert(IsRawByte ||
           Info.Ctx.hasSameUnqualifiedType(
               CharTy, E->getArg(0)->getType()->getPointeeType()));
    // Pointers to const void may point to objects of incomplete type.
    if (IsRawByte && CharTy->isIncompleteType()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_ltor_incomplete_type) << CharTy;
      return false;
    }
    // Give up on byte-oriented matching against multibyte elements.
    // FIXME: We can compare the bytes in the correct order.
    if (IsRawByte && !isOneByteCharacterType(T: CharTy)) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memchr_unsupported)
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp) << CharTy;
      return false;
    }
    // Figure out what value we're actually looking for (after converting to
    // the corresponding unsigned type if necessary).
    uint64_t DesiredVal;
    bool StopAtNull = false;
    switch (BuiltinOp) {
    case Builtin::BIstrchr:
    case Builtin::BI__builtin_strchr:
      // strchr compares directly to the passed integer, and therefore
      // always fails if given an int that is not a char.
      if (!APSInt::isSameValue(I1: HandleIntToIntCast(Info, E, DestType: CharTy,
                                                 SrcType: E->getArg(Arg: 1)->getType(),
                                                 Value: Desired),
                               I2: Desired))
        return ZeroInitialization(E);
      StopAtNull = true;
      [[fallthrough]];
    case Builtin::BImemchr:
    case Builtin::BI__builtin_memchr:
    case Builtin::BI__builtin_char_memchr:
      // memchr compares by converting both sides to unsigned char. That's also
      // correct for strchr if we get this far (to cope with plain char being
      // unsigned in the strchr case).
      DesiredVal = Desired.trunc(width: Info.Ctx.getCharWidth()).getZExtValue();
      break;

    case Builtin::BIwcschr:
    case Builtin::BI__builtin_wcschr:
      StopAtNull = true;
      [[fallthrough]];
    case Builtin::BIwmemchr:
    case Builtin::BI__builtin_wmemchr:
      // wcschr and wmemchr are given a wchar_t to look for. Just use it.
      DesiredVal = Desired.getZExtValue();
      break;
    }

    // Walk the elements one at a time, reading each through an
    // lvalue-to-rvalue conversion so access checking is performed.
    for (; MaxLength; --MaxLength) {
      APValue Char;
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: CharTy, LVal: Result, RVal&: Char) ||
          !Char.isInt())
        return false;
      if (Char.getInt().getZExtValue() == DesiredVal)
        return true;
      if (StopAtNull && !Char.getInt())
        break;
      if (!HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: CharTy, Adjustment: 1))
        return false;
    }
    // Not found: return nullptr.
    return ZeroInitialization(E);
  }

  case Builtin::BImemcpy:
  case Builtin::BImemmove:
  case Builtin::BIwmemcpy:
  case Builtin::BIwmemmove:
    // As above: the library functions themselves are not usable in a
    // constant expression; diagnose but continue folding.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_memcpy:
  case Builtin::BI__builtin_memmove:
  case Builtin::BI__builtin_wmemcpy:
  case Builtin::BI__builtin_wmemmove: {
    bool WChar = BuiltinOp == Builtin::BIwmemcpy ||
                 BuiltinOp == Builtin::BIwmemmove ||
                 BuiltinOp == Builtin::BI__builtin_wmemcpy ||
                 BuiltinOp == Builtin::BI__builtin_wmemmove;
    bool Move = BuiltinOp == Builtin::BImemmove ||
                BuiltinOp == Builtin::BIwmemmove ||
                BuiltinOp == Builtin::BI__builtin_memmove ||
                BuiltinOp == Builtin::BI__builtin_wmemmove;

    // The result of mem* is the first argument.
    if (!Visit(S: E->getArg(Arg: 0)))
      return false;
    LValue Dest = Result;

    LValue Src;
    if (!EvaluatePointer(E: E->getArg(Arg: 1), Result&: Src, Info))
      return false;

    APSInt N;
    if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info))
      return false;
    assert(!N.isSigned() && "memcpy and friends take an unsigned size");

    // If the size is zero, we treat this as always being a valid no-op.
    // (Even if one of the src and dest pointers is null.)
    if (!N)
      return true;

    // Otherwise, if either of the operands is null, we can't proceed. Don't
    // try to determine the type of the copied objects, because there aren't
    // any.
    if (!Src.Base || !Dest.Base) {
      APValue Val;
      (!Src.Base ? Src : Dest).moveInto(V&: Val);
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_null)
          << Move << WChar << !!Src.Base
          << Val.getAsString(Ctx: Info.Ctx, Ty: E->getArg(Arg: 0)->getType());
      return false;
    }
    if (Src.Designator.Invalid || Dest.Designator.Invalid)
      return false;

    // We require that Src and Dest are both pointers to arrays of
    // trivially-copyable type. (For the wide version, the designator will be
    // invalid if the designated object is not a wchar_t.)
    QualType T = Dest.Designator.getType(Ctx&: Info.Ctx);
    QualType SrcT = Src.Designator.getType(Ctx&: Info.Ctx);
    if (!Info.Ctx.hasSameUnqualifiedType(T1: T, T2: SrcT)) {
      // FIXME: Consider using our bit_cast implementation to support this.
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
      return false;
    }
    if (T->isIncompleteType()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_incomplete_type) << Move << T;
      return false;
    }
    if (!T.isTriviallyCopyableType(Context: Info.Ctx)) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_nontrivial) << Move << T;
      return false;
    }

    // Figure out how many T's we're copying.
    uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity();
    if (TSize == 0)
      return false;
    if (!WChar) {
      // For the byte-sized forms, N counts bytes: convert it to a whole
      // number of elements, rejecting a partial-element copy.
      uint64_t Remainder;
      llvm::APInt OrigN = N;
      llvm::APInt::udivrem(LHS: OrigN, RHS: TSize, Quotient&: N, Remainder);
      if (Remainder) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_unsupported)
            << Move << WChar << 0 << T << toString(I: OrigN, Radix: 10, /*Signed*/false)
            << (unsigned)TSize;
        return false;
      }
    }

    // Check that the copying will remain within the arrays, just so that we
    // can give a more meaningful diagnostic. This implicitly also checks that
    // N fits into 64 bits.
    uint64_t RemainingSrcSize = Src.Designator.validIndexAdjustments().second;
    uint64_t RemainingDestSize = Dest.Designator.validIndexAdjustments().second;
    if (N.ugt(RHS: RemainingSrcSize) || N.ugt(RHS: RemainingDestSize)) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_unsupported)
          << Move << WChar << (N.ugt(RHS: RemainingSrcSize) ? 1 : 2) << T
          << toString(I: N, Radix: 10, /*Signed*/false);
      return false;
    }
    uint64_t NElems = N.getZExtValue();
    uint64_t NBytes = NElems * TSize;

    // Check for overlap.
    int Direction = 1;
    if (HasSameBase(A: Src, B: Dest)) {
      uint64_t SrcOffset = Src.getLValueOffset().getQuantity();
      uint64_t DestOffset = Dest.getLValueOffset().getQuantity();
      if (DestOffset >= SrcOffset && DestOffset - SrcOffset < NBytes) {
        // Dest is inside the source region.
        if (!Move) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_overlap) << WChar;
          return false;
        }
        // For memmove and friends, copy backwards.
        if (!HandleLValueArrayAdjustment(Info, E, LVal&: Src, EltTy: T, Adjustment: NElems - 1) ||
            !HandleLValueArrayAdjustment(Info, E, LVal&: Dest, EltTy: T, Adjustment: NElems - 1))
          return false;
        Direction = -1;
      } else if (!Move && SrcOffset >= DestOffset &&
                 SrcOffset - DestOffset < NBytes) {
        // Src is inside the destination region for memcpy: invalid.
        Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_overlap) << WChar;
        return false;
      }
    }

    // Copy element by element in the chosen direction, reading through an
    // lvalue-to-rvalue conversion and writing via handleAssignment so all
    // the usual access checks apply to both sides.
    while (true) {
      APValue Val;
      // FIXME: Set WantObjectRepresentation to true if we're copying a
      // char-like type?
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: T, LVal: Src, RVal&: Val) ||
          !handleAssignment(Info, E, LVal: Dest, LValType: T, Val))
        return false;
      // Do not iterate past the last element; if we're copying backwards, that
      // might take us off the start of the array.
      if (--NElems == 0)
        return true;
      if (!HandleLValueArrayAdjustment(Info, E, LVal&: Src, EltTy: T, Adjustment: Direction) ||
          !HandleLValueArrayAdjustment(Info, E, LVal&: Dest, EltTy: T, Adjustment: Direction))
        return false;
    }
  }

  default:
    return false;
  }
}
10688
10689static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
10690 APValue &Result, const InitListExpr *ILE,
10691 QualType AllocType);
10692static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
10693 APValue &Result,
10694 const CXXConstructExpr *CCE,
10695 QualType AllocType);
10696
/// Evaluate a new-expression. Supports ordinary (replaceable) operator new,
/// the (std::nothrow) placement form, and — where permitted — true placement
/// new into an existing object. On success, Result points at the allocated
/// object (or its first element, for array new).
bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
  if (!Info.getLangOpts().CPlusPlus20)
    Info.CCEDiag(E, DiagId: diag::note_constexpr_new);

  // We cannot speculatively evaluate a new expression.
  if (Info.SpeculativeEvaluationDepth)
    return false;

  FunctionDecl *OperatorNew = E->getOperatorNew();
  QualType AllocType = E->getAllocatedType();
  // TargetType tracks the type of the storage being initialized; it differs
  // from AllocType only for true placement new.
  QualType TargetType = AllocType;

  bool IsNothrow = false;
  bool IsPlacement = false;

  if (E->getNumPlacementArgs() == 1 &&
      E->getPlacementArg(I: 0)->getType()->isNothrowT()) {
    // The only new-placement list we support is of the form (std::nothrow).
    //
    // FIXME: There is no restriction on this, but it's not clear that any
    // other form makes any sense. We get here for cases such as:
    //
    //   new (std::align_val_t{N}) X(int)
    //
    // (which should presumably be valid only if N is a multiple of
    // alignof(int), and in any case can't be deallocated unless N is
    // alignof(X) and X has new-extended alignment).
    LValue Nothrow;
    if (!EvaluateLValue(E: E->getPlacementArg(I: 0), Result&: Nothrow, Info))
      return false;
    IsNothrow = true;
  } else if (OperatorNew->isReservedGlobalPlacementOperator()) {
    // True placement new: only allowed inside std:: functions, in C++26, or
    // under the MS constexpr extension.
    if (Info.CurrentCall->isStdFunction() || Info.getLangOpts().CPlusPlus26 ||
        (Info.CurrentCall->CanEvalMSConstexpr &&
         OperatorNew->hasAttr<MSConstexprAttr>())) {
      if (!EvaluatePointer(E: E->getPlacementArg(I: 0), Result, Info))
        return false;
      if (Result.Designator.Invalid)
        return false;
      TargetType = E->getPlacementArg(I: 0)->getType();
      IsPlacement = true;
    } else {
      Info.FFDiag(E, DiagId: diag::note_constexpr_new_placement)
          << /*C++26 feature*/ 1 << E->getSourceRange();
      return false;
    }
  } else if (E->getNumPlacementArgs()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_placement)
        << /*Unsupported*/ 0 << E->getSourceRange();
    return false;
  } else if (!OperatorNew
                  ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable)
        << isa<CXXMethodDecl>(Val: OperatorNew) << OperatorNew;
    return false;
  }

  const Expr *Init = E->getInitializer();
  const InitListExpr *ResizedArrayILE = nullptr;
  const CXXConstructExpr *ResizedArrayCCE = nullptr;
  bool ValueInit = false;

  if (std::optional<const Expr *> ArraySize = E->getArraySize()) {
    // Strip no-op and integral-cast wrappers so diagnostics refer to the
    // value as written.
    const Expr *Stripped = *ArraySize;
    for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Val: Stripped);
         Stripped = ICE->getSubExpr())
      if (ICE->getCastKind() != CK_NoOp &&
          ICE->getCastKind() != CK_IntegralCast)
        break;

    llvm::APSInt ArrayBound;
    if (!EvaluateInteger(E: Stripped, Result&: ArrayBound, Info))
      return false;

    // C++ [expr.new]p9:
    //   The expression is erroneous if:
    //   -- [...] its value before converting to size_t [or] applying the
    //      second standard conversion sequence is less than zero
    if (ArrayBound.isSigned() && ArrayBound.isNegative()) {
      // In the nothrow form an erroneous size yields a null pointer rather
      // than a failure to fold.
      if (IsNothrow)
        return ZeroInitialization(E);

      Info.FFDiag(E: *ArraySize, DiagId: diag::note_constexpr_new_negative)
          << ArrayBound << (*ArraySize)->getSourceRange();
      return false;
    }

    //   -- its value is such that the size of the allocated object would
    //      exceed the implementation-defined limit
    if (!Info.CheckArraySize(Loc: ArraySize.value()->getExprLoc(),
                             BitWidth: ConstantArrayType::getNumAddressingBits(
                                 Context: Info.Ctx, ElementType: AllocType, NumElements: ArrayBound),
                             ElemCount: ArrayBound.getZExtValue(), /*Diag=*/!IsNothrow)) {
      if (IsNothrow)
        return ZeroInitialization(E);
      return false;
    }

    //   -- the new-initializer is a braced-init-list and the number of
    //      array elements for which initializers are provided [...]
    //      exceeds the number of elements to initialize
    if (!Init) {
      // No initialization is performed.
    } else if (isa<CXXScalarValueInitExpr>(Val: Init) ||
               isa<ImplicitValueInitExpr>(Val: Init)) {
      ValueInit = true;
    } else if (auto *CCE = dyn_cast<CXXConstructExpr>(Val: Init)) {
      ResizedArrayCCE = CCE;
    } else {
      auto *CAT = Info.Ctx.getAsConstantArrayType(T: Init->getType());
      assert(CAT && "unexpected type for array initializer");

      // Compare the initializer's bound against the allocated bound at a
      // common bit width.
      unsigned Bits =
          std::max(a: CAT->getSizeBitWidth(), b: ArrayBound.getBitWidth());
      llvm::APInt InitBound = CAT->getSize().zext(width: Bits);
      llvm::APInt AllocBound = ArrayBound.zext(width: Bits);
      if (InitBound.ugt(RHS: AllocBound)) {
        if (IsNothrow)
          return ZeroInitialization(E);

        Info.FFDiag(E: *ArraySize, DiagId: diag::note_constexpr_new_too_small)
            << toString(I: AllocBound, Radix: 10, /*Signed=*/false)
            << toString(I: InitBound, Radix: 10, /*Signed=*/false)
            << (*ArraySize)->getSourceRange();
        return false;
      }

      // If the sizes differ, we must have an initializer list, and we need
      // special handling for this case when we initialize.
      if (InitBound != AllocBound)
        ResizedArrayILE = cast<InitListExpr>(Val: Init);
    }

    AllocType = Info.Ctx.getConstantArrayType(EltTy: AllocType, ArySize: ArrayBound, SizeExpr: nullptr,
                                              ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  } else {
    assert(!AllocType->isArrayType() &&
           "array allocation with non-array new");
  }

  APValue *Val;
  if (IsPlacement) {
    // Locate the existing object whose storage is being reused, checking
    // that it is a modifiable object of a suitable type and size.
    AccessKinds AK = AK_Construct;
    struct FindObjectHandler {
      EvalInfo &Info;
      const Expr *E;
      QualType AllocType;
      const AccessKinds AccessKind;
      APValue *Value;

      typedef bool result_type;
      bool failed() { return false; }
      bool checkConst(QualType QT) {
        // Reusing the storage of a const object is not permitted.
        if (QT.isConstQualified()) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
          return false;
        }
        return true;
      }
      bool found(APValue &Subobj, QualType SubobjType) {
        if (!checkConst(QT: SubobjType))
          return false;
        // FIXME: Reject the cases where [basic.life]p8 would not permit the
        // old name of the object to be used to name the new object.
        // The found subobject must be at least as large as the allocation,
        // and of a similar element type.
        unsigned SubobjectSize = 1;
        unsigned AllocSize = 1;
        if (auto *CAT = dyn_cast<ConstantArrayType>(Val&: AllocType))
          AllocSize = CAT->getZExtSize();
        if (auto *CAT = dyn_cast<ConstantArrayType>(Val&: SubobjType))
          SubobjectSize = CAT->getZExtSize();
        if (SubobjectSize < AllocSize ||
            !Info.Ctx.hasSimilarType(T1: Info.Ctx.getBaseElementType(QT: SubobjType),
                                     T2: Info.Ctx.getBaseElementType(QT: AllocType))) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_placement_new_wrong_type)
              << SubobjType << AllocType;
          return false;
        }
        Value = &Subobj;
        return true;
      }
      bool found(APSInt &Value, QualType SubobjType) {
        // Landing on a scalar element of a _Complex: can't construct there.
        Info.FFDiag(E, DiagId: diag::note_constexpr_construct_complex_elem);
        return false;
      }
      bool found(APFloat &Value, QualType SubobjType) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_construct_complex_elem);
        return false;
      }
    } Handler = {.Info: Info, .E: E, .AllocType: AllocType, .AccessKind: AK, .Value: nullptr};

    CompleteObject Obj = findCompleteObject(Info, E, AK, LVal: Result, LValType: AllocType);
    if (!Obj || !findSubobject(Info, E, Obj, Sub: Result.Designator, handler&: Handler))
      return false;

    Val = Handler.Value;

    // [basic.life]p1:
    //   The lifetime of an object o of type T ends when [...] the storage
    //   which the object occupies is [...] reused by an object that is not
    //   nested within o (6.6.2).
    *Val = APValue();
  } else {
    // Perform the allocation and obtain a pointer to the resulting object.
    Val = Info.createHeapAlloc(E, T: AllocType, LV&: Result);
    if (!Val)
      return false;
  }

  // Run the initializer (value-init, resized array forms, an explicit
  // initializer, or default-init) into the allocated storage.
  if (ValueInit) {
    ImplicitValueInitExpr VIE(AllocType);
    if (!EvaluateInPlace(Result&: *Val, Info, This: Result, E: &VIE))
      return false;
  } else if (ResizedArrayILE) {
    if (!EvaluateArrayNewInitList(Info, This&: Result, Result&: *Val, ILE: ResizedArrayILE,
                                  AllocType))
      return false;
  } else if (ResizedArrayCCE) {
    if (!EvaluateArrayNewConstructExpr(Info, This&: Result, Result&: *Val, CCE: ResizedArrayCCE,
                                       AllocType))
      return false;
  } else if (Init) {
    if (!EvaluateInPlace(Result&: *Val, Info, This: Result, E: Init))
      return false;
  } else if (!handleDefaultInitValue(T: AllocType, Result&: *Val)) {
    return false;
  }

  // Array new returns a pointer to the first element, not a pointer to the
  // array.
  if (auto *AT = AllocType->getAsArrayTypeUnsafe())
    Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val: AT));

  return true;
}
10931//===----------------------------------------------------------------------===//
10932// Member Pointer Evaluation
10933//===----------------------------------------------------------------------===//
10934
namespace {
/// Evaluator for prvalues of pointer-to-member type. The result is
/// accumulated into a MemberPtr: the designated member declaration plus any
/// derived/base class adjustments applied by casts.
class MemberPointerExprEvaluator
  : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
  MemberPtr &Result;

  // Record a member pointer that designates D (null D designates the null
  // member pointer).
  bool Success(const ValueDecl *D) {
    Result = MemberPtr(D);
    return true;
  }
public:

  MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
    : ExprEvaluatorBaseTy(Info), Result(Result) {}

  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(V);
    return true;
  }
  // Zero-initialization of a member pointer yields the null member pointer.
  bool ZeroInitialization(const Expr *E) {
    return Success(D: (const ValueDecl*)nullptr);
  }

  bool VisitCastExpr(const CastExpr *E);
  bool VisitUnaryAddrOf(const UnaryOperator *E);
};
} // end anonymous namespace
10961
/// Evaluate the member-pointer prvalue E into Result. Returns false on
/// failure; any appropriate diagnostic has already been produced.
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
                                  EvalInfo &Info) {
  assert(!E->isValueDependent());
  assert(E->isPRValue() && E->getType()->isMemberPointerType());
  return MemberPointerExprEvaluator(Info, Result).Visit(S: E);
}
10968
/// Evaluate a cast producing a member pointer. Null-to-member-pointer and
/// the two hierarchy adjustments (base-to-derived, derived-to-base) are
/// handled here; everything else falls back to the generic cast handling.
bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_NullToMemberPointer:
    // Evaluate the operand only for its side-effect diagnostics; the result
    // is the null member pointer regardless.
    VisitIgnoredValue(E: E->getSubExpr());
    return ZeroInitialization(E);

  case CK_BaseToDerivedMemberPointer: {
    if (!Visit(S: E->getSubExpr()))
      return false;
    if (E->path_empty())
      return true;
    // Base-to-derived member pointer casts store the path in derived-to-base
    // order, so iterate backwards. The CXXBaseSpecifier also provides us with
    // the wrong end of the derived->base arc, so stagger the path by one class.
    typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
    for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
         PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
      const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
      if (!Result.castToDerived(Derived))
        return Error(E);
    }
    // The final (staggered) step lands on the cast's destination class.
    if (!Result.castToDerived(Derived: E->getType()
                                  ->castAs<MemberPointerType>()
                                  ->getMostRecentCXXRecordDecl()))
      return Error(E);
    return true;
  }

  case CK_DerivedToBaseMemberPointer:
    if (!Visit(S: E->getSubExpr()))
      return false;
    // Walk the path in stored order, adjusting one base class at a time.
    for (CastExpr::path_const_iterator PathI = E->path_begin(),
         PathE = E->path_end(); PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
      const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
      if (!Result.castToBase(Base))
        return Error(E);
    }
    return true;
  }
}
11014
/// Evaluate '&C::member'. The strict grammar rules mean the operand is
/// always a direct DeclRefExpr naming the member, so the cast below is safe.
bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
  // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
  // member can be formed.
  return Success(D: cast<DeclRefExpr>(Val: E->getSubExpr())->getDecl());
}
11020
11021//===----------------------------------------------------------------------===//
11022// Record Evaluation
11023//===----------------------------------------------------------------------===//
11024
namespace {
  /// Evaluator for prvalues of class (record) type. 'This' designates the
  /// object being initialized in place; the computed value lands in Result.
  class RecordExprEvaluator
  : public ExprEvaluatorBase<RecordExprEvaluator> {
    const LValue &This;
    APValue &Result;
  public:

    RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
      : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}

    /// Adopt an already-computed APValue as the result.
    bool Success(const APValue &V, const Expr *E) {
      Result = V;
      return true;
    }
    bool ZeroInitialization(const Expr *E) {
      return ZeroInitialization(E, T: E->getType());
    }
    bool ZeroInitialization(const Expr *E, QualType T);

    // Function calls returning a class prvalue construct directly into This.
    bool VisitCallExpr(const CallExpr *E) {
      return handleCallExpr(E, Result, ResultSlot: &This);
    }
    bool VisitCastExpr(const CastExpr *E);
    bool VisitInitListExpr(const InitListExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
      // Note: the constructed type may differ from E's type (e.g. when
      // initializing an array element), hence the two-argument overload.
      return VisitCXXConstructExpr(E, T: E->getType());
    }
    bool VisitLambdaExpr(const LambdaExpr *E);
    bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
    bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
    bool VisitBinCmp(const BinaryOperator *E);
    bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
    // Shared implementation for braced and parenthesized aggregate init.
    bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
                                         ArrayRef<Expr *> Args);
  };
}
11062
11063/// Perform zero-initialization on an object of non-union class type.
11064/// C++11 [dcl.init]p5:
11065/// To zero-initialize an object or reference of type T means:
11066/// [...]
11067/// -- if T is a (possibly cv-qualified) non-union class type,
11068/// each non-static data member and each base-class subobject is
11069/// zero-initialized
static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
                                          const RecordDecl *RD,
                                          const LValue &This, APValue &Result) {
  assert(!RD->isUnion() && "Expected non-union class type");
  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD);
  // Start from an uninitialized struct with slots for each base and field.
  Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
                   RD->getNumFields());

  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

  if (CD) {
    // Recursively zero-initialize each direct base-class subobject.
    unsigned Index = 0;
    for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
           End = CD->bases_end(); I != End; ++I, ++Index) {
      const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
      LValue Subobject = This;
      if (!HandleLValueDirectBase(Info, E, Obj&: Subobject, Derived: CD, Base, RL: &Layout))
        return false;
      if (!HandleClassZeroInitialization(Info, E, RD: Base, This: Subobject,
                                         Result&: Result.getStructBase(i: Index)))
        return false;
    }
  }

  // Zero-initialize each non-static data member via an implicit
  // value-initialization of its type.
  for (const auto *I : RD->fields()) {
    // -- if T is a reference type, no initialization is performed.
    if (I->isUnnamedBitField() || I->getType()->isReferenceType())
      continue;

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: I, RL: &Layout))
      return false;

    ImplicitValueInitExpr VIE(I->getType());
    if (!EvaluateInPlace(
            Result&: Result.getStructField(i: I->getFieldIndex()), Info, This: Subobject, E: &VIE))
      return false;
  }

  return true;
}
11112
/// Zero-initialize an object of record type T, designated by This.
/// Unions zero-initialize only their first named member; non-union classes
/// delegate to HandleClassZeroInitialization. Classes with virtual bases
/// are rejected with a diagnostic.
bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
  const auto *RD = T->castAsRecordDecl();
  if (RD->isInvalidDecl()) return false;
  if (RD->isUnion()) {
    // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
    // object's first non-static named data member is zero-initialized
    RecordDecl::field_iterator I = RD->field_begin();
    while (I != RD->field_end() && (*I)->isUnnamedBitField())
      ++I;
    if (I == RD->field_end()) {
      // Union with no named members: no active member to initialize.
      Result = APValue((const FieldDecl*)nullptr);
      return true;
    }

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: *I))
      return false;
    Result = APValue(*I);
    ImplicitValueInitExpr VIE(I->getType());
    return EvaluateInPlace(Result&: Result.getUnionValue(), Info, This: Subobject, E: &VIE);
  }

  if (isa<CXXRecordDecl>(Val: RD) && cast<CXXRecordDecl>(Val: RD)->getNumVBases()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  return HandleClassZeroInitialization(Info, E, RD, This, Result);
}
11142
/// Evaluate a cast producing a class prvalue: constructor conversions,
/// rvalue derived-to-base slicing, and the HLSL aggregate casts.
bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_ConstructorConversion:
    return Visit(S: E->getSubExpr());

  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase: {
    APValue DerivedObject;
    if (!Evaluate(Result&: DerivedObject, Info, E: E->getSubExpr()))
      return false;
    if (!DerivedObject.isStruct())
      return Error(E: E->getSubExpr());

    // Derived-to-base rvalue conversion: just slice off the derived part.
    APValue *Value = &DerivedObject;
    const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
    for (CastExpr::path_const_iterator PathI = E->path_begin(),
         PathE = E->path_end(); PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
      const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
      Value = &Value->getStructBase(i: getBaseIndex(Derived: RD, Base));
      RD = Base;
    }
    Result = *Value;
    return true;
  }
  case CK_HLSLAggregateSplatCast: {
    APValue Val;
    QualType ValTy;

    if (!hlslAggSplatHelper(Info, E: E->getSubExpr(), SrcVal&: Val, SrcTy&: ValTy))
      return false;

    // Replicate the single source value across every element of the
    // destination aggregate.
    unsigned NEls = elementwiseSize(Info, BaseTy: E->getType());
    // splat our Val
    SmallVector<APValue> SplatEls(NEls, Val);
    SmallVector<QualType> SplatType(NEls, ValTy);

    // cast the elements and construct our struct result
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SplatEls,
                            ElTypes&: SplatType))
      return false;

    return true;
  }
  case CK_HLSLElementwiseCast: {
    // Flatten the source into scalar elements, then cast each element to
    // the corresponding destination element type.
    SmallVector<APValue> SrcEls;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: E->getSubExpr(), DestTy: E->getType(), SrcVals&: SrcEls,
                                   SrcTypes))
      return false;

    // cast the elements and construct our struct result
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SrcEls,
                            ElTypes&: SrcTypes))
      return false;

    return true;
  }
  }
}
11210
/// Evaluate a braced init list of record type. A "transparent" list simply
/// forwards to its single initializer; otherwise use the shared
/// aggregate-initialization path.
bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
  if (E->isTransparent())
    return Visit(S: E->getInit(Init: 0));
  return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->inits());
}
11216
/// Shared implementation of aggregate initialization for both braced init
/// lists (InitListExpr) and parenthesized lists (CXXParenListInitExpr).
/// Initializes bases in declaration order, then fields, value-initializing
/// any fields beyond the end of Args.
bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
    const Expr *ExprToVisit, ArrayRef<Expr *> Args) {
  const auto *RD = ExprToVisit->getType()->castAsRecordDecl();
  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);
  auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD);

  // Track that this object is mid-construction so member accesses during
  // initialization are checked appropriately.
  EvalInfo::EvaluatingConstructorRAII EvalObj(
      Info,
      ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries},
      CXXRD && CXXRD->getNumBases());

  if (RD->isUnion()) {
    // For a union, at most one member (the "initialized field") is active.
    const FieldDecl *Field;
    if (auto *ILE = dyn_cast<InitListExpr>(Val: ExprToVisit)) {
      Field = ILE->getInitializedFieldInUnion();
    } else if (auto *PLIE = dyn_cast<CXXParenListInitExpr>(Val: ExprToVisit)) {
      Field = PLIE->getInitializedFieldInUnion();
    } else {
      llvm_unreachable(
          "Expression is neither an init list nor a C++ paren list");
    }

    Result = APValue(Field);
    if (!Field)
      return true;

    // If the initializer list for a union does not contain any elements, the
    // first element of the union is value-initialized.
    // FIXME: The element should be initialized from an initializer list.
    //        Is this difference ever observable for initializer lists which
    //        we don't build?
    ImplicitValueInitExpr VIE(Field->getType());
    const Expr *InitExpr = Args.empty() ? &VIE : Args[0];

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E: InitExpr, LVal&: Subobject, FD: Field, RL: &Layout))
      return false;

    // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
    ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
                                  isa<CXXDefaultInitExpr>(Val: InitExpr));

    if (EvaluateInPlace(Result&: Result.getUnionValue(), Info, This: Subobject, E: InitExpr)) {
      if (Field->isBitField())
        return truncateBitfieldValue(Info, E: InitExpr, Value&: Result.getUnionValue(),
                                     FD: Field);
      return true;
    }

    return false;
  }

  if (!Result.hasValue())
    Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0,
                     RD->getNumFields());
  unsigned ElementNo = 0;
  bool Success = true;

  // Initialize base classes.
  if (CXXRD && CXXRD->getNumBases()) {
    for (const auto &Base : CXXRD->bases()) {
      assert(ElementNo < Args.size() && "missing init for base class");
      const Expr *Init = Args[ElementNo];

      LValue Subobject = This;
      if (!HandleLValueBase(Info, E: Init, Obj&: Subobject, DerivedDecl: CXXRD, Base: &Base))
        return false;

      APValue &FieldVal = Result.getStructBase(i: ElementNo);
      if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: Init)) {
        // Keep evaluating to gather further diagnostics when permitted.
        if (!Info.noteFailure())
          return false;
        Success = false;
      }
      ++ElementNo;
    }

    EvalObj.finishedConstructingBases();
  }

  // Initialize members.
  for (const auto *Field : RD->fields()) {
    // Anonymous bit-fields are not considered members of the class for
    // purposes of aggregate initialization.
    if (Field->isUnnamedBitField())
      continue;

    LValue Subobject = This;

    bool HaveInit = ElementNo < Args.size();

    // FIXME: Diagnostics here should point to the end of the initializer
    // list, not the start.
    if (!HandleLValueMember(Info, E: HaveInit ? Args[ElementNo] : ExprToVisit,
                            LVal&: Subobject, FD: Field, RL: &Layout))
      return false;

    // Perform an implicit value-initialization for members beyond the end of
    // the initializer list.
    // (When an explicit initializer exists, VIE is unused; IntTy is just a
    // cheap placeholder type for its construction.)
    ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
    const Expr *Init = HaveInit ? Args[ElementNo++] : &VIE;

    if (Field->getType()->isIncompleteArrayType()) {
      if (auto *CAT = Info.Ctx.getAsConstantArrayType(T: Init->getType())) {
        if (!CAT->isZeroSize()) {
          // Bail out for now. This might sort of "work", but the rest of the
          // code isn't really prepared to handle it.
          Info.FFDiag(E: Init, DiagId: diag::note_constexpr_unsupported_flexible_array);
          return false;
        }
      }
    }

    // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
    ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
                                  isa<CXXDefaultInitExpr>(Val: Init));

    APValue &FieldVal = Result.getStructField(i: Field->getFieldIndex());
    if (Field->getType()->isReferenceType()) {
      LValue Result;
      if (!EvaluateInitForDeclOfReferenceType(Info, D: Field, Init, Result,
                                              Val&: FieldVal)) {
        if (!Info.noteFailure())
          return false;
        Success = false;
      }
    } else if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: Init) ||
               (Field->isBitField() &&
                !truncateBitfieldValue(Info, E: Init, Value&: FieldVal, FD: Field))) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }
  }

  EvalObj.finishedConstructingFields();

  return Success;
}
11357
/// Evaluate a constructor call producing an object of type T at This.
/// Handles trivial default constructors, elidable copy/move elision, and
/// optional zero-initialization before the constructor body runs.
bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
                                                QualType T) {
  // Note that E's type is not necessarily the type of our class here; we might
  // be initializing an array element instead.
  const CXXConstructorDecl *FD = E->getConstructor();
  if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;

  bool ZeroInit = E->requiresZeroInitialization();
  if (CheckTrivialDefaultConstructor(Info, Loc: E->getExprLoc(), CD: FD, IsValueInitialization: ZeroInit)) {
    // A trivial default constructor performs no initialization itself;
    // either zero-initialize or produce the default (indeterminate) value.
    if (ZeroInit)
      return ZeroInitialization(E, T);

    return handleDefaultInitValue(T, Result);
  }

  const FunctionDecl *Definition = nullptr;
  auto Body = FD->getBody(Definition);

  if (!CheckConstexprFunction(Info, CallLoc: E->getExprLoc(), Declaration: FD, Definition, Body))
    return false;

  // Avoid materializing a temporary for an elidable copy/move constructor.
  if (E->isElidable() && !ZeroInit) {
    // FIXME: This only handles the simplest case, where the source object
    //        is passed directly as the first argument to the constructor.
    //        This should also handle stepping through implicit casts and
    //        conversion sequences which involve two steps, with a
    //        conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(Arg: 0);
    assert(SrcObj->isTemporaryObject(Info.Ctx, FD->getParent()));
    assert(Info.Ctx.hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    if (const MaterializeTemporaryExpr *ME =
            dyn_cast<MaterializeTemporaryExpr>(Val: SrcObj))
      return Visit(S: ME->getSubExpr());
  }

  if (ZeroInit && !ZeroInitialization(E, T))
    return false;

  auto Args = ArrayRef(E->getArgs(), E->getNumArgs());
  return HandleConstructorCall(E, This, Args,
                               Definition: cast<CXXConstructorDecl>(Val: Definition), Info,
                               Result);
}
11402
/// Evaluate an inherited-constructor initialization: re-invoke the inherited
/// constructor, forwarding the arguments of the current (inheriting)
/// constructor call frame.
bool RecordExprEvaluator::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  if (!Info.CurrentCall) {
    // Without a call frame there are no forwarded arguments to evaluate
    // against; this only occurs when probing for potential constexpr-ness.
    assert(Info.checkingPotentialConstantExpression());
    return false;
  }

  const CXXConstructorDecl *FD = E->getConstructor();
  if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl())
    return false;

  const FunctionDecl *Definition = nullptr;
  auto Body = FD->getBody(Definition);

  if (!CheckConstexprFunction(Info, CallLoc: E->getExprLoc(), Declaration: FD, Definition, Body))
    return false;

  return HandleConstructorCall(E, This, Call: Info.CurrentCall->Arguments,
                               Definition: cast<CXXConstructorDecl>(Val: Definition), Info,
                               Result);
}
11424
/// Evaluate a std::initializer_list construction from a backing constant
/// array. The result is a two-field struct: a pointer to the first array
/// element plus either the length or an end pointer, depending on how the
/// library's initializer_list is declared.
bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
    const CXXStdInitializerListExpr *E) {
  const ConstantArrayType *ArrayType =
      Info.Ctx.getAsConstantArrayType(T: E->getSubExpr()->getType());

  LValue Array;
  if (!EvaluateLValue(E: E->getSubExpr(), Result&: Array, Info))
    return false;

  assert(ArrayType && "unexpected type for array initializer");

  // Get a pointer to the first element of the array.
  Array.addArray(Info, E, CAT: ArrayType);

  // FIXME: What if the initializer_list type has base classes, etc?
  Result = APValue(APValue::UninitStruct(), 0, 2);
  Array.moveInto(V&: Result.getStructField(i: 0));

  auto *Record = E->getType()->castAsRecordDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
                              ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");
  ++Field;
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  if (Info.Ctx.hasSameType(T1: Field->getType(), T2: Info.Ctx.getSizeType())) {
    // Length.
    Result.getStructField(i: 1) = APValue(APSInt(ArrayType->getSize()));
  } else {
    // End pointer.
    assert(Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
                                ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    // Advance the start pointer by the array size to form one-past-the-end.
    if (!HandleLValueArrayAdjustment(Info, E, LVal&: Array,
                                     EltTy: ArrayType->getElementType(),
                                     Adjustment: ArrayType->getZExtSize()))
      return false;
    Array.moveInto(V&: Result.getStructField(i: 1));
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");

  return true;
}
11473
/// Evaluate a lambda expression by initializing each field of the closure
/// object from the corresponding capture initializer.
bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
  const CXXRecordDecl *ClosureClass = E->getLambdaClass();
  if (ClosureClass->isInvalidDecl())
    return false;

  const size_t NumFields = ClosureClass->getNumFields();

  assert(NumFields == (size_t)std::distance(E->capture_init_begin(),
                                            E->capture_init_end()) &&
         "The number of lambda capture initializers should equal the number of "
         "fields within the closure type");

  Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields);
  // Iterate through all the lambda's closure object's fields and initialize
  // them.
  auto *CaptureInitIt = E->capture_init_begin();
  bool Success = true;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: ClosureClass);
  for (const auto *Field : ClosureClass->fields()) {
    assert(CaptureInitIt != E->capture_init_end());
    // Get the initializer for this field
    Expr *const CurFieldInit = *CaptureInitIt++;

    // If there is no initializer, either this is a VLA or an error has
    // occurred.
    if (!CurFieldInit || CurFieldInit->containsErrors())
      return Error(E);

    LValue Subobject = This;

    if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: Field, RL: &Layout))
      return false;

    APValue &FieldVal = Result.getStructField(i: Field->getFieldIndex());
    if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: CurFieldInit)) {
      // Record the failure but keep going if further diagnostics are wanted.
      if (!Info.keepEvaluatingAfterFailure())
        return false;
      Success = false;
    }
  }
  return Success;
}
11516
/// Evaluate the class prvalue E into Result, constructing in place at This.
/// Returns false on failure; diagnostics have already been produced.
static bool EvaluateRecord(const Expr *E, const LValue &This,
                           APValue &Result, EvalInfo &Info) {
  assert(!E->isValueDependent());
  assert(E->isPRValue() && E->getType()->isRecordType() &&
         "can't evaluate expression as a record rvalue");
  return RecordExprEvaluator(Info, This, Result).Visit(S: E);
}
11524
11525//===----------------------------------------------------------------------===//
11526// Temporary Evaluation
11527//
11528// Temporaries are represented in the AST as rvalues, but generally behave like
11529// lvalues. The full-object of which the temporary is a subobject is implicitly
11530// materialized so that a reference can bind to it.
11531//===----------------------------------------------------------------------===//
namespace {
/// Evaluator that materializes a class-type prvalue as a temporary object,
/// producing an lvalue (Result) that designates the materialized storage.
class TemporaryExprEvaluator
  : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
public:
  TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
    LValueExprEvaluatorBaseTy(Info, Result, false) {}

  /// Visit an expression which constructs the value of this temporary.
  bool VisitConstructExpr(const Expr *E) {
    // Allocate full-expression-scoped storage in the current frame, then
    // evaluate E directly into it.
    APValue &Value = Info.CurrentCall->createTemporary(
        Key: E, T: E->getType(), Scope: ScopeKind::FullExpression, LV&: Result);
    return EvaluateInPlace(Result&: Value, Info, This: Result, E);
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return LValueExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_ConstructorConversion:
      return VisitConstructExpr(E: E->getSubExpr());
    }
  }
  // Every construction form funnels into VisitConstructExpr above.
  bool VisitInitListExpr(const InitListExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCallExpr(const CallExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitLambdaExpr(const LambdaExpr *E) {
    return VisitConstructExpr(E);
  }
};
} // end anonymous namespace
11572
/// Evaluate an expression of record type as a temporary.
/// On success, Result is an lvalue designating the materialized object.
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
  assert(!E->isValueDependent());
  assert(E->isPRValue() && E->getType()->isRecordType());
  return TemporaryExprEvaluator(Info, Result).Visit(S: E);
}
11579
11580//===----------------------------------------------------------------------===//
11581// Vector Evaluation
11582//===----------------------------------------------------------------------===//
11583
namespace {
  /// Evaluator for prvalues of vector type. The result is an APValue of
  /// vector kind whose element count matches the vector type.
  class VectorExprEvaluator
  : public ExprEvaluatorBase<VectorExprEvaluator> {
    APValue &Result;
  public:

    VectorExprEvaluator(EvalInfo &info, APValue &Result)
      : ExprEvaluatorBaseTy(info), Result(Result) {}

    /// Build a vector result from an array of element values.
    bool Success(ArrayRef<APValue> V, const Expr *E) {
      assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
      // FIXME: remove this APValue copy.
      Result = APValue(V.data(), V.size());
      return true;
    }
    /// Adopt an already-computed vector APValue as the result.
    bool Success(const APValue &V, const Expr *E) {
      assert(V.isVector());
      Result = V;
      return true;
    }
    bool ZeroInitialization(const Expr *E);

    // __real__ of a vector is the vector itself.
    bool VisitUnaryReal(const UnaryOperator *E)
      { return Visit(S: E->getSubExpr()); }
    bool VisitCastExpr(const CastExpr* E);
    bool VisitInitListExpr(const InitListExpr *E);
    bool VisitUnaryImag(const UnaryOperator *E);
    bool VisitBinaryOperator(const BinaryOperator *E);
    bool VisitUnaryOperator(const UnaryOperator *E);
    bool VisitCallExpr(const CallExpr *E);
    bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
    bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);

    // FIXME: Missing: conditional operator (for GNU
    //                 conditional select), ExtVectorElementExpr
  };
} // end anonymous namespace
11621
11622static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
11623 assert(E->isPRValue() && E->getType()->isVectorType() &&
11624 "not a vector prvalue");
11625 return VectorExprEvaluator(Info, Result).Visit(S: E);
11626}
11627
11628static llvm::APInt ConvertBoolVectorToInt(const APValue &Val) {
11629 assert(Val.isVector() && "expected vector APValue");
11630 unsigned NumElts = Val.getVectorLength();
11631
11632 // Each element is one bit, so create an integer with NumElts bits.
11633 llvm::APInt Result(NumElts, 0);
11634
11635 for (unsigned I = 0; I < NumElts; ++I) {
11636 const APValue &Elt = Val.getVectorElt(I);
11637 assert(Elt.isInt() && "expected integer element in bool vector");
11638
11639 if (Elt.getInt().getBoolValue())
11640 Result.setBit(I);
11641 }
11642
11643 return Result;
11644}
11645
/// Evaluate a cast producing a vector prvalue: scalar splats, bit casts,
/// and the HLSL truncation / splat / elementwise casts.
bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const VectorType *VTy = E->getType()->castAs<VectorType>();
  unsigned NElts = VTy->getNumElements();

  const Expr *SE = E->getSubExpr();
  QualType SETy = SE->getType();

  switch (E->getCastKind()) {
  case CK_VectorSplat: {
    // Evaluate the scalar operand once, then replicate it NElts times.
    APValue Val = APValue();
    if (SETy->isIntegerType()) {
      APSInt IntResult;
      if (!EvaluateInteger(E: SE, Result&: IntResult, Info))
        return false;
      Val = APValue(std::move(IntResult));
    } else if (SETy->isRealFloatingType()) {
      APFloat FloatResult(0.0);
      if (!EvaluateFloat(E: SE, Result&: FloatResult, Info))
        return false;
      Val = APValue(std::move(FloatResult));
    } else {
      return Error(E);
    }

    // Splat and create vector APValue.
    SmallVector<APValue, 4> Elts(NElts, Val);
    return Success(V: Elts, E);
  }
  case CK_BitCast: {
    APValue SVal;
    if (!Evaluate(Result&: SVal, Info, E: SE))
      return false;

    if (!SVal.isInt() && !SVal.isFloat() && !SVal.isVector()) {
      // Give up if the input isn't an int, float, or vector.  For example, we
      // reject "(v4i16)(intptr_t)&a".
      Info.FFDiag(E, DiagId: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
          << Info.Ctx.getLangOpts().CPlusPlus;
      return false;
    }

    // Reinterpret the operand's object representation as the vector type.
    if (!handleRValueToRValueBitCast(Info, DestValue&: Result, SourceRValue: SVal, BCE: E))
      return false;

    return true;
  }
  case CK_HLSLVectorTruncation: {
    // Keep only the first NElts elements of the source vector.
    APValue Val;
    SmallVector<APValue, 4> Elements;
    if (!EvaluateVector(E: SE, Result&: Val, Info))
      return Error(E);
    for (unsigned I = 0; I < NElts; I++)
      Elements.push_back(Elt: Val.getVectorElt(I));
    return Success(V: Elements, E);
  }
  case CK_HLSLMatrixTruncation: {
    // Matrix truncation occurs in row-major order.
    APValue Val;
    if (!EvaluateMatrix(E: SE, Result&: Val, Info))
      return Error(E);
    SmallVector<APValue, 16> Elements;
    for (unsigned Row = 0;
         Row < Val.getMatrixNumRows() && Elements.size() < NElts; Row++)
      for (unsigned Col = 0;
           Col < Val.getMatrixNumColumns() && Elements.size() < NElts; Col++)
        Elements.push_back(Elt: Val.getMatrixElt(Row, Col));
    return Success(V: Elements, E);
  }
  case CK_HLSLAggregateSplatCast: {
    APValue Val;
    QualType ValTy;

    if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy))
      return false;

    // cast our Val once.
    APValue Result;
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!handleScalarCast(Info, FPO, E, SourceTy: ValTy, DestTy: VTy->getElementType(), Original: Val,
                          Result))
      return false;

    // Replicate the converted scalar across all destination elements.
    SmallVector<APValue, 4> SplatEls(NElts, Result);
    return Success(V: SplatEls, E);
  }
  case CK_HLSLElementwiseCast: {
    // Flatten the source into scalars, then convert each to the vector's
    // element type.
    SmallVector<APValue> SrcVals;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals, SrcTypes))
      return false;

    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    SmallVector<QualType, 4> DestTypes(NElts, VTy->getElementType());
    SmallVector<APValue, 4> ResultEls(NElts);
    if (!handleElementwiseCast(Info, E, FPO, Elements&: SrcVals, SrcTypes, DestTypes,
                               Results&: ResultEls))
      return false;
    return Success(V: ResultEls, E);
  }
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);
  }
}
11751
/// Evaluate a vector initializer list. Nested vector initializers are
/// flattened; missing trailing elements are zero-filled for GCC
/// compatibility.
bool
VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
  const VectorType *VT = E->getType()->castAs<VectorType>();
  unsigned NumInits = E->getNumInits();
  unsigned NumElements = VT->getNumElements();

  QualType EltTy = VT->getElementType();
  SmallVector<APValue, 4> Elements;

  // MFloat8 type doesn't have constants and thus constant folding
  // is impossible.
  if (EltTy->isMFloat8Type())
    return false;

  // The number of initializers can be less than the number of
  // vector elements. For OpenCL, this can be due to nested vector
  // initialization. For GCC compatibility, missing trailing elements
  // should be initialized with zeroes.
  unsigned CountInits = 0, CountElts = 0;
  while (CountElts < NumElements) {
    // Handle nested vector initialization.
    if (CountInits < NumInits
        && E->getInit(Init: CountInits)->getType()->isVectorType()) {
      APValue v;
      if (!EvaluateVector(E: E->getInit(Init: CountInits), Result&: v, Info))
        return Error(E);
      // Splice the nested vector's elements into the flat element list.
      unsigned vlen = v.getVectorLength();
      for (unsigned j = 0; j < vlen; j++)
        Elements.push_back(Elt: v.getVectorElt(I: j));
      CountElts += vlen;
    } else if (EltTy->isIntegerType()) {
      llvm::APSInt sInt(32);
      if (CountInits < NumInits) {
        if (!EvaluateInteger(E: E->getInit(Init: CountInits), Result&: sInt, Info))
          return false;
      } else // trailing integer zero.
        sInt = Info.Ctx.MakeIntValue(Value: 0, Type: EltTy);
      Elements.push_back(Elt: APValue(sInt));
      CountElts++;
    } else {
      llvm::APFloat f(0.0);
      if (CountInits < NumInits) {
        if (!EvaluateFloat(E: E->getInit(Init: CountInits), Result&: f, Info))
          return false;
      } else // trailing float zero.
        f = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: EltTy));
      Elements.push_back(Elt: APValue(f));
      CountElts++;
    }
    CountInits++;
  }
  return Success(V: Elements, E);
}
11805
11806bool
11807VectorExprEvaluator::ZeroInitialization(const Expr *E) {
11808 const auto *VT = E->getType()->castAs<VectorType>();
11809 QualType EltTy = VT->getElementType();
11810 APValue ZeroElement;
11811 if (EltTy->isIntegerType())
11812 ZeroElement = APValue(Info.Ctx.MakeIntValue(Value: 0, Type: EltTy));
11813 else
11814 ZeroElement =
11815 APValue(APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: EltTy)));
11816
11817 SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
11818 return Success(V: Elements, E);
11819}
11820
11821bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
11822 VisitIgnoredValue(E: E->getSubExpr());
11823 return ZeroInitialization(E);
11824}
11825
/// Evaluate an element-wise binary operator on vector operands. The comma
/// operator is delegated to the base evaluator; every other opcode is applied
/// lane-by-lane via handleVectorVectorBinOp.
bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  BinaryOperatorKind Op = E->getOpcode();
  assert(Op != BO_PtrMemD && Op != BO_PtrMemI && Op != BO_Cmp &&
         "Operation not supported on vector types");

  if (Op == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  assert(LHS->getType()->isVectorType() && RHS->getType()->isVectorType() &&
         "Must both be vector types");
  // Checking JUST the types are the same would be fine, except shifts don't
  // need to have their types be the same (since you always shift by an int).
  assert(LHS->getType()->castAs<VectorType>()->getNumElements() ==
             E->getType()->castAs<VectorType>()->getNumElements() &&
         RHS->getType()->castAs<VectorType>()->getNumElements() ==
             E->getType()->castAs<VectorType>()->getNumElements() &&
         "All operands must be the same size.");

  APValue LHSValue;
  APValue RHSValue;
  // If the LHS fails, still evaluate the RHS when the caller wants failure
  // notes (Info.noteFailure()), so that additional diagnostics are produced;
  // otherwise bail out immediately.
  bool LHSOK = Evaluate(Result&: LHSValue, Info, E: LHS);
  if (!LHSOK && !Info.noteFailure())
    return false;
  if (!Evaluate(Result&: RHSValue, Info, E: RHS) || !LHSOK)
    return false;

  // The element-wise result is computed in place into LHSValue.
  if (!handleVectorVectorBinOp(Info, E, Opcode: Op, LHSValue, RHSValue))
    return false;

  return Success(V: LHSValue, E);
}
11860
/// Apply a unary operator to a single vector element value.
///
/// \param ResultTy the result *element* type; only used by UO_LNot on a
///        floating-point element, where the result is an integer of that type.
/// \param Elt the operand element, either an APValue::Int or APValue::Float.
/// \returns the resulting element, or std::nullopt for operators this helper
///          does not implement (the caller then fails the fold).
static std::optional<APValue> handleVectorUnaryOperator(ASTContext &Ctx,
                                                        QualType ResultTy,
                                                        UnaryOperatorKind Op,
                                                        APValue Elt) {
  switch (Op) {
  case UO_Plus:
    // Nothing to do here.
    return Elt;
  case UO_Minus:
    // Negate in place: two's-complement for ints, sign flip for floats.
    if (Elt.getKind() == APValue::Int) {
      Elt.getInt().negate();
    } else {
      assert(Elt.getKind() == APValue::Float &&
             "Vector can only be int or float type");
      Elt.getFloat().changeSign();
    }
    return Elt;
  case UO_Not:
    // This is only valid for integral types anyway, so we don't have to handle
    // float here.
    assert(Elt.getKind() == APValue::Int &&
           "Vector operator ~ can only be int");
    Elt.getInt().flipAllBits();
    return Elt;
  case UO_LNot: {
    if (Elt.getKind() == APValue::Int) {
      // First reduce to 0/1, then negate below to get the vector encoding.
      Elt.getInt() = !Elt.getInt();
      // operator ! on vectors returns -1 for 'truth', so negate it.
      Elt.getInt().negate();
      return Elt;
    }
    assert(Elt.getKind() == APValue::Float &&
           "Vector can only be int or float type");
    // Float types result in an int of the same size, but -1 for true, or 0 for
    // false.
    APSInt EltResult{Ctx.getIntWidth(T: ResultTy),
                     ResultTy->isUnsignedIntegerType()};
    if (Elt.getFloat().isZero())
      EltResult.setAllBits();
    else
      EltResult.clearAllBits();

    return APValue{EltResult};
  }
  default:
    // FIXME: Implement the rest of the unary operators.
    return std::nullopt;
  }
}
11910
11911bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
11912 Expr *SubExpr = E->getSubExpr();
11913 const auto *VD = SubExpr->getType()->castAs<VectorType>();
11914 // This result element type differs in the case of negating a floating point
11915 // vector, since the result type is the a vector of the equivilant sized
11916 // integer.
11917 const QualType ResultEltTy = VD->getElementType();
11918 UnaryOperatorKind Op = E->getOpcode();
11919
11920 APValue SubExprValue;
11921 if (!Evaluate(Result&: SubExprValue, Info, E: SubExpr))
11922 return false;
11923
11924 // FIXME: This vector evaluator someday needs to be changed to be LValue
11925 // aware/keep LValue information around, rather than dealing with just vector
11926 // types directly. Until then, we cannot handle cases where the operand to
11927 // these unary operators is an LValue. The only case I've been able to see
11928 // cause this is operator++ assigning to a member expression (only valid in
11929 // altivec compilations) in C mode, so this shouldn't limit us too much.
11930 if (SubExprValue.isLValue())
11931 return false;
11932
11933 assert(SubExprValue.getVectorLength() == VD->getNumElements() &&
11934 "Vector length doesn't match type?");
11935
11936 SmallVector<APValue, 4> ResultElements;
11937 for (unsigned EltNum = 0; EltNum < VD->getNumElements(); ++EltNum) {
11938 std::optional<APValue> Elt = handleVectorUnaryOperator(
11939 Ctx&: Info.Ctx, ResultTy: ResultEltTy, Op, Elt: SubExprValue.getVectorElt(I: EltNum));
11940 if (!Elt)
11941 return false;
11942 ResultElements.push_back(Elt: *Elt);
11943 }
11944 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
11945}
11946
11947static bool handleVectorElementCast(EvalInfo &Info, const FPOptions FPO,
11948 const Expr *E, QualType SourceTy,
11949 QualType DestTy, APValue const &Original,
11950 APValue &Result) {
11951 if (SourceTy->isIntegerType()) {
11952 if (DestTy->isRealFloatingType()) {
11953 Result = APValue(APFloat(0.0));
11954 return HandleIntToFloatCast(Info, E, FPO, SrcType: SourceTy, Value: Original.getInt(),
11955 DestType: DestTy, Result&: Result.getFloat());
11956 }
11957 if (DestTy->isIntegerType()) {
11958 Result = APValue(
11959 HandleIntToIntCast(Info, E, DestType: DestTy, SrcType: SourceTy, Value: Original.getInt()));
11960 return true;
11961 }
11962 } else if (SourceTy->isRealFloatingType()) {
11963 if (DestTy->isRealFloatingType()) {
11964 Result = Original;
11965 return HandleFloatToFloatCast(Info, E, SrcType: SourceTy, DestType: DestTy,
11966 Result&: Result.getFloat());
11967 }
11968 if (DestTy->isIntegerType()) {
11969 Result = APValue(APSInt());
11970 return HandleFloatToIntCast(Info, E, SrcType: SourceTy, Value: Original.getFloat(),
11971 DestType: DestTy, Result&: Result.getInt());
11972 }
11973 }
11974
11975 Info.FFDiag(E, DiagId: diag::err_convertvector_constexpr_unsupported_vector_cast)
11976 << SourceTy << DestTy;
11977 return false;
11978}
11979
11980static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
11981 llvm::function_ref<APInt(const APSInt &)> PackFn) {
11982 APValue LHS, RHS;
11983 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: LHS) ||
11984 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: RHS))
11985 return false;
11986
11987 unsigned LHSVecLen = LHS.getVectorLength();
11988 unsigned RHSVecLen = RHS.getVectorLength();
11989
11990 assert(LHSVecLen != 0 && LHSVecLen == RHSVecLen &&
11991 "pack builtin LHSVecLen must equal to RHSVecLen");
11992
11993 const VectorType *VT0 = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
11994 const unsigned SrcBits = Info.Ctx.getIntWidth(T: VT0->getElementType());
11995
11996 const VectorType *DstVT = E->getType()->castAs<VectorType>();
11997 QualType DstElemTy = DstVT->getElementType();
11998 const bool DstIsUnsigned = DstElemTy->isUnsignedIntegerType();
11999
12000 const unsigned SrcPerLane = 128 / SrcBits;
12001 const unsigned Lanes = LHSVecLen * SrcBits / 128;
12002
12003 SmallVector<APValue, 64> Out;
12004 Out.reserve(N: LHSVecLen + RHSVecLen);
12005
12006 for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
12007 unsigned base = Lane * SrcPerLane;
12008 for (unsigned I = 0; I != SrcPerLane; ++I)
12009 Out.emplace_back(Args: APValue(
12010 APSInt(PackFn(LHS.getVectorElt(I: base + I).getInt()), DstIsUnsigned)));
12011 for (unsigned I = 0; I != SrcPerLane; ++I)
12012 Out.emplace_back(Args: APValue(
12013 APSInt(PackFn(RHS.getVectorElt(I: base + I).getInt()), DstIsUnsigned)));
12014 }
12015
12016 Result = APValue(Out.data(), Out.size());
12017 return true;
12018}
12019
/// Generic driver for evaluating x86 shuffle/permute builtins.
///
/// The call may take one source operand (plus a mask) or two source operands
/// (plus a mask); the mask itself may be either an integer immediate or a
/// per-element mask vector. For each destination element, GetSourceIndex maps
/// (destination index, mask bits) to a pair (source vector index, source
/// element index); a negative source element index means "zero this element".
static bool evalShuffleGeneric(
    EvalInfo &Info, const CallExpr *Call, APValue &Out,
    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
        GetSourceIndex) {

  const auto *VT = Call->getType()->getAs<VectorType>();
  if (!VT)
    return false;

  unsigned ShuffleMask = 0;
  APValue A, MaskVector, B;
  bool IsVectorMask = false;
  bool IsSingleOperand = (Call->getNumArgs() == 2);

  if (IsSingleOperand) {
    // One source operand: arg 1 is the mask (vector or immediate); the single
    // source is used for both A and B so GetSourceIndex may pick either.
    QualType MaskType = Call->getArg(Arg: 1)->getType();
    if (MaskType->isVectorType()) {
      IsVectorMask = true;
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: MaskVector))
        return false;
      B = A;
    } else if (MaskType->isIntegerType()) {
      APSInt MaskImm;
      if (!EvaluateInteger(E: Call->getArg(Arg: 1), Result&: MaskImm, Info))
        return false;
      ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A))
        return false;
      B = A;
    } else {
      return false;
    }
  } else {
    // Two source operands: arg 2 distinguishes vector mask (args are A, mask,
    // B) from immediate mask (args are A, B, imm).
    QualType Arg2Type = Call->getArg(Arg: 2)->getType();
    if (Arg2Type->isVectorType()) {
      IsVectorMask = true;
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: MaskVector) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 2), Result&: B))
        return false;
    } else if (Arg2Type->isIntegerType()) {
      APSInt MaskImm;
      if (!EvaluateInteger(E: Call->getArg(Arg: 2), Result&: MaskImm, Info))
        return false;
      ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: B))
        return false;
    } else {
      return false;
    }
  }

  unsigned NumElts = VT->getNumElements();
  SmallVector<APValue, 64> ResultElements;
  ResultElements.reserve(N: NumElts);

  for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
    if (IsVectorMask) {
      // Per-element mask: re-read the mask bits for each destination index.
      ShuffleMask = static_cast<unsigned>(
          MaskVector.getVectorElt(I: DstIdx).getInt().getZExtValue());
    }
    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);

    if (SrcIdx < 0) {
      // Zero out this element
      QualType ElemTy = VT->getElementType();
      if (ElemTy->isRealFloatingType()) {
        ResultElements.push_back(
            Elt: APValue(APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: ElemTy))));
      } else if (ElemTy->isIntegerType()) {
        APValue Zero(Info.Ctx.MakeIntValue(Value: 0, Type: ElemTy));
        ResultElements.push_back(Elt: APValue(Zero));
      } else {
        // Other types of fallback logic
        ResultElements.push_back(Elt: APValue());
      }
    } else {
      const APValue &Src = (SrcVecIdx == 0) ? A : B;
      ResultElements.push_back(Elt: Src.getVectorElt(I: SrcIdx));
    }
  }

  Out = APValue(ResultElements.data(), ResultElements.size());
  return true;
}
12107static bool ConvertDoubleToFloatStrict(EvalInfo &Info, const Expr *E,
12108 APFloat OrigVal, APValue &Result) {
12109
12110 if (OrigVal.isInfinity()) {
12111 Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << 0;
12112 return false;
12113 }
12114 if (OrigVal.isNaN()) {
12115 Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << 1;
12116 return false;
12117 }
12118
12119 APFloat Val = OrigVal;
12120 bool LosesInfo = false;
12121 APFloat::opStatus Status = Val.convert(
12122 ToSemantics: APFloat::IEEEsingle(), RM: APFloat::rmNearestTiesToEven, losesInfo: &LosesInfo);
12123
12124 if (LosesInfo || Val.isDenormal()) {
12125 Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict);
12126 return false;
12127 }
12128
12129 if (Status != APFloat::opOK) {
12130 Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
12131 return false;
12132 }
12133
12134 Result = APValue(Val);
12135 return true;
12136}
/// Evaluate an x86 vector shift builtin whose shift count comes from the low
/// quadword of a second vector operand (e.g. psllq/psrld-style shifts).
///
/// The count is assembled from the count vector's low 64 bits; every element
/// of the source vector is then shifted by that single count via ShiftOp.
/// Counts >= the element width instead produce OverflowOp's result (zero for
/// logical shifts, sign-fill for arithmetic shifts, per the caller).
static bool evalShiftWithCount(
    EvalInfo &Info, const CallExpr *Call, APValue &Out,
    llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
    llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {

  APValue Source, Count;
  if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: Source) ||
      !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: Count))
    return false;

  assert(Call->getNumArgs() == 2);

  QualType SourceTy = Call->getArg(Arg: 0)->getType();
  assert(SourceTy->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());

  QualType DestEltTy = SourceTy->castAs<VectorType>()->getElementType();
  unsigned DestEltWidth = Source.getVectorElt(I: 0).getInt().getBitWidth();
  unsigned DestLen = Source.getVectorLength();
  bool IsDestUnsigned = DestEltTy->isUnsignedIntegerType();
  unsigned CountEltWidth = Count.getVectorElt(I: 0).getInt().getBitWidth();
  unsigned NumBitsInQWord = 64;
  // How many count-vector elements make up the low 64 bits.
  unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
  SmallVector<APValue, 64> Result;
  Result.reserve(N: DestLen);

  // Assemble the low quadword of the count vector into a single 64-bit count.
  uint64_t CountLQWord = 0;
  for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
    uint64_t Elt = Count.getVectorElt(I: EltIdx).getInt().getZExtValue();
    CountLQWord |= (Elt << (EltIdx * CountEltWidth));
  }

  // Shift every source element by the same count, saturating to OverflowOp's
  // value when the count is out of range for the element width.
  for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
    APInt Elt = Source.getVectorElt(I: EltIdx).getInt();
    if (CountLQWord < DestEltWidth) {
      Result.push_back(
          Elt: APValue(APSInt(ShiftOp(Elt, CountLQWord), IsDestUnsigned)));
    } else {
      Result.push_back(
          Elt: APValue(APSInt(OverflowOp(Elt, DestEltWidth), IsDestUnsigned)));
    }
  }
  Out = APValue(Result.data(), Result.size());
  return true;
}
12182
12183std::optional<APFloat> EvalScalarMinMaxFp(const APFloat &A, const APFloat &B,
12184 std::optional<APSInt> RoundingMode,
12185 bool IsMin) {
12186 APSInt DefaultMode(APInt(32, 4), /*isUnsigned=*/true);
12187 if (RoundingMode.value_or(u&: DefaultMode) != 4)
12188 return std::nullopt;
12189 if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
12190 B.isInfinity() || B.isDenormal())
12191 return std::nullopt;
12192 if (A.isZero() && B.isZero())
12193 return B;
12194 return IsMin ? llvm::minimum(A, B) : llvm::maximum(A, B);
12195}
12196
12197bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
12198 if (!IsConstantEvaluatedBuiltinCall(E))
12199 return ExprEvaluatorBaseTy::VisitCallExpr(E);
12200
12201 auto EvaluateBinOpExpr =
12202 [&](llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
12203 APValue SourceLHS, SourceRHS;
12204 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12205 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12206 return false;
12207
12208 auto *DestTy = E->getType()->castAs<VectorType>();
12209 QualType DestEltTy = DestTy->getElementType();
12210 bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
12211 unsigned SourceLen = SourceLHS.getVectorLength();
12212 SmallVector<APValue, 4> ResultElements;
12213 ResultElements.reserve(N: SourceLen);
12214
12215 if (SourceRHS.isInt()) {
12216 const APSInt &RHS = SourceRHS.getInt();
12217 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12218 const APSInt &LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12219 ResultElements.push_back(
12220 Elt: APValue(APSInt(Fn(LHS, RHS), DestUnsigned)));
12221 }
12222 } else {
12223 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12224 const APSInt &LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12225 const APSInt &RHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12226 ResultElements.push_back(
12227 Elt: APValue(APSInt(Fn(LHS, RHS), DestUnsigned)));
12228 }
12229 }
12230 return Success(V: APValue(ResultElements.data(), SourceLen), E);
12231 };
12232
12233 auto EvaluateFpBinOpExpr =
12234 [&](llvm::function_ref<std::optional<APFloat>(
12235 const APFloat &, const APFloat &, std::optional<APSInt>)>
12236 Fn,
12237 bool IsScalar = false) {
12238 assert(E->getNumArgs() == 2 || E->getNumArgs() == 3);
12239 APValue A, B;
12240 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
12241 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B))
12242 return false;
12243
12244 assert(A.isVector() && B.isVector());
12245 assert(A.getVectorLength() == B.getVectorLength());
12246
12247 std::optional<APSInt> RoundingMode;
12248 if (E->getNumArgs() == 3) {
12249 APSInt Imm;
12250 if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
12251 return false;
12252 RoundingMode = Imm;
12253 }
12254
12255 unsigned NumElems = A.getVectorLength();
12256 SmallVector<APValue, 4> ResultElements;
12257 ResultElements.reserve(N: NumElems);
12258
12259 for (unsigned EltNum = 0; EltNum < NumElems; ++EltNum) {
12260 if (IsScalar && EltNum > 0) {
12261 ResultElements.push_back(Elt: A.getVectorElt(I: EltNum));
12262 continue;
12263 }
12264 const APFloat &EltA = A.getVectorElt(I: EltNum).getFloat();
12265 const APFloat &EltB = B.getVectorElt(I: EltNum).getFloat();
12266 std::optional<APFloat> Result = Fn(EltA, EltB, RoundingMode);
12267 if (!Result)
12268 return false;
12269 ResultElements.push_back(Elt: APValue(*Result));
12270 }
12271 return Success(V: APValue(ResultElements.data(), NumElems), E);
12272 };
12273
12274 auto EvaluateScalarFpRoundMaskBinOp =
12275 [&](llvm::function_ref<std::optional<APFloat>(
12276 const APFloat &, const APFloat &, std::optional<APSInt>)>
12277 Fn) {
12278 assert(E->getNumArgs() == 5);
12279 APValue VecA, VecB, VecSrc;
12280 APSInt MaskVal, Rounding;
12281
12282 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) ||
12283 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB) ||
12284 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: VecSrc) ||
12285 !EvaluateInteger(E: E->getArg(Arg: 3), Result&: MaskVal, Info) ||
12286 !EvaluateInteger(E: E->getArg(Arg: 4), Result&: Rounding, Info))
12287 return false;
12288
12289 unsigned NumElems = VecA.getVectorLength();
12290 SmallVector<APValue, 8> ResultElements;
12291 ResultElements.reserve(N: NumElems);
12292
12293 if (MaskVal.getZExtValue() & 1) {
12294 const APFloat &EltA = VecA.getVectorElt(I: 0).getFloat();
12295 const APFloat &EltB = VecB.getVectorElt(I: 0).getFloat();
12296 std::optional<APFloat> Result = Fn(EltA, EltB, Rounding);
12297 if (!Result)
12298 return false;
12299 ResultElements.push_back(Elt: APValue(*Result));
12300 } else {
12301 ResultElements.push_back(Elt: VecSrc.getVectorElt(I: 0));
12302 }
12303
12304 for (unsigned I = 1; I < NumElems; ++I)
12305 ResultElements.push_back(Elt: VecA.getVectorElt(I));
12306
12307 return Success(V: APValue(ResultElements.data(), NumElems), E);
12308 };
12309
12310 auto EvalSelectScalar = [&](unsigned Len) -> bool {
12311 APSInt Mask;
12312 APValue AVal, WVal;
12313 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Mask, Info) ||
12314 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: AVal) ||
12315 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: WVal))
12316 return false;
12317
12318 bool TakeA0 = (Mask.getZExtValue() & 1u) != 0;
12319 SmallVector<APValue, 4> Res;
12320 Res.reserve(N: Len);
12321 Res.push_back(Elt: TakeA0 ? AVal.getVectorElt(I: 0) : WVal.getVectorElt(I: 0));
12322 for (unsigned I = 1; I < Len; ++I)
12323 Res.push_back(Elt: WVal.getVectorElt(I));
12324 APValue V(Res.data(), Res.size());
12325 return Success(V, E);
12326 };
12327
12328 switch (E->getBuiltinCallee()) {
12329 default:
12330 return false;
12331 case Builtin::BI__builtin_elementwise_popcount:
12332 case Builtin::BI__builtin_elementwise_bitreverse: {
12333 APValue Source;
12334 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
12335 return false;
12336
12337 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12338 unsigned SourceLen = Source.getVectorLength();
12339 SmallVector<APValue, 4> ResultElements;
12340 ResultElements.reserve(N: SourceLen);
12341
12342 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12343 APSInt Elt = Source.getVectorElt(I: EltNum).getInt();
12344 switch (E->getBuiltinCallee()) {
12345 case Builtin::BI__builtin_elementwise_popcount:
12346 ResultElements.push_back(Elt: APValue(
12347 APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), Elt.popcount()),
12348 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12349 break;
12350 case Builtin::BI__builtin_elementwise_bitreverse:
12351 ResultElements.push_back(
12352 Elt: APValue(APSInt(Elt.reverseBits(),
12353 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12354 break;
12355 }
12356 }
12357
12358 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12359 }
12360 case Builtin::BI__builtin_elementwise_abs: {
12361 APValue Source;
12362 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
12363 return false;
12364
12365 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12366 unsigned SourceLen = Source.getVectorLength();
12367 SmallVector<APValue, 4> ResultElements;
12368 ResultElements.reserve(N: SourceLen);
12369
12370 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12371 APValue CurrentEle = Source.getVectorElt(I: EltNum);
12372 APValue Val = DestEltTy->isFloatingType()
12373 ? APValue(llvm::abs(X: CurrentEle.getFloat()))
12374 : APValue(APSInt(
12375 CurrentEle.getInt().abs(),
12376 DestEltTy->isUnsignedIntegerOrEnumerationType()));
12377 ResultElements.push_back(Elt: Val);
12378 }
12379
12380 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12381 }
12382
12383 case Builtin::BI__builtin_elementwise_add_sat:
12384 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12385 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
12386 });
12387
12388 case Builtin::BI__builtin_elementwise_sub_sat:
12389 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12390 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
12391 });
12392
12393 case X86::BI__builtin_ia32_extract128i256:
12394 case X86::BI__builtin_ia32_vextractf128_pd256:
12395 case X86::BI__builtin_ia32_vextractf128_ps256:
12396 case X86::BI__builtin_ia32_vextractf128_si256: {
12397 APValue SourceVec, SourceImm;
12398 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceVec) ||
12399 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceImm))
12400 return false;
12401
12402 if (!SourceVec.isVector())
12403 return false;
12404
12405 const auto *RetVT = E->getType()->castAs<VectorType>();
12406 unsigned RetLen = RetVT->getNumElements();
12407 unsigned Idx = SourceImm.getInt().getZExtValue() & 1;
12408
12409 SmallVector<APValue, 32> ResultElements;
12410 ResultElements.reserve(N: RetLen);
12411
12412 for (unsigned I = 0; I < RetLen; I++)
12413 ResultElements.push_back(Elt: SourceVec.getVectorElt(I: Idx * RetLen + I));
12414
12415 return Success(V: APValue(ResultElements.data(), RetLen), E);
12416 }
12417
12418 case clang::X86::BI__builtin_ia32_cvtmask2b128:
12419 case clang::X86::BI__builtin_ia32_cvtmask2b256:
12420 case clang::X86::BI__builtin_ia32_cvtmask2b512:
12421 case clang::X86::BI__builtin_ia32_cvtmask2w128:
12422 case clang::X86::BI__builtin_ia32_cvtmask2w256:
12423 case clang::X86::BI__builtin_ia32_cvtmask2w512:
12424 case clang::X86::BI__builtin_ia32_cvtmask2d128:
12425 case clang::X86::BI__builtin_ia32_cvtmask2d256:
12426 case clang::X86::BI__builtin_ia32_cvtmask2d512:
12427 case clang::X86::BI__builtin_ia32_cvtmask2q128:
12428 case clang::X86::BI__builtin_ia32_cvtmask2q256:
12429 case clang::X86::BI__builtin_ia32_cvtmask2q512: {
12430 assert(E->getNumArgs() == 1);
12431 APSInt Mask;
12432 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Mask, Info))
12433 return false;
12434
12435 QualType VecTy = E->getType();
12436 const VectorType *VT = VecTy->castAs<VectorType>();
12437 unsigned VectorLen = VT->getNumElements();
12438 QualType ElemTy = VT->getElementType();
12439 unsigned ElemWidth = Info.Ctx.getTypeSize(T: ElemTy);
12440
12441 SmallVector<APValue, 16> Elems;
12442 for (unsigned I = 0; I != VectorLen; ++I) {
12443 bool BitSet = Mask[I];
12444 APSInt ElemVal(ElemWidth, /*isUnsigned=*/false);
12445 if (BitSet) {
12446 ElemVal.setAllBits();
12447 }
12448 Elems.push_back(Elt: APValue(ElemVal));
12449 }
12450 return Success(V: APValue(Elems.data(), VectorLen), E);
12451 }
12452
12453 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12454 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12455 case X86::BI__builtin_ia32_extracti32x4_mask:
12456 case X86::BI__builtin_ia32_extractf32x4_mask:
12457 case X86::BI__builtin_ia32_extracti32x8_mask:
12458 case X86::BI__builtin_ia32_extractf32x8_mask:
12459 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12460 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12461 case X86::BI__builtin_ia32_extracti64x2_512_mask:
12462 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12463 case X86::BI__builtin_ia32_extracti64x4_mask:
12464 case X86::BI__builtin_ia32_extractf64x4_mask: {
12465 APValue SourceVec, MergeVec;
12466 APSInt Imm, MaskImm;
12467
12468 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceVec) ||
12469 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Imm, Info) ||
12470 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: MergeVec) ||
12471 !EvaluateInteger(E: E->getArg(Arg: 3), Result&: MaskImm, Info))
12472 return false;
12473
12474 const auto *RetVT = E->getType()->castAs<VectorType>();
12475 unsigned RetLen = RetVT->getNumElements();
12476
12477 if (!SourceVec.isVector() || !MergeVec.isVector())
12478 return false;
12479 unsigned SrcLen = SourceVec.getVectorLength();
12480 unsigned Lanes = SrcLen / RetLen;
12481 unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes);
12482 unsigned Base = Lane * RetLen;
12483
12484 SmallVector<APValue, 32> ResultElements;
12485 ResultElements.reserve(N: RetLen);
12486 for (unsigned I = 0; I < RetLen; ++I) {
12487 if (MaskImm[I])
12488 ResultElements.push_back(Elt: SourceVec.getVectorElt(I: Base + I));
12489 else
12490 ResultElements.push_back(Elt: MergeVec.getVectorElt(I));
12491 }
12492 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12493 }
12494
12495 case clang::X86::BI__builtin_ia32_pavgb128:
12496 case clang::X86::BI__builtin_ia32_pavgw128:
12497 case clang::X86::BI__builtin_ia32_pavgb256:
12498 case clang::X86::BI__builtin_ia32_pavgw256:
12499 case clang::X86::BI__builtin_ia32_pavgb512:
12500 case clang::X86::BI__builtin_ia32_pavgw512:
12501 return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
12502
12503 case clang::X86::BI__builtin_ia32_pmulhrsw128:
12504 case clang::X86::BI__builtin_ia32_pmulhrsw256:
12505 case clang::X86::BI__builtin_ia32_pmulhrsw512:
12506 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12507 return (llvm::APIntOps::mulsExtended(C1: LHS, C2: RHS).ashr(ShiftAmt: 14) + 1)
12508 .extractBits(numBits: 16, bitPosition: 1);
12509 });
12510
12511 case clang::X86::BI__builtin_ia32_pmaddubsw128:
12512 case clang::X86::BI__builtin_ia32_pmaddubsw256:
12513 case clang::X86::BI__builtin_ia32_pmaddubsw512:
12514 case clang::X86::BI__builtin_ia32_pmaddwd128:
12515 case clang::X86::BI__builtin_ia32_pmaddwd256:
12516 case clang::X86::BI__builtin_ia32_pmaddwd512: {
12517 APValue SourceLHS, SourceRHS;
12518 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12519 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12520 return false;
12521
12522 auto *DestTy = E->getType()->castAs<VectorType>();
12523 QualType DestEltTy = DestTy->getElementType();
12524 unsigned SourceLen = SourceLHS.getVectorLength();
12525 bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
12526 SmallVector<APValue, 4> ResultElements;
12527 ResultElements.reserve(N: SourceLen / 2);
12528
12529 for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) {
12530 const APSInt &LoLHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12531 const APSInt &HiLHS = SourceLHS.getVectorElt(I: EltNum + 1).getInt();
12532 const APSInt &LoRHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12533 const APSInt &HiRHS = SourceRHS.getVectorElt(I: EltNum + 1).getInt();
12534 unsigned BitWidth = 2 * LoLHS.getBitWidth();
12535
12536 switch (E->getBuiltinCallee()) {
12537 case clang::X86::BI__builtin_ia32_pmaddubsw128:
12538 case clang::X86::BI__builtin_ia32_pmaddubsw256:
12539 case clang::X86::BI__builtin_ia32_pmaddubsw512:
12540 ResultElements.push_back(Elt: APValue(
12541 APSInt((LoLHS.zext(width: BitWidth) * LoRHS.sext(width: BitWidth))
12542 .sadd_sat(RHS: (HiLHS.zext(width: BitWidth) * HiRHS.sext(width: BitWidth))),
12543 DestUnsigned)));
12544 break;
12545 case clang::X86::BI__builtin_ia32_pmaddwd128:
12546 case clang::X86::BI__builtin_ia32_pmaddwd256:
12547 case clang::X86::BI__builtin_ia32_pmaddwd512:
12548 ResultElements.push_back(
12549 Elt: APValue(APSInt((LoLHS.sext(width: BitWidth) * LoRHS.sext(width: BitWidth)) +
12550 (HiLHS.sext(width: BitWidth) * HiRHS.sext(width: BitWidth)),
12551 DestUnsigned)));
12552 break;
12553 }
12554 }
12555
12556 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12557 }
12558
12559 case clang::X86::BI__builtin_ia32_pmulhuw128:
12560 case clang::X86::BI__builtin_ia32_pmulhuw256:
12561 case clang::X86::BI__builtin_ia32_pmulhuw512:
12562 return EvaluateBinOpExpr(llvm::APIntOps::mulhu);
12563
12564 case clang::X86::BI__builtin_ia32_pmulhw128:
12565 case clang::X86::BI__builtin_ia32_pmulhw256:
12566 case clang::X86::BI__builtin_ia32_pmulhw512:
12567 return EvaluateBinOpExpr(llvm::APIntOps::mulhs);
12568
12569 case clang::X86::BI__builtin_ia32_psllv2di:
12570 case clang::X86::BI__builtin_ia32_psllv4di:
12571 case clang::X86::BI__builtin_ia32_psllv4si:
12572 case clang::X86::BI__builtin_ia32_psllv8di:
12573 case clang::X86::BI__builtin_ia32_psllv8hi:
12574 case clang::X86::BI__builtin_ia32_psllv8si:
12575 case clang::X86::BI__builtin_ia32_psllv16hi:
12576 case clang::X86::BI__builtin_ia32_psllv16si:
12577 case clang::X86::BI__builtin_ia32_psllv32hi:
12578 case clang::X86::BI__builtin_ia32_psllwi128:
12579 case clang::X86::BI__builtin_ia32_pslldi128:
12580 case clang::X86::BI__builtin_ia32_psllqi128:
12581 case clang::X86::BI__builtin_ia32_psllwi256:
12582 case clang::X86::BI__builtin_ia32_pslldi256:
12583 case clang::X86::BI__builtin_ia32_psllqi256:
12584 case clang::X86::BI__builtin_ia32_psllwi512:
12585 case clang::X86::BI__builtin_ia32_pslldi512:
12586 case clang::X86::BI__builtin_ia32_psllqi512:
12587 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12588 if (RHS.uge(RHS: LHS.getBitWidth())) {
12589 return APInt::getZero(numBits: LHS.getBitWidth());
12590 }
12591 return LHS.shl(shiftAmt: RHS.getZExtValue());
12592 });
12593
12594 case clang::X86::BI__builtin_ia32_psrav4si:
12595 case clang::X86::BI__builtin_ia32_psrav8di:
12596 case clang::X86::BI__builtin_ia32_psrav8hi:
12597 case clang::X86::BI__builtin_ia32_psrav8si:
12598 case clang::X86::BI__builtin_ia32_psrav16hi:
12599 case clang::X86::BI__builtin_ia32_psrav16si:
12600 case clang::X86::BI__builtin_ia32_psrav32hi:
12601 case clang::X86::BI__builtin_ia32_psravq128:
12602 case clang::X86::BI__builtin_ia32_psravq256:
12603 case clang::X86::BI__builtin_ia32_psrawi128:
12604 case clang::X86::BI__builtin_ia32_psradi128:
12605 case clang::X86::BI__builtin_ia32_psraqi128:
12606 case clang::X86::BI__builtin_ia32_psrawi256:
12607 case clang::X86::BI__builtin_ia32_psradi256:
12608 case clang::X86::BI__builtin_ia32_psraqi256:
12609 case clang::X86::BI__builtin_ia32_psrawi512:
12610 case clang::X86::BI__builtin_ia32_psradi512:
12611 case clang::X86::BI__builtin_ia32_psraqi512:
12612 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12613 if (RHS.uge(RHS: LHS.getBitWidth())) {
12614 return LHS.ashr(ShiftAmt: LHS.getBitWidth() - 1);
12615 }
12616 return LHS.ashr(ShiftAmt: RHS.getZExtValue());
12617 });
12618
12619 case clang::X86::BI__builtin_ia32_psrlv2di:
12620 case clang::X86::BI__builtin_ia32_psrlv4di:
12621 case clang::X86::BI__builtin_ia32_psrlv4si:
12622 case clang::X86::BI__builtin_ia32_psrlv8di:
12623 case clang::X86::BI__builtin_ia32_psrlv8hi:
12624 case clang::X86::BI__builtin_ia32_psrlv8si:
12625 case clang::X86::BI__builtin_ia32_psrlv16hi:
12626 case clang::X86::BI__builtin_ia32_psrlv16si:
12627 case clang::X86::BI__builtin_ia32_psrlv32hi:
12628 case clang::X86::BI__builtin_ia32_psrlwi128:
12629 case clang::X86::BI__builtin_ia32_psrldi128:
12630 case clang::X86::BI__builtin_ia32_psrlqi128:
12631 case clang::X86::BI__builtin_ia32_psrlwi256:
12632 case clang::X86::BI__builtin_ia32_psrldi256:
12633 case clang::X86::BI__builtin_ia32_psrlqi256:
12634 case clang::X86::BI__builtin_ia32_psrlwi512:
12635 case clang::X86::BI__builtin_ia32_psrldi512:
12636 case clang::X86::BI__builtin_ia32_psrlqi512:
12637 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12638 if (RHS.uge(RHS: LHS.getBitWidth())) {
12639 return APInt::getZero(numBits: LHS.getBitWidth());
12640 }
12641 return LHS.lshr(shiftAmt: RHS.getZExtValue());
12642 });
12643 case X86::BI__builtin_ia32_packsswb128:
12644 case X86::BI__builtin_ia32_packsswb256:
12645 case X86::BI__builtin_ia32_packsswb512:
12646 case X86::BI__builtin_ia32_packssdw128:
12647 case X86::BI__builtin_ia32_packssdw256:
12648 case X86::BI__builtin_ia32_packssdw512:
12649 return evalPackBuiltin(E, Info, Result, PackFn: [](const APSInt &Src) {
12650 return APSInt(Src).truncSSat(width: Src.getBitWidth() / 2);
12651 });
12652 case X86::BI__builtin_ia32_packusdw128:
12653 case X86::BI__builtin_ia32_packusdw256:
12654 case X86::BI__builtin_ia32_packusdw512:
12655 case X86::BI__builtin_ia32_packuswb128:
12656 case X86::BI__builtin_ia32_packuswb256:
12657 case X86::BI__builtin_ia32_packuswb512:
12658 return evalPackBuiltin(E, Info, Result, PackFn: [](const APSInt &Src) {
12659 return APSInt(Src).truncSSatU(width: Src.getBitWidth() / 2);
12660 });
12661 case clang::X86::BI__builtin_ia32_selectss_128:
12662 return EvalSelectScalar(4);
12663 case clang::X86::BI__builtin_ia32_selectsd_128:
12664 return EvalSelectScalar(2);
12665 case clang::X86::BI__builtin_ia32_selectsh_128:
12666 case clang::X86::BI__builtin_ia32_selectsbf_128:
12667 return EvalSelectScalar(8);
12668 case clang::X86::BI__builtin_ia32_pmuldq128:
12669 case clang::X86::BI__builtin_ia32_pmuldq256:
12670 case clang::X86::BI__builtin_ia32_pmuldq512:
12671 case clang::X86::BI__builtin_ia32_pmuludq128:
12672 case clang::X86::BI__builtin_ia32_pmuludq256:
12673 case clang::X86::BI__builtin_ia32_pmuludq512: {
12674 APValue SourceLHS, SourceRHS;
12675 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12676 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12677 return false;
12678
12679 unsigned SourceLen = SourceLHS.getVectorLength();
12680 SmallVector<APValue, 4> ResultElements;
12681 ResultElements.reserve(N: SourceLen / 2);
12682
12683 for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) {
12684 APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12685 APSInt RHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12686
12687 switch (E->getBuiltinCallee()) {
12688 case clang::X86::BI__builtin_ia32_pmuludq128:
12689 case clang::X86::BI__builtin_ia32_pmuludq256:
12690 case clang::X86::BI__builtin_ia32_pmuludq512:
12691 ResultElements.push_back(
12692 Elt: APValue(APSInt(llvm::APIntOps::muluExtended(C1: LHS, C2: RHS), true)));
12693 break;
12694 case clang::X86::BI__builtin_ia32_pmuldq128:
12695 case clang::X86::BI__builtin_ia32_pmuldq256:
12696 case clang::X86::BI__builtin_ia32_pmuldq512:
12697 ResultElements.push_back(
12698 Elt: APValue(APSInt(llvm::APIntOps::mulsExtended(C1: LHS, C2: RHS), false)));
12699 break;
12700 }
12701 }
12702
12703 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12704 }
12705
12706 case X86::BI__builtin_ia32_vpmadd52luq128:
12707 case X86::BI__builtin_ia32_vpmadd52luq256:
12708 case X86::BI__builtin_ia32_vpmadd52luq512: {
12709 APValue A, B, C;
12710 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
12711 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B) ||
12712 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: C))
12713 return false;
12714
12715 unsigned ALen = A.getVectorLength();
12716 SmallVector<APValue, 4> ResultElements;
12717 ResultElements.reserve(N: ALen);
12718
12719 for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
12720 APInt AElt = A.getVectorElt(I: EltNum).getInt();
12721 APInt BElt = B.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12722 APInt CElt = C.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12723 APSInt ResElt(AElt + (BElt * CElt).zext(width: 64), false);
12724 ResultElements.push_back(Elt: APValue(ResElt));
12725 }
12726
12727 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12728 }
12729 case X86::BI__builtin_ia32_vpmadd52huq128:
12730 case X86::BI__builtin_ia32_vpmadd52huq256:
12731 case X86::BI__builtin_ia32_vpmadd52huq512: {
12732 APValue A, B, C;
12733 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
12734 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B) ||
12735 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: C))
12736 return false;
12737
12738 unsigned ALen = A.getVectorLength();
12739 SmallVector<APValue, 4> ResultElements;
12740 ResultElements.reserve(N: ALen);
12741
12742 for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
12743 APInt AElt = A.getVectorElt(I: EltNum).getInt();
12744 APInt BElt = B.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12745 APInt CElt = C.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12746 APSInt ResElt(AElt + llvm::APIntOps::mulhu(C1: BElt, C2: CElt).zext(width: 64), false);
12747 ResultElements.push_back(Elt: APValue(ResElt));
12748 }
12749
12750 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12751 }
12752
12753 case clang::X86::BI__builtin_ia32_vprotbi:
12754 case clang::X86::BI__builtin_ia32_vprotdi:
12755 case clang::X86::BI__builtin_ia32_vprotqi:
12756 case clang::X86::BI__builtin_ia32_vprotwi:
12757 case clang::X86::BI__builtin_ia32_prold128:
12758 case clang::X86::BI__builtin_ia32_prold256:
12759 case clang::X86::BI__builtin_ia32_prold512:
12760 case clang::X86::BI__builtin_ia32_prolq128:
12761 case clang::X86::BI__builtin_ia32_prolq256:
12762 case clang::X86::BI__builtin_ia32_prolq512:
12763 return EvaluateBinOpExpr(
12764 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(rotateAmt: RHS); });
12765
12766 case clang::X86::BI__builtin_ia32_prord128:
12767 case clang::X86::BI__builtin_ia32_prord256:
12768 case clang::X86::BI__builtin_ia32_prord512:
12769 case clang::X86::BI__builtin_ia32_prorq128:
12770 case clang::X86::BI__builtin_ia32_prorq256:
12771 case clang::X86::BI__builtin_ia32_prorq512:
12772 return EvaluateBinOpExpr(
12773 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(rotateAmt: RHS); });
12774
12775 case Builtin::BI__builtin_elementwise_max:
12776 case Builtin::BI__builtin_elementwise_min: {
12777 APValue SourceLHS, SourceRHS;
12778 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12779 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12780 return false;
12781
12782 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12783
12784 if (!DestEltTy->isIntegerType())
12785 return false;
12786
12787 unsigned SourceLen = SourceLHS.getVectorLength();
12788 SmallVector<APValue, 4> ResultElements;
12789 ResultElements.reserve(N: SourceLen);
12790
12791 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12792 APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12793 APSInt RHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12794 switch (E->getBuiltinCallee()) {
12795 case Builtin::BI__builtin_elementwise_max:
12796 ResultElements.push_back(
12797 Elt: APValue(APSInt(std::max(a: LHS, b: RHS),
12798 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12799 break;
12800 case Builtin::BI__builtin_elementwise_min:
12801 ResultElements.push_back(
12802 Elt: APValue(APSInt(std::min(a: LHS, b: RHS),
12803 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12804 break;
12805 }
12806 }
12807
12808 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12809 }
12810 case X86::BI__builtin_ia32_vpshldd128:
12811 case X86::BI__builtin_ia32_vpshldd256:
12812 case X86::BI__builtin_ia32_vpshldd512:
12813 case X86::BI__builtin_ia32_vpshldq128:
12814 case X86::BI__builtin_ia32_vpshldq256:
12815 case X86::BI__builtin_ia32_vpshldq512:
12816 case X86::BI__builtin_ia32_vpshldw128:
12817 case X86::BI__builtin_ia32_vpshldw256:
12818 case X86::BI__builtin_ia32_vpshldw512: {
12819 APValue SourceHi, SourceLo, SourceAmt;
12820 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceHi) ||
12821 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLo) ||
12822 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceAmt))
12823 return false;
12824
12825 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12826 unsigned SourceLen = SourceHi.getVectorLength();
12827 SmallVector<APValue, 32> ResultElements;
12828 ResultElements.reserve(N: SourceLen);
12829
12830 APInt Amt = SourceAmt.getInt();
12831 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12832 APInt Hi = SourceHi.getVectorElt(I: EltNum).getInt();
12833 APInt Lo = SourceLo.getVectorElt(I: EltNum).getInt();
12834 APInt R = llvm::APIntOps::fshl(Hi, Lo, Shift: Amt);
12835 ResultElements.push_back(
12836 Elt: APValue(APSInt(R, DestEltTy->isUnsignedIntegerOrEnumerationType())));
12837 }
12838
12839 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12840 }
12841 case X86::BI__builtin_ia32_vpshrdd128:
12842 case X86::BI__builtin_ia32_vpshrdd256:
12843 case X86::BI__builtin_ia32_vpshrdd512:
12844 case X86::BI__builtin_ia32_vpshrdq128:
12845 case X86::BI__builtin_ia32_vpshrdq256:
12846 case X86::BI__builtin_ia32_vpshrdq512:
12847 case X86::BI__builtin_ia32_vpshrdw128:
12848 case X86::BI__builtin_ia32_vpshrdw256:
12849 case X86::BI__builtin_ia32_vpshrdw512: {
12850 // NOTE: Reversed Hi/Lo operands.
12851 APValue SourceHi, SourceLo, SourceAmt;
12852 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLo) ||
12853 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceHi) ||
12854 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceAmt))
12855 return false;
12856
12857 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12858 unsigned SourceLen = SourceHi.getVectorLength();
12859 SmallVector<APValue, 32> ResultElements;
12860 ResultElements.reserve(N: SourceLen);
12861
12862 APInt Amt = SourceAmt.getInt();
12863 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12864 APInt Hi = SourceHi.getVectorElt(I: EltNum).getInt();
12865 APInt Lo = SourceLo.getVectorElt(I: EltNum).getInt();
12866 APInt R = llvm::APIntOps::fshr(Hi, Lo, Shift: Amt);
12867 ResultElements.push_back(
12868 Elt: APValue(APSInt(R, DestEltTy->isUnsignedIntegerOrEnumerationType())));
12869 }
12870
12871 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12872 }
12873 case X86::BI__builtin_ia32_vpconflictsi_128:
12874 case X86::BI__builtin_ia32_vpconflictsi_256:
12875 case X86::BI__builtin_ia32_vpconflictsi_512:
12876 case X86::BI__builtin_ia32_vpconflictdi_128:
12877 case X86::BI__builtin_ia32_vpconflictdi_256:
12878 case X86::BI__builtin_ia32_vpconflictdi_512: {
12879 APValue Source;
12880
12881 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
12882 return false;
12883
12884 unsigned SourceLen = Source.getVectorLength();
12885 SmallVector<APValue, 32> ResultElements;
12886 ResultElements.reserve(N: SourceLen);
12887
12888 const auto *VecT = E->getType()->castAs<VectorType>();
12889 bool DestUnsigned =
12890 VecT->getElementType()->isUnsignedIntegerOrEnumerationType();
12891
12892 for (unsigned I = 0; I != SourceLen; ++I) {
12893 const APValue &EltI = Source.getVectorElt(I);
12894
12895 APInt ConflictMask(EltI.getInt().getBitWidth(), 0);
12896 for (unsigned J = 0; J != I; ++J) {
12897 const APValue &EltJ = Source.getVectorElt(I: J);
12898 ConflictMask.setBitVal(BitPosition: J, BitValue: EltI.getInt() == EltJ.getInt());
12899 }
12900 ResultElements.push_back(Elt: APValue(APSInt(ConflictMask, DestUnsigned)));
12901 }
12902 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12903 }
12904 case X86::BI__builtin_ia32_blendpd:
12905 case X86::BI__builtin_ia32_blendpd256:
12906 case X86::BI__builtin_ia32_blendps:
12907 case X86::BI__builtin_ia32_blendps256:
12908 case X86::BI__builtin_ia32_pblendw128:
12909 case X86::BI__builtin_ia32_pblendw256:
12910 case X86::BI__builtin_ia32_pblendd128:
12911 case X86::BI__builtin_ia32_pblendd256: {
12912 APValue SourceF, SourceT, SourceC;
12913 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceF) ||
12914 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceT) ||
12915 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceC))
12916 return false;
12917
12918 const APInt &C = SourceC.getInt();
12919 unsigned SourceLen = SourceF.getVectorLength();
12920 SmallVector<APValue, 32> ResultElements;
12921 ResultElements.reserve(N: SourceLen);
12922 for (unsigned EltNum = 0; EltNum != SourceLen; ++EltNum) {
12923 const APValue &F = SourceF.getVectorElt(I: EltNum);
12924 const APValue &T = SourceT.getVectorElt(I: EltNum);
12925 ResultElements.push_back(Elt: C[EltNum % 8] ? T : F);
12926 }
12927
12928 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12929 }
12930
12931 case X86::BI__builtin_ia32_psignb128:
12932 case X86::BI__builtin_ia32_psignb256:
12933 case X86::BI__builtin_ia32_psignw128:
12934 case X86::BI__builtin_ia32_psignw256:
12935 case X86::BI__builtin_ia32_psignd128:
12936 case X86::BI__builtin_ia32_psignd256:
12937 return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
12938 if (BElem.isZero())
12939 return APInt::getZero(numBits: AElem.getBitWidth());
12940 if (BElem.isNegative())
12941 return -AElem;
12942 return AElem;
12943 });
12944
12945 case X86::BI__builtin_ia32_blendvpd:
12946 case X86::BI__builtin_ia32_blendvpd256:
12947 case X86::BI__builtin_ia32_blendvps:
12948 case X86::BI__builtin_ia32_blendvps256:
12949 case X86::BI__builtin_ia32_pblendvb128:
12950 case X86::BI__builtin_ia32_pblendvb256: {
12951 // SSE blendv by mask signbit: "Result = C[] < 0 ? T[] : F[]".
12952 APValue SourceF, SourceT, SourceC;
12953 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceF) ||
12954 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceT) ||
12955 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceC))
12956 return false;
12957
12958 unsigned SourceLen = SourceF.getVectorLength();
12959 SmallVector<APValue, 32> ResultElements;
12960 ResultElements.reserve(N: SourceLen);
12961
12962 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12963 const APValue &F = SourceF.getVectorElt(I: EltNum);
12964 const APValue &T = SourceT.getVectorElt(I: EltNum);
12965 const APValue &C = SourceC.getVectorElt(I: EltNum);
12966 APInt M = C.isInt() ? (APInt)C.getInt() : C.getFloat().bitcastToAPInt();
12967 ResultElements.push_back(Elt: M.isNegative() ? T : F);
12968 }
12969
12970 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12971 }
12972 case X86::BI__builtin_ia32_selectb_128:
12973 case X86::BI__builtin_ia32_selectb_256:
12974 case X86::BI__builtin_ia32_selectb_512:
12975 case X86::BI__builtin_ia32_selectw_128:
12976 case X86::BI__builtin_ia32_selectw_256:
12977 case X86::BI__builtin_ia32_selectw_512:
12978 case X86::BI__builtin_ia32_selectd_128:
12979 case X86::BI__builtin_ia32_selectd_256:
12980 case X86::BI__builtin_ia32_selectd_512:
12981 case X86::BI__builtin_ia32_selectq_128:
12982 case X86::BI__builtin_ia32_selectq_256:
12983 case X86::BI__builtin_ia32_selectq_512:
12984 case X86::BI__builtin_ia32_selectph_128:
12985 case X86::BI__builtin_ia32_selectph_256:
12986 case X86::BI__builtin_ia32_selectph_512:
12987 case X86::BI__builtin_ia32_selectpbf_128:
12988 case X86::BI__builtin_ia32_selectpbf_256:
12989 case X86::BI__builtin_ia32_selectpbf_512:
12990 case X86::BI__builtin_ia32_selectps_128:
12991 case X86::BI__builtin_ia32_selectps_256:
12992 case X86::BI__builtin_ia32_selectps_512:
12993 case X86::BI__builtin_ia32_selectpd_128:
12994 case X86::BI__builtin_ia32_selectpd_256:
12995 case X86::BI__builtin_ia32_selectpd_512: {
12996 // AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
12997 APValue SourceMask, SourceLHS, SourceRHS;
12998 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceMask) ||
12999 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLHS) ||
13000 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceRHS))
13001 return false;
13002
13003 APSInt Mask = SourceMask.getInt();
13004 unsigned SourceLen = SourceLHS.getVectorLength();
13005 SmallVector<APValue, 4> ResultElements;
13006 ResultElements.reserve(N: SourceLen);
13007
13008 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
13009 const APValue &LHS = SourceLHS.getVectorElt(I: EltNum);
13010 const APValue &RHS = SourceRHS.getVectorElt(I: EltNum);
13011 ResultElements.push_back(Elt: Mask[EltNum] ? LHS : RHS);
13012 }
13013
13014 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
13015 }
13016
13017 case X86::BI__builtin_ia32_cvtsd2ss: {
13018 APValue VecA, VecB;
13019 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) ||
13020 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB))
13021 return false;
13022
13023 SmallVector<APValue, 4> Elements;
13024
13025 APValue ResultVal;
13026 if (!ConvertDoubleToFloatStrict(Info, E, OrigVal: VecB.getVectorElt(I: 0).getFloat(),
13027 Result&: ResultVal))
13028 return false;
13029
13030 Elements.push_back(Elt: ResultVal);
13031
13032 unsigned NumEltsA = VecA.getVectorLength();
13033 for (unsigned I = 1; I < NumEltsA; ++I) {
13034 Elements.push_back(Elt: VecA.getVectorElt(I));
13035 }
13036
13037 return Success(V: Elements, E);
13038 }
13039 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: {
13040 APValue VecA, VecB, VecSrc, MaskValue;
13041
13042 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) ||
13043 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB) ||
13044 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: VecSrc) ||
13045 !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: MaskValue))
13046 return false;
13047
13048 unsigned Mask = MaskValue.getInt().getZExtValue();
13049 SmallVector<APValue, 4> Elements;
13050
13051 if (Mask & 1) {
13052 APValue ResultVal;
13053 if (!ConvertDoubleToFloatStrict(Info, E, OrigVal: VecB.getVectorElt(I: 0).getFloat(),
13054 Result&: ResultVal))
13055 return false;
13056 Elements.push_back(Elt: ResultVal);
13057 } else {
13058 Elements.push_back(Elt: VecSrc.getVectorElt(I: 0));
13059 }
13060
13061 unsigned NumEltsA = VecA.getVectorLength();
13062 for (unsigned I = 1; I < NumEltsA; ++I) {
13063 Elements.push_back(Elt: VecA.getVectorElt(I));
13064 }
13065
13066 return Success(V: Elements, E);
13067 }
13068 case X86::BI__builtin_ia32_cvtpd2ps:
13069 case X86::BI__builtin_ia32_cvtpd2ps256:
13070 case X86::BI__builtin_ia32_cvtpd2ps_mask:
13071 case X86::BI__builtin_ia32_cvtpd2ps512_mask: {
13072
13073 const auto BuiltinID = E->getBuiltinCallee();
13074 bool IsMasked = (BuiltinID == X86::BI__builtin_ia32_cvtpd2ps_mask ||
13075 BuiltinID == X86::BI__builtin_ia32_cvtpd2ps512_mask);
13076
13077 APValue InputValue;
13078 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: InputValue))
13079 return false;
13080
13081 APValue MergeValue;
13082 unsigned Mask = 0xFFFFFFFF;
13083 bool NeedsMerge = false;
13084 if (IsMasked) {
13085 APValue MaskValue;
13086 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: MaskValue))
13087 return false;
13088 Mask = MaskValue.getInt().getZExtValue();
13089 auto NumEltsResult = E->getType()->getAs<VectorType>()->getNumElements();
13090 for (unsigned I = 0; I < NumEltsResult; ++I) {
13091 if (!((Mask >> I) & 1)) {
13092 NeedsMerge = true;
13093 break;
13094 }
13095 }
13096 if (NeedsMerge) {
13097 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: MergeValue))
13098 return false;
13099 }
13100 }
13101
13102 unsigned NumEltsResult =
13103 E->getType()->getAs<VectorType>()->getNumElements();
13104 unsigned NumEltsInput = InputValue.getVectorLength();
13105 SmallVector<APValue, 8> Elements;
13106 for (unsigned I = 0; I < NumEltsResult; ++I) {
13107 if (IsMasked && !((Mask >> I) & 1)) {
13108 if (!NeedsMerge) {
13109 return false;
13110 }
13111 Elements.push_back(Elt: MergeValue.getVectorElt(I));
13112 continue;
13113 }
13114
13115 if (I >= NumEltsInput) {
13116 Elements.push_back(Elt: APValue(APFloat::getZero(Sem: APFloat::IEEEsingle())));
13117 continue;
13118 }
13119
13120 APValue ResultVal;
13121 if (!ConvertDoubleToFloatStrict(
13122 Info, E, OrigVal: InputValue.getVectorElt(I).getFloat(), Result&: ResultVal))
13123 return false;
13124
13125 Elements.push_back(Elt: ResultVal);
13126 }
13127 return Success(V: Elements, E);
13128 }
13129
13130 case X86::BI__builtin_ia32_shufps:
13131 case X86::BI__builtin_ia32_shufps256:
13132 case X86::BI__builtin_ia32_shufps512: {
13133 APValue R;
13134 if (!evalShuffleGeneric(
13135 Info, Call: E, Out&: R,
13136 GetSourceIndex: [](unsigned DstIdx,
13137 unsigned ShuffleMask) -> std::pair<unsigned, int> {
13138 constexpr unsigned LaneBits = 128u;
13139 unsigned NumElemPerLane = LaneBits / 32;
13140 unsigned NumSelectableElems = NumElemPerLane / 2;
13141 unsigned BitsPerElem = 2;
13142 unsigned IndexMask = (1u << BitsPerElem) - 1;
13143 unsigned MaskBits = 8;
13144 unsigned Lane = DstIdx / NumElemPerLane;
13145 unsigned ElemInLane = DstIdx % NumElemPerLane;
13146 unsigned LaneOffset = Lane * NumElemPerLane;
13147 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
13148 unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
13149 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
13150 return {SrcIdx, static_cast<int>(LaneOffset + Index)};
13151 }))
13152 return false;
13153 return Success(V: R, E);
13154 }
13155 case X86::BI__builtin_ia32_shufpd:
13156 case X86::BI__builtin_ia32_shufpd256:
13157 case X86::BI__builtin_ia32_shufpd512: {
13158 APValue R;
13159 if (!evalShuffleGeneric(
13160 Info, Call: E, Out&: R,
13161 GetSourceIndex: [](unsigned DstIdx,
13162 unsigned ShuffleMask) -> std::pair<unsigned, int> {
13163 constexpr unsigned LaneBits = 128u;
13164 unsigned NumElemPerLane = LaneBits / 64;
13165 unsigned NumSelectableElems = NumElemPerLane / 2;
13166 unsigned BitsPerElem = 1;
13167 unsigned IndexMask = (1u << BitsPerElem) - 1;
13168 unsigned MaskBits = 8;
13169 unsigned Lane = DstIdx / NumElemPerLane;
13170 unsigned ElemInLane = DstIdx % NumElemPerLane;
13171 unsigned LaneOffset = Lane * NumElemPerLane;
13172 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
13173 unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
13174 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
13175 return {SrcIdx, static_cast<int>(LaneOffset + Index)};
13176 }))
13177 return false;
13178 return Success(V: R, E);
13179 }
13180 case X86::BI__builtin_ia32_insertps128: {
13181 APValue R;
13182 if (!evalShuffleGeneric(
13183 Info, Call: E, Out&: R,
13184 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13185 // Bits [3:0]: zero mask - if bit is set, zero this element
13186 if ((Mask & (1 << DstIdx)) != 0) {
13187 return {0, -1};
13188 }
13189 // Bits [7:6]: select element from source vector Y (0-3)
13190 // Bits [5:4]: select destination position (0-3)
13191 unsigned SrcElem = (Mask >> 6) & 0x3;
13192 unsigned DstElem = (Mask >> 4) & 0x3;
13193 if (DstIdx == DstElem) {
13194 // Insert element from source vector (B) at this position
13195 return {1, static_cast<int>(SrcElem)};
13196 } else {
13197 // Copy from destination vector (A)
13198 return {0, static_cast<int>(DstIdx)};
13199 }
13200 }))
13201 return false;
13202 return Success(V: R, E);
13203 }
13204 case X86::BI__builtin_ia32_pshufb128:
13205 case X86::BI__builtin_ia32_pshufb256:
13206 case X86::BI__builtin_ia32_pshufb512: {
13207 APValue R;
13208 if (!evalShuffleGeneric(
13209 Info, Call: E, Out&: R,
13210 GetSourceIndex: [](unsigned DstIdx,
13211 unsigned ShuffleMask) -> std::pair<unsigned, int> {
13212 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
13213 if (Ctlb & 0x80)
13214 return std::make_pair(x: 0, y: -1);
13215
13216 unsigned LaneBase = (DstIdx / 16) * 16;
13217 unsigned SrcOffset = Ctlb & 0x0F;
13218 unsigned SrcIdx = LaneBase + SrcOffset;
13219 return std::make_pair(x: 0, y: static_cast<int>(SrcIdx));
13220 }))
13221 return false;
13222 return Success(V: R, E);
13223 }
13224
13225 case X86::BI__builtin_ia32_pshuflw:
13226 case X86::BI__builtin_ia32_pshuflw256:
13227 case X86::BI__builtin_ia32_pshuflw512: {
13228 APValue R;
13229 if (!evalShuffleGeneric(
13230 Info, Call: E, Out&: R,
13231 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13232 constexpr unsigned LaneBits = 128u;
13233 constexpr unsigned ElemBits = 16u;
13234 constexpr unsigned LaneElts = LaneBits / ElemBits;
13235 constexpr unsigned HalfSize = 4;
13236 unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
13237 unsigned LaneIdx = DstIdx % LaneElts;
13238 if (LaneIdx < HalfSize) {
13239 unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
13240 return std::make_pair(x: 0, y: static_cast<int>(LaneBase + Sel));
13241 }
13242 return std::make_pair(x: 0, y: static_cast<int>(DstIdx));
13243 }))
13244 return false;
13245 return Success(V: R, E);
13246 }
13247
13248 case X86::BI__builtin_ia32_pshufhw:
13249 case X86::BI__builtin_ia32_pshufhw256:
13250 case X86::BI__builtin_ia32_pshufhw512: {
13251 APValue R;
13252 if (!evalShuffleGeneric(
13253 Info, Call: E, Out&: R,
13254 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13255 constexpr unsigned LaneBits = 128u;
13256 constexpr unsigned ElemBits = 16u;
13257 constexpr unsigned LaneElts = LaneBits / ElemBits;
13258 constexpr unsigned HalfSize = 4;
13259 unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
13260 unsigned LaneIdx = DstIdx % LaneElts;
13261 if (LaneIdx >= HalfSize) {
13262 unsigned Rel = LaneIdx - HalfSize;
13263 unsigned Sel = (Mask >> (2 * Rel)) & 0x3;
13264 return std::make_pair(
13265 x: 0, y: static_cast<int>(LaneBase + HalfSize + Sel));
13266 }
13267 return std::make_pair(x: 0, y: static_cast<int>(DstIdx));
13268 }))
13269 return false;
13270 return Success(V: R, E);
13271 }
13272
13273 case X86::BI__builtin_ia32_pshufd:
13274 case X86::BI__builtin_ia32_pshufd256:
13275 case X86::BI__builtin_ia32_pshufd512:
13276 case X86::BI__builtin_ia32_vpermilps:
13277 case X86::BI__builtin_ia32_vpermilps256:
13278 case X86::BI__builtin_ia32_vpermilps512: {
13279 APValue R;
13280 if (!evalShuffleGeneric(
13281 Info, Call: E, Out&: R,
13282 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13283 constexpr unsigned LaneBits = 128u;
13284 constexpr unsigned ElemBits = 32u;
13285 constexpr unsigned LaneElts = LaneBits / ElemBits;
13286 unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
13287 unsigned LaneIdx = DstIdx % LaneElts;
13288 unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
13289 return std::make_pair(x: 0, y: static_cast<int>(LaneBase + Sel));
13290 }))
13291 return false;
13292 return Success(V: R, E);
13293 }
13294
13295 case X86::BI__builtin_ia32_vpermilvarpd:
13296 case X86::BI__builtin_ia32_vpermilvarpd256:
13297 case X86::BI__builtin_ia32_vpermilvarpd512: {
13298 APValue R;
13299 if (!evalShuffleGeneric(
13300 Info, Call: E, Out&: R,
13301 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13302 unsigned NumElemPerLane = 2;
13303 unsigned Lane = DstIdx / NumElemPerLane;
13304 unsigned Offset = Mask & 0b10 ? 1 : 0;
13305 return std::make_pair(
13306 x: 0, y: static_cast<int>(Lane * NumElemPerLane + Offset));
13307 }))
13308 return false;
13309 return Success(V: R, E);
13310 }
13311
13312 case X86::BI__builtin_ia32_vpermilpd:
13313 case X86::BI__builtin_ia32_vpermilpd256:
13314 case X86::BI__builtin_ia32_vpermilpd512: {
13315 APValue R;
13316 if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Control) {
13317 unsigned NumElemPerLane = 2;
13318 unsigned BitsPerElem = 1;
13319 unsigned MaskBits = 8;
13320 unsigned IndexMask = 0x1;
13321 unsigned Lane = DstIdx / NumElemPerLane;
13322 unsigned LaneOffset = Lane * NumElemPerLane;
13323 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
13324 unsigned Index = (Control >> BitIndex) & IndexMask;
13325 return std::make_pair(x: 0, y: static_cast<int>(LaneOffset + Index));
13326 }))
13327 return false;
13328 return Success(V: R, E);
13329 }
13330
13331 case X86::BI__builtin_ia32_permdf256:
13332 case X86::BI__builtin_ia32_permdi256: {
13333 APValue R;
13334 if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Control) {
13335 // permute4x64 operates on 4 64-bit elements
13336 // For element i (0-3), extract bits [2*i+1:2*i] from Control
13337 unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
13338 return std::make_pair(x: 0, y: static_cast<int>(Index));
13339 }))
13340 return false;
13341 return Success(V: R, E);
13342 }
13343
13344 case X86::BI__builtin_ia32_vpermilvarps:
13345 case X86::BI__builtin_ia32_vpermilvarps256:
13346 case X86::BI__builtin_ia32_vpermilvarps512: {
13347 APValue R;
13348 if (!evalShuffleGeneric(
13349 Info, Call: E, Out&: R,
13350 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13351 unsigned NumElemPerLane = 4;
13352 unsigned Lane = DstIdx / NumElemPerLane;
13353 unsigned Offset = Mask & 0b11;
13354 return std::make_pair(
13355 x: 0, y: static_cast<int>(Lane * NumElemPerLane + Offset));
13356 }))
13357 return false;
13358 return Success(V: R, E);
13359 }
13360
  case X86::BI__builtin_ia32_vpmultishiftqb128:
  case X86::BI__builtin_ia32_vpmultishiftqb256:
  case X86::BI__builtin_ia32_vpmultishiftqb512: {
    // VPMULTISHIFTQB: for every byte position, extract an unaligned 8-bit
    // field from the corresponding 64-bit qword of B. The low 6 bits of the
    // matching control byte in A give the field's starting bit; the extract
    // wraps around within the qword.
    assert(E->getNumArgs() == 2);

    APValue A, B;
    if (!Evaluate(Result&: A, Info, E: E->getArg(Arg: 0)) || !Evaluate(Result&: B, Info, E: E->getArg(Arg: 1)))
      return false;

    assert(A.getVectorLength() == B.getVectorLength());
    unsigned NumBytesInQWord = 8;
    unsigned NumBitsInByte = 8;
    unsigned NumBytes = A.getVectorLength();
    unsigned NumQWords = NumBytes / NumBytesInQWord;
    SmallVector<APValue, 64> Result;
    Result.reserve(N: NumBytes);

    for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
      // Reassemble the QWordId'th 64-bit qword of B from its eight bytes
      // (byte 0 is the least significant).
      APInt BQWord(64, 0);
      for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
        unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
        uint64_t Byte = B.getVectorElt(I: Idx).getInt().getZExtValue();
        BQWord.insertBits(SubBits: APInt(8, Byte & 0xFF), bitPosition: ByteIdx * NumBitsInByte);
      }

      for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
        unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
        // Low 6 bits of the control byte select the starting bit offset.
        uint64_t Ctrl = A.getVectorElt(I: Idx).getInt().getZExtValue() & 0x3F;

        // Gather 8 consecutive bits starting at Ctrl, wrapping modulo 64.
        APInt Byte(8, 0);
        for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
          Byte.setBitVal(BitPosition: BitIdx, BitValue: BQWord[(Ctrl + BitIdx) & 0x3F]);
        }
        Result.push_back(Elt: APValue(APSInt(Byte, /*isUnsigned*/ true)));
      }
    }
    return Success(V: APValue(Result.data(), Result.size()), E);
  }
13399
  case X86::BI__builtin_ia32_phminposuw128: {
    // PHMINPOSUW: locate the minimum element by unsigned comparison.
    // Result element 0 holds the minimum value, element 1 its index, and
    // all remaining elements are zero. Strict ugt() keeps the lowest index
    // on ties.
    APValue Source;
    if (!Evaluate(Result&: Source, Info, E: E->getArg(Arg: 0)))
      return false;
    unsigned SourceLen = Source.getVectorLength();
    const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
    QualType ElemQT = VT->getElementType();
    unsigned ElemBitWidth = Info.Ctx.getTypeSize(T: ElemQT);

    APInt MinIndex(ElemBitWidth, 0);
    APInt MinVal = Source.getVectorElt(I: 0).getInt();
    for (unsigned I = 1; I != SourceLen; ++I) {
      APInt Val = Source.getVectorElt(I).getInt();
      if (MinVal.ugt(RHS: Val)) {
        MinVal = Val;
        MinIndex = I;
      }
    }

    bool ResultUnsigned = E->getCallReturnType(Ctx: Info.Ctx)
                              ->castAs<VectorType>()
                              ->getElementType()
                              ->isUnsignedIntegerOrEnumerationType();

    SmallVector<APValue, 8> Result;
    Result.reserve(N: SourceLen);
    Result.emplace_back(Args: APSInt(MinVal, ResultUnsigned));
    Result.emplace_back(Args: APSInt(MinIndex, ResultUnsigned));
    // Zero-fill the remaining SourceLen - 2 elements.
    for (unsigned I = 0; I != SourceLen - 2; ++I) {
      Result.emplace_back(Args: APSInt(APInt(ElemBitWidth, 0), ResultUnsigned));
    }
    return Success(V: APValue(Result.data(), Result.size()), E);
  }
13433
  case X86::BI__builtin_ia32_psraq128:
  case X86::BI__builtin_ia32_psraq256:
  case X86::BI__builtin_ia32_psraq512:
  case X86::BI__builtin_ia32_psrad128:
  case X86::BI__builtin_ia32_psrad256:
  case X86::BI__builtin_ia32_psrad512:
  case X86::BI__builtin_ia32_psraw128:
  case X86::BI__builtin_ia32_psraw256:
  case X86::BI__builtin_ia32_psraw512: {
    // PSRA*: arithmetic right shift of every element by a shared count from
    // the second operand. An out-of-range count falls back to OverflowOp,
    // which sign-fills the element (shift by Width - 1).
    APValue R;
    if (!evalShiftWithCount(
            Info, Call: E, Out&: R,
            ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.ashr(ShiftAmt: Count); },
            OverflowOp: [](const APInt &Elt, unsigned Width) {
              return Elt.ashr(ShiftAmt: Width - 1);
            }))
      return false;
    return Success(V: R, E);
  }
13453
  case X86::BI__builtin_ia32_psllq128:
  case X86::BI__builtin_ia32_psllq256:
  case X86::BI__builtin_ia32_psllq512:
  case X86::BI__builtin_ia32_pslld128:
  case X86::BI__builtin_ia32_pslld256:
  case X86::BI__builtin_ia32_pslld512:
  case X86::BI__builtin_ia32_psllw128:
  case X86::BI__builtin_ia32_psllw256:
  case X86::BI__builtin_ia32_psllw512: {
    // PSLL*: logical left shift of every element by a shared count from the
    // second operand. An out-of-range count yields zero (OverflowOp).
    APValue R;
    if (!evalShiftWithCount(
            Info, Call: E, Out&: R,
            ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.shl(shiftAmt: Count); },
            OverflowOp: [](const APInt &Elt, unsigned Width) {
              return APInt::getZero(numBits: Width);
            }))
      return false;
    return Success(V: R, E);
  }
13473
  case X86::BI__builtin_ia32_psrlq128:
  case X86::BI__builtin_ia32_psrlq256:
  case X86::BI__builtin_ia32_psrlq512:
  case X86::BI__builtin_ia32_psrld128:
  case X86::BI__builtin_ia32_psrld256:
  case X86::BI__builtin_ia32_psrld512:
  case X86::BI__builtin_ia32_psrlw128:
  case X86::BI__builtin_ia32_psrlw256:
  case X86::BI__builtin_ia32_psrlw512: {
    // PSRL*: logical right shift of every element by a shared count from the
    // second operand. An out-of-range count yields zero (OverflowOp).
    APValue R;
    if (!evalShiftWithCount(
            Info, Call: E, Out&: R,
            ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.lshr(shiftAmt: Count); },
            OverflowOp: [](const APInt &Elt, unsigned Width) {
              return APInt::getZero(numBits: Width);
            }))
      return false;
    return Success(V: R, E);
  }
13493
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq512_mask: {
    // VPTERNLOG (merge masking): every result bit is looked up in the 8-bit
    // truth table Imm, indexed by the corresponding bits of A, B and C.
    // Elements whose write-mask bit in U is clear keep A's value unchanged.
    APValue AValue, BValue, CValue, ImmValue, UValue;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: AValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: BValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: CValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: ImmValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 4), Result&: UValue))
      return false;

    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
    APInt Imm = ImmValue.getInt();
    APInt U = UValue.getInt();
    unsigned ResultLen = AValue.getVectorLength();
    SmallVector<APValue, 16> ResultElements;
    ResultElements.reserve(N: ResultLen);

    for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) {
      APInt ALane = AValue.getVectorElt(I: EltNum).getInt();
      APInt BLane = BValue.getVectorElt(I: EltNum).getInt();
      APInt CLane = CValue.getVectorElt(I: EltNum).getInt();

      if (U[EltNum]) {
        unsigned BitWidth = ALane.getBitWidth();
        APInt ResLane(BitWidth, 0);

        for (unsigned Bit = 0; Bit < BitWidth; ++Bit) {
          unsigned ABit = ALane[Bit];
          unsigned BBit = BLane[Bit];
          unsigned CBit = CLane[Bit];

          // (A,B,C) bits form the 3-bit index into the truth table.
          unsigned Idx = (ABit << 2) | (BBit << 1) | CBit;
          ResLane.setBitVal(BitPosition: Bit, BitValue: Imm[Idx]);
        }
        ResultElements.push_back(Elt: APValue(APSInt(ResLane, DestUnsigned)));
      } else {
        // Mask bit clear: merge, i.e. pass A through untouched.
        ResultElements.push_back(Elt: APValue(APSInt(ALane, DestUnsigned)));
      }
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
  case X86::BI__builtin_ia32_pternlogq512_maskz: {
    // VPTERNLOG (zero masking): same truth-table lookup as the _mask form,
    // but elements whose write-mask bit in U is clear are zeroed instead of
    // keeping A's value.
    APValue AValue, BValue, CValue, ImmValue, UValue;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: AValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: BValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: CValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: ImmValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 4), Result&: UValue))
      return false;

    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
    APInt Imm = ImmValue.getInt();
    APInt U = UValue.getInt();
    unsigned ResultLen = AValue.getVectorLength();
    SmallVector<APValue, 16> ResultElements;
    ResultElements.reserve(N: ResultLen);

    for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) {
      APInt ALane = AValue.getVectorElt(I: EltNum).getInt();
      APInt BLane = BValue.getVectorElt(I: EltNum).getInt();
      APInt CLane = CValue.getVectorElt(I: EltNum).getInt();

      // ResLane starts at zero; it stays zero when the mask bit is clear.
      unsigned BitWidth = ALane.getBitWidth();
      APInt ResLane(BitWidth, 0);

      if (U[EltNum]) {
        for (unsigned Bit = 0; Bit < BitWidth; ++Bit) {
          unsigned ABit = ALane[Bit];
          unsigned BBit = BLane[Bit];
          unsigned CBit = CLane[Bit];

          // (A,B,C) bits form the 3-bit index into the truth table.
          unsigned Idx = (ABit << 2) | (BBit << 1) | CBit;
          ResLane.setBitVal(BitPosition: Bit, BitValue: Imm[Idx]);
        }
      }
      ResultElements.push_back(Elt: APValue(APSInt(ResLane, DestUnsigned)));
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13584
  case Builtin::BI__builtin_elementwise_clzg:
  case Builtin::BI__builtin_elementwise_ctzg: {
    // __builtin_elementwise_clzg/ctzg: per-element count of leading/trailing
    // zeros. A zero element is a constant-expression error unless the
    // optional second argument supplies a per-element fallback vector.
    APValue SourceLHS;
    std::optional<APValue> Fallback;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS))
      return false;
    if (E->getNumArgs() > 1) {
      APValue FallbackTmp;
      if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: FallbackTmp))
        return false;
      Fallback = FallbackTmp;
    }

    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    unsigned SourceLen = SourceLHS.getVectorLength();
    SmallVector<APValue, 4> ResultElements;
    ResultElements.reserve(N: SourceLen);

    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
      APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
      if (!LHS) {
        // Without a fallback, a zero element is undefined
        if (!Fallback) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero)
              << /*IsTrailing=*/(E->getBuiltinCallee() ==
                                 Builtin::BI__builtin_elementwise_ctzg);
          return false;
        }
        // Zero element with a fallback: take the fallback's element.
        ResultElements.push_back(Elt: Fallback->getVectorElt(I: EltNum));
        continue;
      }
      switch (E->getBuiltinCallee()) {
      case Builtin::BI__builtin_elementwise_clzg:
        ResultElements.push_back(Elt: APValue(
            APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), LHS.countl_zero()),
                   DestEltTy->isUnsignedIntegerOrEnumerationType())));
        break;
      case Builtin::BI__builtin_elementwise_ctzg:
        ResultElements.push_back(Elt: APValue(
            APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), LHS.countr_zero()),
                   DestEltTy->isUnsignedIntegerOrEnumerationType())));
        break;
      }
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13632
  case Builtin::BI__builtin_elementwise_fma: {
    // __builtin_elementwise_fma: per-element fused multiply-add X*Y+Z with a
    // single rounding step, honoring the active rounding mode.
    APValue SourceX, SourceY, SourceZ;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceX) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceY) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceZ))
      return false;

    unsigned SourceLen = SourceX.getVectorLength();
    SmallVector<APValue> ResultElements;
    ResultElements.reserve(N: SourceLen);
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);
    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
      const APFloat &X = SourceX.getVectorElt(I: EltNum).getFloat();
      const APFloat &Y = SourceY.getVectorElt(I: EltNum).getFloat();
      const APFloat &Z = SourceZ.getVectorElt(I: EltNum).getFloat();
      APFloat Result(X);
      // Status flags are intentionally ignored here.
      (void)Result.fusedMultiplyAdd(Multiplicand: Y, Addend: Z, RM);
      ResultElements.push_back(Elt: APValue(Result));
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13654
  case clang::X86::BI__builtin_ia32_phaddw128:
  case clang::X86::BI__builtin_ia32_phaddw256:
  case clang::X86::BI__builtin_ia32_phaddd128:
  case clang::X86::BI__builtin_ia32_phaddd256:
  case clang::X86::BI__builtin_ia32_phaddsw128:
  case clang::X86::BI__builtin_ia32_phaddsw256:

  case clang::X86::BI__builtin_ia32_phsubw128:
  case clang::X86::BI__builtin_ia32_phsubw256:
  case clang::X86::BI__builtin_ia32_phsubd128:
  case clang::X86::BI__builtin_ia32_phsubd256:
  case clang::X86::BI__builtin_ia32_phsubsw128:
  case clang::X86::BI__builtin_ia32_phsubsw256: {
    // PHADD*/PHSUB*: horizontal add/subtract of adjacent element pairs,
    // independently per 128-bit lane. Within each lane the results from the
    // first operand's pairs come first, then the second operand's. The *s*
    // variants saturate (sadd_sat/ssub_sat); the others wrap.
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();

    unsigned NumElts = SourceLHS.getVectorLength();
    unsigned EltBits = Info.Ctx.getIntWidth(T: DestEltTy);
    unsigned EltsPerLane = 128 / EltBits;
    SmallVector<APValue, 4> ResultElements;
    ResultElements.reserve(N: NumElts);

    for (unsigned LaneStart = 0; LaneStart != NumElts;
         LaneStart += EltsPerLane) {
      // First half of the lane: pairwise results from the LHS operand.
      for (unsigned I = 0; I != EltsPerLane; I += 2) {
        APSInt LHSA = SourceLHS.getVectorElt(I: LaneStart + I).getInt();
        APSInt LHSB = SourceLHS.getVectorElt(I: LaneStart + I + 1).getInt();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_phaddw128:
        case clang::X86::BI__builtin_ia32_phaddw256:
        case clang::X86::BI__builtin_ia32_phaddd128:
        case clang::X86::BI__builtin_ia32_phaddd256: {
          APSInt Res(LHSA + LHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phaddsw128:
        case clang::X86::BI__builtin_ia32_phaddsw256: {
          APSInt Res(LHSA.sadd_sat(RHS: LHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubw128:
        case clang::X86::BI__builtin_ia32_phsubw256:
        case clang::X86::BI__builtin_ia32_phsubd128:
        case clang::X86::BI__builtin_ia32_phsubd256: {
          APSInt Res(LHSA - LHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubsw128:
        case clang::X86::BI__builtin_ia32_phsubsw256: {
          APSInt Res(LHSA.ssub_sat(RHS: LHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        }
      }
      // Second half of the lane: pairwise results from the RHS operand.
      for (unsigned I = 0; I != EltsPerLane; I += 2) {
        APSInt RHSA = SourceRHS.getVectorElt(I: LaneStart + I).getInt();
        APSInt RHSB = SourceRHS.getVectorElt(I: LaneStart + I + 1).getInt();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_phaddw128:
        case clang::X86::BI__builtin_ia32_phaddw256:
        case clang::X86::BI__builtin_ia32_phaddd128:
        case clang::X86::BI__builtin_ia32_phaddd256: {
          APSInt Res(RHSA + RHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phaddsw128:
        case clang::X86::BI__builtin_ia32_phaddsw256: {
          APSInt Res(RHSA.sadd_sat(RHS: RHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubw128:
        case clang::X86::BI__builtin_ia32_phsubw256:
        case clang::X86::BI__builtin_ia32_phsubd128:
        case clang::X86::BI__builtin_ia32_phsubd256: {
          APSInt Res(RHSA - RHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubsw128:
        case clang::X86::BI__builtin_ia32_phsubsw256: {
          APSInt Res(RHSA.ssub_sat(RHS: RHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        }
      }
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case clang::X86::BI__builtin_ia32_haddpd:
  case clang::X86::BI__builtin_ia32_haddps:
  case clang::X86::BI__builtin_ia32_haddps256:
  case clang::X86::BI__builtin_ia32_haddpd256:
  case clang::X86::BI__builtin_ia32_hsubpd:
  case clang::X86::BI__builtin_ia32_hsubps:
  case clang::X86::BI__builtin_ia32_hsubps256:
  case clang::X86::BI__builtin_ia32_hsubpd256: {
    // HADDP*/HSUBP*: floating-point horizontal add/subtract of adjacent
    // element pairs per 128-bit lane, LHS pairs first then RHS pairs, using
    // the active rounding mode.
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;
    unsigned NumElts = SourceLHS.getVectorLength();
    SmallVector<APValue, 4> ResultElements;
    ResultElements.reserve(N: NumElts);
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    unsigned EltBits = Info.Ctx.getTypeSize(T: DestEltTy);
    unsigned NumLanes = NumElts * EltBits / 128;
    unsigned NumElemsPerLane = NumElts / NumLanes;
    unsigned HalfElemsPerLane = NumElemsPerLane / 2;

    for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
      // First half of the lane: pairwise results from the LHS operand.
      for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
        APFloat LHSA = SourceLHS.getVectorElt(I: L + (2 * I) + 0).getFloat();
        APFloat LHSB = SourceLHS.getVectorElt(I: L + (2 * I) + 1).getFloat();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_haddpd:
        case clang::X86::BI__builtin_ia32_haddps:
        case clang::X86::BI__builtin_ia32_haddps256:
        case clang::X86::BI__builtin_ia32_haddpd256:
          LHSA.add(RHS: LHSB, RM);
          break;
        case clang::X86::BI__builtin_ia32_hsubpd:
        case clang::X86::BI__builtin_ia32_hsubps:
        case clang::X86::BI__builtin_ia32_hsubps256:
        case clang::X86::BI__builtin_ia32_hsubpd256:
          LHSA.subtract(RHS: LHSB, RM);
          break;
        }
        ResultElements.push_back(Elt: APValue(LHSA));
      }
      // Second half of the lane: pairwise results from the RHS operand.
      for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
        APFloat RHSA = SourceRHS.getVectorElt(I: L + (2 * I) + 0).getFloat();
        APFloat RHSB = SourceRHS.getVectorElt(I: L + (2 * I) + 1).getFloat();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_haddpd:
        case clang::X86::BI__builtin_ia32_haddps:
        case clang::X86::BI__builtin_ia32_haddps256:
        case clang::X86::BI__builtin_ia32_haddpd256:
          RHSA.add(RHS: RHSB, RM);
          break;
        case clang::X86::BI__builtin_ia32_hsubpd:
        case clang::X86::BI__builtin_ia32_hsubps:
        case clang::X86::BI__builtin_ia32_hsubps256:
        case clang::X86::BI__builtin_ia32_hsubpd256:
          RHSA.subtract(RHS: RHSB, RM);
          break;
        }
        ResultElements.push_back(Elt: APValue(RHSA));
      }
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case clang::X86::BI__builtin_ia32_addsubpd:
  case clang::X86::BI__builtin_ia32_addsubps:
  case clang::X86::BI__builtin_ia32_addsubpd256:
  case clang::X86::BI__builtin_ia32_addsubps256: {
    // Addsub: alternates between subtraction and addition
    // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;
    unsigned NumElems = SourceLHS.getVectorLength();
    SmallVector<APValue, 8> ResultElements;
    ResultElements.reserve(N: NumElems);
    // Honor the active FP rounding mode for both the adds and subtracts.
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);

    for (unsigned I = 0; I != NumElems; ++I) {
      APFloat LHS = SourceLHS.getVectorElt(I).getFloat();
      APFloat RHS = SourceRHS.getVectorElt(I).getFloat();
      if (I % 2 == 0) {
        // Even indices: subtract
        LHS.subtract(RHS, RM);
      } else {
        // Odd indices: add
        LHS.add(RHS, RM);
      }
      ResultElements.push_back(Elt: APValue(LHS));
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case clang::X86::BI__builtin_ia32_pclmulqdq128:
  case clang::X86::BI__builtin_ia32_pclmulqdq256:
  case clang::X86::BI__builtin_ia32_pclmulqdq512: {
    // PCLMULQDQ: carry-less multiplication of selected 64-bit halves
    // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand
    // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;

    APSInt Imm8;
    if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm8, Info))
      return false;

    // Extract bits 0 and 4 from imm8
    bool SelectUpperA = (Imm8 & 0x01) != 0;
    bool SelectUpperB = (Imm8 & 0x10) != 0;

    unsigned NumElems = SourceLHS.getVectorLength();
    SmallVector<APValue, 8> ResultElements;
    ResultElements.reserve(N: NumElems);
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();

    // Process each 128-bit lane
    for (unsigned Lane = 0; Lane < NumElems; Lane += 2) {
      // Get the two 64-bit halves of the first operand
      APSInt A0 = SourceLHS.getVectorElt(I: Lane + 0).getInt();
      APSInt A1 = SourceLHS.getVectorElt(I: Lane + 1).getInt();
      // Get the two 64-bit halves of the second operand
      APSInt B0 = SourceRHS.getVectorElt(I: Lane + 0).getInt();
      APSInt B1 = SourceRHS.getVectorElt(I: Lane + 1).getInt();

      // Select the appropriate 64-bit values based on imm8
      APInt A = SelectUpperA ? A1 : A0;
      APInt B = SelectUpperB ? B1 : B0;

      // Extend both operands to 128 bits for carry-less multiplication
      APInt A128 = A.zext(width: 128);
      APInt B128 = B.zext(width: 128);

      // Use APIntOps::clmul for carry-less multiplication
      APInt Result = llvm::APIntOps::clmul(LHS: A128, RHS: B128);

      // Split the 128-bit result into two 64-bit halves
      APSInt ResultLow(Result.extractBits(numBits: 64, bitPosition: 0), DestUnsigned);
      APSInt ResultHigh(Result.extractBits(numBits: 64, bitPosition: 64), DestUnsigned);

      ResultElements.push_back(Elt: APValue(ResultLow));
      ResultElements.push_back(Elt: APValue(ResultHigh));
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case Builtin::BI__builtin_elementwise_fshl:
  case Builtin::BI__builtin_elementwise_fshr: {
    // __builtin_elementwise_fshl/fshr: per-element funnel shift. Hi:Lo form
    // a double-width value shifted left/right by Shift; the shifted window
    // is the result. Delegates to APIntOps::fshl/fshr.
    APValue SourceHi, SourceLo, SourceShift;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceHi) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLo) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceShift))
      return false;

    // Only integer element types are supported.
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    if (!DestEltTy->isIntegerType())
      return false;

    unsigned SourceLen = SourceHi.getVectorLength();
    SmallVector<APValue> ResultElements;
    ResultElements.reserve(N: SourceLen);
    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
      const APSInt &Hi = SourceHi.getVectorElt(I: EltNum).getInt();
      const APSInt &Lo = SourceLo.getVectorElt(I: EltNum).getInt();
      const APSInt &Shift = SourceShift.getVectorElt(I: EltNum).getInt();
      switch (E->getBuiltinCallee()) {
      case Builtin::BI__builtin_elementwise_fshl:
        ResultElements.push_back(Elt: APValue(
            APSInt(llvm::APIntOps::fshl(Hi, Lo, Shift), Hi.isUnsigned())));
        break;
      case Builtin::BI__builtin_elementwise_fshr:
        ResultElements.push_back(Elt: APValue(
            APSInt(llvm::APIntOps::fshr(Hi, Lo, Shift), Hi.isUnsigned())));
        break;
      }
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13935
13936 case X86::BI__builtin_ia32_shuf_f32x4_256:
13937 case X86::BI__builtin_ia32_shuf_i32x4_256:
13938 case X86::BI__builtin_ia32_shuf_f64x2_256:
13939 case X86::BI__builtin_ia32_shuf_i64x2_256:
13940 case X86::BI__builtin_ia32_shuf_f32x4:
13941 case X86::BI__builtin_ia32_shuf_i32x4:
13942 case X86::BI__builtin_ia32_shuf_f64x2:
13943 case X86::BI__builtin_ia32_shuf_i64x2: {
13944 APValue SourceA, SourceB;
13945 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceA) ||
13946 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceB))
13947 return false;
13948
13949 APSInt Imm;
13950 if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
13951 return false;
13952
13953 // Destination and sources A, B all have the same type.
13954 unsigned NumElems = SourceA.getVectorLength();
13955 const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
13956 QualType ElemQT = VT->getElementType();
13957 unsigned ElemBits = Info.Ctx.getTypeSize(T: ElemQT);
13958 unsigned LaneBits = 128u;
13959 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
13960 unsigned NumElemsPerLane = LaneBits / ElemBits;
13961
13962 unsigned DstLen = SourceA.getVectorLength();
13963 SmallVector<APValue, 16> ResultElements;
13964 ResultElements.reserve(N: DstLen);
13965
13966 APValue R;
13967 if (!evalShuffleGeneric(
13968 Info, Call: E, Out&: R,
13969 GetSourceIndex: [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask)
13970 -> std::pair<unsigned, int> {
13971 // DstIdx determines source. ShuffleMask selects lane in source.
13972 unsigned BitsPerElem = NumLanes / 2;
13973 unsigned IndexMask = (1u << BitsPerElem) - 1;
13974 unsigned Lane = DstIdx / NumElemsPerLane;
13975 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
13976 unsigned BitIdx = BitsPerElem * Lane;
13977 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
13978 unsigned ElemInLane = DstIdx % NumElemsPerLane;
13979 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
13980 return {SrcIdx, IdxToPick};
13981 }))
13982 return false;
13983 return Success(V: R, E);
13984 }
13985
  case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
  case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
  case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
  case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
  case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
  case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi: {
    // GF2P8AFFINEQB(/INV): per-byte GF(2) affine transform A*X + Imm, where
    // the 8x8 bit-matrix A is taken per 64-bit qword. The *inv* variants
    // additionally invert X in the field first; GFNIAffine() does the math.

    APValue X, A;
    APSInt Imm;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: X) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: A) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
      return false;

    assert(X.isVector() && A.isVector());
    assert(X.getVectorLength() == A.getVectorLength());

    // Only the *inv* builtins request the field inverse; the plain affine
    // builtins fall through with IsInverse == false.
    bool IsInverse = false;
    switch (E->getBuiltinCallee()) {
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi: {
      IsInverse = true;
    }
    }

    unsigned NumBitsInByte = 8;
    unsigned NumBytesInQWord = 8;
    unsigned NumBitsInQWord = 64;
    unsigned NumBytes = A.getVectorLength();
    unsigned NumQWords = NumBytes / NumBytesInQWord;
    SmallVector<APValue, 64> Result;
    Result.reserve(N: NumBytes);

    // computing A*X + Imm
    for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
      // Extract the QWords from X, A
      APInt XQWord(NumBitsInQWord, 0);
      APInt AQWord(NumBitsInQWord, 0);
      for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
        unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
        APInt XByte = X.getVectorElt(I: Idx).getInt();
        APInt AByte = A.getVectorElt(I: Idx).getInt();
        XQWord.insertBits(SubBits: XByte, bitPosition: ByteIdx * NumBitsInByte);
        AQWord.insertBits(SubBits: AByte, bitPosition: ByteIdx * NumBitsInByte);
      }

      // Transform each byte of the qword with the qword's matrix.
      for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
        uint8_t XByte =
            XQWord.lshr(shiftAmt: ByteIdx * NumBitsInByte).getLoBits(numBits: 8).getZExtValue();
        Result.push_back(Elt: APValue(APSInt(
            APInt(8, GFNIAffine(XByte, AQword: AQWord, Imm, Inverse: IsInverse)), false)));
      }
    }

    return Success(V: APValue(Result.data(), Result.size()), E);
  }
14043
  case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
  case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
  case X86::BI__builtin_ia32_vgf2p8mulb_v64qi: {
    // GF2P8MULB: byte-wise multiplication in GF(2^8); the field arithmetic
    // is delegated to GFNIMul().
    APValue A, B;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B))
      return false;

    assert(A.isVector() && B.isVector());
    assert(A.getVectorLength() == B.getVectorLength());

    unsigned NumBytes = A.getVectorLength();
    SmallVector<APValue, 64> Result;
    Result.reserve(N: NumBytes);

    for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
      uint8_t AByte = A.getVectorElt(I: ByteIdx).getInt().getZExtValue();
      uint8_t BByte = B.getVectorElt(I: ByteIdx).getInt().getZExtValue();
      Result.push_back(Elt: APValue(
          APSInt(APInt(8, GFNIMul(AByte, BByte)), /*IsUnsigned=*/false)));
    }

    return Success(V: APValue(Result.data(), Result.size()), E);
  }
14068
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256: {
    // VINSERT*: overwrite one aligned sub-vector-sized slice of the
    // destination with the second operand; the immediate picks which slice,
    // wrapping modulo the number of slices.
    APValue SourceDst, SourceSub;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceDst) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceSub))
      return false;

    APSInt Imm;
    if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
      return false;

    assert(SourceDst.isVector() && SourceSub.isVector());
    unsigned DstLen = SourceDst.getVectorLength();
    unsigned SubLen = SourceSub.getVectorLength();
    assert(SubLen != 0 && DstLen != 0 && (DstLen % SubLen) == 0);
    unsigned NumLanes = DstLen / SubLen;
    // First destination element covered by the inserted sub-vector.
    unsigned LaneIdx = (Imm.getZExtValue() % NumLanes) * SubLen;

    SmallVector<APValue, 16> ResultElements;
    ResultElements.reserve(N: DstLen);

    for (unsigned EltNum = 0; EltNum < DstLen; ++EltNum) {
      if (EltNum >= LaneIdx && EltNum < LaneIdx + SubLen)
        ResultElements.push_back(Elt: SourceSub.getVectorElt(I: EltNum - LaneIdx));
      else
        ResultElements.push_back(Elt: SourceDst.getVectorElt(I: EltNum));
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
14113
  case clang::X86::BI__builtin_ia32_vec_set_v4hi:
  case clang::X86::BI__builtin_ia32_vec_set_v16qi:
  case clang::X86::BI__builtin_ia32_vec_set_v8hi:
  case clang::X86::BI__builtin_ia32_vec_set_v4si:
  case clang::X86::BI__builtin_ia32_vec_set_v2di:
  case clang::X86::BI__builtin_ia32_vec_set_v32qi:
  case clang::X86::BI__builtin_ia32_vec_set_v16hi:
  case clang::X86::BI__builtin_ia32_vec_set_v8si:
  case clang::X86::BI__builtin_ia32_vec_set_v4di: {
    // vec_set: return a copy of the vector with one element replaced by the
    // scalar. The index wraps modulo the (power-of-two) element count.
    APValue VecVal;
    APSInt Scalar, IndexAPS;
    if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: VecVal, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Scalar, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: IndexAPS, Info))
      return false;

    // Convert the scalar to the destination's element type and signedness.
    QualType ElemTy = E->getType()->castAs<VectorType>()->getElementType();
    unsigned ElemWidth = Info.Ctx.getIntWidth(T: ElemTy);
    bool ElemUnsigned = ElemTy->isUnsignedIntegerOrEnumerationType();
    Scalar.setIsUnsigned(ElemUnsigned);
    APSInt ElemAPS = Scalar.extOrTrunc(width: ElemWidth);
    APValue ElemAV(ElemAPS);

    unsigned NumElems = VecVal.getVectorLength();
    // NumElems is a power of two, so '& (NumElems - 1)' wraps the index.
    unsigned Index =
        static_cast<unsigned>(IndexAPS.getZExtValue() & (NumElems - 1));

    SmallVector<APValue, 4> Elems;
    Elems.reserve(N: NumElems);
    for (unsigned ElemNum = 0; ElemNum != NumElems; ++ElemNum)
      Elems.push_back(Elt: ElemNum == Index ? ElemAV : VecVal.getVectorElt(I: ElemNum));

    return Success(V: APValue(Elems.data(), NumElems), E);
  }
14148
14149 case X86::BI__builtin_ia32_pslldqi128_byteshift:
14150 case X86::BI__builtin_ia32_pslldqi256_byteshift:
14151 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
14152 APValue R;
14153 if (!evalShuffleGeneric(
14154 Info, Call: E, Out&: R,
14155 GetSourceIndex: [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
14156 unsigned LaneBase = (DstIdx / 16) * 16;
14157 unsigned LaneIdx = DstIdx % 16;
14158 if (LaneIdx < Shift)
14159 return std::make_pair(x: 0, y: -1);
14160
14161 return std::make_pair(
14162 x: 0, y: static_cast<int>(LaneBase + LaneIdx - Shift));
14163 }))
14164 return false;
14165 return Success(V: R, E);
14166 }
14167
14168 case X86::BI__builtin_ia32_psrldqi128_byteshift:
14169 case X86::BI__builtin_ia32_psrldqi256_byteshift:
14170 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
14171 APValue R;
14172 if (!evalShuffleGeneric(
14173 Info, Call: E, Out&: R,
14174 GetSourceIndex: [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
14175 unsigned LaneBase = (DstIdx / 16) * 16;
14176 unsigned LaneIdx = DstIdx % 16;
14177 if (LaneIdx + Shift < 16)
14178 return std::make_pair(
14179 x: 0, y: static_cast<int>(LaneBase + LaneIdx + Shift));
14180
14181 return std::make_pair(x: 0, y: -1);
14182 }))
14183 return false;
14184 return Success(V: R, E);
14185 }
14186
14187 case X86::BI__builtin_ia32_palignr128:
14188 case X86::BI__builtin_ia32_palignr256:
14189 case X86::BI__builtin_ia32_palignr512: {
14190 APValue R;
14191 if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Shift) {
14192 // Default to -1 → zero-fill this destination element
14193 unsigned VecIdx = 1;
14194 int ElemIdx = -1;
14195
14196 int Lane = DstIdx / 16;
14197 int Offset = DstIdx % 16;
14198
14199 // Elements come from VecB first, then VecA after the shift boundary
14200 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
14201 if (ShiftedIdx < 16) { // from VecB
14202 ElemIdx = ShiftedIdx + (Lane * 16);
14203 } else if (ShiftedIdx < 32) { // from VecA
14204 VecIdx = 0;
14205 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
14206 }
14207
14208 return std::pair<unsigned, int>{VecIdx, ElemIdx};
14209 }))
14210 return false;
14211 return Success(V: R, E);
14212 }
14213 case X86::BI__builtin_ia32_alignd128:
14214 case X86::BI__builtin_ia32_alignd256:
14215 case X86::BI__builtin_ia32_alignd512:
14216 case X86::BI__builtin_ia32_alignq128:
14217 case X86::BI__builtin_ia32_alignq256:
14218 case X86::BI__builtin_ia32_alignq512: {
14219 APValue R;
14220 unsigned NumElems = E->getType()->castAs<VectorType>()->getNumElements();
14221 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14222 GetSourceIndex: [NumElems](unsigned DstIdx, unsigned Shift) {
14223 unsigned Imm = Shift & 0xFF;
14224 unsigned EffectiveShift = Imm & (NumElems - 1);
14225 unsigned SourcePos = DstIdx + EffectiveShift;
14226 unsigned VecIdx = SourcePos < NumElems ? 1 : 0;
14227 unsigned ElemIdx = SourcePos & (NumElems - 1);
14228
14229 return std::pair<unsigned, int>{
14230 VecIdx, static_cast<int>(ElemIdx)};
14231 }))
14232 return false;
14233 return Success(V: R, E);
14234 }
14235 case X86::BI__builtin_ia32_permvarsi256:
14236 case X86::BI__builtin_ia32_permvarsf256:
14237 case X86::BI__builtin_ia32_permvardf512:
14238 case X86::BI__builtin_ia32_permvardi512:
14239 case X86::BI__builtin_ia32_permvarhi128: {
14240 APValue R;
14241 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14242 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14243 int Offset = ShuffleMask & 0x7;
14244 return std::pair<unsigned, int>{0, Offset};
14245 }))
14246 return false;
14247 return Success(V: R, E);
14248 }
14249 case X86::BI__builtin_ia32_permvarqi128:
14250 case X86::BI__builtin_ia32_permvarhi256:
14251 case X86::BI__builtin_ia32_permvarsi512:
14252 case X86::BI__builtin_ia32_permvarsf512: {
14253 APValue R;
14254 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14255 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14256 int Offset = ShuffleMask & 0xF;
14257 return std::pair<unsigned, int>{0, Offset};
14258 }))
14259 return false;
14260 return Success(V: R, E);
14261 }
14262 case X86::BI__builtin_ia32_permvardi256:
14263 case X86::BI__builtin_ia32_permvardf256: {
14264 APValue R;
14265 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14266 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14267 int Offset = ShuffleMask & 0x3;
14268 return std::pair<unsigned, int>{0, Offset};
14269 }))
14270 return false;
14271 return Success(V: R, E);
14272 }
14273 case X86::BI__builtin_ia32_permvarqi256:
14274 case X86::BI__builtin_ia32_permvarhi512: {
14275 APValue R;
14276 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14277 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14278 int Offset = ShuffleMask & 0x1F;
14279 return std::pair<unsigned, int>{0, Offset};
14280 }))
14281 return false;
14282 return Success(V: R, E);
14283 }
14284 case X86::BI__builtin_ia32_permvarqi512: {
14285 APValue R;
14286 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14287 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14288 int Offset = ShuffleMask & 0x3F;
14289 return std::pair<unsigned, int>{0, Offset};
14290 }))
14291 return false;
14292 return Success(V: R, E);
14293 }
14294 case X86::BI__builtin_ia32_vpermi2varq128:
14295 case X86::BI__builtin_ia32_vpermi2varpd128: {
14296 APValue R;
14297 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14298 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14299 int Offset = ShuffleMask & 0x1;
14300 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
14301 return std::pair<unsigned, int>{SrcIdx, Offset};
14302 }))
14303 return false;
14304 return Success(V: R, E);
14305 }
14306 case X86::BI__builtin_ia32_vpermi2vard128:
14307 case X86::BI__builtin_ia32_vpermi2varps128:
14308 case X86::BI__builtin_ia32_vpermi2varq256:
14309 case X86::BI__builtin_ia32_vpermi2varpd256: {
14310 APValue R;
14311 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14312 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14313 int Offset = ShuffleMask & 0x3;
14314 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
14315 return std::pair<unsigned, int>{SrcIdx, Offset};
14316 }))
14317 return false;
14318 return Success(V: R, E);
14319 }
14320 case X86::BI__builtin_ia32_vpermi2varhi128:
14321 case X86::BI__builtin_ia32_vpermi2vard256:
14322 case X86::BI__builtin_ia32_vpermi2varps256:
14323 case X86::BI__builtin_ia32_vpermi2varq512:
14324 case X86::BI__builtin_ia32_vpermi2varpd512: {
14325 APValue R;
14326 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14327 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14328 int Offset = ShuffleMask & 0x7;
14329 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
14330 return std::pair<unsigned, int>{SrcIdx, Offset};
14331 }))
14332 return false;
14333 return Success(V: R, E);
14334 }
14335 case X86::BI__builtin_ia32_vpermi2varqi128:
14336 case X86::BI__builtin_ia32_vpermi2varhi256:
14337 case X86::BI__builtin_ia32_vpermi2vard512:
14338 case X86::BI__builtin_ia32_vpermi2varps512: {
14339 APValue R;
14340 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14341 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14342 int Offset = ShuffleMask & 0xF;
14343 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
14344 return std::pair<unsigned, int>{SrcIdx, Offset};
14345 }))
14346 return false;
14347 return Success(V: R, E);
14348 }
14349 case X86::BI__builtin_ia32_vpermi2varqi256:
14350 case X86::BI__builtin_ia32_vpermi2varhi512: {
14351 APValue R;
14352 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14353 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14354 int Offset = ShuffleMask & 0x1F;
14355 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
14356 return std::pair<unsigned, int>{SrcIdx, Offset};
14357 }))
14358 return false;
14359 return Success(V: R, E);
14360 }
14361 case X86::BI__builtin_ia32_vpermi2varqi512: {
14362 APValue R;
14363 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14364 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14365 int Offset = ShuffleMask & 0x3F;
14366 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
14367 return std::pair<unsigned, int>{SrcIdx, Offset};
14368 }))
14369 return false;
14370 return Success(V: R, E);
14371 }
14372
14373 case clang::X86::BI__builtin_ia32_minps:
14374 case clang::X86::BI__builtin_ia32_minpd:
14375 case clang::X86::BI__builtin_ia32_minps256:
14376 case clang::X86::BI__builtin_ia32_minpd256:
14377 case clang::X86::BI__builtin_ia32_minps512:
14378 case clang::X86::BI__builtin_ia32_minpd512:
14379 case clang::X86::BI__builtin_ia32_minph128:
14380 case clang::X86::BI__builtin_ia32_minph256:
14381 case clang::X86::BI__builtin_ia32_minph512:
14382 return EvaluateFpBinOpExpr(
14383 [](const APFloat &A, const APFloat &B,
14384 std::optional<APSInt>) -> std::optional<APFloat> {
14385 if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
14386 B.isInfinity() || B.isDenormal())
14387 return std::nullopt;
14388 if (A.isZero() && B.isZero())
14389 return B;
14390 return llvm::minimum(A, B);
14391 });
14392
14393 case clang::X86::BI__builtin_ia32_minss:
14394 case clang::X86::BI__builtin_ia32_minsd:
14395 return EvaluateFpBinOpExpr(
14396 [](const APFloat &A, const APFloat &B,
14397 std::optional<APSInt> RoundingMode) -> std::optional<APFloat> {
14398 return EvalScalarMinMaxFp(A, B, RoundingMode, /*IsMin=*/true);
14399 },
14400 /*IsScalar=*/true);
14401
14402 case clang::X86::BI__builtin_ia32_minsd_round_mask:
14403 case clang::X86::BI__builtin_ia32_minss_round_mask:
14404 case clang::X86::BI__builtin_ia32_minsh_round_mask:
14405 case clang::X86::BI__builtin_ia32_maxsd_round_mask:
14406 case clang::X86::BI__builtin_ia32_maxss_round_mask:
14407 case clang::X86::BI__builtin_ia32_maxsh_round_mask: {
14408 bool IsMin =
14409 E->getBuiltinCallee() ==
14410 clang::X86::BI__builtin_ia32_minsd_round_mask ||
14411 E->getBuiltinCallee() ==
14412 clang::X86::BI__builtin_ia32_minss_round_mask ||
14413 E->getBuiltinCallee() == clang::X86::BI__builtin_ia32_minsh_round_mask;
14414 return EvaluateScalarFpRoundMaskBinOp(
14415 [IsMin](const APFloat &A, const APFloat &B,
14416 std::optional<APSInt> RoundingMode) -> std::optional<APFloat> {
14417 return EvalScalarMinMaxFp(A, B, RoundingMode, IsMin);
14418 });
14419 }
14420
14421 case clang::X86::BI__builtin_ia32_maxps:
14422 case clang::X86::BI__builtin_ia32_maxpd:
14423 case clang::X86::BI__builtin_ia32_maxps256:
14424 case clang::X86::BI__builtin_ia32_maxpd256:
14425 case clang::X86::BI__builtin_ia32_maxps512:
14426 case clang::X86::BI__builtin_ia32_maxpd512:
14427 case clang::X86::BI__builtin_ia32_maxph128:
14428 case clang::X86::BI__builtin_ia32_maxph256:
14429 case clang::X86::BI__builtin_ia32_maxph512:
14430 return EvaluateFpBinOpExpr(
14431 [](const APFloat &A, const APFloat &B,
14432 std::optional<APSInt>) -> std::optional<APFloat> {
14433 if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
14434 B.isInfinity() || B.isDenormal())
14435 return std::nullopt;
14436 if (A.isZero() && B.isZero())
14437 return B;
14438 return llvm::maximum(A, B);
14439 });
14440
14441 case clang::X86::BI__builtin_ia32_maxss:
14442 case clang::X86::BI__builtin_ia32_maxsd:
14443 return EvaluateFpBinOpExpr(
14444 [](const APFloat &A, const APFloat &B,
14445 std::optional<APSInt> RoundingMode) -> std::optional<APFloat> {
14446 return EvalScalarMinMaxFp(A, B, RoundingMode, /*IsMin=*/false);
14447 },
14448 /*IsScalar=*/true);
14449
14450 case clang::X86::BI__builtin_ia32_vcvtps2ph:
14451 case clang::X86::BI__builtin_ia32_vcvtps2ph256: {
14452 APValue SrcVec;
14453 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SrcVec))
14454 return false;
14455
14456 APSInt Imm;
14457 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: Imm, Info))
14458 return false;
14459
14460 const auto *SrcVTy = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
14461 unsigned SrcNumElems = SrcVTy->getNumElements();
14462 const auto *DstVTy = E->getType()->castAs<VectorType>();
14463 unsigned DstNumElems = DstVTy->getNumElements();
14464 QualType DstElemTy = DstVTy->getElementType();
14465
14466 const llvm::fltSemantics &HalfSem =
14467 Info.Ctx.getFloatTypeSemantics(T: Info.Ctx.HalfTy);
14468
14469 int ImmVal = Imm.getZExtValue();
14470 bool UseMXCSR = (ImmVal & 4) != 0;
14471 bool IsFPConstrained =
14472 E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()).isFPConstrained();
14473
14474 llvm::RoundingMode RM;
14475 if (!UseMXCSR) {
14476 switch (ImmVal & 3) {
14477 case 0:
14478 RM = llvm::RoundingMode::NearestTiesToEven;
14479 break;
14480 case 1:
14481 RM = llvm::RoundingMode::TowardNegative;
14482 break;
14483 case 2:
14484 RM = llvm::RoundingMode::TowardPositive;
14485 break;
14486 case 3:
14487 RM = llvm::RoundingMode::TowardZero;
14488 break;
14489 default:
14490 llvm_unreachable("Invalid immediate rounding mode");
14491 }
14492 } else {
14493 RM = llvm::RoundingMode::NearestTiesToEven;
14494 }
14495
14496 SmallVector<APValue, 8> ResultElements;
14497 ResultElements.reserve(N: DstNumElems);
14498
14499 for (unsigned I = 0; I < SrcNumElems; ++I) {
14500 APFloat SrcVal = SrcVec.getVectorElt(I).getFloat();
14501
14502 bool LostInfo;
14503 APFloat::opStatus St = SrcVal.convert(ToSemantics: HalfSem, RM, losesInfo: &LostInfo);
14504
14505 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
14506 Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_rounding);
14507 return false;
14508 }
14509
14510 APSInt DstInt(SrcVal.bitcastToAPInt(),
14511 DstElemTy->isUnsignedIntegerOrEnumerationType());
14512 ResultElements.push_back(Elt: APValue(DstInt));
14513 }
14514
14515 if (DstNumElems > SrcNumElems) {
14516 APSInt Zero = Info.Ctx.MakeIntValue(Value: 0, Type: DstElemTy);
14517 for (unsigned I = SrcNumElems; I < DstNumElems; ++I) {
14518 ResultElements.push_back(Elt: APValue(Zero));
14519 }
14520 }
14521
14522 return Success(V: ResultElements, E);
14523 }
14524 case X86::BI__builtin_ia32_vperm2f128_pd256:
14525 case X86::BI__builtin_ia32_vperm2f128_ps256:
14526 case X86::BI__builtin_ia32_vperm2f128_si256:
14527 case X86::BI__builtin_ia32_permti256: {
14528 unsigned NumElements =
14529 E->getArg(Arg: 0)->getType()->getAs<VectorType>()->getNumElements();
14530 unsigned PreservedBitsCnt = NumElements >> 2;
14531 APValue R;
14532 if (!evalShuffleGeneric(
14533 Info, Call: E, Out&: R,
14534 GetSourceIndex: [PreservedBitsCnt](unsigned DstIdx, unsigned ShuffleMask) {
14535 unsigned ControlBitsCnt = DstIdx >> PreservedBitsCnt << 2;
14536 unsigned ControlBits = ShuffleMask >> ControlBitsCnt;
14537
14538 if (ControlBits & 0b1000)
14539 return std::make_pair(x: 0u, y: -1);
14540
14541 unsigned SrcVecIdx = (ControlBits & 0b10) >> 1;
14542 unsigned PreservedBitsMask = (1 << PreservedBitsCnt) - 1;
14543 int SrcIdx = ((ControlBits & 0b1) << PreservedBitsCnt) |
14544 (DstIdx & PreservedBitsMask);
14545 return std::make_pair(x&: SrcVecIdx, y&: SrcIdx);
14546 }))
14547 return false;
14548 return Success(V: R, E);
14549 }
14550 }
14551}
14552
14553bool VectorExprEvaluator::VisitConvertVectorExpr(const ConvertVectorExpr *E) {
14554 APValue Source;
14555 QualType SourceVecType = E->getSrcExpr()->getType();
14556 if (!EvaluateAsRValue(Info, E: E->getSrcExpr(), Result&: Source))
14557 return false;
14558
14559 QualType DestTy = E->getType()->castAs<VectorType>()->getElementType();
14560 QualType SourceTy = SourceVecType->castAs<VectorType>()->getElementType();
14561
14562 const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
14563
14564 auto SourceLen = Source.getVectorLength();
14565 SmallVector<APValue, 4> ResultElements;
14566 ResultElements.reserve(N: SourceLen);
14567 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
14568 APValue Elt;
14569 if (!handleVectorElementCast(Info, FPO, E, SourceTy, DestTy,
14570 Original: Source.getVectorElt(I: EltNum), Result&: Elt))
14571 return false;
14572 ResultElements.push_back(Elt: std::move(Elt));
14573 }
14574
14575 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
14576}
14577
14578static bool handleVectorShuffle(EvalInfo &Info, const ShuffleVectorExpr *E,
14579 QualType ElemType, APValue const &VecVal1,
14580 APValue const &VecVal2, unsigned EltNum,
14581 APValue &Result) {
14582 unsigned const TotalElementsInInputVector1 = VecVal1.getVectorLength();
14583 unsigned const TotalElementsInInputVector2 = VecVal2.getVectorLength();
14584
14585 APSInt IndexVal = E->getShuffleMaskIdx(N: EltNum);
14586 int64_t index = IndexVal.getExtValue();
14587 // The spec says that -1 should be treated as undef for optimizations,
14588 // but in constexpr we'd have to produce an APValue::Indeterminate,
14589 // which is prohibited from being a top-level constant value. Emit a
14590 // diagnostic instead.
14591 if (index == -1) {
14592 Info.FFDiag(
14593 E, DiagId: diag::err_shufflevector_minus_one_is_undefined_behavior_constexpr)
14594 << EltNum;
14595 return false;
14596 }
14597
14598 if (index < 0 ||
14599 index >= TotalElementsInInputVector1 + TotalElementsInInputVector2)
14600 llvm_unreachable("Out of bounds shuffle index");
14601
14602 if (index >= TotalElementsInInputVector1)
14603 Result = VecVal2.getVectorElt(I: index - TotalElementsInInputVector1);
14604 else
14605 Result = VecVal1.getVectorElt(I: index);
14606 return true;
14607}
14608
14609bool VectorExprEvaluator::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
14610 // FIXME: Unary shuffle with mask not currently supported.
14611 if (E->getNumSubExprs() == 2)
14612 return Error(E);
14613 APValue VecVal1;
14614 const Expr *Vec1 = E->getExpr(Index: 0);
14615 if (!EvaluateAsRValue(Info, E: Vec1, Result&: VecVal1))
14616 return false;
14617 APValue VecVal2;
14618 const Expr *Vec2 = E->getExpr(Index: 1);
14619 if (!EvaluateAsRValue(Info, E: Vec2, Result&: VecVal2))
14620 return false;
14621
14622 VectorType const *DestVecTy = E->getType()->castAs<VectorType>();
14623 QualType DestElTy = DestVecTy->getElementType();
14624
14625 auto TotalElementsInOutputVector = DestVecTy->getNumElements();
14626
14627 SmallVector<APValue, 4> ResultElements;
14628 ResultElements.reserve(N: TotalElementsInOutputVector);
14629 for (unsigned EltNum = 0; EltNum < TotalElementsInOutputVector; ++EltNum) {
14630 APValue Elt;
14631 if (!handleVectorShuffle(Info, E, ElemType: DestElTy, VecVal1, VecVal2, EltNum, Result&: Elt))
14632 return false;
14633 ResultElements.push_back(Elt: std::move(Elt));
14634 }
14635
14636 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
14637}
14638
14639//===----------------------------------------------------------------------===//
14640// Matrix Evaluation
14641//===----------------------------------------------------------------------===//
14642
namespace {
/// Evaluator for prvalues of constant matrix type. A matrix result is an
/// APValue holding a flat list of element APValues (row-major, per the
/// matrix APValue representation) plus the row/column counts.
class MatrixExprEvaluator : public ExprEvaluatorBase<MatrixExprEvaluator> {
  APValue &Result; // Destination for the evaluated matrix value.

public:
  MatrixExprEvaluator(EvalInfo &Info, APValue &Result)
      : ExprEvaluatorBaseTy(Info), Result(Result) {}

  /// Store a matrix result built from a flat list of element values.
  /// \p M must contain exactly NumRows * NumColumns elements for E's type.
  bool Success(ArrayRef<APValue> M, const Expr *E) {
    auto *CMTy = E->getType()->castAs<ConstantMatrixType>();
    assert(M.size() == CMTy->getNumElementsFlattened());
    // FIXME: remove this APValue copy.
    Result = APValue(M.data(), CMTy->getNumRows(), CMTy->getNumColumns());
    return true;
  }
  /// Store an already-formed matrix APValue as the result.
  bool Success(const APValue &M, const Expr *E) {
    assert(M.isMatrix() && "expected matrix");
    Result = M;
    return true;
  }

  bool VisitCastExpr(const CastExpr *E);
  bool VisitInitListExpr(const InitListExpr *E);
};
} // end anonymous namespace
14668
14669static bool EvaluateMatrix(const Expr *E, APValue &Result, EvalInfo &Info) {
14670 assert(E->isPRValue() && E->getType()->isConstantMatrixType() &&
14671 "not a matrix prvalue");
14672 return MatrixExprEvaluator(Info, Result).Visit(S: E);
14673}
14674
/// Evaluate casts that produce a constant matrix. Handles the HLSL aggregate
/// splat and elementwise casts here; all other cast kinds defer to the base
/// class.
bool MatrixExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const auto *MT = E->getType()->castAs<ConstantMatrixType>();
  unsigned NumRows = MT->getNumRows();
  unsigned NumCols = MT->getNumColumns();
  unsigned NElts = NumRows * NumCols;
  QualType EltTy = MT->getElementType();
  const Expr *SE = E->getSubExpr();

  switch (E->getCastKind()) {
  case CK_HLSLAggregateSplatCast: {
    // Evaluate the single source scalar...
    APValue Val;
    QualType ValTy;

    if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy))
      return false;

    // ...cast it once to the matrix element type...
    APValue CastedVal;
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!handleScalarCast(Info, FPO, E, SourceTy: ValTy, DestTy: EltTy, Original: Val, Result&: CastedVal))
      return false;

    // ...and replicate it across every matrix element.
    SmallVector<APValue, 16> SplatEls(NElts, CastedVal);
    return Success(M: SplatEls, E);
  }
  case CK_HLSLElementwiseCast: {
    // Flatten the source aggregate into its scalar values and their types.
    SmallVector<APValue> SrcVals;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals, SrcTypes))
      return false;

    // Cast each flattened source scalar to the (uniform) matrix element type.
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    SmallVector<QualType, 16> DestTypes(NElts, EltTy);
    SmallVector<APValue, 16> ResultEls(NElts);
    if (!handleElementwiseCast(Info, E, FPO, Elements&: SrcVals, SrcTypes, DestTypes,
                               Results&: ResultEls))
      return false;
    return Success(M: ResultEls, E);
  }
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);
  }
}
14718
14719bool MatrixExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
14720 const auto *MT = E->getType()->castAs<ConstantMatrixType>();
14721 QualType EltTy = MT->getElementType();
14722
14723 assert(E->getNumInits() == MT->getNumElementsFlattened() &&
14724 "Expected number of elements in initializer list to match the number "
14725 "of matrix elements");
14726
14727 SmallVector<APValue, 16> Elements;
14728 Elements.reserve(N: MT->getNumElementsFlattened());
14729
14730 // The following loop assumes the elements of the matrix InitListExpr are in
14731 // row-major order, which matches the row-major ordering assumption of the
14732 // matrix APValue.
14733 for (unsigned I = 0, N = MT->getNumElementsFlattened(); I < N; ++I) {
14734 if (EltTy->isIntegerType()) {
14735 llvm::APSInt IntVal;
14736 if (!EvaluateInteger(E: E->getInit(Init: I), Result&: IntVal, Info))
14737 return false;
14738 Elements.push_back(Elt: APValue(IntVal));
14739 } else {
14740 llvm::APFloat FloatVal(0.0);
14741 if (!EvaluateFloat(E: E->getInit(Init: I), Result&: FloatVal, Info))
14742 return false;
14743 Elements.push_back(Elt: APValue(FloatVal));
14744 }
14745 }
14746
14747 return Success(M: Elements, E);
14748}
14749
14750//===----------------------------------------------------------------------===//
14751// Array Evaluation
14752//===----------------------------------------------------------------------===//
14753
namespace {
  /// Evaluator for prvalues of array type. \p This designates the array
  /// object being initialized (needed so element initializers can refer to
  /// the correct subobject), and \p Result receives an APValue of array kind.
  class ArrayExprEvaluator
    : public ExprEvaluatorBase<ArrayExprEvaluator> {
    const LValue &This;   // The array object being initialized.
    APValue &Result;      // Destination for the evaluated array value.
  public:

    ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
      : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}

    /// Store an already-formed array APValue as the result.
    bool Success(const APValue &V, const Expr *E) {
      assert(V.isArray() && "expected array");
      Result = V;
      return true;
    }

    /// Zero-initialize the array: build an array APValue with no explicitly
    /// initialized elements and a zero-initialized filler.
    bool ZeroInitialization(const Expr *E) {
      const ConstantArrayType *CAT =
          Info.Ctx.getAsConstantArrayType(T: E->getType());
      if (!CAT) {
        if (E->getType()->isIncompleteArrayType()) {
          // We can be asked to zero-initialize a flexible array member; this
          // is represented as an ImplicitValueInitExpr of incomplete array
          // type. In this case, the array has zero elements.
          Result = APValue(APValue::UninitArray(), 0, 0);
          return true;
        }
        // FIXME: We could handle VLAs here.
        return Error(E);
      }

      Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize());
      // A zero-size array has no filler slot; nothing more to do.
      if (!Result.hasArrayFiller())
        return true;

      // Zero-initialize all elements.
      LValue Subobject = This;
      Subobject.addArray(Info, E, CAT);
      ImplicitValueInitExpr VIE(CAT->getElementType());
      return EvaluateInPlace(Result&: Result.getArrayFiller(), Info, This: Subobject, E: &VIE);
    }

    bool VisitCallExpr(const CallExpr *E) {
      // Pass &This as the result slot so RVO-style construction can target
      // the array object directly.
      return handleCallExpr(E, Result, ResultSlot: &This);
    }
    bool VisitCastExpr(const CastExpr *E);
    // AllocType, when non-null, overrides E's type (used for array new).
    bool VisitInitListExpr(const InitListExpr *E,
                           QualType AllocType = QualType());
    bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E,
                               const LValue &Subobject,
                               APValue *Value, QualType Type);
    bool VisitStringLiteral(const StringLiteral *E,
                            QualType AllocType = QualType()) {
      expandStringLiteral(Info, S: E, Result, AllocType);
      return true;
    }
    bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
    bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
                                         ArrayRef<Expr *> Args,
                                         const Expr *ArrayFiller,
                                         QualType AllocType = QualType());
  };
} // end anonymous namespace
14819
14820static bool EvaluateArray(const Expr *E, const LValue &This,
14821 APValue &Result, EvalInfo &Info) {
14822 assert(!E->isValueDependent());
14823 assert(E->isPRValue() && E->getType()->isArrayType() &&
14824 "not an array prvalue");
14825 return ArrayExprEvaluator(Info, This, Result).Visit(S: E);
14826}
14827
14828static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
14829 APValue &Result, const InitListExpr *ILE,
14830 QualType AllocType) {
14831 assert(!ILE->isValueDependent());
14832 assert(ILE->isPRValue() && ILE->getType()->isArrayType() &&
14833 "not an array prvalue");
14834 return ArrayExprEvaluator(Info, This, Result)
14835 .VisitInitListExpr(E: ILE, AllocType);
14836}
14837
14838static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
14839 APValue &Result,
14840 const CXXConstructExpr *CCE,
14841 QualType AllocType) {
14842 assert(!CCE->isValueDependent());
14843 assert(CCE->isPRValue() && CCE->getType()->isArrayType() &&
14844 "not an array prvalue");
14845 return ArrayExprEvaluator(Info, This, Result)
14846 .VisitCXXConstructExpr(E: CCE, Subobject: This, Value: &Result, Type: AllocType);
14847}
14848
14849// Return true iff the given array filler may depend on the element index.
14850static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
14851 // For now, just allow non-class value-initialization and initialization
14852 // lists comprised of them.
14853 if (isa<ImplicitValueInitExpr>(Val: FillerExpr))
14854 return false;
14855 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: FillerExpr)) {
14856 for (unsigned I = 0, E = ILE->getNumInits(); I != E; ++I) {
14857 if (MaybeElementDependentArrayFiller(FillerExpr: ILE->getInit(Init: I)))
14858 return true;
14859 }
14860
14861 if (ILE->hasArrayFiller() &&
14862 MaybeElementDependentArrayFiller(FillerExpr: ILE->getArrayFiller()))
14863 return true;
14864
14865 return false;
14866 }
14867 return true;
14868}
14869
/// Evaluate casts that produce an array. Only the HLSL aggregate splat and
/// elementwise casts are handled here; everything else defers to the base
/// class.
bool ArrayExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const Expr *SE = E->getSubExpr();

  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);
  case CK_HLSLAggregateSplatCast: {
    // Evaluate the single source scalar...
    APValue Val;
    QualType ValTy;

    if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy))
      return false;

    // ...replicate it once per flattened destination element...
    unsigned NEls = elementwiseSize(Info, BaseTy: E->getType());

    SmallVector<APValue> SplatEls(NEls, Val);
    SmallVector<QualType> SplatType(NEls, ValTy);

    // cast the elements
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SplatEls,
                            ElTypes&: SplatType))
      return false;

    return true;
  }
  case CK_HLSLElementwiseCast: {
    // Flatten the source aggregate into its scalar values and their types.
    SmallVector<APValue> SrcEls;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals&: SrcEls, SrcTypes))
      return false;

    // cast the elements
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts())
;
    if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SrcEls,
                            ElTypes&: SrcTypes))
      return false;
    return true;
  }
  }
}
14912
/// Evaluate an array initializer list. \p AllocType, when non-null, overrides
/// the expression's type (used when evaluating array new-expressions).
bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
                                           QualType AllocType) {
  const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
      T: AllocType.isNull() ? E->getType() : AllocType);
  if (!CAT)
    return Error(E);

  // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
  // an appropriately-typed string literal enclosed in braces.
  if (E->isStringLiteralInit()) {
    auto *SL = dyn_cast<StringLiteral>(Val: E->getInit(Init: 0)->IgnoreParenImpCasts());
    // FIXME: Support ObjCEncodeExpr here once we support it in
    // ArrayExprEvaluator generally.
    if (!SL)
      return Error(E);
    return VisitStringLiteral(E: SL, AllocType);
  }
  // Any other transparent list init will need proper handling of the
  // AllocType; we can't just recurse to the inner initializer.
  assert(!E->isTransparent() &&
         "transparent array list initialization is not string literal init?");

  return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->inits(), ArrayFiller: E->getArrayFiller(),
                                         AllocType);
}
14938
/// Evaluate an array initialized from an init-list or a paren-list.
///
/// \param ExprToVisit the initializing expression (used for designators and
///        diagnostics).
/// \param Args the explicit element initializers.
/// \param ArrayFiller initializer for trailing elements with no explicit
///        initializer; may depend on the element index.
/// \param AllocType if non-null, overrides ExprToVisit's type as the array
///        type being initialized (used for new-expressions).
bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr(
    const Expr *ExprToVisit, ArrayRef<Expr *> Args, const Expr *ArrayFiller,
    QualType AllocType) {
  const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
      T: AllocType.isNull() ? ExprToVisit->getType() : AllocType);

  bool Success = true;

  assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) &&
         "zero-initialized array shouldn't have any initialized elts");
  // If Result was zero-initialized earlier, save its filler so the zero value
  // can be re-applied after Result is replaced below.
  APValue Filler;
  if (Result.isArray() && Result.hasArrayFiller())
    Filler = Result.getArrayFiller();

  unsigned NumEltsToInit = Args.size();
  unsigned NumElts = CAT->getZExtSize();

  // If the initializer might depend on the array index, run it for each
  // array element.
  if (NumEltsToInit != NumElts &&
      MaybeElementDependentArrayFiller(FillerExpr: ArrayFiller)) {
    NumEltsToInit = NumElts;
  } else {
    // An EmbedExpr initializer expands to several elements; account for that
    // (minus the one slot the expression itself already occupies in Args).
    for (auto *Init : Args) {
      if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts()))
        NumEltsToInit += EmbedS->getDataElementCount() - 1;
    }
    if (NumEltsToInit > NumElts)
      NumEltsToInit = NumElts;
  }

  LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: "
                          << NumEltsToInit << ".\n");

  Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts);

  // If the array was previously zero-initialized, preserve the
  // zero-initialized values.
  if (Filler.hasValue()) {
    for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
      Result.getArrayInitializedElt(I) = Filler;
    if (Result.hasArrayFiller())
      Result.getArrayFiller() = Filler;
  }

  LValue Subobject = This;
  Subobject.addArray(Info, E: ExprToVisit, CAT);
  // Evaluate one initializer into element ArrayIndex and advance the
  // designator to the next element. Returns false only on unrecoverable
  // failure; recoverable failures just clear Success.
  auto Eval = [&](const Expr *Init, unsigned ArrayIndex) {
    if (Init->isValueDependent())
      return EvaluateDependentExpr(E: Init, Info);

    if (!EvaluateInPlace(Result&: Result.getArrayInitializedElt(I: ArrayIndex), Info,
                         This: Subobject, E: Init) ||
        !HandleLValueArrayAdjustment(Info, E: Init, LVal&: Subobject,
                                     EltTy: CAT->getElementType(), Adjustment: 1)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }
    return true;
  };
  // Index walks the initializer list (falling back to ArrayFiller past its
  // end); ArrayIndex tracks the destination element, which can advance faster
  // than Index when an EmbedExpr expands to multiple elements.
  unsigned ArrayIndex = 0;
  QualType DestTy = CAT->getElementType();
  APSInt Value(Info.Ctx.getTypeSize(T: DestTy), DestTy->isUnsignedIntegerType());
  for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
    const Expr *Init = Index < Args.size() ? Args[Index] : ArrayFiller;
    if (ArrayIndex >= NumEltsToInit)
      break;
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) {
      StringLiteral *SL = EmbedS->getDataStringLiteral();
      for (unsigned I = EmbedS->getStartingElementPos(),
                    N = EmbedS->getDataElementCount();
           I != EmbedS->getStartingElementPos() + N; ++I) {
        Value = SL->getCodeUnit(i: I);
        if (DestTy->isIntegerType()) {
          Result.getArrayInitializedElt(I: ArrayIndex) = APValue(Value);
        } else {
          assert(DestTy->isFloatingType() && "unexpected type");
          // Embedded data initializing a floating array goes through an
          // int-to-float conversion under the initializer's FP options.
          const FPOptions FPO =
              Init->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
          APFloat FValue(0.0);
          if (!HandleIntToFloatCast(Info, E: Init, FPO, SrcType: EmbedS->getType(), Value,
                                    DestType: DestTy, Result&: FValue))
            return false;
          Result.getArrayInitializedElt(I: ArrayIndex) = APValue(FValue);
        }
        ArrayIndex++;
      }
    } else {
      if (!Eval(Init, ArrayIndex))
        return false;
      ++ArrayIndex;
    }
  }

  if (!Result.hasArrayFiller())
    return Success;

  // If we get here, we have a trivial filler, which we can just evaluate
  // once and splat over the rest of the array elements.
  assert(ArrayFiller && "no array filler for incomplete init list");
  return EvaluateInPlace(Result&: Result.getArrayFiller(), Info, This: Subobject,
                         E: ArrayFiller) &&
         Success;
}
15044
/// Evaluate an ArrayInitLoopExpr: evaluate the common expression (if any)
/// once, then evaluate the sub-expression once per array element.
bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
  LValue CommonLV;
  // The common expression (e.g. the source array of a copy) is materialized
  // as a full-expression-scoped temporary and evaluated a single time.
  if (E->getCommonExpr() &&
      !Evaluate(Result&: Info.CurrentCall->createTemporary(
                    Key: E->getCommonExpr(),
                    T: getStorageType(Ctx: Info.Ctx, E: E->getCommonExpr()),
                    Scope: ScopeKind::FullExpression, LV&: CommonLV),
                Info, E: E->getCommonExpr()->getSourceExpr()))
    return false;

  auto *CAT = cast<ConstantArrayType>(Val: E->getType()->castAsArrayTypeUnsafe());

  uint64_t Elements = CAT->getZExtSize();
  Result = APValue(APValue::UninitArray(), Elements, Elements);

  LValue Subobject = This;
  Subobject.addArray(Info, E, CAT);

  bool Success = true;
  for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) {
    // C++ [class.temporary]/5
    // There are four contexts in which temporaries are destroyed at a different
    // point than the end of the full-expression. [...] The second context is
    // when a copy constructor is called to copy an element of an array while
    // the entire array is copied [...]. In either case, if the constructor has
    // one or more default arguments, the destruction of every temporary created
    // in a default argument is sequenced before the construction of the next
    // array element, if any.
    FullExpressionRAII Scope(Info);

    if (!EvaluateInPlace(Result&: Result.getArrayInitializedElt(I: Index),
                         Info, This: Subobject, E: E->getSubExpr()) ||
        !HandleLValueArrayAdjustment(Info, E, LVal&: Subobject,
                                     EltTy: CAT->getElementType(), Adjustment: 1)) {
      // Note the failure but keep evaluating remaining elements if the
      // evaluation mode allows it (to collect further diagnostics).
      if (!Info.noteFailure())
        return false;
      Success = false;
    }

    // Make sure we run the destructors too.
    Scope.destroy();
  }

  return Success;
}
15090
// Entry point for a constructor call initializing this whole array: delegate
// to the recursive overload, starting at the full array object
// ('This'/'Result') with the expression's own type.
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  return VisitCXXConstructExpr(E, Subobject: This, Value: &Result, Type: E->getType());
}
15094
/// Evaluate a constructor call for the subobject at \p Subobject / \p Value.
/// For array types this recurses element by element; for record types it
/// hands off to RecordExprEvaluator.
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
                                               const LValue &Subobject,
                                               APValue *Value,
                                               QualType Type) {
  // A pre-existing value means the subobject was zero-initialized first.
  bool HadZeroInit = Value->hasValue();

  if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T: Type)) {
    unsigned FinalSize = CAT->getZExtSize();

    // Preserve the array filler if we had prior zero-initialization.
    APValue Filler =
        HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
                                               : APValue();

    *Value = APValue(APValue::UninitArray(), 0, FinalSize);
    if (FinalSize == 0)
      return true;

    bool HasTrivialConstructor = CheckTrivialDefaultConstructor(
        Info, Loc: E->getExprLoc(), CD: E->getConstructor(),
        IsValueInitialization: E->requiresZeroInitialization());
    LValue ArrayElt = Subobject;
    ArrayElt.addArray(Info, E, CAT);
    // We do the whole initialization in two passes, first for just one element,
    // then for the whole array. It's possible we may find out we can't do const
    // init in the first pass, in which case we avoid allocating a potentially
    // large array. We don't do more passes because expanding array requires
    // copying the data, which is wasteful.
    for (const unsigned N : {1u, FinalSize}) {
      unsigned OldElts = Value->getArrayInitializedElts();
      if (OldElts == N)
        break;

      // Expand the array to appropriate size.
      APValue NewValue(APValue::UninitArray(), N, FinalSize);
      for (unsigned I = 0; I < OldElts; ++I)
        NewValue.getArrayInitializedElt(I).swap(
            RHS&: Value->getArrayInitializedElt(I));
      Value->swap(RHS&: NewValue);

      // Re-apply the saved zero-init filler to the newly exposed elements.
      if (HadZeroInit)
        for (unsigned I = OldElts; I < N; ++I)
          Value->getArrayInitializedElt(I) = Filler;

      if (HasTrivialConstructor && N == FinalSize && FinalSize != 1) {
        // If we have a trivial constructor, only evaluate it once and copy
        // the result into all the array elements.
        APValue &FirstResult = Value->getArrayInitializedElt(I: 0);
        for (unsigned I = OldElts; I < FinalSize; ++I)
          Value->getArrayInitializedElt(I) = FirstResult;
      } else {
        for (unsigned I = OldElts; I < N; ++I) {
          if (!VisitCXXConstructExpr(E, Subobject: ArrayElt,
                                     Value: &Value->getArrayInitializedElt(I),
                                     Type: CAT->getElementType()) ||
              !HandleLValueArrayAdjustment(Info, E, LVal&: ArrayElt,
                                           EltTy: CAT->getElementType(), Adjustment: 1))
            return false;
          // When checking for const initialization any diagnostic is
          // considered an error.
          if (Info.EvalStatus.Diag && !Info.EvalStatus.Diag->empty() &&
              !Info.keepEvaluatingAfterFailure())
            return false;
        }
      }
    }

    return true;
  }

  if (!Type->isRecordType())
    return Error(E);

  // Non-array case: a record element; evaluate its constructor in place.
  return RecordExprEvaluator(Info, Subobject, *Value)
      .VisitCXXConstructExpr(E, T: Type);
}
15171
// A parenthesized aggregate init of an array, e.g. T arr(...) — forward to
// the shared init-list/paren-list evaluator with no AllocType override.
bool ArrayExprEvaluator::VisitCXXParenListInitExpr(
    const CXXParenListInitExpr *E) {
  assert(E->getType()->isConstantArrayType() &&
         "Expression result is not a constant array type");

  return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs(),
                                         ArrayFiller: E->getArrayFiller());
}
15180
15181//===----------------------------------------------------------------------===//
15182// Integer Evaluation
15183//
15184// As a GNU extension, we support casting pointers to sufficiently-wide integer
15185// types and back in constant folding. Integer values are thus represented
15186// either as an integer-valued APValue, or as an lvalue-valued APValue.
15187//===----------------------------------------------------------------------===//
15188
15189namespace {
/// Evaluator for rvalues of integral or enumeration type. The result is
/// normally an integer-valued APValue but, per the GNU extension described in
/// the section comment above, may also be an lvalue (a pointer cast to an
/// integer type).
class IntExprEvaluator
  : public ExprEvaluatorBase<IntExprEvaluator> {
  // Destination for the evaluated value.
  APValue &Result;
public:
  IntExprEvaluator(EvalInfo &info, APValue &result)
      : ExprEvaluatorBaseTy(info), Result(result) {}

  // Store a signed/unsigned integer already matching E's type exactly
  // (signedness and bit-width are asserted, not adjusted).
  bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
    assert(E->getType()->isIntegralOrEnumerationType() &&
           "Invalid evaluation result.");
    assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
           "Invalid evaluation result.");
    assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
           "Invalid evaluation result.");
    Result = APValue(SI);
    return true;
  }
  bool Success(const llvm::APSInt &SI, const Expr *E) {
    return Success(SI, E, Result);
  }

  // Store a raw APInt; the signedness is taken from E's type.
  bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
    assert(E->getType()->isIntegralOrEnumerationType() &&
           "Invalid evaluation result.");
    assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
           "Invalid evaluation result.");
    Result = APValue(APSInt(I));
    Result.getInt().setIsUnsigned(
        E->getType()->isUnsignedIntegerOrEnumerationType());
    return true;
  }
  bool Success(const llvm::APInt &I, const Expr *E) {
    return Success(I, E, Result);
  }

  // Store a plain integer, converted to E's type via MakeIntValue.
  bool Success(uint64_t Value, const Expr *E, APValue &Result) {
    assert(E->getType()->isIntegralOrEnumerationType() &&
           "Invalid evaluation result.");
    Result = APValue(Info.Ctx.MakeIntValue(Value, Type: E->getType()));
    return true;
  }
  bool Success(uint64_t Value, const Expr *E) {
    return Success(Value, E, Result);
  }

  bool Success(CharUnits Size, const Expr *E) {
    return Success(Value: Size.getQuantity(), E);
  }

  bool Success(const APValue &V, const Expr *E) {
    // C++23 [expr.const]p8 If we have a variable that is unknown reference or
    // pointer allow further evaluation of the value.
    if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate() ||
        V.allowConstexprUnknown()) {
      Result = V;
      return true;
    }
    return Success(SI: V.getInt(), E);
  }

  bool ZeroInitialization(const Expr *E) { return Success(Value: 0, E); }

  friend std::optional<bool> EvaluateBuiltinIsWithinLifetime(IntExprEvaluator &,
                                                             const CallExpr *);

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitIntegerLiteral(const IntegerLiteral *E) {
    return Success(I: E->getValue(), E);
  }
  bool VisitCharacterLiteral(const CharacterLiteral *E) {
    return Success(Value: E->getValue(), E);
  }

  bool CheckReferencedDecl(const Expr *E, const Decl *D);
  bool VisitDeclRefExpr(const DeclRefExpr *E) {
    // Enumerators can be folded directly; anything else goes through the
    // generic lvalue-to-rvalue path in the base class.
    if (CheckReferencedDecl(E, D: E->getDecl()))
      return true;

    return ExprEvaluatorBaseTy::VisitDeclRefExpr(S: E);
  }
  bool VisitMemberExpr(const MemberExpr *E) {
    if (CheckReferencedDecl(E, D: E->getMemberDecl())) {
      // The base is unused for an enumerator member, but still evaluate it
      // for its diagnostics/side-effect notes.
      VisitIgnoredBaseExpression(E: E->getBase());
      return true;
    }

    return ExprEvaluatorBaseTy::VisitMemberExpr(E);
  }

  bool VisitCallExpr(const CallExpr *E);
  bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitOffsetOfExpr(const OffsetOfExpr *E);
  bool VisitUnaryOperator(const UnaryOperator *E);

  bool VisitCastExpr(const CastExpr* E);
  bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);

  bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
    if (Info.ArrayInitIndex == uint64_t(-1)) {
      // We were asked to evaluate this subexpression independent of the
      // enclosing ArrayInitLoopExpr. We can't do that.
      Info.FFDiag(E);
      return false;
    }
    return Success(Value: Info.ArrayInitIndex, E);
  }

  // Note, GNU defines __null as an integer, not a pointer.
  bool VisitGNUNullExpr(const GNUNullExpr *E) {
    return ZeroInitialization(E);
  }

  bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
    if (E->isStoredAsBoolean())
      return Success(Value: E->getBoolValue(), E);
    // Non-boolean traits carry a precomputed APValue; absent means the trait
    // could not be evaluated.
    if (E->getAPValue().isAbsent())
      return false;
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return Success(SI: E->getAPValue().getInt(), E);
  }

  bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitOpenACCAsteriskSizeExpr(const OpenACCAsteriskSizeExpr *E) {
    // This should not be evaluated during constant expr evaluation, as it
    // should always be in an unevaluated context (the args list of a 'gang' or
    // 'tile' clause).
    return Error(E);
  }

  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);

  bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
  bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
  bool VisitSourceLocExpr(const SourceLocExpr *E);
  bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E);
  bool VisitRequiresExpr(const RequiresExpr *E);
  // FIXME: Missing: array subscript of vector, member of vector
};
15348
/// Evaluator for rvalues of fixed-point type; the result is an APFixedPoint
/// wrapped in an APValue.
class FixedPointExprEvaluator
    : public ExprEvaluatorBase<FixedPointExprEvaluator> {
  // Destination for the evaluated value.
  APValue &Result;

 public:
  FixedPointExprEvaluator(EvalInfo &info, APValue &result)
      : ExprEvaluatorBaseTy(info), Result(result) {}

  // Build a fixed-point value from a raw bit pattern, using the fixed-point
  // semantics of E's type.
  bool Success(const llvm::APInt &I, const Expr *E) {
    return Success(
        V: APFixedPoint(I, Info.Ctx.getFixedPointSemantics(Ty: E->getType())), E);
  }

  bool Success(uint64_t Value, const Expr *E) {
    return Success(
        V: APFixedPoint(Value, Info.Ctx.getFixedPointSemantics(Ty: E->getType())), E);
  }

  bool Success(const APValue &V, const Expr *E) {
    return Success(V: V.getFixedPoint(), E);
  }

  // Canonical overload: store a fixed-point value that already matches the
  // width of E's type (asserted, not adjusted).
  bool Success(const APFixedPoint &V, const Expr *E) {
    assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
    assert(V.getWidth() == Info.Ctx.getIntWidth(E->getType()) &&
           "Invalid evaluation result.");
    Result = APValue(V);
    return true;
  }

  bool ZeroInitialization(const Expr *E) {
    return Success(Value: 0, E);
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Success(I: E->getValue(), E);
  }

  bool VisitCastExpr(const CastExpr *E);
  bool VisitUnaryOperator(const UnaryOperator *E);
  bool VisitBinaryOperator(const BinaryOperator *E);
};
15395} // end anonymous namespace
15396
15397/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
15398/// produce either the integer value or a pointer.
15399///
15400/// GCC has a heinous extension which folds casts between pointer types and
15401/// pointer-sized integral types. We support this by allowing the evaluation of
15402/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
15403/// Some simple arithmetic on such values is supported (they are treated much
15404/// like char*).
15405static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
15406 EvalInfo &Info) {
15407 assert(!E->isValueDependent());
15408 assert(E->isPRValue() && E->getType()->isIntegralOrEnumerationType());
15409 return IntExprEvaluator(Info, Result).Visit(S: E);
15410}
15411
15412static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
15413 assert(!E->isValueDependent());
15414 APValue Val;
15415 if (!EvaluateIntegerOrLValue(E, Result&: Val, Info))
15416 return false;
15417 if (!Val.isInt()) {
15418 // FIXME: It would be better to produce the diagnostic for casting
15419 // a pointer to an integer.
15420 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
15421 return false;
15422 }
15423 Result = Val.getInt();
15424 return true;
15425}
15426
15427bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) {
15428 APValue Evaluated = E->EvaluateInContext(
15429 Ctx: Info.Ctx, DefaultExpr: Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
15430 return Success(V: Evaluated, E);
15431}
15432
15433static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
15434 EvalInfo &Info) {
15435 assert(!E->isValueDependent());
15436 if (E->getType()->isFixedPointType()) {
15437 APValue Val;
15438 if (!FixedPointExprEvaluator(Info, Val).Visit(S: E))
15439 return false;
15440 if (!Val.isFixedPoint())
15441 return false;
15442
15443 Result = Val.getFixedPoint();
15444 return true;
15445 }
15446 return false;
15447}
15448
15449static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
15450 EvalInfo &Info) {
15451 assert(!E->isValueDependent());
15452 if (E->getType()->isIntegerType()) {
15453 auto FXSema = Info.Ctx.getFixedPointSemantics(Ty: E->getType());
15454 APSInt Val;
15455 if (!EvaluateInteger(E, Result&: Val, Info))
15456 return false;
15457 Result = APFixedPoint(Val, FXSema);
15458 return true;
15459 } else if (E->getType()->isFixedPointType()) {
15460 return EvaluateFixedPoint(E, Result, Info);
15461 }
15462 return false;
15463}
15464
15465/// Check whether the given declaration can be directly converted to an integral
15466/// rvalue. If not, no diagnostic is produced; there are other things we can
15467/// try.
15468bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
15469 // Enums are integer constant exprs.
15470 if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(Val: D)) {
15471 // Check for signedness/width mismatches between E type and ECD value.
15472 bool SameSign = (ECD->getInitVal().isSigned()
15473 == E->getType()->isSignedIntegerOrEnumerationType());
15474 bool SameWidth = (ECD->getInitVal().getBitWidth()
15475 == Info.Ctx.getIntWidth(T: E->getType()));
15476 if (SameSign && SameWidth)
15477 return Success(SI: ECD->getInitVal(), E);
15478 else {
15479 // Get rid of mismatch (otherwise Success assertions will fail)
15480 // by computing a new value matching the type of E.
15481 llvm::APSInt Val = ECD->getInitVal();
15482 if (!SameSign)
15483 Val.setIsSigned(!ECD->getInitVal().isSigned());
15484 if (!SameWidth)
15485 Val = Val.extOrTrunc(width: Info.Ctx.getIntWidth(T: E->getType()));
15486 return Success(SI: Val, E);
15487 }
15488 }
15489 return false;
15490}
15491
/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// as GCC.
GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
                                         const LangOptions &LangOpts) {
  assert(!T->isDependentType() && "unexpected dependent type");

  // Classification is done on the canonical type.
  QualType CanTy = T.getCanonicalType();

  switch (CanTy->getTypeClass()) {
#define TYPE(ID, BASE)
#define DEPENDENT_TYPE(ID, BASE) case Type::ID:
#define NON_CANONICAL_TYPE(ID, BASE) case Type::ID:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(ID, BASE) case Type::ID:
#include "clang/AST/TypeNodes.inc"
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("unexpected non-canonical or dependent type");

  case Type::Builtin:
    switch (cast<BuiltinType>(Val&: CanTy)->getKind()) {
#define BUILTIN_TYPE(ID, SINGLETON_ID)
#define SIGNED_TYPE(ID, SINGLETON_ID) \
  case BuiltinType::ID: return GCCTypeClass::Integer;
#define FLOATING_TYPE(ID, SINGLETON_ID) \
  case BuiltinType::ID: return GCCTypeClass::RealFloat;
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \
  case BuiltinType::ID: break;
#include "clang/AST/BuiltinTypes.def"
    case BuiltinType::Void:
      return GCCTypeClass::Void;

    case BuiltinType::Bool:
      return GCCTypeClass::Bool;

    // Unsigned integer and character types all classify as Integer.
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::UShort:
    case BuiltinType::UInt:
    case BuiltinType::ULong:
    case BuiltinType::ULongLong:
    case BuiltinType::UInt128:
      return GCCTypeClass::Integer;

    // Unsigned/saturating fixed-point types have no GCC classification.
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      return GCCTypeClass::None;

    case BuiltinType::NullPtr:

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      return GCCTypeClass::None;

    case BuiltinType::Dependent:
      llvm_unreachable("unexpected dependent type");
    };
    llvm_unreachable("unexpected placeholder type");

  case Type::Enum:
    // GCC classifies enums as Integer in C mode but Enum in C++ mode.
    return LangOpts.CPlusPlus ? GCCTypeClass::Enum : GCCTypeClass::Integer;

  case Type::Pointer:
  case Type::ConstantArray:
  case Type::VariableArray:
  case Type::IncompleteArray:
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::ArrayParameter:
    return GCCTypeClass::Pointer;

  case Type::MemberPointer:
    return CanTy->isMemberDataPointerType()
               ? GCCTypeClass::PointerToDataMember
               : GCCTypeClass::PointerToMemberFunction;

  case Type::Complex:
    return GCCTypeClass::Complex;

  case Type::Record:
    return CanTy->isUnionType() ? GCCTypeClass::Union
                                : GCCTypeClass::ClassOrStruct;

  case Type::Atomic:
    // GCC classifies _Atomic T the same as T.
    return EvaluateBuiltinClassifyType(
        T: CanTy->castAs<AtomicType>()->getValueType(), LangOpts);

  case Type::Vector:
  case Type::ExtVector:
    return GCCTypeClass::Vector;

  case Type::BlockPointer:
  case Type::ConstantMatrix:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Pipe:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    // Classify all other types that don't fit into the regular
    // classification the same way.
    return GCCTypeClass::None;

  case Type::BitInt:
    return GCCTypeClass::BitInt;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("invalid type for expression");
  }

  llvm_unreachable("unexpected type class");
}
15646
15647/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
15648/// as GCC.
15649static GCCTypeClass
15650EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) {
15651 // If no argument was supplied, default to None. This isn't
15652 // ideal, however it is what gcc does.
15653 if (E->getNumArgs() == 0)
15654 return GCCTypeClass::None;
15655
15656 // FIXME: Bizarrely, GCC treats a call with more than one argument as not
15657 // being an ICE, but still folds it to a constant using the type of the first
15658 // argument.
15659 return EvaluateBuiltinClassifyType(T: E->getArg(Arg: 0)->getType(), LangOpts);
15660}
15661
15662/// EvaluateBuiltinConstantPForLValue - Determine the result of
15663/// __builtin_constant_p when applied to the given pointer.
15664///
15665/// A pointer is only "constant" if it is null (or a pointer cast to integer)
15666/// or it points to the first character of a string literal.
15667static bool EvaluateBuiltinConstantPForLValue(const APValue &LV) {
15668 APValue::LValueBase Base = LV.getLValueBase();
15669 if (Base.isNull()) {
15670 // A null base is acceptable.
15671 return true;
15672 } else if (const Expr *E = Base.dyn_cast<const Expr *>()) {
15673 if (!isa<StringLiteral>(Val: E))
15674 return false;
15675 return LV.getLValueOffset().isZero();
15676 } else if (Base.is<TypeInfoLValue>()) {
15677 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
15678 // evaluate to true.
15679 return true;
15680 } else {
15681 // Any other base is not constant enough for GCC.
15682 return false;
15683 }
15684}
15685
/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
/// GCC as we can manage.
static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
  // This evaluation is not permitted to have side-effects, so evaluate it in
  // a speculative evaluation context.
  SpeculativeEvaluationRAII SpeculativeEval(Info);

  // Constant-folding is always enabled for the operand of __builtin_constant_p
  // (even when the enclosing evaluation context otherwise requires a strict
  // language-specific constant expression).
  FoldConstant Fold(Info, true);

  QualType ArgType = Arg->getType();

  // __builtin_constant_p always has one operand. The rules which gcc follows
  // are not precisely documented, but are as follows:
  //
  //  - If the operand is of integral, floating, complex or enumeration type,
  //    and can be folded to a known value of that type, it returns 1.
  //  - If the operand can be folded to a pointer to the first character
  //    of a string literal (or such a pointer cast to an integral type)
  //    or to a null pointer or an integer cast to a pointer, it returns 1.
  //
  // Otherwise, it returns 0.
  //
  // FIXME: GCC also intends to return 1 for literals of aggregate types, but
  // its support for this did not work prior to GCC 9 and is not yet well
  // understood.
  if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
      ArgType->isAnyComplexType() || ArgType->isPointerType() ||
      ArgType->isNullPtrType()) {
    APValue V;
    if (!::EvaluateAsRValue(Info, E: Arg, Result&: V) || Info.EvalStatus.HasSideEffects) {
      // Folding failed (or had side effects); keep the diagnostics produced
      // during the attempt.
      Fold.keepDiagnostics();
      return false;
    }

    // For a pointer (possibly cast to integer), there are special rules.
    if (V.getKind() == APValue::LValue)
      return EvaluateBuiltinConstantPForLValue(LV: V);

    // Otherwise, any constant value is good enough.
    return V.hasValue();
  }

  // Anything else isn't considered to be sufficiently constant.
  return false;
}
15734
15735/// Retrieves the "underlying object type" of the given expression,
15736/// as used by __builtin_object_size.
15737static QualType getObjectType(APValue::LValueBase B) {
15738 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
15739 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
15740 return VD->getType();
15741 } else if (const Expr *E = B.dyn_cast<const Expr*>()) {
15742 if (isa<CompoundLiteralExpr>(Val: E))
15743 return E->getType();
15744 } else if (B.is<TypeInfoLValue>()) {
15745 return B.getTypeInfoType();
15746 } else if (B.is<DynamicAllocLValue>()) {
15747 return B.getDynamicAllocType();
15748 }
15749
15750 return QualType();
15751}
15752
15753/// A more selective version of E->IgnoreParenCasts for
15754/// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
15755/// to change the type of E.
15756/// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
15757///
15758/// Always returns an RValue with a pointer representation.
15759static const Expr *ignorePointerCastsAndParens(const Expr *E) {
15760 assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
15761
15762 const Expr *NoParens = E->IgnoreParens();
15763 const auto *Cast = dyn_cast<CastExpr>(Val: NoParens);
15764 if (Cast == nullptr)
15765 return NoParens;
15766
15767 // We only conservatively allow a few kinds of casts, because this code is
15768 // inherently a simple solution that seeks to support the common case.
15769 auto CastKind = Cast->getCastKind();
15770 if (CastKind != CK_NoOp && CastKind != CK_BitCast &&
15771 CastKind != CK_AddressSpaceConversion)
15772 return NoParens;
15773
15774 const auto *SubExpr = Cast->getSubExpr();
15775 if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isPRValue())
15776 return NoParens;
15777 return ignorePointerCastsAndParens(E: SubExpr);
15778}
15779
/// Checks to see if the given LValue's Designator is at the end of the LValue's
/// record layout. e.g.
///    struct { struct { int a, b; } fst, snd; } obj;
///    obj.fst   // no
///    obj.snd   // yes
///    obj.fst.a // no
///    obj.fst.b // no
///    obj.snd.a // no
///    obj.snd.b // yes
///
/// Please note: this function is specialized for how __builtin_object_size
/// views "objects".
///
/// If this encounters an invalid RecordDecl or otherwise cannot determine the
/// correct result, it will always return true.
static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
  assert(!LVal.Designator.Invalid);

  // True if FD is the last field of its parent record, or if we can't tell
  // (invalid decl), or if the parent is a union (every member spans to the
  // end of a union).
  auto IsLastOrInvalidFieldDecl = [&Ctx](const FieldDecl *FD) {
    const RecordDecl *Parent = FD->getParent();
    if (Parent->isInvalidDecl() || Parent->isUnion())
      return true;
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: Parent);
    return FD->getFieldIndex() + 1 == Layout.getFieldCount();
  };

  // If the base is a MemberExpr, the leading member accesses live in the
  // LValueBase rather than in the designator entries, so check them here.
  auto &Base = LVal.getLValueBase();
  if (auto *ME = dyn_cast_or_null<MemberExpr>(Val: Base.dyn_cast<const Expr *>())) {
    if (auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl())) {
      if (!IsLastOrInvalidFieldDecl(FD))
        return false;
    } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(Val: ME->getMemberDecl())) {
      // Indirect field (member of an anonymous struct/union): every link in
      // the chain must itself end its enclosing record.
      for (auto *FD : IFD->chain()) {
        if (!IsLastOrInvalidFieldDecl(cast<FieldDecl>(Val: FD)))
          return false;
      }
    }
  }

  unsigned I = 0;
  QualType BaseType = getType(B: Base);
  if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
    // If we don't know the array bound, conservatively assume we're looking at
    // the final array element.
    ++I;
    if (BaseType->isIncompleteArrayType())
      BaseType = Ctx.getAsArrayType(T: BaseType)->getElementType();
    else
      BaseType = BaseType->castAs<PointerType>()->getPointeeType();
  }

  // Walk the remaining designator entries; each step must denote the last
  // subobject at its level for the whole designator to be "at the end".
  for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
    const auto &Entry = LVal.Designator.Entries[I];
    if (BaseType->isArrayType()) {
      // Because __builtin_object_size treats arrays as objects, we can ignore
      // the index iff this is the last array in the Designator.
      if (I + 1 == E)
        return true;
      const auto *CAT = cast<ConstantArrayType>(Val: Ctx.getAsArrayType(T: BaseType));
      uint64_t Index = Entry.getAsArrayIndex();
      if (Index + 1 != CAT->getZExtSize())
        return false;
      BaseType = CAT->getElementType();
    } else if (BaseType->isAnyComplexType()) {
      // A complex value is laid out like a two-element array; only the
      // imaginary part (index 1) sits at the end.
      const auto *CT = BaseType->castAs<ComplexType>();
      uint64_t Index = Entry.getAsArrayIndex();
      if (Index != 1)
        return false;
      BaseType = CT->getElementType();
    } else if (auto *FD = getAsField(E: Entry)) {
      if (!IsLastOrInvalidFieldDecl(FD))
        return false;
      BaseType = FD->getType();
    } else {
      // A base-class subobject is never at the end of the derived object
      // for __builtin_object_size purposes.
      assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
      return false;
    }
  }
  return true;
}
15860
15861/// Tests to see if the LValue has a user-specified designator (that isn't
15862/// necessarily valid). Note that this always returns 'true' if the LValue has
15863/// an unsized array as its first designator entry, because there's currently no
15864/// way to tell if the user typed *foo or foo[0].
15865static bool refersToCompleteObject(const LValue &LVal) {
15866 if (LVal.Designator.Invalid)
15867 return false;
15868
15869 if (!LVal.Designator.Entries.empty())
15870 return LVal.Designator.isMostDerivedAnUnsizedArray();
15871
15872 if (!LVal.InvalidBase)
15873 return true;
15874
15875 // If `E` is a MemberExpr, then the first part of the designator is hiding in
15876 // the LValueBase.
15877 const auto *E = LVal.Base.dyn_cast<const Expr *>();
15878 return !E || !isa<MemberExpr>(Val: E);
15879}
15880
15881/// Attempts to detect a user writing into a piece of memory that's impossible
15882/// to figure out the size of by just using types.
15883static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
15884 const SubobjectDesignator &Designator = LVal.Designator;
15885 // Notes:
15886 // - Users can only write off of the end when we have an invalid base. Invalid
15887 // bases imply we don't know where the memory came from.
15888 // - We used to be a bit more aggressive here; we'd only be conservative if
15889 // the array at the end was flexible, or if it had 0 or 1 elements. This
15890 // broke some common standard library extensions (PR30346), but was
15891 // otherwise seemingly fine. It may be useful to reintroduce this behavior
15892 // with some sort of list. OTOH, it seems that GCC is always
15893 // conservative with the last element in structs (if it's an array), so our
15894 // current behavior is more compatible than an explicit list approach would
15895 // be.
15896 auto isFlexibleArrayMember = [&] {
15897 using FAMKind = LangOptions::StrictFlexArraysLevelKind;
15898 FAMKind StrictFlexArraysLevel =
15899 Ctx.getLangOpts().getStrictFlexArraysLevel();
15900
15901 if (Designator.isMostDerivedAnUnsizedArray())
15902 return true;
15903
15904 if (StrictFlexArraysLevel == FAMKind::Default)
15905 return true;
15906
15907 if (Designator.getMostDerivedArraySize() == 0 &&
15908 StrictFlexArraysLevel != FAMKind::IncompleteOnly)
15909 return true;
15910
15911 if (Designator.getMostDerivedArraySize() == 1 &&
15912 StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
15913 return true;
15914
15915 return false;
15916 };
15917
15918 return LVal.InvalidBase &&
15919 Designator.Entries.size() == Designator.MostDerivedPathLength &&
15920 Designator.MostDerivedIsArrayElement && isFlexibleArrayMember() &&
15921 isDesignatorAtObjectEnd(Ctx, LVal);
15922}
15923
15924/// Converts the given APInt to CharUnits, assuming the APInt is unsigned.
15925/// Fails if the conversion would cause loss of precision.
15926static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
15927 CharUnits &Result) {
15928 auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max();
15929 if (Int.ugt(RHS: CharUnitsMax))
15930 return false;
15931 Result = CharUnits::fromQuantity(Quantity: Int.getZExtValue());
15932 return true;
15933}
15934
15935/// If we're evaluating the object size of an instance of a struct that
15936/// contains a flexible array member, add the size of the initializer.
15937static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T,
15938 const LValue &LV, CharUnits &Size) {
15939 if (!T.isNull() && T->isStructureType() &&
15940 T->castAsRecordDecl()->hasFlexibleArrayMember())
15941 if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>())
15942 if (const auto *VD = dyn_cast<VarDecl>(Val: V))
15943 if (VD->hasInit())
15944 Size += VD->getFlexibleArrayInitChars(Ctx: Info.Ctx);
15945}
15946
/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
/// determine how many bytes exist from the beginning of the object to either
/// the end of the current subobject, or the end of the object itself, depending
/// on what the LValue looks like + the value of Type.
///
/// If this returns false, the value of EndOffset is undefined.
static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
                               unsigned Type, const LValue &LVal,
                               CharUnits &EndOffset) {
  bool DetermineForCompleteObject = refersToCompleteObject(LVal);

  // sizeof(Ty), but rejecting null, incomplete, and function types up front.
  auto CheckedHandleSizeof = [&](QualType Ty, CharUnits &Result) {
    if (Ty.isNull())
      return false;

    Ty = Ty.getNonReferenceType();

    if (Ty->isIncompleteType() || Ty->isFunctionType())
      return false;

    return HandleSizeof(Info, Loc: ExprLoc, Type: Ty, Size&: Result);
  };

  // We want to evaluate the size of the entire object. This is a valid fallback
  // for when Type=1 and the designator is invalid, because we're asked for an
  // upper-bound.
  if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) {
    // Type=3 wants a lower bound, so we can't fall back to this.
    if (Type == 3 && !DetermineForCompleteObject)
      return false;

    // An alloc_size attribute on the allocating call tells us the exact
    // allocation size, which beats any type-based reasoning.
    llvm::APInt APEndOffset;
    if (isBaseAnAllocSizeCall(Base: LVal.getLValueBase()) &&
        getBytesReturnedByAllocSizeCall(Ctx: Info.Ctx, LVal, Result&: APEndOffset))
      return convertUnsignedAPIntToCharUnits(Int: APEndOffset, Result&: EndOffset);

    if (LVal.InvalidBase)
      return false;

    QualType BaseTy = getObjectType(B: LVal.getLValueBase());
    const bool Ret = CheckedHandleSizeof(BaseTy, EndOffset);
    // A flexible array member's initializer contributes bytes beyond
    // sizeof(BaseTy); account for them even if sizeof itself failed.
    addFlexibleArrayMemberInitSize(Info, T: BaseTy, LV: LVal, Size&: EndOffset);
    return Ret;
  }

  // We want to evaluate the size of a subobject.
  const SubobjectDesignator &Designator = LVal.Designator;

  // The following is a moderately common idiom in C:
  //
  // struct Foo { int a; char c[1]; };
  // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
  // strcpy(&F->c[0], Bar);
  //
  // In order to not break too much legacy code, we need to support it.
  if (isUserWritingOffTheEnd(Ctx: Info.Ctx, LVal)) {
    // If we can resolve this to an alloc_size call, we can hand that back,
    // because we know for certain how many bytes there are to write to.
    llvm::APInt APEndOffset;
    if (isBaseAnAllocSizeCall(Base: LVal.getLValueBase()) &&
        getBytesReturnedByAllocSizeCall(Ctx: Info.Ctx, LVal, Result&: APEndOffset))
      return convertUnsignedAPIntToCharUnits(Int: APEndOffset, Result&: EndOffset);

    // If we cannot determine the size of the initial allocation, then we can't
    // given an accurate upper-bound. However, we are still able to give
    // conservative lower-bounds for Type=3.
    if (Type == 1)
      return false;
  }

  CharUnits BytesPerElem;
  if (!CheckedHandleSizeof(Designator.MostDerivedType, BytesPerElem))
    return false;

  // According to the GCC documentation, we want the size of the subobject
  // denoted by the pointer. But that's not quite right -- what we actually
  // want is the size of the immediately-enclosing array, if there is one.
  int64_t ElemsRemaining;
  if (Designator.MostDerivedIsArrayElement &&
      Designator.Entries.size() == Designator.MostDerivedPathLength) {
    // Pointing into an array: count the elements from here to the array end
    // (clamped to 0 for one-past-the-end or out-of-bounds indices).
    uint64_t ArraySize = Designator.getMostDerivedArraySize();
    uint64_t ArrayIndex = Designator.Entries.back().getAsArrayIndex();
    ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
  } else {
    // Not an array element: one whole subobject remains, unless we're already
    // one past the end.
    ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
  }

  EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining;
  return true;
}
16037
/// Tries to evaluate the __builtin_object_size for @p E. Returns the number
/// of accessible bytes on success, or std::nullopt if evaluation failed.
///
/// @p Type is the builtin's second argument (0-3): bit 0 requests the
/// enclosing-subobject size rather than the whole object, bit 1 requests a
/// lower bound rather than an upper bound.
static std::optional<uint64_t>
tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type, EvalInfo &Info) {
  // Determine the denoted object.
  LValue LVal;
  {
    // The operand of __builtin_object_size is never evaluated for side-effects.
    // If there are any, but we can determine the pointed-to object anyway, then
    // ignore the side-effects.
    SpeculativeEvaluationRAII SpeculativeEval(Info);
    IgnoreSideEffectsRAII Fold(Info);

    if (E->isGLValue()) {
      // It's possible for us to be given GLValues if we're called via
      // Expr::tryEvaluateObjectSize.
      APValue RVal;
      if (!EvaluateAsRValue(Info, E, Result&: RVal))
        return std::nullopt;
      LVal.setFrom(Ctx: Info.Ctx, V: RVal);
    } else if (!EvaluatePointer(E: ignorePointerCastsAndParens(E), Result&: LVal, Info,
                                /*InvalidBaseOK=*/true))
      return std::nullopt;
  }

  // If we point to before the start of the object, there are no accessible
  // bytes.
  if (LVal.getLValueOffset().isNegative())
    return 0;

  CharUnits EndOffset;
  if (!determineEndOffset(Info, ExprLoc: E->getExprLoc(), Type, LVal, EndOffset))
    return std::nullopt;

  // If we've fallen outside of the end offset, just pretend there's nothing to
  // write to/read from.
  if (EndOffset <= LVal.getLValueOffset())
    return 0;
  return (EndOffset - LVal.getLValueOffset()).getQuantity();
}
16081
16082bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
16083 if (!IsConstantEvaluatedBuiltinCall(E))
16084 return ExprEvaluatorBaseTy::VisitCallExpr(E);
16085 return VisitBuiltinCallExpr(E, BuiltinOp: E->getBuiltinCallee());
16086}
16087
16088static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
16089 APValue &Val, APSInt &Alignment) {
16090 QualType SrcTy = E->getArg(Arg: 0)->getType();
16091 if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: SrcTy, Info, Alignment))
16092 return false;
16093 // Even though we are evaluating integer expressions we could get a pointer
16094 // argument for the __builtin_is_aligned() case.
16095 if (SrcTy->isPointerType()) {
16096 LValue Ptr;
16097 if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: Ptr, Info))
16098 return false;
16099 Ptr.moveInto(V&: Val);
16100 } else if (!SrcTy->isIntegralOrEnumerationType()) {
16101 Info.FFDiag(E: E->getArg(Arg: 0));
16102 return false;
16103 } else {
16104 APSInt SrcInt;
16105 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: SrcInt, Info))
16106 return false;
16107 assert(SrcInt.getBitWidth() >= Alignment.getBitWidth() &&
16108 "Bit widths must be the same");
16109 Val = APValue(SrcInt);
16110 }
16111 assert(Val.hasValue());
16112 return true;
16113}
16114
16115bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
16116 unsigned BuiltinOp) {
16117 auto EvalTestOp = [&](llvm::function_ref<bool(const APInt &, const APInt &)>
16118 Fn) {
16119 APValue SourceLHS, SourceRHS;
16120 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
16121 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
16122 return false;
16123
16124 unsigned SourceLen = SourceLHS.getVectorLength();
16125 const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
16126 QualType ElemQT = VT->getElementType();
16127 unsigned LaneWidth = Info.Ctx.getTypeSize(T: ElemQT);
16128
16129 APInt AWide(LaneWidth * SourceLen, 0);
16130 APInt BWide(LaneWidth * SourceLen, 0);
16131
16132 for (unsigned I = 0; I != SourceLen; ++I) {
16133 APInt ALane;
16134 APInt BLane;
16135 if (ElemQT->isIntegerType()) { // Get value.
16136 ALane = SourceLHS.getVectorElt(I).getInt();
16137 BLane = SourceRHS.getVectorElt(I).getInt();
16138 } else if (ElemQT->isFloatingType()) { // Get only sign bit.
16139 ALane =
16140 SourceLHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
16141 BLane =
16142 SourceRHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
16143 } else { // Must be integer or floating type.
16144 return false;
16145 }
16146 AWide.insertBits(SubBits: ALane, bitPosition: I * LaneWidth);
16147 BWide.insertBits(SubBits: BLane, bitPosition: I * LaneWidth);
16148 }
16149 return Success(Value: Fn(AWide, BWide), E);
16150 };
16151
16152 auto HandleMaskBinOp =
16153 [&](llvm::function_ref<APSInt(const APSInt &, const APSInt &)> Fn)
16154 -> bool {
16155 APValue LHS, RHS;
16156 if (!Evaluate(Result&: LHS, Info, E: E->getArg(Arg: 0)) ||
16157 !Evaluate(Result&: RHS, Info, E: E->getArg(Arg: 1)))
16158 return false;
16159
16160 APSInt ResultInt = Fn(LHS.getInt(), RHS.getInt());
16161
16162 return Success(V: APValue(ResultInt), E);
16163 };
16164
16165 auto HandleCRC32 = [&](unsigned DataBytes) -> bool {
16166 APSInt CRC, Data;
16167 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: CRC, Info) ||
16168 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Data, Info))
16169 return false;
16170
16171 uint64_t CRCVal = CRC.getZExtValue();
16172 uint64_t DataVal = Data.getZExtValue();
16173
16174 // CRC32C polynomial (iSCSI polynomial, bit-reversed)
16175 static const uint32_t CRC32C_POLY = 0x82F63B78;
16176
16177 // Process each byte
16178 uint32_t Result = static_cast<uint32_t>(CRCVal);
16179 for (unsigned I = 0; I != DataBytes; ++I) {
16180 uint8_t Byte = static_cast<uint8_t>((DataVal >> (I * 8)) & 0xFF);
16181 Result ^= Byte;
16182 for (int J = 0; J != 8; ++J) {
16183 Result = (Result >> 1) ^ ((Result & 1) ? CRC32C_POLY : 0);
16184 }
16185 }
16186
16187 return Success(Value: Result, E);
16188 };
16189
16190 switch (BuiltinOp) {
16191 default:
16192 return false;
16193
16194 case X86::BI__builtin_ia32_crc32qi:
16195 return HandleCRC32(1);
16196 case X86::BI__builtin_ia32_crc32hi:
16197 return HandleCRC32(2);
16198 case X86::BI__builtin_ia32_crc32si:
16199 return HandleCRC32(4);
16200 case X86::BI__builtin_ia32_crc32di:
16201 return HandleCRC32(8);
16202
16203 case Builtin::BI__builtin_dynamic_object_size:
16204 case Builtin::BI__builtin_object_size: {
16205 // The type was checked when we built the expression.
16206 unsigned Type =
16207 E->getArg(Arg: 1)->EvaluateKnownConstInt(Ctx: Info.Ctx).getZExtValue();
16208 assert(Type <= 3 && "unexpected type");
16209
16210 if (std::optional<uint64_t> Size =
16211 tryEvaluateBuiltinObjectSize(E: E->getArg(Arg: 0), Type, Info))
16212 return Success(Value: *Size, E);
16213
16214 if (E->getArg(Arg: 0)->HasSideEffects(Ctx: Info.Ctx))
16215 return Success(Value: (Type & 2) ? 0 : -1, E);
16216
16217 // Expression had no side effects, but we couldn't statically determine the
16218 // size of the referenced object.
16219 switch (Info.EvalMode) {
16220 case EvaluationMode::ConstantExpression:
16221 case EvaluationMode::ConstantFold:
16222 case EvaluationMode::IgnoreSideEffects:
16223 // Leave it to IR generation.
16224 return Error(E);
16225 case EvaluationMode::ConstantExpressionUnevaluated:
16226 // Reduce it to a constant now.
16227 return Success(Value: (Type & 2) ? 0 : -1, E);
16228 }
16229
16230 llvm_unreachable("unexpected EvalMode");
16231 }
16232
16233 case Builtin::BI__builtin_os_log_format_buffer_size: {
16234 analyze_os_log::OSLogBufferLayout Layout;
16235 analyze_os_log::computeOSLogBufferLayout(Ctx&: Info.Ctx, E, layout&: Layout);
16236 return Success(Value: Layout.size().getQuantity(), E);
16237 }
16238
16239 case Builtin::BI__builtin_is_aligned: {
16240 APValue Src;
16241 APSInt Alignment;
16242 if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment))
16243 return false;
16244 if (Src.isLValue()) {
16245 // If we evaluated a pointer, check the minimum known alignment.
16246 LValue Ptr;
16247 Ptr.setFrom(Ctx: Info.Ctx, V: Src);
16248 CharUnits BaseAlignment = getBaseAlignment(Info, Value: Ptr);
16249 CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(offset: Ptr.Offset);
16250 // We can return true if the known alignment at the computed offset is
16251 // greater than the requested alignment.
16252 assert(PtrAlign.isPowerOfTwo());
16253 assert(Alignment.isPowerOf2());
16254 if (PtrAlign.getQuantity() >= Alignment)
16255 return Success(Value: 1, E);
16256 // If the alignment is not known to be sufficient, some cases could still
16257 // be aligned at run time. However, if the requested alignment is less or
16258 // equal to the base alignment and the offset is not aligned, we know that
16259 // the run-time value can never be aligned.
16260 if (BaseAlignment.getQuantity() >= Alignment &&
16261 PtrAlign.getQuantity() < Alignment)
16262 return Success(Value: 0, E);
16263 // Otherwise we can't infer whether the value is sufficiently aligned.
16264 // TODO: __builtin_is_aligned(__builtin_align_{down,up{(expr, N), N)
16265 // in cases where we can't fully evaluate the pointer.
16266 Info.FFDiag(E: E->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_compute)
16267 << Alignment;
16268 return false;
16269 }
16270 assert(Src.isInt());
16271 return Success(Value: (Src.getInt() & (Alignment - 1)) == 0 ? 1 : 0, E);
16272 }
16273 case Builtin::BI__builtin_align_up: {
16274 APValue Src;
16275 APSInt Alignment;
16276 if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment))
16277 return false;
16278 if (!Src.isInt())
16279 return Error(E);
16280 APSInt AlignedVal =
16281 APSInt((Src.getInt() + (Alignment - 1)) & ~(Alignment - 1),
16282 Src.getInt().isUnsigned());
16283 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth());
16284 return Success(SI: AlignedVal, E);
16285 }
16286 case Builtin::BI__builtin_align_down: {
16287 APValue Src;
16288 APSInt Alignment;
16289 if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment))
16290 return false;
16291 if (!Src.isInt())
16292 return Error(E);
16293 APSInt AlignedVal =
16294 APSInt(Src.getInt() & ~(Alignment - 1), Src.getInt().isUnsigned());
16295 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth());
16296 return Success(SI: AlignedVal, E);
16297 }
16298
16299 case Builtin::BI__builtin_bitreverseg:
16300 case Builtin::BI__builtin_bitreverse8:
16301 case Builtin::BI__builtin_bitreverse16:
16302 case Builtin::BI__builtin_bitreverse32:
16303 case Builtin::BI__builtin_bitreverse64:
16304 case Builtin::BI__builtin_elementwise_bitreverse: {
16305 APSInt Val;
16306 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16307 return false;
16308
16309 return Success(I: Val.reverseBits(), E);
16310 }
16311 case Builtin::BI__builtin_bswapg:
16312 case Builtin::BI__builtin_bswap16:
16313 case Builtin::BI__builtin_bswap32:
16314 case Builtin::BI__builtin_bswap64: {
16315 APSInt Val;
16316 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16317 return false;
16318 if (Val.getBitWidth() == 8 || Val.getBitWidth() == 1)
16319 return Success(SI: Val, E);
16320
16321 return Success(I: Val.byteSwap(), E);
16322 }
16323
16324 case Builtin::BI__builtin_classify_type:
16325 return Success(Value: (int)EvaluateBuiltinClassifyType(E, LangOpts: Info.getLangOpts()), E);
16326
16327 case Builtin::BI__builtin_clrsb:
16328 case Builtin::BI__builtin_clrsbl:
16329 case Builtin::BI__builtin_clrsbll: {
16330 APSInt Val;
16331 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16332 return false;
16333
16334 return Success(Value: Val.getBitWidth() - Val.getSignificantBits(), E);
16335 }
16336
16337 case Builtin::BI__builtin_clz:
16338 case Builtin::BI__builtin_clzl:
16339 case Builtin::BI__builtin_clzll:
16340 case Builtin::BI__builtin_clzs:
16341 case Builtin::BI__builtin_clzg:
16342 case Builtin::BI__builtin_elementwise_clzg:
16343 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
16344 case Builtin::BI__lzcnt:
16345 case Builtin::BI__lzcnt64: {
16346 APSInt Val;
16347 if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) {
16348 APValue Vec;
16349 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
16350 return false;
16351 Val = ConvertBoolVectorToInt(Val: Vec);
16352 } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) {
16353 return false;
16354 }
16355
16356 std::optional<APSInt> Fallback;
16357 if ((BuiltinOp == Builtin::BI__builtin_clzg ||
16358 BuiltinOp == Builtin::BI__builtin_elementwise_clzg) &&
16359 E->getNumArgs() > 1) {
16360 APSInt FallbackTemp;
16361 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: FallbackTemp, Info))
16362 return false;
16363 Fallback = FallbackTemp;
16364 }
16365
16366 if (!Val) {
16367 if (Fallback)
16368 return Success(SI: *Fallback, E);
16369
16370 // When the argument is 0, the result of GCC builtins is undefined,
16371 // whereas for Microsoft intrinsics, the result is the bit-width of the
16372 // argument.
16373 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
16374 BuiltinOp != Builtin::BI__lzcnt &&
16375 BuiltinOp != Builtin::BI__lzcnt64;
16376
16377 if (BuiltinOp == Builtin::BI__builtin_elementwise_clzg) {
16378 Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero)
16379 << /*IsTrailing=*/false;
16380 }
16381
16382 if (ZeroIsUndefined)
16383 return Error(E);
16384 }
16385
16386 return Success(Value: Val.countl_zero(), E);
16387 }
16388
16389 case Builtin::BI__builtin_constant_p: {
16390 const Expr *Arg = E->getArg(Arg: 0);
16391 if (EvaluateBuiltinConstantP(Info, Arg))
16392 return Success(Value: true, E);
16393 if (Info.InConstantContext || Arg->HasSideEffects(Ctx: Info.Ctx)) {
16394 // Outside a constant context, eagerly evaluate to false in the presence
16395 // of side-effects in order to avoid -Wunsequenced false-positives in
16396 // a branch on __builtin_constant_p(expr).
16397 return Success(Value: false, E);
16398 }
16399 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
16400 return false;
16401 }
16402
16403 case Builtin::BI__noop:
16404 // __noop always evaluates successfully and returns 0.
16405 return Success(Value: 0, E);
16406
16407 case Builtin::BI__builtin_is_constant_evaluated: {
16408 const auto *Callee = Info.CurrentCall->getCallee();
16409 if (Info.InConstantContext && !Info.CheckingPotentialConstantExpression &&
16410 (Info.CallStackDepth == 1 ||
16411 (Info.CallStackDepth == 2 && Callee->isInStdNamespace() &&
16412 Callee->getIdentifier() &&
16413 Callee->getIdentifier()->isStr(Str: "is_constant_evaluated")))) {
16414 // FIXME: Find a better way to avoid duplicated diagnostics.
16415 if (Info.EvalStatus.Diag)
16416 Info.report(Loc: (Info.CallStackDepth == 1)
16417 ? E->getExprLoc()
16418 : Info.CurrentCall->getCallRange().getBegin(),
16419 DiagId: diag::warn_is_constant_evaluated_always_true_constexpr)
16420 << (Info.CallStackDepth == 1 ? "__builtin_is_constant_evaluated"
16421 : "std::is_constant_evaluated");
16422 }
16423
16424 return Success(Value: Info.InConstantContext, E);
16425 }
16426
16427 case Builtin::BI__builtin_is_within_lifetime:
16428 if (auto result = EvaluateBuiltinIsWithinLifetime(*this, E))
16429 return Success(Value: *result, E);
16430 return false;
16431
16432 case Builtin::BI__builtin_ctz:
16433 case Builtin::BI__builtin_ctzl:
16434 case Builtin::BI__builtin_ctzll:
16435 case Builtin::BI__builtin_ctzs:
16436 case Builtin::BI__builtin_ctzg:
16437 case Builtin::BI__builtin_elementwise_ctzg: {
16438 APSInt Val;
16439 if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) {
16440 APValue Vec;
16441 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
16442 return false;
16443 Val = ConvertBoolVectorToInt(Val: Vec);
16444 } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) {
16445 return false;
16446 }
16447
16448 std::optional<APSInt> Fallback;
16449 if ((BuiltinOp == Builtin::BI__builtin_ctzg ||
16450 BuiltinOp == Builtin::BI__builtin_elementwise_ctzg) &&
16451 E->getNumArgs() > 1) {
16452 APSInt FallbackTemp;
16453 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: FallbackTemp, Info))
16454 return false;
16455 Fallback = FallbackTemp;
16456 }
16457
16458 if (!Val) {
16459 if (Fallback)
16460 return Success(SI: *Fallback, E);
16461
16462 if (BuiltinOp == Builtin::BI__builtin_elementwise_ctzg) {
16463 Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero)
16464 << /*IsTrailing=*/true;
16465 }
16466 return Error(E);
16467 }
16468
16469 return Success(Value: Val.countr_zero(), E);
16470 }
16471
16472 case Builtin::BI__builtin_eh_return_data_regno: {
16473 int Operand = E->getArg(Arg: 0)->EvaluateKnownConstInt(Ctx: Info.Ctx).getZExtValue();
16474 Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(RegNo: Operand);
16475 return Success(Value: Operand, E);
16476 }
16477
16478 case Builtin::BI__builtin_elementwise_abs: {
16479 APSInt Val;
16480 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16481 return false;
16482
16483 return Success(I: Val.abs(), E);
16484 }
16485
16486 case Builtin::BI__builtin_expect:
16487 case Builtin::BI__builtin_expect_with_probability:
16488 return Visit(S: E->getArg(Arg: 0));
16489
16490 case Builtin::BI__builtin_ptrauth_string_discriminator: {
16491 const auto *Literal =
16492 cast<StringLiteral>(Val: E->getArg(Arg: 0)->IgnoreParenImpCasts());
16493 uint64_t Result = getPointerAuthStableSipHash(S: Literal->getString());
16494 return Success(Value: Result, E);
16495 }
16496
16497 case Builtin::BI__builtin_infer_alloc_token: {
16498 // If we fail to infer a type, this fails to be a constant expression; this
16499 // can be checked with __builtin_constant_p(...).
16500 QualType AllocType = infer_alloc::inferPossibleType(E, Ctx: Info.Ctx, CastE: nullptr);
16501 if (AllocType.isNull())
16502 return Error(
16503 E, D: diag::note_constexpr_infer_alloc_token_type_inference_failed);
16504 auto ATMD = infer_alloc::getAllocTokenMetadata(T: AllocType, Ctx: Info.Ctx);
16505 if (!ATMD)
16506 return Error(E, D: diag::note_constexpr_infer_alloc_token_no_metadata);
16507 auto Mode =
16508 Info.getLangOpts().AllocTokenMode.value_or(u: llvm::DefaultAllocTokenMode);
16509 uint64_t BitWidth = Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType());
16510 auto MaxTokensOpt = Info.getLangOpts().AllocTokenMax;
16511 uint64_t MaxTokens =
16512 MaxTokensOpt.value_or(u: 0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
16513 auto MaybeToken = llvm::getAllocToken(Mode, Metadata: *ATMD, MaxTokens);
16514 if (!MaybeToken)
16515 return Error(E, D: diag::note_constexpr_infer_alloc_token_stateful_mode);
16516 return Success(I: llvm::APInt(BitWidth, *MaybeToken), E);
16517 }
16518
16519 case Builtin::BI__builtin_ffs:
16520 case Builtin::BI__builtin_ffsl:
16521 case Builtin::BI__builtin_ffsll: {
16522 APSInt Val;
16523 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16524 return false;
16525
16526 unsigned N = Val.countr_zero();
16527 return Success(Value: N == Val.getBitWidth() ? 0 : N + 1, E);
16528 }
16529
16530 case Builtin::BI__builtin_fpclassify: {
16531 APFloat Val(0.0);
16532 if (!EvaluateFloat(E: E->getArg(Arg: 5), Result&: Val, Info))
16533 return false;
16534 unsigned Arg;
16535 switch (Val.getCategory()) {
16536 case APFloat::fcNaN: Arg = 0; break;
16537 case APFloat::fcInfinity: Arg = 1; break;
16538 case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break;
16539 case APFloat::fcZero: Arg = 4; break;
16540 }
16541 return Visit(S: E->getArg(Arg));
16542 }
16543
16544 case Builtin::BI__builtin_isinf_sign: {
16545 APFloat Val(0.0);
16546 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16547 Success(Value: Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E);
16548 }
16549
16550 case Builtin::BI__builtin_isinf: {
16551 APFloat Val(0.0);
16552 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16553 Success(Value: Val.isInfinity() ? 1 : 0, E);
16554 }
16555
16556 case Builtin::BI__builtin_isfinite: {
16557 APFloat Val(0.0);
16558 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16559 Success(Value: Val.isFinite() ? 1 : 0, E);
16560 }
16561
16562 case Builtin::BI__builtin_isnan: {
16563 APFloat Val(0.0);
16564 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16565 Success(Value: Val.isNaN() ? 1 : 0, E);
16566 }
16567
16568 case Builtin::BI__builtin_isnormal: {
16569 APFloat Val(0.0);
16570 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16571 Success(Value: Val.isNormal() ? 1 : 0, E);
16572 }
16573
16574 case Builtin::BI__builtin_issubnormal: {
16575 APFloat Val(0.0);
16576 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16577 Success(Value: Val.isDenormal() ? 1 : 0, E);
16578 }
16579
16580 case Builtin::BI__builtin_iszero: {
16581 APFloat Val(0.0);
16582 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16583 Success(Value: Val.isZero() ? 1 : 0, E);
16584 }
16585
16586 case Builtin::BI__builtin_signbit:
16587 case Builtin::BI__builtin_signbitf:
16588 case Builtin::BI__builtin_signbitl: {
16589 APFloat Val(0.0);
16590 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16591 Success(Value: Val.isNegative() ? 1 : 0, E);
16592 }
16593
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Quiet floating-point comparison macros. Evaluate both operands, then
    // map the builtin to the matching APFloat comparison; the result is
    // folded to an integer 0/1.
    APFloat LHS(0.0);
    APFloat RHS(0.0);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    return Success(
        Value: [&] {
          switch (BuiltinOp) {
          case Builtin::BI__builtin_isgreater:
            return LHS > RHS;
          case Builtin::BI__builtin_isgreaterequal:
            return LHS >= RHS;
          case Builtin::BI__builtin_isless:
            return LHS < RHS;
          case Builtin::BI__builtin_islessequal:
            return LHS <= RHS;
          case Builtin::BI__builtin_islessgreater: {
            // "Less or greater": ordered and unequal. This is false when the
            // operands compare equal or unordered (either is a NaN).
            APFloat::cmpResult cmp = LHS.compare(RHS);
            return cmp == APFloat::cmpResult::cmpLessThan ||
                   cmp == APFloat::cmpResult::cmpGreaterThan;
          }
          case Builtin::BI__builtin_isunordered:
            // True iff at least one operand is a NaN.
            return LHS.compare(RHS) == APFloat::cmpResult::cmpUnordered;
          default:
            llvm_unreachable("Unexpected builtin ID: Should be a floating "
                             "point comparison function");
          }
        }()
            ? 1
            : 0,
        E);
  }

  case Builtin::BI__builtin_issignaling: {
    // Fold to 1 if the operand is a signaling NaN.
    APFloat Val(0.0);
    return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
           Success(Value: Val.isSignaling() ? 1 : 0, E);
  }
16639
  case Builtin::BI__builtin_isfpclass: {
    // The second argument is a bitmask of llvm::FPClassTest values. Fold to 1
    // if the classification of the first argument intersects the mask.
    APSInt MaskVal;
    if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: MaskVal, Info))
      return false;
    unsigned Test = static_cast<llvm::FPClassTest>(MaskVal.getZExtValue());
    APFloat Val(0.0);
    return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
           Success(Value: (Val.classify() & Test) ? 1 : 0, E);
  }

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;

    // Parity is 1 when an odd number of bits are set.
    return Success(Value: Val.popcount() % 2, E);
  }
16659
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    // abs() of the minimum signed value is not representable (negation
    // overflows), so refuse to fold it rather than produce a wrong value.
    if (Val == APSInt(APInt::getSignedMinValue(numBits: Val.getBitWidth()),
                      /*IsUnsigned=*/false))
      return false;
    if (Val.isNegative())
      Val.negate();
    return Success(SI: Val, E);
  }

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__builtin_elementwise_popcount:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64: {
    APSInt Val;
    // A bool-vector operand is first packed into a single integer so the
    // population count can be taken over its bits; all other operands are
    // evaluated as plain integers.
    if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) {
      APValue Vec;
      if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
        return false;
      Val = ConvertBoolVectorToInt(Val: Vec);
    } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) {
      return false;
    }

    return Success(Value: Val.popcount(), E);
  }
16694
  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI__builtin_stdc_rotate_left:
  case Builtin::BI__builtin_stdc_rotate_right:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64: {
    APSInt Value, Amount;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Value, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Amount, Info))
      return false;

    // Canonicalize the rotate amount before applying it (rotation is cyclic
    // in the bit width of Value).
    Amount = NormalizeRotateAmount(Value, Amount);

    // Right-rotate builtins use rotr; everything else in this case group is a
    // left rotate. The result keeps the signedness of the first operand.
    switch (BuiltinOp) {
    case Builtin::BI__builtin_rotateright8:
    case Builtin::BI__builtin_rotateright16:
    case Builtin::BI__builtin_rotateright32:
    case Builtin::BI__builtin_rotateright64:
    case Builtin::BI__builtin_stdc_rotate_right:
    case Builtin::BI_rotr8:
    case Builtin::BI_rotr16:
    case Builtin::BI_rotr:
    case Builtin::BI_lrotr:
    case Builtin::BI_rotr64:
      return Success(
          SI: APSInt(Value.rotr(rotateAmt: Amount.getZExtValue()), Value.isUnsigned()), E);
    default:
      return Success(
          SI: APSInt(Value.rotl(rotateAmt: Amount.getZExtValue()), Value.isUnsigned()), E);
    }
  }
16740
  case Builtin::BI__builtin_elementwise_add_sat: {
    // Saturating add: clamps to the type's min/max instead of wrapping.
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    // Note: the APSInt constructor takes an "is unsigned" flag, hence the
    // negated isSigned() here (and in the sibling cases below).
    APInt Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_sub_sat: {
    // Saturating subtract.
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    APInt Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_max: {
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    // APSInt's operator< respects signedness, so std::max compares correctly.
    APInt Result = std::max(a: LHS, b: RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_min: {
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    APInt Result = std::min(a: LHS, b: RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_fshl:
  case Builtin::BI__builtin_elementwise_fshr: {
    // Funnel shift: concatenate Hi:Lo, shift left (fshl) or right (fshr) by
    // Shift modulo the bit width, and keep the Hi-sized (fshl) or Lo-sized
    // (fshr) half as implemented by APIntOps.
    APSInt Hi, Lo, Shift;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Hi, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Lo, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Shift, Info))
      return false;

    switch (BuiltinOp) {
    case Builtin::BI__builtin_elementwise_fshl: {
      APSInt Result(llvm::APIntOps::fshl(Hi, Lo, Shift), Hi.isUnsigned());
      return Success(SI: Result, E);
    }
    case Builtin::BI__builtin_elementwise_fshr: {
      APSInt Result(llvm::APIntOps::fshr(Hi, Lo, Shift), Hi.isUnsigned());
      return Success(SI: Result, E);
    }
    }
    llvm_unreachable("Fully covered switch above");
  }
  case Builtin::BIstrlen:
  case Builtin::BIwcslen:
    // A call to strlen is not a constant expression. Diagnose it, then fall
    // through and fold it anyway like the __builtin_ form.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_strlen:
  case Builtin::BI__builtin_wcslen: {
    // As an extension, we support __builtin_strlen() as a constant expression,
    // and support folding strlen() to a constant.
    if (std::optional<uint64_t> StrLen =
            EvaluateBuiltinStrLen(E: E->getArg(Arg: 0), Info))
      return Success(Value: *StrLen, E);
    return false;
  }
16816
  case Builtin::BIstrcmp:
  case Builtin::BIwcscmp:
  case Builtin::BIstrncmp:
  case Builtin::BIwcsncmp:
  case Builtin::BImemcmp:
  case Builtin::BIbcmp:
  case Builtin::BIwmemcmp:
    // Calls to these library functions are not constant expressions. Diagnose
    // them, then fall through and fold them like the __builtin_ forms.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_strcmp:
  case Builtin::BI__builtin_wcscmp:
  case Builtin::BI__builtin_strncmp:
  case Builtin::BI__builtin_wcsncmp:
  case Builtin::BI__builtin_memcmp:
  case Builtin::BI__builtin_bcmp:
  case Builtin::BI__builtin_wmemcmp: {
    LValue String1, String2;
    if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: String1, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 1), Result&: String2, Info))
      return false;

    // The 'n'-taking variants bound the comparison; the plain strcmp/wcscmp
    // forms compare until a terminator, so leave MaxLength unlimited.
    uint64_t MaxLength = uint64_t(-1);
    if (BuiltinOp != Builtin::BIstrcmp &&
        BuiltinOp != Builtin::BIwcscmp &&
        BuiltinOp != Builtin::BI__builtin_strcmp &&
        BuiltinOp != Builtin::BI__builtin_wcscmp) {
      APSInt N;
      if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info))
        return false;
      MaxLength = N.getZExtValue();
    }

    // Empty substrings compare equal by definition.
    if (MaxLength == 0u)
      return Success(Value: 0, E);

    if (!String1.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) ||
        !String2.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) ||
        String1.Designator.Invalid || String2.Designator.Invalid)
      return false;

    QualType CharTy1 = String1.Designator.getType(Ctx&: Info.Ctx);
    QualType CharTy2 = String2.Designator.getType(Ctx&: Info.Ctx);

    // memcmp/bcmp compare raw bytes; the string functions compare elements of
    // the pointee character type.
    bool IsRawByte = BuiltinOp == Builtin::BImemcmp ||
                     BuiltinOp == Builtin::BIbcmp ||
                     BuiltinOp == Builtin::BI__builtin_memcmp ||
                     BuiltinOp == Builtin::BI__builtin_bcmp;

    assert(IsRawByte ||
           (Info.Ctx.hasSameUnqualifiedType(
                CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
            Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));

    // For memcmp, allow comparing any arrays of '[[un]signed] char' or
    // 'char8_t', but no other types.
    if (IsRawByte &&
        !(isOneByteCharacterType(T: CharTy1) && isOneByteCharacterType(T: CharTy2))) {
      // FIXME: Consider using our bit_cast implementation to support this.
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcmp_unsupported)
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp) << CharTy1
          << CharTy2;
      return false;
    }

    // Load the current element from each string as an integer APValue.
    const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
      return handleLValueToRValueConversion(Info, Conv: E, Type: CharTy1, LVal: String1, RVal&: Char1) &&
             handleLValueToRValueConversion(Info, Conv: E, Type: CharTy2, LVal: String2, RVal&: Char2) &&
             Char1.isInt() && Char2.isInt();
    };
    // Step both lvalues to their next array element.
    const auto &AdvanceElems = [&] {
      return HandleLValueArrayAdjustment(Info, E, LVal&: String1, EltTy: CharTy1, Adjustment: 1) &&
             HandleLValueArrayAdjustment(Info, E, LVal&: String2, EltTy: CharTy2, Adjustment: 1);
    };

    // The str* functions stop at a NUL terminator; mem*/wmemcmp compare a
    // fixed number of elements regardless of NULs.
    bool StopAtNull =
        (BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
         BuiltinOp != Builtin::BIwmemcmp &&
         BuiltinOp != Builtin::BI__builtin_memcmp &&
         BuiltinOp != Builtin::BI__builtin_bcmp &&
         BuiltinOp != Builtin::BI__builtin_wmemcmp);
    bool IsWide = BuiltinOp == Builtin::BIwcscmp ||
                  BuiltinOp == Builtin::BIwcsncmp ||
                  BuiltinOp == Builtin::BIwmemcmp ||
                  BuiltinOp == Builtin::BI__builtin_wcscmp ||
                  BuiltinOp == Builtin::BI__builtin_wcsncmp ||
                  BuiltinOp == Builtin::BI__builtin_wmemcmp;

    for (; MaxLength; --MaxLength) {
      APValue Char1, Char2;
      if (!ReadCurElems(Char1, Char2))
        return false;
      if (Char1.getInt().ne(RHS: Char2.getInt())) {
        if (IsWide) // wmemcmp compares with wchar_t signedness.
          return Success(Value: Char1.getInt() < Char2.getInt() ? -1 : 1, E);
        // memcmp always compares unsigned chars.
        return Success(Value: Char1.getInt().ult(RHS: Char2.getInt()) ? -1 : 1, E);
      }
      if (StopAtNull && !Char1.getInt())
        return Success(Value: 0, E);
      assert(!(StopAtNull && !Char2.getInt()));
      if (!AdvanceElems())
        return false;
    }
    // We hit the strncmp / memcmp limit.
    return Success(Value: 0, E);
  }
16930
  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
  case Builtin::BI__c11_atomic_is_lock_free: {
    APSInt SizeVal;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: SizeVal, Info))
      return false;

    // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
    // of two less than or equal to the maximum inline atomic width, we know it
    // is lock-free. If the size isn't a power of two, or greater than the
    // maximum alignment where we promote atomics, we know it is not lock-free
    // (at least not in the sense of atomic_is_lock_free). Otherwise,
    // the answer can only be determined at runtime; for example, 16-byte
    // atomics have lock-free implementations on some, but not all,
    // x86-64 processors.

    // Check power-of-two.
    CharUnits Size = CharUnits::fromQuantity(Quantity: SizeVal.getZExtValue());
    if (Size.isPowerOfTwo()) {
      // Check against inlining width.
      unsigned InlineWidthBits =
          Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
      if (Size <= Info.Ctx.toCharUnitsFromBits(BitSize: InlineWidthBits)) {
        // The C11 form ignores the pointer argument; single-byte objects are
        // always suitably aligned.
        if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
            Size == CharUnits::One())
          return Success(Value: 1, E);

        // If the pointer argument can be evaluated to a compile-time constant
        // integer (or nullptr), check if that value is appropriately aligned.
        const Expr *PtrArg = E->getArg(Arg: 1);
        Expr::EvalResult ExprResult;
        APSInt IntResult;
        if (PtrArg->EvaluateAsRValue(Result&: ExprResult, Ctx: Info.Ctx) &&
            ExprResult.Val.toIntegralConstant(Result&: IntResult, SrcTy: PtrArg->getType(),
                                              Ctx: Info.Ctx) &&
            IntResult.isAligned(A: Size.getAsAlign()))
          return Success(Value: 1, E);

        // Otherwise, check the pointee type's alignment against Size.
        if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: PtrArg)) {
          // Drop the potential implicit-cast to 'const volatile void*', getting
          // the underlying type.
          if (ICE->getCastKind() == CK_BitCast)
            PtrArg = ICE->getSubExpr();
        }

        if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
          QualType PointeeType = PtrTy->getPointeeType();
          if (!PointeeType->isIncompleteType() &&
              Info.Ctx.getTypeAlignInChars(T: PointeeType) >= Size) {
            // OK, we will inline operations on this object.
            return Success(Value: 1, E);
          }
        }
      }
    }

    // always_lock_free folds to 0 when not provably lock-free; the is_lock_free
    // forms cannot be answered at compile time, so evaluation fails.
    return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
        Success(Value: 0, E) : Error(E);
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {
    // Multiprecision add/subtract with carry: computes
    // arg0 +/- arg1 +/- carry-in, stores the carry/borrow-out through the
    // pointer in arg3, and returns the (wrapped) result word.
    LValue CarryOutLValue;
    APSInt LHS, RHS, CarryIn, CarryOut, Result;
    QualType ResultType = E->getArg(Arg: 0)->getType();
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: CarryIn, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 3), Result&: CarryOutLValue, Info))
      return false;
    // Copy the number of bits and sign.
    Result = LHS;
    CarryOut = LHS;

    bool FirstOverflowed = false;
    bool SecondOverflowed = false;
    switch (BuiltinOp) {
    default:
      llvm_unreachable("Invalid value for BuiltinOp");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      Result =
          LHS.uadd_ov(RHS, Overflow&: FirstOverflowed).uadd_ov(RHS: CarryIn, Overflow&: SecondOverflowed);
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      Result =
          LHS.usub_ov(RHS, Overflow&: FirstOverflowed).usub_ov(RHS: CarryIn, Overflow&: SecondOverflowed);
      break;
    }

    // It is possible for both overflows to happen but CGBuiltin uses an OR so
    // this is consistent.
    CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
    APValue APV{CarryOut};
    if (!handleAssignment(Info, E, LVal: CarryOutLValue, LValType: ResultType, Val&: APV))
      return false;
    return Success(SI: Result, E);
  }
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {
    // Checked-arithmetic builtins: perform the operation, store the result
    // through the pointer in arg2, and return whether overflow occurred.
    // The generic __builtin_{add,sub,mul}_overflow forms allow the three
    // operand types to differ, so they need widening first.
    LValue ResultLValue;
    APSInt LHS, RHS;

    QualType ResultType = E->getArg(Arg: 2)->getType()->getPointeeType();
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 2), Result&: ResultLValue, Info))
      return false;

    APSInt Result;
    bool DidOverflow = false;

    // If the types don't have to match, enlarge all 3 to the largest of them.
    if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
        BuiltinOp == Builtin::BI__builtin_sub_overflow ||
        BuiltinOp == Builtin::BI__builtin_mul_overflow) {
      bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                      ResultType->isSignedIntegerOrEnumerationType();
      bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                       ResultType->isSignedIntegerOrEnumerationType();
      uint64_t LHSSize = LHS.getBitWidth();
      uint64_t RHSSize = RHS.getBitWidth();
      uint64_t ResultSize = Info.Ctx.getTypeSize(T: ResultType);
      uint64_t MaxBits = std::max(a: std::max(a: LHSSize, b: RHSSize), b: ResultSize);

      // Add an additional bit if the signedness isn't uniformly agreed to. We
      // could do this ONLY if there is a signed and an unsigned that both have
      // MaxBits, but the code to check that is pretty nasty. The issue will be
      // caught in the shrink-to-result later anyway.
      if (IsSigned && !AllSigned)
        ++MaxBits;

      LHS = APSInt(LHS.extOrTrunc(width: MaxBits), !IsSigned);
      RHS = APSInt(RHS.extOrTrunc(width: MaxBits), !IsSigned);
      Result = APSInt(MaxBits, !IsSigned);
    }

    // Perform the (possibly widened) arithmetic, tracking overflow.
    switch (BuiltinOp) {
    default:
      llvm_unreachable("Invalid value for BuiltinOp");
    case Builtin::BI__builtin_add_overflow:
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow&: DidOverflow)
                              : LHS.uadd_ov(RHS, Overflow&: DidOverflow);
      break;
    case Builtin::BI__builtin_sub_overflow:
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow&: DidOverflow)
                              : LHS.usub_ov(RHS, Overflow&: DidOverflow);
      break;
    case Builtin::BI__builtin_mul_overflow:
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow&: DidOverflow)
                              : LHS.umul_ov(RHS, Overflow&: DidOverflow);
      break;
    }

    // In the case where multiple sizes are allowed, truncate and see if
    // the values are the same.
    if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
        BuiltinOp == Builtin::BI__builtin_sub_overflow ||
        BuiltinOp == Builtin::BI__builtin_mul_overflow) {
      // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
      // since it will give us the behavior of a TruncOrSelf in the case where
      // its parameter <= its size. We previously set Result to be at least the
      // type-size of the result, so getTypeSize(ResultType) <= Result.BitWidth
      // will work exactly like TruncOrSelf.
      APSInt Temp = Result.extOrTrunc(width: Info.Ctx.getTypeSize(T: ResultType));
      Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

      if (!APSInt::isSameValue(I1: Temp, I2: Result))
        DidOverflow = true;
      Result = Temp;
    }

    APValue APV{Result};
    if (!handleAssignment(Info, E, LVal: ResultLValue, LValType: ResultType, Val&: APV))
      return false;
    return Success(Value: DidOverflow, E);
  }
17161
  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_and:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_xor:
  case Builtin::BI__builtin_reduce_min:
  case Builtin::BI__builtin_reduce_max: {
    // Horizontal reductions over an integer vector: fold every element into a
    // single scalar with the requested operation.
    APValue Source;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
      return false;

    // Seed the accumulator with element 0, then fold in the rest.
    unsigned SourceLen = Source.getVectorLength();
    APSInt Reduced = Source.getVectorElt(I: 0).getInt();
    for (unsigned EltNum = 1; EltNum < SourceLen; ++EltNum) {
      switch (BuiltinOp) {
      default:
        return false;
      case Builtin::BI__builtin_reduce_add: {
        // Widen by one bit so CheckedIntArithmetic can detect overflow of the
        // running sum.
        if (!CheckedIntArithmetic(
                Info, E, LHS: Reduced, RHS: Source.getVectorElt(I: EltNum).getInt(),
                BitWidth: Reduced.getBitWidth() + 1, Op: std::plus<APSInt>(), Result&: Reduced))
          return false;
        break;
      }
      case Builtin::BI__builtin_reduce_mul: {
        // Multiplication can double the needed width, hence BitWidth * 2.
        if (!CheckedIntArithmetic(
                Info, E, LHS: Reduced, RHS: Source.getVectorElt(I: EltNum).getInt(),
                BitWidth: Reduced.getBitWidth() * 2, Op: std::multiplies<APSInt>(), Result&: Reduced))
          return false;
        break;
      }
      case Builtin::BI__builtin_reduce_and: {
        Reduced &= Source.getVectorElt(I: EltNum).getInt();
        break;
      }
      case Builtin::BI__builtin_reduce_or: {
        Reduced |= Source.getVectorElt(I: EltNum).getInt();
        break;
      }
      case Builtin::BI__builtin_reduce_xor: {
        Reduced ^= Source.getVectorElt(I: EltNum).getInt();
        break;
      }
      case Builtin::BI__builtin_reduce_min: {
        Reduced = std::min(a: Reduced, b: Source.getVectorElt(I: EltNum).getInt());
        break;
      }
      case Builtin::BI__builtin_reduce_max: {
        Reduced = std::max(a: Reduced, b: Source.getVectorElt(I: EltNum).getInt());
        break;
      }
      }
    }

    return Success(SI: Reduced, E);
  }
17218
  case clang::X86::BI__builtin_ia32_addcarryx_u32:
  case clang::X86::BI__builtin_ia32_addcarryx_u64:
  case clang::X86::BI__builtin_ia32_subborrow_u32:
  case clang::X86::BI__builtin_ia32_subborrow_u64: {
    // x86 ADCX/SBB-style builtins: add/subtract with an incoming carry,
    // store the low word through the pointer in arg3, and return the
    // carry/borrow-out bit.
    LValue ResultLValue;
    APSInt CarryIn, LHS, RHS;
    QualType ResultType = E->getArg(Arg: 3)->getType()->getPointeeType();
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: CarryIn, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: RHS, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 3), Result&: ResultLValue, Info))
      return false;

    bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
                 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

    // Any nonzero carry-in counts as 1. Compute in BitWidth+1 bits so the
    // carry/borrow lands in the extra top bit.
    unsigned BitWidth = LHS.getBitWidth();
    unsigned CarryInBit = CarryIn.ugt(RHS: 0) ? 1 : 0;
    APInt ExResult =
        IsAdd
            ? (LHS.zext(width: BitWidth + 1) + (RHS.zext(width: BitWidth + 1) + CarryInBit))
            : (LHS.zext(width: BitWidth + 1) - (RHS.zext(width: BitWidth + 1) + CarryInBit));

    APInt Result = ExResult.extractBits(numBits: BitWidth, bitPosition: 0);
    uint64_t CarryOut = ExResult.extractBitsAsZExtValue(numBits: 1, bitPosition: BitWidth);

    APValue APV{APSInt(Result, /*isUnsigned=*/true)};
    if (!handleAssignment(Info, E, LVal: ResultLValue, LValType: ResultType, Val&: APV))
      return false;
    return Success(Value: CarryOut, E);
  }
17250
  case clang::X86::BI__builtin_ia32_movmskps:
  case clang::X86::BI__builtin_ia32_movmskpd:
  case clang::X86::BI__builtin_ia32_pmovmskb128:
  case clang::X86::BI__builtin_ia32_pmovmskb256:
  case clang::X86::BI__builtin_ia32_movmskps256:
  case clang::X86::BI__builtin_ia32_movmskpd256: {
    // movmsk: build an integer whose bit I is the sign bit of vector lane I.
    APValue Source;
    if (!Evaluate(Result&: Source, Info, E: E->getArg(Arg: 0)))
      return false;
    unsigned SourceLen = Source.getVectorLength();
    const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
    QualType ElemQT = VT->getElementType();
    unsigned ResultLen = Info.Ctx.getTypeSize(
        T: E->getCallReturnType(Ctx: Info.Ctx)); // Always 32-bit integer.
    APInt Result(ResultLen, 0);

    for (unsigned I = 0; I != SourceLen; ++I) {
      APInt Elem;
      // Float lanes are reinterpreted as raw bits so isNegative() tests the
      // IEEE sign bit.
      if (ElemQT->isIntegerType()) {
        Elem = Source.getVectorElt(I).getInt();
      } else if (ElemQT->isRealFloatingType()) {
        Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt();
      } else {
        return false;
      }
      Result.setBitVal(BitPosition: I, BitValue: Elem.isNegative());
    }
    return Success(I: Result, E);
  }
17280
  case clang::X86::BI__builtin_ia32_bextr_u32:
  case clang::X86::BI__builtin_ia32_bextr_u64:
  case clang::X86::BI__builtin_ia32_bextri_u32:
  case clang::X86::BI__builtin_ia32_bextri_u64: {
    // BEXTR: extract a bit field. The control operand packs the start bit in
    // its low byte and the field length in its second byte.
    APSInt Val, Idx;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Idx, Info))
      return false;

    unsigned BitWidth = Val.getBitWidth();
    uint64_t Shift = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0);
    uint64_t Length = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 8);
    // Clamp the length to the operand width.
    Length = Length > BitWidth ? BitWidth : Length;

    // Handle out of bounds cases.
    if (Length == 0 || Shift >= BitWidth)
      return Success(Value: 0, E);

    uint64_t Result = Val.getZExtValue() >> Shift;
    Result &= llvm::maskTrailingOnes<uint64_t>(N: Length);
    return Success(Value: Result, E);
  }

  case clang::X86::BI__builtin_ia32_bzhi_si:
  case clang::X86::BI__builtin_ia32_bzhi_di: {
    // BZHI: zero all bits at positions >= Index (low byte of the second
    // operand). An Index >= BitWidth leaves the value unchanged.
    APSInt Val, Idx;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Idx, Info))
      return false;

    unsigned BitWidth = Val.getBitWidth();
    unsigned Index = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0);
    if (Index < BitWidth)
      Val.clearHighBits(hiBits: BitWidth - Index);
    return Success(SI: Val, E);
  }
17317
  case clang::X86::BI__builtin_ia32_ktestcqi:
  case clang::X86::BI__builtin_ia32_ktestchi:
  case clang::X86::BI__builtin_ia32_ktestcsi:
  case clang::X86::BI__builtin_ia32_ktestcdi: {
    // ktestc: the carry-flag result — 1 iff B has no bits set outside A
    // (i.e. ~A & B == 0).
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: (~A & B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_ktestzqi:
  case clang::X86::BI__builtin_ia32_ktestzhi:
  case clang::X86::BI__builtin_ia32_ktestzsi:
  case clang::X86::BI__builtin_ia32_ktestzdi: {
    // ktestz: the zero-flag result — 1 iff A and B share no set bits.
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: (A & B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_kortestcqi:
  case clang::X86::BI__builtin_ia32_kortestchi:
  case clang::X86::BI__builtin_ia32_kortestcsi:
  case clang::X86::BI__builtin_ia32_kortestcdi: {
    // kortestc: 1 iff A | B is all-ones.
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: ~(A | B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_kortestzqi:
  case clang::X86::BI__builtin_ia32_kortestzhi:
  case clang::X86::BI__builtin_ia32_kortestzsi:
  case clang::X86::BI__builtin_ia32_kortestzdi: {
    // kortestz: 1 iff A | B is all-zeros.
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: (A | B) == 0, E);
  }
17365
  case clang::X86::BI__builtin_ia32_kunpckhi:
  case clang::X86::BI__builtin_ia32_kunpckdi:
  case clang::X86::BI__builtin_ia32_kunpcksi: {
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    // Generic kunpack: extract lower half of each operand and concatenate
    // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
    // (A's half becomes the high bits, B's half the low bits).
    unsigned BW = A.getBitWidth();
    APSInt Result(A.trunc(width: BW / 2).concat(NewLSB: B.trunc(width: BW / 2)), A.isUnsigned());
    return Success(SI: Result, E);
  }

  case clang::X86::BI__builtin_ia32_lzcnt_u16:
  case clang::X86::BI__builtin_ia32_lzcnt_u32:
  case clang::X86::BI__builtin_ia32_lzcnt_u64: {
    // LZCNT is well-defined for zero input: countLeadingZeros then yields the
    // full bit width.
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    return Success(Value: Val.countLeadingZeros(), E);
  }

  case clang::X86::BI__builtin_ia32_tzcnt_u16:
  case clang::X86::BI__builtin_ia32_tzcnt_u32:
  case clang::X86::BI__builtin_ia32_tzcnt_u64: {
    // TZCNT likewise yields the full bit width for zero input.
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    return Success(Value: Val.countTrailingZeros(), E);
  }
17398
17399 case clang::X86::BI__builtin_ia32_pdep_si:
17400 case clang::X86::BI__builtin_ia32_pdep_di: {
17401 APSInt Val, Msk;
17402 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
17403 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Msk, Info))
17404 return false;
17405
17406 unsigned BitWidth = Val.getBitWidth();
17407 APInt Result = APInt::getZero(numBits: BitWidth);
17408 for (unsigned I = 0, P = 0; I != BitWidth; ++I)
17409 if (Msk[I])
17410 Result.setBitVal(BitPosition: I, BitValue: Val[P++]);
17411 return Success(I: Result, E);
17412 }
17413
17414 case clang::X86::BI__builtin_ia32_pext_si:
17415 case clang::X86::BI__builtin_ia32_pext_di: {
17416 APSInt Val, Msk;
17417 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
17418 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Msk, Info))
17419 return false;
17420
17421 unsigned BitWidth = Val.getBitWidth();
17422 APInt Result = APInt::getZero(numBits: BitWidth);
17423 for (unsigned I = 0, P = 0; I != BitWidth; ++I)
17424 if (Msk[I])
17425 Result.setBitVal(BitPosition: P++, BitValue: Val[I]);
17426 return Success(I: Result, E);
17427 }
17428 case X86::BI__builtin_ia32_ptestz128:
17429 case X86::BI__builtin_ia32_ptestz256:
17430 case X86::BI__builtin_ia32_vtestzps:
17431 case X86::BI__builtin_ia32_vtestzps256:
17432 case X86::BI__builtin_ia32_vtestzpd:
17433 case X86::BI__builtin_ia32_vtestzpd256: {
17434 return EvalTestOp(
17435 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
17436 }
17437 case X86::BI__builtin_ia32_ptestc128:
17438 case X86::BI__builtin_ia32_ptestc256:
17439 case X86::BI__builtin_ia32_vtestcps:
17440 case X86::BI__builtin_ia32_vtestcps256:
17441 case X86::BI__builtin_ia32_vtestcpd:
17442 case X86::BI__builtin_ia32_vtestcpd256: {
17443 return EvalTestOp(
17444 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
17445 }
17446 case X86::BI__builtin_ia32_ptestnzc128:
17447 case X86::BI__builtin_ia32_ptestnzc256:
17448 case X86::BI__builtin_ia32_vtestnzcps:
17449 case X86::BI__builtin_ia32_vtestnzcps256:
17450 case X86::BI__builtin_ia32_vtestnzcpd:
17451 case X86::BI__builtin_ia32_vtestnzcpd256: {
17452 return EvalTestOp([](const APInt &A, const APInt &B) {
17453 return ((A & B) != 0) && ((~A & B) != 0);
17454 });
17455 }
17456 case X86::BI__builtin_ia32_kandqi:
17457 case X86::BI__builtin_ia32_kandhi:
17458 case X86::BI__builtin_ia32_kandsi:
17459 case X86::BI__builtin_ia32_kanddi: {
17460 return HandleMaskBinOp(
17461 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
17462 }
17463
17464 case X86::BI__builtin_ia32_kandnqi:
17465 case X86::BI__builtin_ia32_kandnhi:
17466 case X86::BI__builtin_ia32_kandnsi:
17467 case X86::BI__builtin_ia32_kandndi: {
17468 return HandleMaskBinOp(
17469 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
17470 }
17471
17472 case X86::BI__builtin_ia32_korqi:
17473 case X86::BI__builtin_ia32_korhi:
17474 case X86::BI__builtin_ia32_korsi:
17475 case X86::BI__builtin_ia32_kordi: {
17476 return HandleMaskBinOp(
17477 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
17478 }
17479
17480 case X86::BI__builtin_ia32_kxnorqi:
17481 case X86::BI__builtin_ia32_kxnorhi:
17482 case X86::BI__builtin_ia32_kxnorsi:
17483 case X86::BI__builtin_ia32_kxnordi: {
17484 return HandleMaskBinOp(
17485 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
17486 }
17487
17488 case X86::BI__builtin_ia32_kxorqi:
17489 case X86::BI__builtin_ia32_kxorhi:
17490 case X86::BI__builtin_ia32_kxorsi:
17491 case X86::BI__builtin_ia32_kxordi: {
17492 return HandleMaskBinOp(
17493 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
17494 }
17495
17496 case X86::BI__builtin_ia32_knotqi:
17497 case X86::BI__builtin_ia32_knothi:
17498 case X86::BI__builtin_ia32_knotsi:
17499 case X86::BI__builtin_ia32_knotdi: {
17500 APSInt Val;
17501 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
17502 return false;
17503 APSInt Result = ~Val;
17504 return Success(V: APValue(Result), E);
17505 }
17506
17507 case X86::BI__builtin_ia32_kaddqi:
17508 case X86::BI__builtin_ia32_kaddhi:
17509 case X86::BI__builtin_ia32_kaddsi:
17510 case X86::BI__builtin_ia32_kadddi: {
17511 return HandleMaskBinOp(
17512 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
17513 }
17514
17515 case X86::BI__builtin_ia32_kmovb:
17516 case X86::BI__builtin_ia32_kmovw:
17517 case X86::BI__builtin_ia32_kmovd:
17518 case X86::BI__builtin_ia32_kmovq: {
17519 APSInt Val;
17520 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
17521 return false;
17522 return Success(SI: Val, E);
17523 }
17524
17525 case X86::BI__builtin_ia32_kshiftliqi:
17526 case X86::BI__builtin_ia32_kshiftlihi:
17527 case X86::BI__builtin_ia32_kshiftlisi:
17528 case X86::BI__builtin_ia32_kshiftlidi: {
17529 return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) {
17530 unsigned Amt = RHS.getZExtValue() & 0xFF;
17531 if (Amt >= LHS.getBitWidth())
17532 return APSInt(APInt::getZero(numBits: LHS.getBitWidth()), LHS.isUnsigned());
17533 return APSInt(LHS.shl(shiftAmt: Amt), LHS.isUnsigned());
17534 });
17535 }
17536
17537 case X86::BI__builtin_ia32_kshiftriqi:
17538 case X86::BI__builtin_ia32_kshiftrihi:
17539 case X86::BI__builtin_ia32_kshiftrisi:
17540 case X86::BI__builtin_ia32_kshiftridi: {
17541 return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) {
17542 unsigned Amt = RHS.getZExtValue() & 0xFF;
17543 if (Amt >= LHS.getBitWidth())
17544 return APSInt(APInt::getZero(numBits: LHS.getBitWidth()), LHS.isUnsigned());
17545 return APSInt(LHS.lshr(shiftAmt: Amt), LHS.isUnsigned());
17546 });
17547 }
17548
17549 case clang::X86::BI__builtin_ia32_vec_ext_v4hi:
17550 case clang::X86::BI__builtin_ia32_vec_ext_v16qi:
17551 case clang::X86::BI__builtin_ia32_vec_ext_v8hi:
17552 case clang::X86::BI__builtin_ia32_vec_ext_v4si:
17553 case clang::X86::BI__builtin_ia32_vec_ext_v2di:
17554 case clang::X86::BI__builtin_ia32_vec_ext_v32qi:
17555 case clang::X86::BI__builtin_ia32_vec_ext_v16hi:
17556 case clang::X86::BI__builtin_ia32_vec_ext_v8si:
17557 case clang::X86::BI__builtin_ia32_vec_ext_v4di: {
17558 APValue Vec;
17559 APSInt IdxAPS;
17560 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info) ||
17561 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: IdxAPS, Info))
17562 return false;
17563 unsigned N = Vec.getVectorLength();
17564 unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
17565 return Success(SI: Vec.getVectorElt(I: Idx).getInt(), E);
17566 }
17567
17568 case clang::X86::BI__builtin_ia32_cvtb2mask128:
17569 case clang::X86::BI__builtin_ia32_cvtb2mask256:
17570 case clang::X86::BI__builtin_ia32_cvtb2mask512:
17571 case clang::X86::BI__builtin_ia32_cvtw2mask128:
17572 case clang::X86::BI__builtin_ia32_cvtw2mask256:
17573 case clang::X86::BI__builtin_ia32_cvtw2mask512:
17574 case clang::X86::BI__builtin_ia32_cvtd2mask128:
17575 case clang::X86::BI__builtin_ia32_cvtd2mask256:
17576 case clang::X86::BI__builtin_ia32_cvtd2mask512:
17577 case clang::X86::BI__builtin_ia32_cvtq2mask128:
17578 case clang::X86::BI__builtin_ia32_cvtq2mask256:
17579 case clang::X86::BI__builtin_ia32_cvtq2mask512: {
17580 assert(E->getNumArgs() == 1);
17581 APValue Vec;
17582 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
17583 return false;
17584
17585 unsigned VectorLen = Vec.getVectorLength();
17586 unsigned RetWidth = Info.Ctx.getIntWidth(T: E->getType());
17587 llvm::APInt Bits(RetWidth, 0);
17588
17589 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
17590 const APSInt &A = Vec.getVectorElt(I: ElemNum).getInt();
17591 unsigned MSB = A[A.getBitWidth() - 1];
17592 Bits.setBitVal(BitPosition: ElemNum, BitValue: MSB);
17593 }
17594
17595 APSInt RetMask(Bits, /*isUnsigned=*/true);
17596 return Success(V: APValue(RetMask), E);
17597 }
17598
17599 case clang::X86::BI__builtin_ia32_cmpb128_mask:
17600 case clang::X86::BI__builtin_ia32_cmpw128_mask:
17601 case clang::X86::BI__builtin_ia32_cmpd128_mask:
17602 case clang::X86::BI__builtin_ia32_cmpq128_mask:
17603 case clang::X86::BI__builtin_ia32_cmpb256_mask:
17604 case clang::X86::BI__builtin_ia32_cmpw256_mask:
17605 case clang::X86::BI__builtin_ia32_cmpd256_mask:
17606 case clang::X86::BI__builtin_ia32_cmpq256_mask:
17607 case clang::X86::BI__builtin_ia32_cmpb512_mask:
17608 case clang::X86::BI__builtin_ia32_cmpw512_mask:
17609 case clang::X86::BI__builtin_ia32_cmpd512_mask:
17610 case clang::X86::BI__builtin_ia32_cmpq512_mask:
17611 case clang::X86::BI__builtin_ia32_ucmpb128_mask:
17612 case clang::X86::BI__builtin_ia32_ucmpw128_mask:
17613 case clang::X86::BI__builtin_ia32_ucmpd128_mask:
17614 case clang::X86::BI__builtin_ia32_ucmpq128_mask:
17615 case clang::X86::BI__builtin_ia32_ucmpb256_mask:
17616 case clang::X86::BI__builtin_ia32_ucmpw256_mask:
17617 case clang::X86::BI__builtin_ia32_ucmpd256_mask:
17618 case clang::X86::BI__builtin_ia32_ucmpq256_mask:
17619 case clang::X86::BI__builtin_ia32_ucmpb512_mask:
17620 case clang::X86::BI__builtin_ia32_ucmpw512_mask:
17621 case clang::X86::BI__builtin_ia32_ucmpd512_mask:
17622 case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
17623 assert(E->getNumArgs() == 4);
17624
17625 bool IsUnsigned =
17626 (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
17627 BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpw512_mask);
17628
17629 APValue LHS, RHS;
17630 APSInt Mask, Opcode;
17631 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
17632 !EvaluateVector(E: E->getArg(Arg: 1), Result&: RHS, Info) ||
17633 !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Opcode, Info) ||
17634 !EvaluateInteger(E: E->getArg(Arg: 3), Result&: Mask, Info))
17635 return false;
17636
17637 assert(LHS.getVectorLength() == RHS.getVectorLength());
17638
17639 unsigned VectorLen = LHS.getVectorLength();
17640 unsigned RetWidth = Mask.getBitWidth();
17641
17642 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
17643
17644 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
17645 const APSInt &A = LHS.getVectorElt(I: ElemNum).getInt();
17646 const APSInt &B = RHS.getVectorElt(I: ElemNum).getInt();
17647 bool Result = false;
17648
17649 switch (Opcode.getExtValue() & 0x7) {
17650 case 0: // _MM_CMPINT_EQ
17651 Result = (A == B);
17652 break;
17653 case 1: // _MM_CMPINT_LT
17654 Result = IsUnsigned ? A.ult(RHS: B) : A.slt(RHS: B);
17655 break;
17656 case 2: // _MM_CMPINT_LE
17657 Result = IsUnsigned ? A.ule(RHS: B) : A.sle(RHS: B);
17658 break;
17659 case 3: // _MM_CMPINT_FALSE
17660 Result = false;
17661 break;
17662 case 4: // _MM_CMPINT_NE
17663 Result = (A != B);
17664 break;
17665 case 5: // _MM_CMPINT_NLT (>=)
17666 Result = IsUnsigned ? A.uge(RHS: B) : A.sge(RHS: B);
17667 break;
17668 case 6: // _MM_CMPINT_NLE (>)
17669 Result = IsUnsigned ? A.ugt(RHS: B) : A.sgt(RHS: B);
17670 break;
17671 case 7: // _MM_CMPINT_TRUE
17672 Result = true;
17673 break;
17674 }
17675
17676 RetMask.setBitVal(BitPosition: ElemNum, BitValue: Mask[ElemNum] && Result);
17677 }
17678
17679 return Success(V: APValue(RetMask), E);
17680 }
17681 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
17682 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
17683 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
17684 assert(E->getNumArgs() == 3);
17685
17686 APValue Source, ShuffleMask;
17687 APSInt ZeroMask;
17688 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Source, Info) ||
17689 !EvaluateVector(E: E->getArg(Arg: 1), Result&: ShuffleMask, Info) ||
17690 !EvaluateInteger(E: E->getArg(Arg: 2), Result&: ZeroMask, Info))
17691 return false;
17692
17693 assert(Source.getVectorLength() == ShuffleMask.getVectorLength());
17694 assert(ZeroMask.getBitWidth() == Source.getVectorLength());
17695
17696 unsigned NumBytesInQWord = 8;
17697 unsigned NumBitsInByte = 8;
17698 unsigned NumBytes = Source.getVectorLength();
17699 unsigned NumQWords = NumBytes / NumBytesInQWord;
17700 unsigned RetWidth = ZeroMask.getBitWidth();
17701 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
17702
17703 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
17704 APInt SourceQWord(64, 0);
17705 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
17706 uint64_t Byte = Source.getVectorElt(I: QWordId * NumBytesInQWord + ByteIdx)
17707 .getInt()
17708 .getZExtValue();
17709 SourceQWord.insertBits(SubBits: APInt(8, Byte & 0xFF), bitPosition: ByteIdx * NumBitsInByte);
17710 }
17711
17712 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
17713 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
17714 unsigned M =
17715 ShuffleMask.getVectorElt(I: SelIdx).getInt().getZExtValue() & 0x3F;
17716 if (ZeroMask[SelIdx]) {
17717 RetMask.setBitVal(BitPosition: SelIdx, BitValue: SourceQWord[M]);
17718 }
17719 }
17720 }
17721 return Success(V: APValue(RetMask), E);
17722 }
17723 }
17724}
17725
17726/// Determine whether this is a pointer past the end of the complete
17727/// object referred to by the lvalue.
17728static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
17729 const LValue &LV) {
17730 // A null pointer can be viewed as being "past the end" but we don't
17731 // choose to look at it that way here.
17732 if (!LV.getLValueBase())
17733 return false;
17734
17735 // If the designator is valid and refers to a subobject, we're not pointing
17736 // past the end.
17737 if (!LV.getLValueDesignator().Invalid &&
17738 !LV.getLValueDesignator().isOnePastTheEnd())
17739 return false;
17740
17741 // A pointer to an incomplete type might be past-the-end if the type's size is
17742 // zero. We cannot tell because the type is incomplete.
17743 QualType Ty = getType(B: LV.getLValueBase());
17744 if (Ty->isIncompleteType())
17745 return true;
17746
17747 // Can't be past the end of an invalid object.
17748 if (LV.getLValueDesignator().Invalid)
17749 return false;
17750
17751 // We're a past-the-end pointer if we point to the byte after the object,
17752 // no matter what our type or path is.
17753 auto Size = Ctx.getTypeSizeInChars(T: Ty);
17754 return LV.getLValueOffset() == Size;
17755}
17756
namespace {

/// Data recursive integer evaluator of certain binary operators.
///
/// We use a data recursive algorithm for binary operators so that we are able
/// to handle extreme cases of chained binary operators without causing stack
/// overflow.
class DataRecursiveIntBinOpEvaluator {
  /// Outcome of evaluating one subexpression: the value produced plus a flag
  /// recording whether evaluation failed.
  struct EvalResult {
    APValue Val;
    bool Failed = false;

    EvalResult() = default;

    /// Exchange values with RHS and take over its failure flag, resetting
    /// RHS to the non-failed state.
    void swap(EvalResult &RHS) {
      Val.swap(RHS&: RHS.Val);
      Failed = RHS.Failed;
      RHS.Failed = false;
    }
  };

  /// One entry of the explicit work queue that replaces the call stack.
  struct Job {
    const Expr *E;
    EvalResult LHSResult; // meaningful only for binary operator expression.
    // Processing state of this job: not yet inspected, binary op with LHS
    // enqueued, or binary op whose LHS has been fully evaluated.
    enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;

    Job() = default;
    Job(Job &&) = default;

    /// Begin speculative (diagnostic-suppressed) evaluation for this job's
    /// remaining work; the RAII member restores state when the job dies.
    void startSpeculativeEval(EvalInfo &Info) {
      SpecEvalRAII = SpeculativeEvaluationRAII(Info);
    }

  private:
    SpeculativeEvaluationRAII SpecEvalRAII;
  };

  /// Explicit evaluation stack; the back element is processed next.
  SmallVector<Job, 16> Queue;

  IntExprEvaluator &IntEval;
  EvalInfo &Info;
  APValue &FinalResult;

public:
  DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
    : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }

  /// True if \param E is a binary operator that we are going to handle
  /// data recursively.
  /// We handle binary operators that are comma, logical, or that have operands
  /// with integral or enumeration type.
  static bool shouldEnqueue(const BinaryOperator *E) {
    return E->getOpcode() == BO_Comma || E->isLogicalOp() ||
           (E->isPRValue() && E->getType()->isIntegralOrEnumerationType() &&
            E->getLHS()->getType()->isIntegralOrEnumerationType() &&
            E->getRHS()->getType()->isIntegralOrEnumerationType());
  }

  /// Iteratively evaluate \p E, draining the work queue; on success the
  /// result is stored in the APValue passed to the constructor.
  bool Traverse(const BinaryOperator *E) {
    enqueue(E);
    EvalResult PrevResult;
    while (!Queue.empty())
      process(Result&: PrevResult);

    if (PrevResult.Failed) return false;

    FinalResult.swap(RHS&: PrevResult.Val);
    return true;
  }

private:
  // Thin forwarding helpers so the code below reads like the recursive
  // IntExprEvaluator it stands in for.
  bool Success(uint64_t Value, const Expr *E, APValue &Result) {
    return IntEval.Success(Value, E, Result);
  }
  bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
    return IntEval.Success(SI: Value, E, Result);
  }
  bool Error(const Expr *E) {
    return IntEval.Error(E);
  }
  bool Error(const Expr *E, diag::kind D) {
    return IntEval.Error(E, D);
  }

  OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
    return Info.CCEDiag(E, DiagId: D);
  }

  // Returns true if visiting the RHS is necessary, false otherwise.
  bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
                         bool &SuppressRHSDiags);

  bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
                  const BinaryOperator *E, APValue &Result);

  /// Fully evaluate \p E (not handled data-recursively) into Result,
  /// clearing the value on failure.
  void EvaluateExpr(const Expr *E, EvalResult &Result) {
    Result.Failed = !Evaluate(Result&: Result.Val, Info, E);
    if (Result.Failed)
      Result.Val = APValue();
  }

  void process(EvalResult &Result);

  /// Push \p E (with parens stripped) onto the work queue. NOTE: growing the
  /// SmallVector may reallocate, invalidating references into Queue.
  void enqueue(const Expr *E) {
    E = E->IgnoreParens();
    Queue.resize(N: Queue.size()+1);
    Queue.back().E = E;
    Queue.back().Kind = Job::AnyExprKind;
  }
};

}
17869
/// Inspect the evaluated LHS of a queued binary operator and decide whether
/// the RHS still needs to be visited.
/// \returns true if the RHS must be evaluated; false if it can be skipped
/// (short-circuit, or an unrecoverable failure has already been noted).
bool DataRecursiveIntBinOpEvaluator::
       VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
                         bool &SuppressRHSDiags) {
  if (E->getOpcode() == BO_Comma) {
    // Ignore LHS but note if we could not evaluate it.
    if (LHSResult.Failed)
      return Info.noteSideEffect();
    return true;
  }

  if (E->isLogicalOp()) {
    bool LHSAsBool;
    if (!LHSResult.Failed && HandleConversionToBool(Val: LHSResult.Val, Result&: LHSAsBool)) {
      // We were able to evaluate the LHS, see if we can get away with not
      // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
      if (LHSAsBool == (E->getOpcode() == BO_LOr)) {
        // The LHS alone determines the result; store it and skip the RHS.
        Success(Value: LHSAsBool, E, Result&: LHSResult.Val);
        return false; // Ignore RHS
      }
    } else {
      LHSResult.Failed = true;

      // Since we weren't able to evaluate the left hand side, it
      // might have had side effects.
      if (!Info.noteSideEffect())
        return false;

      // We can't evaluate the LHS; however, sometimes the result
      // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
      // Don't ignore RHS and suppress diagnostics from this arm.
      SuppressRHSDiags = true;
    }

    return true;
  }

  // Arithmetic binary operator: both operands are required.
  assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
         E->getRHS()->getType()->isIntegralOrEnumerationType());

  // If the LHS failed, only continue into the RHS to gather more diagnostics.
  if (LHSResult.Failed && !Info.noteFailure())
    return false; // Ignore RHS;

  return true;
}
17914
17915static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index,
17916 bool IsSub) {
17917 // Compute the new offset in the appropriate width, wrapping at 64 bits.
17918 // FIXME: When compiling for a 32-bit target, we should use 32-bit
17919 // offsets.
17920 assert(!LVal.hasLValuePath() && "have designator for integer lvalue");
17921 CharUnits &Offset = LVal.getLValueOffset();
17922 uint64_t Offset64 = Offset.getQuantity();
17923 uint64_t Index64 = Index.extOrTrunc(width: 64).getZExtValue();
17924 Offset = CharUnits::fromQuantity(Quantity: IsSub ? Offset64 - Index64
17925 : Offset64 + Index64);
17926}
17927
/// Combine the evaluated LHS and RHS of a queued binary operator into
/// \p Result. Handles comma, logical operators, lvalue +/- integer
/// arithmetic, label-address subtraction, and plain integer arithmetic.
/// \returns false if the combined expression could not be evaluated.
bool DataRecursiveIntBinOpEvaluator::
       VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
                  const BinaryOperator *E, APValue &Result) {
  // Comma: the value is the RHS; the LHS was already dealt with in
  // VisitBinOpLHSOnly.
  if (E->getOpcode() == BO_Comma) {
    if (RHSResult.Failed)
      return false;
    Result = RHSResult.Val;
    return true;
  }

  if (E->isLogicalOp()) {
    bool lhsResult, rhsResult;
    bool LHSIsOK = HandleConversionToBool(Val: LHSResult.Val, Result&: lhsResult);
    bool RHSIsOK = HandleConversionToBool(Val: RHSResult.Val, Result&: rhsResult);

    if (LHSIsOK) {
      if (RHSIsOK) {
        if (E->getOpcode() == BO_LOr)
          return Success(Value: lhsResult || rhsResult, E, Result);
        else
          return Success(Value: lhsResult && rhsResult, E, Result);
      }
    } else {
      if (RHSIsOK) {
        // We can't evaluate the LHS; however, sometimes the result
        // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
        if (rhsResult == (E->getOpcode() == BO_LOr))
          return Success(Value: rhsResult, E, Result);
      }
    }

    return false;
  }

  assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
         E->getRHS()->getType()->isIntegralOrEnumerationType());

  if (LHSResult.Failed || RHSResult.Failed)
    return false;

  const APValue &LHSVal = LHSResult.Val;
  const APValue &RHSVal = RHSResult.Val;

  // Handle cases like (unsigned long)&a + 4.
  if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
    Result = LHSVal;
    addOrSubLValueAsInteger(LVal&: Result, Index: RHSVal.getInt(), IsSub: E->getOpcode() == BO_Sub);
    return true;
  }

  // Handle cases like 4 + (unsigned long)&a
  if (E->getOpcode() == BO_Add &&
      RHSVal.isLValue() && LHSVal.isInt()) {
    Result = RHSVal;
    addOrSubLValueAsInteger(LVal&: Result, Index: LHSVal.getInt(), /*IsSub*/false);
    return true;
  }

  if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
    // Handle (intptr_t)&&A - (intptr_t)&&B.
    // Only the zero-offset label addresses themselves are representable.
    if (!LHSVal.getLValueOffset().isZero() ||
        !RHSVal.getLValueOffset().isZero())
      return false;
    const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
    const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
    if (!LHSExpr || !RHSExpr)
      return false;
    const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: LHSExpr);
    const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: RHSExpr);
    if (!LHSAddrExpr || !RHSAddrExpr)
      return false;
    // Make sure both labels come from the same function.
    if (LHSAddrExpr->getLabel()->getDeclContext() !=
        RHSAddrExpr->getLabel()->getDeclContext())
      return false;
    // Represent the difference symbolically as a pair of label addresses.
    Result = APValue(LHSAddrExpr, RHSAddrExpr);
    return true;
  }

  // All the remaining cases expect both operands to be an integer
  if (!LHSVal.isInt() || !RHSVal.isInt())
    return Error(E);

  // Set up the width and signedness manually, in case it can't be deduced
  // from the operation we're performing.
  // FIXME: Don't do this in the cases where we can deduce it.
  APSInt Value(Info.Ctx.getIntWidth(T: E->getType()),
               E->getType()->isUnsignedIntegerOrEnumerationType());
  if (!handleIntIntBinOp(Info, E, LHS: LHSVal.getInt(), Opcode: E->getOpcode(),
                         RHS: RHSVal.getInt(), Result&: Value))
    return false;
  return Success(Value, E, Result);
}
18021
/// Advance the state machine for the job on top of the queue by one step.
/// \p Result carries the value produced by the previously completed job in
/// and, when this job completes, its value out.
void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
  // CAUTION: 'job' references into Queue; enqueue() may reallocate the
  // SmallVector, so each case finishes all uses of 'job' before enqueueing.
  Job &job = Queue.back();

  switch (job.Kind) {
    case Job::AnyExprKind: {
      // First look at this expression: either hand its LHS back to the queue
      // (for operators we handle data-recursively) or evaluate it directly.
      if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(Val: job.E)) {
        if (shouldEnqueue(E: Bop)) {
          job.Kind = Job::BinOpKind;
          enqueue(E: Bop->getLHS());
          return;
        }
      }

      EvaluateExpr(E: job.E, Result);
      Queue.pop_back();
      return;
    }

    case Job::BinOpKind: {
      // The LHS result has just arrived in 'Result'. Decide whether the RHS
      // is still needed; if so, stash the LHS and enqueue the RHS.
      const BinaryOperator *Bop = cast<BinaryOperator>(Val: job.E);
      bool SuppressRHSDiags = false;
      if (!VisitBinOpLHSOnly(LHSResult&: Result, E: Bop, SuppressRHSDiags)) {
        Queue.pop_back();
        return;
      }
      // Diagnostics from the RHS are speculative when the LHS already failed.
      if (SuppressRHSDiags)
        job.startSpeculativeEval(Info);
      job.LHSResult.swap(RHS&: Result);
      job.Kind = Job::BinOpVisitedLHSKind;
      enqueue(E: Bop->getRHS());
      return;
    }

    case Job::BinOpVisitedLHSKind: {
      // Both operands are available: combine them into this job's result.
      const BinaryOperator *Bop = cast<BinaryOperator>(Val: job.E);
      EvalResult RHS;
      RHS.swap(RHS&: Result);
      Result.Failed = !VisitBinOp(LHSResult: job.LHSResult, RHSResult: RHS, E: Bop, Result&: Result.Val);
      Queue.pop_back();
      return;
    }
  }

  llvm_unreachable("Invalid Job::Kind!");
}
18067
namespace {
/// Outcome of evaluating a comparison operator: a three-way ordering, plus
/// 'Unequal' for equality-only comparisons (e.g. complex types, which have
/// no ordering) and 'Unordered' for floating-point comparisons involving
/// NaN.
enum class CmpResult {
  Unequal,
  Less,
  Equal,
  Greater,
  Unordered,
};
}
18077
18078template <class SuccessCB, class AfterCB>
18079static bool
18080EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
18081 SuccessCB &&Success, AfterCB &&DoAfter) {
18082 assert(!E->isValueDependent());
18083 assert(E->isComparisonOp() && "expected comparison operator");
18084 assert((E->getOpcode() == BO_Cmp ||
18085 E->getType()->isIntegralOrEnumerationType()) &&
18086 "unsupported binary expression evaluation");
18087 auto Error = [&](const Expr *E) {
18088 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
18089 return false;
18090 };
18091
18092 bool IsRelational = E->isRelationalOp() || E->getOpcode() == BO_Cmp;
18093 bool IsEquality = E->isEqualityOp();
18094
18095 QualType LHSTy = E->getLHS()->getType();
18096 QualType RHSTy = E->getRHS()->getType();
18097
18098 if (LHSTy->isIntegralOrEnumerationType() &&
18099 RHSTy->isIntegralOrEnumerationType()) {
18100 APSInt LHS, RHS;
18101 bool LHSOK = EvaluateInteger(E: E->getLHS(), Result&: LHS, Info);
18102 if (!LHSOK && !Info.noteFailure())
18103 return false;
18104 if (!EvaluateInteger(E: E->getRHS(), Result&: RHS, Info) || !LHSOK)
18105 return false;
18106 if (LHS < RHS)
18107 return Success(CmpResult::Less, E);
18108 if (LHS > RHS)
18109 return Success(CmpResult::Greater, E);
18110 return Success(CmpResult::Equal, E);
18111 }
18112
18113 if (LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) {
18114 APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(Ty: LHSTy));
18115 APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(Ty: RHSTy));
18116
18117 bool LHSOK = EvaluateFixedPointOrInteger(E: E->getLHS(), Result&: LHSFX, Info);
18118 if (!LHSOK && !Info.noteFailure())
18119 return false;
18120 if (!EvaluateFixedPointOrInteger(E: E->getRHS(), Result&: RHSFX, Info) || !LHSOK)
18121 return false;
18122 if (LHSFX < RHSFX)
18123 return Success(CmpResult::Less, E);
18124 if (LHSFX > RHSFX)
18125 return Success(CmpResult::Greater, E);
18126 return Success(CmpResult::Equal, E);
18127 }
18128
18129 if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
18130 ComplexValue LHS, RHS;
18131 bool LHSOK;
18132 if (E->isAssignmentOp()) {
18133 LValue LV;
18134 EvaluateLValue(E: E->getLHS(), Result&: LV, Info);
18135 LHSOK = false;
18136 } else if (LHSTy->isRealFloatingType()) {
18137 LHSOK = EvaluateFloat(E: E->getLHS(), Result&: LHS.FloatReal, Info);
18138 if (LHSOK) {
18139 LHS.makeComplexFloat();
18140 LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics());
18141 }
18142 } else {
18143 LHSOK = EvaluateComplex(E: E->getLHS(), Res&: LHS, Info);
18144 }
18145 if (!LHSOK && !Info.noteFailure())
18146 return false;
18147
18148 if (E->getRHS()->getType()->isRealFloatingType()) {
18149 if (!EvaluateFloat(E: E->getRHS(), Result&: RHS.FloatReal, Info) || !LHSOK)
18150 return false;
18151 RHS.makeComplexFloat();
18152 RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics());
18153 } else if (!EvaluateComplex(E: E->getRHS(), Res&: RHS, Info) || !LHSOK)
18154 return false;
18155
18156 if (LHS.isComplexFloat()) {
18157 APFloat::cmpResult CR_r =
18158 LHS.getComplexFloatReal().compare(RHS: RHS.getComplexFloatReal());
18159 APFloat::cmpResult CR_i =
18160 LHS.getComplexFloatImag().compare(RHS: RHS.getComplexFloatImag());
18161 bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual;
18162 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
18163 } else {
18164 assert(IsEquality && "invalid complex comparison");
18165 bool IsEqual = LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
18166 LHS.getComplexIntImag() == RHS.getComplexIntImag();
18167 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
18168 }
18169 }
18170
18171 if (LHSTy->isRealFloatingType() &&
18172 RHSTy->isRealFloatingType()) {
18173 APFloat RHS(0.0), LHS(0.0);
18174
18175 bool LHSOK = EvaluateFloat(E: E->getRHS(), Result&: RHS, Info);
18176 if (!LHSOK && !Info.noteFailure())
18177 return false;
18178
18179 if (!EvaluateFloat(E: E->getLHS(), Result&: LHS, Info) || !LHSOK)
18180 return false;
18181
18182 assert(E->isComparisonOp() && "Invalid binary operator!");
18183 llvm::APFloatBase::cmpResult APFloatCmpResult = LHS.compare(RHS);
18184 if (!Info.InConstantContext &&
18185 APFloatCmpResult == APFloat::cmpUnordered &&
18186 E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()).isFPConstrained()) {
18187 // Note: Compares may raise invalid in some cases involving NaN or sNaN.
18188 Info.FFDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict);
18189 return false;
18190 }
18191 auto GetCmpRes = [&]() {
18192 switch (APFloatCmpResult) {
18193 case APFloat::cmpEqual:
18194 return CmpResult::Equal;
18195 case APFloat::cmpLessThan:
18196 return CmpResult::Less;
18197 case APFloat::cmpGreaterThan:
18198 return CmpResult::Greater;
18199 case APFloat::cmpUnordered:
18200 return CmpResult::Unordered;
18201 }
18202 llvm_unreachable("Unrecognised APFloat::cmpResult enum");
18203 };
18204 return Success(GetCmpRes(), E);
18205 }
18206
18207 if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
18208 LValue LHSValue, RHSValue;
18209
18210 bool LHSOK = EvaluatePointer(E: E->getLHS(), Result&: LHSValue, Info);
18211 if (!LHSOK && !Info.noteFailure())
18212 return false;
18213
18214 if (!EvaluatePointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK)
18215 return false;
18216
18217 // Reject differing bases from the normal codepath; we special-case
18218 // comparisons to null.
18219 if (!HasSameBase(A: LHSValue, B: RHSValue)) {
18220 // Bail out early if we're checking potential constant expression.
18221 // Otherwise, prefer to diagnose other issues.
18222 if (Info.checkingPotentialConstantExpression() &&
18223 (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown))
18224 return false;
18225 auto DiagComparison = [&] (unsigned DiagID, bool Reversed = false) {
18226 std::string LHS = LHSValue.toString(Ctx&: Info.Ctx, T: E->getLHS()->getType());
18227 std::string RHS = RHSValue.toString(Ctx&: Info.Ctx, T: E->getRHS()->getType());
18228 Info.FFDiag(E, DiagId: DiagID)
18229 << (Reversed ? RHS : LHS) << (Reversed ? LHS : RHS);
18230 return false;
18231 };
18232 // Inequalities and subtractions between unrelated pointers have
18233 // unspecified or undefined behavior.
18234 if (!IsEquality)
18235 return DiagComparison(
18236 diag::note_constexpr_pointer_comparison_unspecified);
18237 // A constant address may compare equal to the address of a symbol.
18238 // The one exception is that address of an object cannot compare equal
18239 // to a null pointer constant.
18240 // TODO: Should we restrict this to actual null pointers, and exclude the
18241 // case of zero cast to pointer type?
18242 if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
18243 (!RHSValue.Base && !RHSValue.Offset.isZero()))
18244 return DiagComparison(diag::note_constexpr_pointer_constant_comparison,
18245 !RHSValue.Base);
18246 // C++2c [intro.object]/10:
18247 // Two objects [...] may have the same address if [...] they are both
18248 // potentially non-unique objects.
18249 // C++2c [intro.object]/9:
18250 // An object is potentially non-unique if it is a string literal object,
18251 // the backing array of an initializer list, or a subobject thereof.
18252 //
18253 // This makes the comparison result unspecified, so it's not a constant
18254 // expression.
18255 //
18256 // TODO: Do we need to handle the initializer list case here?
18257 if (ArePotentiallyOverlappingStringLiterals(Info, LHS: LHSValue, RHS: RHSValue))
18258 return DiagComparison(diag::note_constexpr_literal_comparison);
18259 if (IsOpaqueConstantCall(LVal: LHSValue) || IsOpaqueConstantCall(LVal: RHSValue))
18260 return DiagComparison(diag::note_constexpr_opaque_call_comparison,
18261 !IsOpaqueConstantCall(LVal: LHSValue));
18262 // We can't tell whether weak symbols will end up pointing to the same
18263 // object.
18264 if (IsWeakLValue(Value: LHSValue) || IsWeakLValue(Value: RHSValue))
18265 return DiagComparison(diag::note_constexpr_pointer_weak_comparison,
18266 !IsWeakLValue(Value: LHSValue));
18267 // We can't compare the address of the start of one object with the
18268 // past-the-end address of another object, per C++ DR1652.
18269 if (LHSValue.Base && LHSValue.Offset.isZero() &&
18270 isOnePastTheEndOfCompleteObject(Ctx: Info.Ctx, LV: RHSValue))
18271 return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
18272 true);
18273 if (RHSValue.Base && RHSValue.Offset.isZero() &&
18274 isOnePastTheEndOfCompleteObject(Ctx: Info.Ctx, LV: LHSValue))
18275 return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
18276 false);
18277 // We can't tell whether an object is at the same address as another
18278 // zero sized object.
18279 if ((RHSValue.Base && isZeroSized(Value: LHSValue)) ||
18280 (LHSValue.Base && isZeroSized(Value: RHSValue)))
18281 return DiagComparison(
18282 diag::note_constexpr_pointer_comparison_zero_sized);
18283 if (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown)
18284 return DiagComparison(
18285 diag::note_constexpr_pointer_comparison_unspecified);
18286 // FIXME: Verify both variables are live.
18287 return Success(CmpResult::Unequal, E);
18288 }
18289
18290 const CharUnits &LHSOffset = LHSValue.getLValueOffset();
18291 const CharUnits &RHSOffset = RHSValue.getLValueOffset();
18292
18293 SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
18294 SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
18295
18296 // C++11 [expr.rel]p2:
18297 // - If two pointers point to non-static data members of the same object,
18298 // or to subobjects or array elements fo such members, recursively, the
18299 // pointer to the later declared member compares greater provided the
18300 // two members have the same access control and provided their class is
18301 // not a union.
18302 // [...]
18303 // - Otherwise pointer comparisons are unspecified.
18304 if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
18305 bool WasArrayIndex;
18306 unsigned Mismatch = FindDesignatorMismatch(
18307 ObjType: LHSValue.Base.isNull() ? QualType()
18308 : getType(B: LHSValue.Base).getNonReferenceType(),
18309 A: LHSDesignator, B: RHSDesignator, WasArrayIndex);
18310 // At the point where the designators diverge, the comparison has a
18311 // specified value if:
18312 // - we are comparing array indices
18313 // - we are comparing fields of a union, or fields with the same access
18314 // Otherwise, the result is unspecified and thus the comparison is not a
18315 // constant expression.
18316 if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
18317 Mismatch < RHSDesignator.Entries.size()) {
18318 const FieldDecl *LF = getAsField(E: LHSDesignator.Entries[Mismatch]);
18319 const FieldDecl *RF = getAsField(E: RHSDesignator.Entries[Mismatch]);
18320 if (!LF && !RF)
18321 Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_classes);
18322 else if (!LF)
18323 Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_field)
18324 << getAsBaseClass(E: LHSDesignator.Entries[Mismatch])
18325 << RF->getParent() << RF;
18326 else if (!RF)
18327 Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_field)
18328 << getAsBaseClass(E: RHSDesignator.Entries[Mismatch])
18329 << LF->getParent() << LF;
18330 else if (!LF->getParent()->isUnion() &&
18331 LF->getAccess() != RF->getAccess())
18332 Info.CCEDiag(E,
18333 DiagId: diag::note_constexpr_pointer_comparison_differing_access)
18334 << LF << LF->getAccess() << RF << RF->getAccess()
18335 << LF->getParent();
18336 }
18337 }
18338
18339 // The comparison here must be unsigned, and performed with the same
18340 // width as the pointer.
18341 unsigned PtrSize = Info.Ctx.getTypeSize(T: LHSTy);
18342 uint64_t CompareLHS = LHSOffset.getQuantity();
18343 uint64_t CompareRHS = RHSOffset.getQuantity();
18344 assert(PtrSize <= 64 && "Unexpected pointer width");
18345 uint64_t Mask = ~0ULL >> (64 - PtrSize);
18346 CompareLHS &= Mask;
18347 CompareRHS &= Mask;
18348
18349 // If there is a base and this is a relational operator, we can only
18350 // compare pointers within the object in question; otherwise, the result
18351 // depends on where the object is located in memory.
18352 if (!LHSValue.Base.isNull() && IsRelational) {
18353 QualType BaseTy = getType(B: LHSValue.Base).getNonReferenceType();
18354 if (BaseTy->isIncompleteType())
18355 return Error(E);
18356 CharUnits Size = Info.Ctx.getTypeSizeInChars(T: BaseTy);
18357 uint64_t OffsetLimit = Size.getQuantity();
18358 if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
18359 return Error(E);
18360 }
18361
18362 if (CompareLHS < CompareRHS)
18363 return Success(CmpResult::Less, E);
18364 if (CompareLHS > CompareRHS)
18365 return Success(CmpResult::Greater, E);
18366 return Success(CmpResult::Equal, E);
18367 }
18368
18369 if (LHSTy->isMemberPointerType()) {
18370 assert(IsEquality && "unexpected member pointer operation");
18371 assert(RHSTy->isMemberPointerType() && "invalid comparison");
18372
18373 MemberPtr LHSValue, RHSValue;
18374
18375 bool LHSOK = EvaluateMemberPointer(E: E->getLHS(), Result&: LHSValue, Info);
18376 if (!LHSOK && !Info.noteFailure())
18377 return false;
18378
18379 if (!EvaluateMemberPointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK)
18380 return false;
18381
18382 // If either operand is a pointer to a weak function, the comparison is not
18383 // constant.
18384 if (LHSValue.getDecl() && LHSValue.getDecl()->isWeak()) {
18385 Info.FFDiag(E, DiagId: diag::note_constexpr_mem_pointer_weak_comparison)
18386 << LHSValue.getDecl();
18387 return false;
18388 }
18389 if (RHSValue.getDecl() && RHSValue.getDecl()->isWeak()) {
18390 Info.FFDiag(E, DiagId: diag::note_constexpr_mem_pointer_weak_comparison)
18391 << RHSValue.getDecl();
18392 return false;
18393 }
18394
18395 // C++11 [expr.eq]p2:
18396 // If both operands are null, they compare equal. Otherwise if only one is
18397 // null, they compare unequal.
18398 if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
18399 bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
18400 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
18401 }
18402
18403 // Otherwise if either is a pointer to a virtual member function, the
18404 // result is unspecified.
18405 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: LHSValue.getDecl()))
18406 if (MD->isVirtual())
18407 Info.CCEDiag(E, DiagId: diag::note_constexpr_compare_virtual_mem_ptr) << MD;
18408 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: RHSValue.getDecl()))
18409 if (MD->isVirtual())
18410 Info.CCEDiag(E, DiagId: diag::note_constexpr_compare_virtual_mem_ptr) << MD;
18411
18412 // Otherwise they compare equal if and only if they would refer to the
18413 // same member of the same most derived object or the same subobject if
18414 // they were dereferenced with a hypothetical object of the associated
18415 // class type.
18416 bool Equal = LHSValue == RHSValue;
18417 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
18418 }
18419
18420 if (LHSTy->isNullPtrType()) {
18421 assert(E->isComparisonOp() && "unexpected nullptr operation");
18422 assert(RHSTy->isNullPtrType() && "missing pointer conversion");
18423 // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
18424 // are compared, the result is true of the operator is <=, >= or ==, and
18425 // false otherwise.
18426 LValue Res;
18427 if (!EvaluatePointer(E: E->getLHS(), Result&: Res, Info) ||
18428 !EvaluatePointer(E: E->getRHS(), Result&: Res, Info))
18429 return false;
18430 return Success(CmpResult::Equal, E);
18431 }
18432
18433 return DoAfter();
18434}
18435
18436bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
18437 if (!CheckLiteralType(Info, E))
18438 return false;
18439
18440 auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
18441 ComparisonCategoryResult CCR;
18442 switch (CR) {
18443 case CmpResult::Unequal:
18444 llvm_unreachable("should never produce Unequal for three-way comparison");
18445 case CmpResult::Less:
18446 CCR = ComparisonCategoryResult::Less;
18447 break;
18448 case CmpResult::Equal:
18449 CCR = ComparisonCategoryResult::Equal;
18450 break;
18451 case CmpResult::Greater:
18452 CCR = ComparisonCategoryResult::Greater;
18453 break;
18454 case CmpResult::Unordered:
18455 CCR = ComparisonCategoryResult::Unordered;
18456 break;
18457 }
18458 // Evaluation succeeded. Lookup the information for the comparison category
18459 // type and fetch the VarDecl for the result.
18460 const ComparisonCategoryInfo &CmpInfo =
18461 Info.Ctx.CompCategories.getInfoForType(Ty: E->getType());
18462 const VarDecl *VD = CmpInfo.getValueInfo(ValueKind: CmpInfo.makeWeakResult(Res: CCR))->VD;
18463 // Check and evaluate the result as a constant expression.
18464 LValue LV;
18465 LV.set(B: VD);
18466 if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: LV, RVal&: Result))
18467 return false;
18468 return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result,
18469 Kind: ConstantExprKind::Normal);
18470 };
18471 return EvaluateComparisonBinaryOperator(Info, E, Success&: OnSuccess, DoAfter: [&]() {
18472 return ExprEvaluatorBaseTy::VisitBinCmp(S: E);
18473 });
18474}
18475
// Evaluate parenthesized aggregate initialization of a record by reusing the
// shared code path for paren-list / init-list initializers.
bool RecordExprEvaluator::VisitCXXParenListInitExpr(
    const CXXParenListInitExpr *E) {
  return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs());
}
18480
/// Evaluate an integer-valued binary operator. Integral-operand operators are
/// delegated to the data-recursive evaluator; this method handles comparisons
/// (via the shared three-way comparison helper) and pointer subtraction.
bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // We don't support assignment in C. C++ assignments don't get here because
  // assignment is an lvalue in C++.
  if (E->isAssignmentOp()) {
    Error(E);
    if (!Info.noteFailure())
      return false;
  }

  // Hand integral <op> integral off to the data-recursive evaluator.
  if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
    return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);

  assert((!E->getLHS()->getType()->isIntegralOrEnumerationType() ||
          !E->getRHS()->getType()->isIntegralOrEnumerationType()) &&
         "DataRecursiveIntBinOpEvaluator should have handled integral types");

  if (E->isComparisonOp()) {
    // Evaluate builtin binary comparisons by evaluating them as three-way
    // comparisons and then translating the result.
    auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
      assert((CR != CmpResult::Unequal || E->isEqualityOp()) &&
             "should only produce Unequal for equality comparisons");
      bool IsEqual   = CR == CmpResult::Equal,
           IsLess    = CR == CmpResult::Less,
           IsGreater = CR == CmpResult::Greater;
      auto Op = E->getOpcode();
      switch (Op) {
      default:
        llvm_unreachable("unsupported binary operator");
      case BO_EQ:
      case BO_NE:
        return Success(Value: IsEqual == (Op == BO_EQ), E);
      case BO_LT:
        return Success(Value: IsLess, E);
      case BO_GT:
        return Success(Value: IsGreater, E);
      case BO_LE:
        return Success(Value: IsEqual || IsLess, E);
      case BO_GE:
        return Success(Value: IsEqual || IsGreater, E);
      }
    };
    return EvaluateComparisonBinaryOperator(Info, E, Success&: OnSuccess, DoAfter: [&]() {
      return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
    });
  }

  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();

  // Handle pointer subtraction: pointer - pointer with an integer result.
  if (LHSTy->isPointerType() && RHSTy->isPointerType() &&
      E->getOpcode() == BO_Sub) {
    LValue LHSValue, RHSValue;

    bool LHSOK = EvaluatePointer(E: E->getLHS(), Result&: LHSValue, Info);
    if (!LHSOK && !Info.noteFailure())
      return false;

    // Still evaluate the RHS when the LHS failed, so diagnostics/notes for
    // both operands are produced before bailing out.
    if (!EvaluatePointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK)
      return false;

    // Reject differing bases from the normal codepath; we special-case
    // comparisons to null.
    if (!HasSameBase(A: LHSValue, B: RHSValue)) {
      // Bail out quietly if a constexpr-unknown value is involved while only
      // checking for a potential constant expression.
      if (Info.checkingPotentialConstantExpression() &&
          (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown))
        return false;

      const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr *>();
      const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr *>();

      auto DiagArith = [&](unsigned DiagID) {
        std::string LHS = LHSValue.toString(Ctx&: Info.Ctx, T: E->getLHS()->getType());
        std::string RHS = RHSValue.toString(Ctx&: Info.Ctx, T: E->getRHS()->getType());
        Info.FFDiag(E, DiagId: DiagID) << LHS << RHS;
        // If both operands stem from the very same source expression, note
        // that it was evaluated more than once (distinct objects each time).
        if (LHSExpr && LHSExpr == RHSExpr)
          Info.Note(Loc: LHSExpr->getExprLoc(),
                    DiagId: diag::note_constexpr_repeated_literal_eval)
              << LHSExpr->getSourceRange();
        return false;
      };

      if (!LHSExpr || !RHSExpr)
        return DiagArith(diag::note_constexpr_pointer_arith_unspecified);

      if (ArePotentiallyOverlappingStringLiterals(Info, LHS: LHSValue, RHS: RHSValue))
        return DiagArith(diag::note_constexpr_literal_arith);

      // The only unrelated-base subtraction we can fold is the GNU
      // &&label - &&label extension, encoded as an addr-label-diff APValue.
      const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: LHSExpr);
      const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: RHSExpr);
      if (!LHSAddrExpr || !RHSAddrExpr)
        return Error(E);
      // Make sure both labels come from the same function.
      if (LHSAddrExpr->getLabel()->getDeclContext() !=
          RHSAddrExpr->getLabel()->getDeclContext())
        return Error(E);
      return Success(V: APValue(LHSAddrExpr, RHSAddrExpr), E);
    }
    const CharUnits &LHSOffset = LHSValue.getLValueOffset();
    const CharUnits &RHSOffset = RHSValue.getLValueOffset();

    SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
    SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();

    // C++11 [expr.add]p6:
    // Unless both pointers point to elements of the same array object, or
    // one past the last element of the array object, the behavior is
    // undefined.
    if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
        !AreElementsOfSameArray(ObjType: getType(B: LHSValue.Base), A: LHSDesignator,
                                B: RHSDesignator))
      Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_subtraction_not_same_array);

    QualType Type = E->getLHS()->getType();
    QualType ElementType = Type->castAs<PointerType>()->getPointeeType();

    CharUnits ElementSize;
    if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: ElementType, Size&: ElementSize))
      return false;

    // As an extension, a type may have zero size (empty struct or union in
    // C, array of zero length). Pointer subtraction in such cases has
    // undefined behavior, so is not constant.
    if (ElementSize.isZero()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_pointer_subtraction_zero_size)
          << ElementType;
      return false;
    }

    // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
    // and produce incorrect results when it overflows. Such behavior
    // appears to be non-conforming, but is common, so perhaps we should
    // assume the standard intended for such cases to be undefined behavior
    // and check for them.

    // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
    // overflow in the final conversion to ptrdiff_t.
    // 65 bits is wide enough to hold the difference of any two signed 64-bit
    // offsets without overflowing.
    APSInt LHS(llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
    APSInt RHS(llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
    APSInt ElemSize(llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true),
                    false);
    APSInt TrueResult = (LHS - RHS) / ElemSize;
    // Truncate to the result type; diagnose if that loses information.
    APSInt Result = TrueResult.trunc(width: Info.Ctx.getIntWidth(T: E->getType()));

    if (Result.extend(width: 65) != TrueResult &&
        !HandleOverflow(Info, E, SrcValue: TrueResult, DestType: E->getType()))
      return false;
    return Success(SI: Result, E);
  }

  return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
}
18633
/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with
/// a result as the expression's type. Also handles the related traits:
/// __builtin_ptrauth_type_discriminator, __datasizeof, OpenMP required simd
/// alignment, __builtin_vectorelements, and _Countof.
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *E) {
  switch(E->getKind()) {
  case UETT_PreferredAlignOf:
  case UETT_AlignOf: {
    // alignof may be applied either to a type or to an expression; each form
    // has its own helper, and both distinguish alignof vs __alignof via Kind.
    if (E->isArgumentType())
      return Success(
          Size: GetAlignOfType(Ctx: Info.Ctx, T: E->getArgumentType(), ExprKind: E->getKind()), E);
    else
      return Success(
          Size: GetAlignOfExpr(Ctx: Info.Ctx, E: E->getArgumentExpr(), ExprKind: E->getKind()), E);
  }

  case UETT_PtrAuthTypeDiscriminator: {
    // Cannot compute a discriminator for a dependent type.
    if (E->getArgumentType()->isDependentType())
      return false;
    return Success(
        Value: Info.Ctx.getPointerAuthTypeDiscriminator(T: E->getArgumentType()), E);
  }
  case UETT_VecStep: {
    QualType Ty = E->getTypeOfArgument();

    if (Ty->isVectorType()) {
      unsigned n = Ty->castAs<VectorType>()->getNumElements();

      // The vec_step built-in functions that take a 3-component
      // vector return 4. (OpenCL 1.1 spec 6.11.12)
      if (n == 3)
        n = 4;

      return Success(Value: n, E);
    } else
      // vec_step of a scalar type is 1. (OpenCL 1.1 spec 6.11.12)
      return Success(Value: 1, E);
  }

  case UETT_DataSizeOf:
  case UETT_SizeOf: {
    QualType SrcTy = E->getTypeOfArgument();
    // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
    // the result is the size of the referenced type."
    if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
      SrcTy = Ref->getPointeeType();

    CharUnits Sizeof;
    if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: SrcTy, Size&: Sizeof,
                      SOT: E->getKind() == UETT_DataSizeOf ? SizeOfType::DataSizeOf
                                                        : SizeOfType::SizeOf)) {
      return false;
    }
    return Success(Size: Sizeof, E);
  }
  case UETT_OpenMPRequiredSimdAlign:
    assert(E->isArgumentType());
    // Result is the target's default simd alignment for the type, in chars.
    return Success(
        Value: Info.Ctx.toCharUnitsFromBits(
                   BitSize: Info.Ctx.getOpenMPDefaultSimdAlign(T: E->getArgumentType()))
            .getQuantity(),
        E);
  case UETT_VectorElements: {
    QualType Ty = E->getTypeOfArgument();
    // If the vector has a fixed size, we can determine the number of elements
    // at compile time.
    if (const auto *VT = Ty->getAs<VectorType>())
      return Success(Value: VT->getNumElements(), E);

    // Otherwise the argument is a sizeless vector type, whose element count
    // is not a compile-time constant.
    assert(Ty->isSizelessVectorType());
    if (Info.InConstantContext)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_non_const_vectorelements)
          << E->getSourceRange();

    return false;
  }
  case UETT_CountOf: {
    QualType Ty = E->getTypeOfArgument();
    assert(Ty->isArrayType());

    // We don't need to worry about array element qualifiers, so getting the
    // unsafe array type is fine.
    if (const auto *CAT =
            dyn_cast<ConstantArrayType>(Val: Ty->getAsArrayTypeUnsafe())) {
      return Success(I: CAT->getSize(), E);
    }

    assert(!Ty->isConstantSizeType());

    // If it's a variable-length array type, we need to check whether it is a
    // multidimensional array. If so, we need to check the size expression of
    // the VLA to see if it's a constant size. If so, we can return that value.
    const auto *VAT = Info.Ctx.getAsVariableArrayType(T: Ty);
    assert(VAT);
    if (VAT->getElementType()->isArrayType()) {
      // Variable array size expression could be missing (e.g. int a[*][10]) In
      // that case, it can't be a constant expression.
      if (!VAT->getSizeExpr()) {
        Info.FFDiag(Loc: E->getBeginLoc());
        return false;
      }

      std::optional<APSInt> Res =
          VAT->getSizeExpr()->getIntegerConstantExpr(Ctx: Info.Ctx);
      if (Res) {
        // The resulting value always has type size_t, so we need to make the
        // returned APInt have the correct sign and bit-width.
        APInt Val{
            static_cast<unsigned>(Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType())),
            Res->getZExtValue()};
        return Success(I: Val, E);
      }
    }

    // Definitely a variable-length type, which is not an ICE.
    // FIXME: Better diagnostic.
    Info.FFDiag(Loc: E->getBeginLoc());
    return false;
  }
  }

  llvm_unreachable("unknown expr/type trait");
}
18755
18756bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
18757 Info.Ctx.recordOffsetOfEvaluation(E: OOE);
18758 CharUnits Result;
18759 unsigned n = OOE->getNumComponents();
18760 if (n == 0)
18761 return Error(E: OOE);
18762 QualType CurrentType = OOE->getTypeSourceInfo()->getType();
18763 for (unsigned i = 0; i != n; ++i) {
18764 OffsetOfNode ON = OOE->getComponent(Idx: i);
18765 switch (ON.getKind()) {
18766 case OffsetOfNode::Array: {
18767 const Expr *Idx = OOE->getIndexExpr(Idx: ON.getArrayExprIndex());
18768 APSInt IdxResult;
18769 if (!EvaluateInteger(E: Idx, Result&: IdxResult, Info))
18770 return false;
18771 const ArrayType *AT = Info.Ctx.getAsArrayType(T: CurrentType);
18772 if (!AT)
18773 return Error(E: OOE);
18774 CurrentType = AT->getElementType();
18775 CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(T: CurrentType);
18776 Result += IdxResult.getSExtValue() * ElementSize;
18777 break;
18778 }
18779
18780 case OffsetOfNode::Field: {
18781 FieldDecl *MemberDecl = ON.getField();
18782 const auto *RD = CurrentType->getAsRecordDecl();
18783 if (!RD)
18784 return Error(E: OOE);
18785 if (RD->isInvalidDecl()) return false;
18786 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(D: RD);
18787 unsigned i = MemberDecl->getFieldIndex();
18788 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
18789 Result += Info.Ctx.toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: i));
18790 CurrentType = MemberDecl->getType().getNonReferenceType();
18791 break;
18792 }
18793
18794 case OffsetOfNode::Identifier:
18795 llvm_unreachable("dependent __builtin_offsetof");
18796
18797 case OffsetOfNode::Base: {
18798 CXXBaseSpecifier *BaseSpec = ON.getBase();
18799 if (BaseSpec->isVirtual())
18800 return Error(E: OOE);
18801
18802 // Find the layout of the class whose base we are looking into.
18803 const auto *RD = CurrentType->getAsCXXRecordDecl();
18804 if (!RD)
18805 return Error(E: OOE);
18806 if (RD->isInvalidDecl()) return false;
18807 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(D: RD);
18808
18809 // Find the base class itself.
18810 CurrentType = BaseSpec->getType();
18811 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
18812 if (!BaseRD)
18813 return Error(E: OOE);
18814
18815 // Add the offset to the base.
18816 Result += RL.getBaseClassOffset(Base: BaseRD);
18817 break;
18818 }
18819 }
18820 }
18821 return Success(Size: Result, E: OOE);
18822}
18823
18824bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
18825 switch (E->getOpcode()) {
18826 default:
18827 // Address, indirect, pre/post inc/dec, etc are not valid constant exprs.
18828 // See C99 6.6p3.
18829 return Error(E);
18830 case UO_Extension:
18831 // FIXME: Should extension allow i-c-e extension expressions in its scope?
18832 // If so, we could clear the diagnostic ID.
18833 return Visit(S: E->getSubExpr());
18834 case UO_Plus:
18835 // The result is just the value.
18836 return Visit(S: E->getSubExpr());
18837 case UO_Minus: {
18838 if (!Visit(S: E->getSubExpr()))
18839 return false;
18840 if (!Result.isInt()) return Error(E);
18841 const APSInt &Value = Result.getInt();
18842 if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
18843 !E->getType().isWrapType()) {
18844 if (Info.checkingForUndefinedBehavior())
18845 Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
18846 DiagID: diag::warn_integer_constant_overflow)
18847 << toString(I: Value, Radix: 10, Signed: Value.isSigned(), /*formatAsCLiteral=*/false,
18848 /*UpperCase=*/true, /*InsertSeparators=*/true)
18849 << E->getType() << E->getSourceRange();
18850
18851 if (!HandleOverflow(Info, E, SrcValue: -Value.extend(width: Value.getBitWidth() + 1),
18852 DestType: E->getType()))
18853 return false;
18854 }
18855 return Success(SI: -Value, E);
18856 }
18857 case UO_Not: {
18858 if (!Visit(S: E->getSubExpr()))
18859 return false;
18860 if (!Result.isInt()) return Error(E);
18861 return Success(SI: ~Result.getInt(), E);
18862 }
18863 case UO_LNot: {
18864 bool bres;
18865 if (!EvaluateAsBooleanCondition(E: E->getSubExpr(), Result&: bres, Info))
18866 return false;
18867 return Success(Value: !bres, E);
18868 }
18869 }
18870}
18871
18872/// HandleCast - This is used to evaluate implicit or explicit casts where the
18873/// result type is integer.
18874bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
18875 const Expr *SubExpr = E->getSubExpr();
18876 QualType DestType = E->getType();
18877 QualType SrcType = SubExpr->getType();
18878
18879 switch (E->getCastKind()) {
18880 case CK_BaseToDerived:
18881 case CK_DerivedToBase:
18882 case CK_UncheckedDerivedToBase:
18883 case CK_Dynamic:
18884 case CK_ToUnion:
18885 case CK_ArrayToPointerDecay:
18886 case CK_FunctionToPointerDecay:
18887 case CK_NullToPointer:
18888 case CK_NullToMemberPointer:
18889 case CK_BaseToDerivedMemberPointer:
18890 case CK_DerivedToBaseMemberPointer:
18891 case CK_ReinterpretMemberPointer:
18892 case CK_ConstructorConversion:
18893 case CK_IntegralToPointer:
18894 case CK_ToVoid:
18895 case CK_VectorSplat:
18896 case CK_IntegralToFloating:
18897 case CK_FloatingCast:
18898 case CK_CPointerToObjCPointerCast:
18899 case CK_BlockPointerToObjCPointerCast:
18900 case CK_AnyPointerToBlockPointerCast:
18901 case CK_ObjCObjectLValueCast:
18902 case CK_FloatingRealToComplex:
18903 case CK_FloatingComplexToReal:
18904 case CK_FloatingComplexCast:
18905 case CK_FloatingComplexToIntegralComplex:
18906 case CK_IntegralRealToComplex:
18907 case CK_IntegralComplexCast:
18908 case CK_IntegralComplexToFloatingComplex:
18909 case CK_BuiltinFnToFnPtr:
18910 case CK_ZeroToOCLOpaqueType:
18911 case CK_NonAtomicToAtomic:
18912 case CK_AddressSpaceConversion:
18913 case CK_IntToOCLSampler:
18914 case CK_FloatingToFixedPoint:
18915 case CK_FixedPointToFloating:
18916 case CK_FixedPointCast:
18917 case CK_IntegralToFixedPoint:
18918 case CK_MatrixCast:
18919 case CK_HLSLAggregateSplatCast:
18920 llvm_unreachable("invalid cast kind for integral value");
18921
18922 case CK_BitCast:
18923 case CK_Dependent:
18924 case CK_LValueBitCast:
18925 case CK_ARCProduceObject:
18926 case CK_ARCConsumeObject:
18927 case CK_ARCReclaimReturnedObject:
18928 case CK_ARCExtendBlockObject:
18929 case CK_CopyAndAutoreleaseBlockObject:
18930 return Error(E);
18931
18932 case CK_UserDefinedConversion:
18933 case CK_LValueToRValue:
18934 case CK_AtomicToNonAtomic:
18935 case CK_NoOp:
18936 case CK_LValueToRValueBitCast:
18937 case CK_HLSLArrayRValue:
18938 return ExprEvaluatorBaseTy::VisitCastExpr(E);
18939
18940 case CK_MemberPointerToBoolean:
18941 case CK_PointerToBoolean:
18942 case CK_IntegralToBoolean:
18943 case CK_FloatingToBoolean:
18944 case CK_BooleanToSignedIntegral:
18945 case CK_FloatingComplexToBoolean:
18946 case CK_IntegralComplexToBoolean: {
18947 bool BoolResult;
18948 if (!EvaluateAsBooleanCondition(E: SubExpr, Result&: BoolResult, Info))
18949 return false;
18950 uint64_t IntResult = BoolResult;
18951 if (BoolResult && E->getCastKind() == CK_BooleanToSignedIntegral)
18952 IntResult = (uint64_t)-1;
18953 return Success(Value: IntResult, E);
18954 }
18955
18956 case CK_FixedPointToIntegral: {
18957 APFixedPoint Src(Info.Ctx.getFixedPointSemantics(Ty: SrcType));
18958 if (!EvaluateFixedPoint(E: SubExpr, Result&: Src, Info))
18959 return false;
18960 bool Overflowed;
18961 llvm::APSInt Result = Src.convertToInt(
18962 DstWidth: Info.Ctx.getIntWidth(T: DestType),
18963 DstSign: DestType->isSignedIntegerOrEnumerationType(), Overflow: &Overflowed);
18964 if (Overflowed && !HandleOverflow(Info, E, SrcValue: Result, DestType))
18965 return false;
18966 return Success(SI: Result, E);
18967 }
18968
18969 case CK_FixedPointToBoolean: {
18970 // Unsigned padding does not affect this.
18971 APValue Val;
18972 if (!Evaluate(Result&: Val, Info, E: SubExpr))
18973 return false;
18974 return Success(Value: Val.getFixedPoint().getBoolValue(), E);
18975 }
18976
18977 case CK_IntegralCast: {
18978 if (!Visit(S: SubExpr))
18979 return false;
18980
18981 if (!Result.isInt()) {
18982 // Allow casts of address-of-label differences if they are no-ops
18983 // or narrowing, if the result is at least 32 bits wide.
18984 // (The narrowing case isn't actually guaranteed to
18985 // be constant-evaluatable except in some narrow cases which are hard
18986 // to detect here. We let it through on the assumption the user knows
18987 // what they are doing.)
18988 if (Result.isAddrLabelDiff()) {
18989 unsigned DestBits = Info.Ctx.getTypeSize(T: DestType);
18990 return DestBits >= 32 && DestBits <= Info.Ctx.getTypeSize(T: SrcType);
18991 }
18992 // Only allow casts of lvalues if they are lossless.
18993 return Info.Ctx.getTypeSize(T: DestType) == Info.Ctx.getTypeSize(T: SrcType);
18994 }
18995
18996 if (Info.Ctx.getLangOpts().CPlusPlus && DestType->isEnumeralType()) {
18997 const auto *ED = DestType->getAsEnumDecl();
18998 // Check that the value is within the range of the enumeration values.
18999 //
19000 // This corressponds to [expr.static.cast]p10 which says:
19001 // A value of integral or enumeration type can be explicitly converted
19002 // to a complete enumeration type ... If the enumeration type does not
19003 // have a fixed underlying type, the value is unchanged if the original
19004 // value is within the range of the enumeration values ([dcl.enum]), and
19005 // otherwise, the behavior is undefined.
19006 //
19007 // This was resolved as part of DR2338 which has CD5 status.
19008 if (!ED->isFixed()) {
19009 llvm::APInt Min;
19010 llvm::APInt Max;
19011
19012 ED->getValueRange(Max, Min);
19013 --Max;
19014
19015 if (ED->getNumNegativeBits() &&
19016 (Max.slt(RHS: Result.getInt().getSExtValue()) ||
19017 Min.sgt(RHS: Result.getInt().getSExtValue())))
19018 Info.CCEDiag(E, DiagId: diag::note_constexpr_unscoped_enum_out_of_range)
19019 << llvm::toString(I: Result.getInt(), Radix: 10) << Min.getSExtValue()
19020 << Max.getSExtValue() << ED;
19021 else if (!ED->getNumNegativeBits() &&
19022 Max.ult(RHS: Result.getInt().getZExtValue()))
19023 Info.CCEDiag(E, DiagId: diag::note_constexpr_unscoped_enum_out_of_range)
19024 << llvm::toString(I: Result.getInt(), Radix: 10) << Min.getZExtValue()
19025 << Max.getZExtValue() << ED;
19026 }
19027 }
19028
19029 return Success(SI: HandleIntToIntCast(Info, E, DestType, SrcType,
19030 Value: Result.getInt()), E);
19031 }
19032
19033 case CK_PointerToIntegral: {
19034 CCEDiag(E, D: diag::note_constexpr_invalid_cast)
19035 << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
19036 << Info.Ctx.getLangOpts().CPlusPlus << E->getSourceRange();
19037
19038 LValue LV;
19039 if (!EvaluatePointer(E: SubExpr, Result&: LV, Info))
19040 return false;
19041
19042 if (LV.getLValueBase()) {
19043 // Only allow based lvalue casts if they are lossless.
19044 // FIXME: Allow a larger integer size than the pointer size, and allow
19045 // narrowing back down to pointer width in subsequent integral casts.
19046 // FIXME: Check integer type's active bits, not its type size.
19047 if (Info.Ctx.getTypeSize(T: DestType) != Info.Ctx.getTypeSize(T: SrcType))
19048 return Error(E);
19049
19050 LV.Designator.setInvalid();
19051 LV.moveInto(V&: Result);
19052 return true;
19053 }
19054
19055 APSInt AsInt;
19056 APValue V;
19057 LV.moveInto(V);
19058 if (!V.toIntegralConstant(Result&: AsInt, SrcTy: SrcType, Ctx: Info.Ctx))
19059 llvm_unreachable("Can't cast this!");
19060
19061 return Success(SI: HandleIntToIntCast(Info, E, DestType, SrcType, Value: AsInt), E);
19062 }
19063
19064 case CK_IntegralComplexToReal: {
19065 ComplexValue C;
19066 if (!EvaluateComplex(E: SubExpr, Res&: C, Info))
19067 return false;
19068 return Success(SI: C.getComplexIntReal(), E);
19069 }
19070
19071 case CK_FloatingToIntegral: {
19072 APFloat F(0.0);
19073 if (!EvaluateFloat(E: SubExpr, Result&: F, Info))
19074 return false;
19075
19076 APSInt Value;
19077 if (!HandleFloatToIntCast(Info, E, SrcType, Value: F, DestType, Result&: Value))
19078 return false;
19079 return Success(SI: Value, E);
19080 }
19081 case CK_HLSLVectorTruncation: {
19082 APValue Val;
19083 if (!EvaluateVector(E: SubExpr, Result&: Val, Info))
19084 return Error(E);
19085 return Success(V: Val.getVectorElt(I: 0), E);
19086 }
19087 case CK_HLSLMatrixTruncation: {
19088 APValue Val;
19089 if (!EvaluateMatrix(E: SubExpr, Result&: Val, Info))
19090 return Error(E);
19091 return Success(V: Val.getMatrixElt(Row: 0, Col: 0), E);
19092 }
19093 case CK_HLSLElementwiseCast: {
19094 SmallVector<APValue> SrcVals;
19095 SmallVector<QualType> SrcTypes;
19096
19097 if (!hlslElementwiseCastHelper(Info, E: SubExpr, DestTy: DestType, SrcVals, SrcTypes))
19098 return false;
19099
19100 // cast our single element
19101 const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
19102 APValue ResultVal;
19103 if (!handleScalarCast(Info, FPO, E, SourceTy: SrcTypes[0], DestTy: DestType, Original: SrcVals[0],
19104 Result&: ResultVal))
19105 return false;
19106 return Success(V: ResultVal, E);
19107 }
19108 }
19109
19110 llvm_unreachable("unknown cast resulting in integral value");
19111}
19112
19113bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
19114 if (E->getSubExpr()->getType()->isAnyComplexType()) {
19115 ComplexValue LV;
19116 if (!EvaluateComplex(E: E->getSubExpr(), Res&: LV, Info))
19117 return false;
19118 if (!LV.isComplexInt())
19119 return Error(E);
19120 return Success(SI: LV.getComplexIntReal(), E);
19121 }
19122
19123 return Visit(S: E->getSubExpr());
19124}
19125
19126bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
19127 if (E->getSubExpr()->getType()->isComplexIntegerType()) {
19128 ComplexValue LV;
19129 if (!EvaluateComplex(E: E->getSubExpr(), Res&: LV, Info))
19130 return false;
19131 if (!LV.isComplexInt())
19132 return Error(E);
19133 return Success(SI: LV.getComplexIntImag(), E);
19134 }
19135
19136 VisitIgnoredValue(E: E->getSubExpr());
19137 return Success(Value: 0, E);
19138}
19139
19140bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
19141 return Success(Value: E->getPackLength(), E);
19142}
19143
19144bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
19145 return Success(Value: E->getValue(), E);
19146}
19147
19148bool IntExprEvaluator::VisitConceptSpecializationExpr(
19149 const ConceptSpecializationExpr *E) {
19150 return Success(Value: E->isSatisfied(), E);
19151}
19152
19153bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) {
19154 return Success(Value: E->isSatisfied(), E);
19155}
19156
/// Evaluate a unary operator yielding a fixed-point value. Only unary +, -
/// and ! are meaningful here; anything else is not a constant expression.
bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
  switch (E->getOpcode()) {
  default:
    // Invalid unary operators
    return Error(E);
  case UO_Plus:
    // The result is just the value.
    return Visit(S: E->getSubExpr());
  case UO_Minus: {
    if (!Visit(S: E->getSubExpr())) return false;
    if (!Result.isFixedPoint())
      return Error(E);
    // Negation can overflow (e.g. negating the minimum value of a signed
    // saturating-free fixed-point type); let HandleOverflow decide whether
    // evaluation may continue.
    bool Overflowed;
    APFixedPoint Negated = Result.getFixedPoint().negate(Overflow: &Overflowed);
    if (Overflowed && !HandleOverflow(Info, E, SrcValue: Negated, DestType: E->getType()))
      return false;
    return Success(V: Negated, E);
  }
  case UO_LNot: {
    // Logical negation evaluates the operand as a boolean condition.
    bool bres;
    if (!EvaluateAsBooleanCondition(E: E->getSubExpr(), Result&: bres, Info))
      return false;
    return Success(Value: !bres, E);
  }
  }
}
19183
/// Evaluate a cast whose result is a fixed-point value. Every conversion
/// checks for overflow: when checking for undefined behavior a warning is
/// reported directly, and HandleOverflow decides whether evaluation may
/// continue.
bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const Expr *SubExpr = E->getSubExpr();
  QualType DestType = E->getType();
  assert(DestType->isFixedPointType() &&
         "Expected destination type to be a fixed point type");
  auto DestFXSema = Info.Ctx.getFixedPointSemantics(Ty: DestType);

  switch (E->getCastKind()) {
  case CK_FixedPointCast: {
    // Fixed-point to fixed-point: re-scale the value into the destination
    // semantics.
    APFixedPoint Src(Info.Ctx.getFixedPointSemantics(Ty: SubExpr->getType()));
    if (!EvaluateFixedPoint(E: SubExpr, Result&: Src, Info))
      return false;
    bool Overflowed;
    APFixedPoint Result = Src.convert(DstSema: DestFXSema, Overflow: &Overflowed);
    if (Overflowed) {
      if (Info.checkingForUndefinedBehavior())
        Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                         DiagID: diag::warn_fixedpoint_constant_overflow)
          << Result.toString() << E->getType();
      if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType()))
        return false;
    }
    return Success(V: Result, E);
  }
  case CK_IntegralToFixedPoint: {
    // Integer to fixed-point conversion.
    APSInt Src;
    if (!EvaluateInteger(E: SubExpr, Result&: Src, Info))
      return false;

    bool Overflowed;
    APFixedPoint IntResult = APFixedPoint::getFromIntValue(
        Value: Src, DstFXSema: Info.Ctx.getFixedPointSemantics(Ty: DestType), Overflow: &Overflowed);

    if (Overflowed) {
      if (Info.checkingForUndefinedBehavior())
        Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                         DiagID: diag::warn_fixedpoint_constant_overflow)
          << IntResult.toString() << E->getType();
      if (!HandleOverflow(Info, E, SrcValue: IntResult, DestType: E->getType()))
        return false;
    }

    return Success(V: IntResult, E);
  }
  case CK_FloatingToFixedPoint: {
    // Floating-point to fixed-point conversion.
    APFloat Src(0.0);
    if (!EvaluateFloat(E: SubExpr, Result&: Src, Info))
      return false;

    bool Overflowed;
    APFixedPoint Result = APFixedPoint::getFromFloatValue(
        Value: Src, DstFXSema: Info.Ctx.getFixedPointSemantics(Ty: DestType), Overflow: &Overflowed);

    if (Overflowed) {
      if (Info.checkingForUndefinedBehavior())
        Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                         DiagID: diag::warn_fixedpoint_constant_overflow)
          << Result.toString() << E->getType();
      if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType()))
        return false;
    }

    return Success(V: Result, E);
  }
  case CK_NoOp:
  case CK_LValueToRValue:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);
  default:
    return Error(E);
  }
}
19255
/// Evaluate a binary operator producing a fixed-point value. Each operand
/// may be fixed-point or integer; the operation is carried out in the
/// operands' full precision and then converted to the result type, with
/// overflow from either step diagnosed afterwards.
bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  const Expr *LHS = E->getLHS();
  const Expr *RHS = E->getRHS();
  FixedPointSemantics ResultFXSema =
      Info.Ctx.getFixedPointSemantics(Ty: E->getType());

  APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(Ty: LHS->getType()));
  if (!EvaluateFixedPointOrInteger(E: LHS, Result&: LHSFX, Info))
    return false;
  APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(Ty: RHS->getType()));
  if (!EvaluateFixedPointOrInteger(E: RHS, Result&: RHSFX, Info))
    return false;

  bool OpOverflow = false, ConversionOverflow = false;
  APFixedPoint Result(LHSFX.getSemantics());
  switch (E->getOpcode()) {
  case BO_Add: {
    Result = LHSFX.add(Other: RHSFX, Overflow: &OpOverflow)
                  .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Sub: {
    Result = LHSFX.sub(Other: RHSFX, Overflow: &OpOverflow)
                  .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Mul: {
    Result = LHSFX.mul(Other: RHSFX, Overflow: &OpOverflow)
                  .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Div: {
    // Division by zero is never a constant expression.
    if (RHSFX.getValue() == 0) {
      Info.FFDiag(E, DiagId: diag::note_expr_divide_by_zero);
      return false;
    }
    Result = LHSFX.div(Other: RHSFX, Overflow: &OpOverflow)
                  .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Shl:
  case BO_Shr: {
    FixedPointSemantics LHSSema = LHSFX.getSemantics();
    llvm::APSInt RHSVal = RHSFX.getValue();

    unsigned ShiftBW =
        LHSSema.getWidth() - (unsigned)LHSSema.hasUnsignedPadding();
    // Clamp the shift amount to the valid range before performing it, so
    // evaluation can continue after the diagnostic below.
    unsigned Amt = RHSVal.getLimitedValue(Limit: ShiftBW - 1);
    // Embedded-C 4.1.6.2.2:
    //   The right operand must be nonnegative and less than the total number
    //   of (nonpadding) bits of the fixed-point operand ...
    if (RHSVal.isNegative())
      Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHSVal;
    else if (Amt != RHSVal)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift)
        << RHSVal << E->getType() << ShiftBW;

    if (E->getOpcode() == BO_Shl)
      Result = LHSFX.shl(Amt, Overflow: &OpOverflow);
    else
      Result = LHSFX.shr(Amt, Overflow: &OpOverflow);
    break;
  }
  default:
    // Remaining operators are not fixed-point operations.
    return false;
  }
  if (OpOverflow || ConversionOverflow) {
    if (Info.checkingForUndefinedBehavior())
      Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                       DiagID: diag::warn_fixedpoint_constant_overflow)
        << Result.toString() << E->getType();
    if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType()))
      return false;
  }
  return Success(V: Result, E);
}
19335
19336//===----------------------------------------------------------------------===//
19337// Float Evaluation
19338//===----------------------------------------------------------------------===//
19339
namespace {
/// Evaluates expressions of real floating-point type into an APFloat.
class FloatExprEvaluator
  : public ExprEvaluatorBase<FloatExprEvaluator> {
  APFloat &Result; // Where the evaluated value is stored.
public:
  FloatExprEvaluator(EvalInfo &info, APFloat &result)
    : ExprEvaluatorBaseTy(info), Result(result) {}

  /// Adopt the float held by an already-computed APValue.
  bool Success(const APValue &V, const Expr *e) {
    Result = V.getFloat();
    return true;
  }

  /// Zero-initialization yields a positive zero with the semantics of the
  /// expression's type.
  bool ZeroInitialization(const Expr *E) {
    Result = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: E->getType()));
    return true;
  }

  bool VisitCallExpr(const CallExpr *E);

  bool VisitUnaryOperator(const UnaryOperator *E);
  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitFloatingLiteral(const FloatingLiteral *E);
  bool VisitCastExpr(const CastExpr *E);

  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);

  // FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
19371
19372static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
19373 assert(!E->isValueDependent());
19374 assert(E->isPRValue() && E->getType()->isRealFloatingType());
19375 return FloatExprEvaluator(Info, Result).Visit(S: E);
19376}
19377
19378static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
19379 QualType ResultTy,
19380 const Expr *Arg,
19381 bool SNaN,
19382 llvm::APFloat &Result) {
19383 const StringLiteral *S = dyn_cast<StringLiteral>(Val: Arg->IgnoreParenCasts());
19384 if (!S) return false;
19385
19386 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: ResultTy);
19387
19388 llvm::APInt fill;
19389
19390 // Treat empty strings as if they were zero.
19391 if (S->getString().empty())
19392 fill = llvm::APInt(32, 0);
19393 else if (S->getString().getAsInteger(Radix: 0, Result&: fill))
19394 return false;
19395
19396 if (Context.getTargetInfo().isNan2008()) {
19397 if (SNaN)
19398 Result = llvm::APFloat::getSNaN(Sem, Negative: false, payload: &fill);
19399 else
19400 Result = llvm::APFloat::getQNaN(Sem, Negative: false, payload: &fill);
19401 } else {
19402 // Prior to IEEE 754-2008, architectures were allowed to choose whether
19403 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
19404 // a different encoding to what became a standard in 2008, and for pre-
19405 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
19406 // sNaN. This is now known as "legacy NaN" encoding.
19407 if (SNaN)
19408 Result = llvm::APFloat::getQNaN(Sem, Negative: false, payload: &fill);
19409 else
19410 Result = llvm::APFloat::getSNaN(Sem, Negative: false, payload: &fill);
19411 }
19412
19413 return true;
19414}
19415
/// Constant-fold calls to floating-point builtins: infinity/NaN constructors,
/// fabs, copysign, the min/max families, elementwise fma, and a few target
/// intrinsics. Non-builtin calls are delegated to the base class.
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
  if (!IsConstantEvaluatedBuiltinCall(E))
    return ExprEvaluatorBaseTy::VisitCallExpr(E);

  switch (E->getBuiltinCallee()) {
  default:
    return false;

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128: {
    // Positive infinity in the semantics of the result type.
    const llvm::fltSemantics &Sem =
      Info.Ctx.getFloatTypeSemantics(T: E->getType());
    Result = llvm::APFloat::getInf(Sem);
    return true;
  }

  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    if (!TryEvaluateBuiltinNaN(Context: Info.Ctx, ResultTy: E->getType(), Arg: E->getArg(Arg: 0),
                               SNaN: true, Result))
      return Error(E);
    return true;

  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    // If this is __builtin_nan() turn this into a nan, otherwise we
    // can't constant fold it.
    if (!TryEvaluateBuiltinNaN(Context: Info.Ctx, ResultTy: E->getType(), Arg: E->getArg(Arg: 0),
                               SNaN: false, Result))
      return Error(E);
    return true;

  case Builtin::BI__builtin_elementwise_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    // The C standard says "fabs raises no floating-point exceptions,
    // even if x is a signaling NaN. The returned value is independent of
    // the current rounding direction mode." Therefore constant folding can
    // proceed without regard to the floating point settings.
    // Reference, WG14 N2478 F.10.4.3
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info))
      return false;

    if (Result.isNegative())
      Result.changeSign();
    return true;

  case Builtin::BI__arithmetic_fence:
    // The fence only affects code generation; the value is unchanged.
    return EvaluateFloat(E: E->getArg(Arg: 0), Result, Info);

  // FIXME: Builtin::BI__builtin_powi
  // FIXME: Builtin::BI__builtin_powif
  // FIXME: Builtin::BI__builtin_powil

  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result.copySign(RHS);
    return true;
  }

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = maxnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = minnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_fmaximum_num:
  case Builtin::BI__builtin_fmaximum_numf:
  case Builtin::BI__builtin_fmaximum_numl:
  case Builtin::BI__builtin_fmaximum_numf16:
  case Builtin::BI__builtin_fmaximum_numf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = maximumnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_fminimum_num:
  case Builtin::BI__builtin_fminimum_numf:
  case Builtin::BI__builtin_fminimum_numl:
  case Builtin::BI__builtin_fminimum_numf16:
  case Builtin::BI__builtin_fminimum_numf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = minimumnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_elementwise_fma: {
    // Scalar form only: all three arguments must be prvalues.
    if (!E->getArg(Arg: 0)->isPRValue() || !E->getArg(Arg: 1)->isPRValue() ||
        !E->getArg(Arg: 2)->isPRValue()) {
      return false;
    }
    APFloat SourceY(0.), SourceZ(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: SourceY, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 2), Result&: SourceZ, Info))
      return false;
    // fma uses the rounding mode currently in effect for this expression.
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);
    (void)Result.fusedMultiplyAdd(Multiplicand: SourceY, Addend: SourceZ, RM);
    return true;
  }

  case clang::X86::BI__builtin_ia32_vec_ext_v4sf: {
    // Extract one lane of a 4 x float vector; the index is masked into range
    // (matching the hardware behavior of the intrinsic).
    APValue Vec;
    APSInt IdxAPS;
    if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: IdxAPS, Info))
      return false;
    unsigned N = Vec.getVectorLength();
    unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
    return Success(V: Vec.getVectorElt(I: Idx), e: E);
  }
  }
}
19577
19578bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
19579 if (E->getSubExpr()->getType()->isAnyComplexType()) {
19580 ComplexValue CV;
19581 if (!EvaluateComplex(E: E->getSubExpr(), Res&: CV, Info))
19582 return false;
19583 Result = CV.FloatReal;
19584 return true;
19585 }
19586
19587 return Visit(S: E->getSubExpr());
19588}
19589
19590bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
19591 if (E->getSubExpr()->getType()->isAnyComplexType()) {
19592 ComplexValue CV;
19593 if (!EvaluateComplex(E: E->getSubExpr(), Res&: CV, Info))
19594 return false;
19595 Result = CV.FloatImag;
19596 return true;
19597 }
19598
19599 VisitIgnoredValue(E: E->getSubExpr());
19600 const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(T: E->getType());
19601 Result = llvm::APFloat::getZero(Sem);
19602 return true;
19603}
19604
19605bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
19606 switch (E->getOpcode()) {
19607 default: return Error(E);
19608 case UO_Plus:
19609 return EvaluateFloat(E: E->getSubExpr(), Result, Info);
19610 case UO_Minus:
19611 // In C standard, WG14 N2478 F.3 p4
19612 // "the unary - raises no floating point exceptions,
19613 // even if the operand is signalling."
19614 if (!EvaluateFloat(E: E->getSubExpr(), Result, Info))
19615 return false;
19616 Result.changeSign();
19617 return true;
19618 }
19619}
19620
19621bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
19622 if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
19623 return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
19624
19625 APFloat RHS(0.0);
19626 bool LHSOK = EvaluateFloat(E: E->getLHS(), Result, Info);
19627 if (!LHSOK && !Info.noteFailure())
19628 return false;
19629 return EvaluateFloat(E: E->getRHS(), Result&: RHS, Info) && LHSOK &&
19630 handleFloatFloatBinOp(Info, E, LHS&: Result, Opcode: E->getOpcode(), RHS);
19631}
19632
// A floating-point literal is already a constant; just copy its value.
bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
  Result = E->getValue();
  return true;
}
19637
19638bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
19639 const Expr* SubExpr = E->getSubExpr();
19640
19641 switch (E->getCastKind()) {
19642 default:
19643 return ExprEvaluatorBaseTy::VisitCastExpr(E);
19644
19645 case CK_HLSLAggregateSplatCast:
19646 llvm_unreachable("invalid cast kind for floating value");
19647
19648 case CK_IntegralToFloating: {
19649 APSInt IntResult;
19650 const FPOptions FPO = E->getFPFeaturesInEffect(
19651 LO: Info.Ctx.getLangOpts());
19652 return EvaluateInteger(E: SubExpr, Result&: IntResult, Info) &&
19653 HandleIntToFloatCast(Info, E, FPO, SrcType: SubExpr->getType(),
19654 Value: IntResult, DestType: E->getType(), Result);
19655 }
19656
19657 case CK_FixedPointToFloating: {
19658 APFixedPoint FixResult(Info.Ctx.getFixedPointSemantics(Ty: SubExpr->getType()));
19659 if (!EvaluateFixedPoint(E: SubExpr, Result&: FixResult, Info))
19660 return false;
19661 Result =
19662 FixResult.convertToFloat(FloatSema: Info.Ctx.getFloatTypeSemantics(T: E->getType()));
19663 return true;
19664 }
19665
19666 case CK_FloatingCast: {
19667 if (!Visit(S: SubExpr))
19668 return false;
19669 return HandleFloatToFloatCast(Info, E, SrcType: SubExpr->getType(), DestType: E->getType(),
19670 Result);
19671 }
19672
19673 case CK_FloatingComplexToReal: {
19674 ComplexValue V;
19675 if (!EvaluateComplex(E: SubExpr, Res&: V, Info))
19676 return false;
19677 Result = V.getComplexFloatReal();
19678 return true;
19679 }
19680 case CK_HLSLVectorTruncation: {
19681 APValue Val;
19682 if (!EvaluateVector(E: SubExpr, Result&: Val, Info))
19683 return Error(E);
19684 return Success(V: Val.getVectorElt(I: 0), e: E);
19685 }
19686 case CK_HLSLMatrixTruncation: {
19687 APValue Val;
19688 if (!EvaluateMatrix(E: SubExpr, Result&: Val, Info))
19689 return Error(E);
19690 return Success(V: Val.getMatrixElt(Row: 0, Col: 0), e: E);
19691 }
19692 case CK_HLSLElementwiseCast: {
19693 SmallVector<APValue> SrcVals;
19694 SmallVector<QualType> SrcTypes;
19695
19696 if (!hlslElementwiseCastHelper(Info, E: SubExpr, DestTy: E->getType(), SrcVals,
19697 SrcTypes))
19698 return false;
19699 APValue Val;
19700
19701 // cast our single element
19702 const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
19703 APValue ResultVal;
19704 if (!handleScalarCast(Info, FPO, E, SourceTy: SrcTypes[0], DestTy: E->getType(), Original: SrcVals[0],
19705 Result&: ResultVal))
19706 return false;
19707 return Success(V: ResultVal, e: E);
19708 }
19709 }
19710}
19711
19712//===----------------------------------------------------------------------===//
19713// Complex Evaluation (for float and integer)
19714//===----------------------------------------------------------------------===//
19715
namespace {
/// Evaluates expressions of complex type (both floating and integral
/// element types) into a ComplexValue holding real and imaginary parts.
class ComplexExprEvaluator
  : public ExprEvaluatorBase<ComplexExprEvaluator> {
  ComplexValue &Result; // Where the evaluated value is stored.

public:
  ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
    : ExprEvaluatorBaseTy(info), Result(Result) {}

  /// Adopt an already-computed APValue as the result.
  bool Success(const APValue &V, const Expr *e) {
    Result.setFrom(V);
    return true;
  }

  bool ZeroInitialization(const Expr *E);

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
  bool VisitCastExpr(const CastExpr *E);
  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitUnaryOperator(const UnaryOperator *E);
  bool VisitInitListExpr(const InitListExpr *E);
  bool VisitCallExpr(const CallExpr *E);
};
} // end anonymous namespace
19744
19745static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
19746 EvalInfo &Info) {
19747 assert(!E->isValueDependent());
19748 assert(E->isPRValue() && E->getType()->isAnyComplexType());
19749 return ComplexExprEvaluator(Info, Result).Visit(S: E);
19750}
19751
19752bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
19753 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
19754 if (ElemTy->isRealFloatingType()) {
19755 Result.makeComplexFloat();
19756 APFloat Zero = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: ElemTy));
19757 Result.FloatReal = Zero;
19758 Result.FloatImag = Zero;
19759 } else {
19760 Result.makeComplexInt();
19761 APSInt Zero = Info.Ctx.MakeIntValue(Value: 0, Type: ElemTy);
19762 Result.IntReal = Zero;
19763 Result.IntImag = Zero;
19764 }
19765 return true;
19766}
19767
19768bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
19769 const Expr* SubExpr = E->getSubExpr();
19770
19771 if (SubExpr->getType()->isRealFloatingType()) {
19772 Result.makeComplexFloat();
19773 APFloat &Imag = Result.FloatImag;
19774 if (!EvaluateFloat(E: SubExpr, Result&: Imag, Info))
19775 return false;
19776
19777 Result.FloatReal = APFloat(Imag.getSemantics());
19778 return true;
19779 } else {
19780 assert(SubExpr->getType()->isIntegerType() &&
19781 "Unexpected imaginary literal.");
19782
19783 Result.makeComplexInt();
19784 APSInt &Imag = Result.IntImag;
19785 if (!EvaluateInteger(E: SubExpr, Result&: Imag, Info))
19786 return false;
19787
19788 Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
19789 return true;
19790 }
19791}
19792
/// Evaluate a cast whose result is a complex value. Each conversion handles
/// the real and imaginary components independently.
bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {

  switch (E->getCastKind()) {
  // None of these cast kinds can produce a complex value; seeing one here
  // indicates a malformed AST.
  case CK_BitCast:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_ConstructorConversion:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_NonAtomicToAtomic:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLMatrixTruncation:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    llvm_unreachable("invalid cast kind for complex value");

  // Value-preserving casts are handled generically by the base class.
  case CK_LValueToRValue:
  case CK_AtomicToNonAtomic:
  case CK_NoOp:
  case CK_LValueToRValueBitCast:
  case CK_HLSLArrayRValue:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_UserDefinedConversion:
    return Error(E);

  case CK_FloatingRealToComplex: {
    // The operand becomes the real part; the imaginary part is a zero with
    // the same semantics.
    APFloat &Real = Result.FloatReal;
    if (!EvaluateFloat(E: E->getSubExpr(), Result&: Real, Info))
      return false;

    Result.makeComplexFloat();
    Result.FloatImag = APFloat(Real.getSemantics());
    return true;
  }

  case CK_FloatingComplexCast: {
    // Convert both components between floating-point semantics.
    if (!Visit(S: E->getSubExpr()))
      return false;

    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();

    return HandleFloatToFloatCast(Info, E, SrcType: From, DestType: To, Result&: Result.FloatReal) &&
           HandleFloatToFloatCast(Info, E, SrcType: From, DestType: To, Result&: Result.FloatImag);
  }

  case CK_FloatingComplexToIntegralComplex: {
    // Convert both components from floating-point to integer.
    if (!Visit(S: E->getSubExpr()))
      return false;

    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
    Result.makeComplexInt();
    return HandleFloatToIntCast(Info, E, SrcType: From, Value: Result.FloatReal,
                                DestType: To, Result&: Result.IntReal) &&
           HandleFloatToIntCast(Info, E, SrcType: From, Value: Result.FloatImag,
                                DestType: To, Result&: Result.IntImag);
  }

  case CK_IntegralRealToComplex: {
    // The operand becomes the real part; the imaginary part is a zero of
    // the same width and signedness.
    APSInt &Real = Result.IntReal;
    if (!EvaluateInteger(E: E->getSubExpr(), Result&: Real, Info))
      return false;

    Result.makeComplexInt();
    Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
    return true;
  }

  case CK_IntegralComplexCast: {
    // Convert both components between integer types.
    if (!Visit(S: E->getSubExpr()))
      return false;

    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();

    Result.IntReal = HandleIntToIntCast(Info, E, DestType: To, SrcType: From, Value: Result.IntReal);
    Result.IntImag = HandleIntToIntCast(Info, E, DestType: To, SrcType: From, Value: Result.IntImag);
    return true;
  }

  case CK_IntegralComplexToFloatingComplex: {
    // Convert both components from integer to floating-point, honoring the
    // FP options in effect for this expression.
    if (!Visit(S: E->getSubExpr()))
      return false;

    const FPOptions FPO = E->getFPFeaturesInEffect(
        LO: Info.Ctx.getLangOpts());
    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
    Result.makeComplexFloat();
    return HandleIntToFloatCast(Info, E, FPO, SrcType: From, Value: Result.IntReal,
                                DestType: To, Result&: Result.FloatReal) &&
           HandleIntToFloatCast(Info, E, FPO, SrcType: From, Value: Result.IntImag,
                                DestType: To, Result&: Result.FloatImag);
  }
  }

  llvm_unreachable("unknown cast resulting in complex value");
}
19944
/// Returns the multiplicative inverse of \p Byte in GF(2^8) with the AES
/// reduction polynomial x^8 + x^4 + x^3 + x + 1 (0x11B). Used when constant
/// evaluating the GFNI affine-inverse builtins.
///
/// By convention the non-invertible zero byte maps to zero (GFInv[0] == 0).
uint8_t GFNIMultiplicativeInverse(uint8_t Byte) {
  // Lookup Table for Multiplicative Inverse in GF(2^8).
  // 'static constexpr' so the 256-byte table lives in read-only storage and
  // is not materialized on the stack on every call.
  static constexpr uint8_t GFInv[256] = {
      0x00, 0x01, 0x8d, 0xf6, 0xcb, 0x52, 0x7b, 0xd1, 0xe8, 0x4f, 0x29, 0xc0,
      0xb0, 0xe1, 0xe5, 0xc7, 0x74, 0xb4, 0xaa, 0x4b, 0x99, 0x2b, 0x60, 0x5f,
      0x58, 0x3f, 0xfd, 0xcc, 0xff, 0x40, 0xee, 0xb2, 0x3a, 0x6e, 0x5a, 0xf1,
      0x55, 0x4d, 0xa8, 0xc9, 0xc1, 0x0a, 0x98, 0x15, 0x30, 0x44, 0xa2, 0xc2,
      0x2c, 0x45, 0x92, 0x6c, 0xf3, 0x39, 0x66, 0x42, 0xf2, 0x35, 0x20, 0x6f,
      0x77, 0xbb, 0x59, 0x19, 0x1d, 0xfe, 0x37, 0x67, 0x2d, 0x31, 0xf5, 0x69,
      0xa7, 0x64, 0xab, 0x13, 0x54, 0x25, 0xe9, 0x09, 0xed, 0x5c, 0x05, 0xca,
      0x4c, 0x24, 0x87, 0xbf, 0x18, 0x3e, 0x22, 0xf0, 0x51, 0xec, 0x61, 0x17,
      0x16, 0x5e, 0xaf, 0xd3, 0x49, 0xa6, 0x36, 0x43, 0xf4, 0x47, 0x91, 0xdf,
      0x33, 0x93, 0x21, 0x3b, 0x79, 0xb7, 0x97, 0x85, 0x10, 0xb5, 0xba, 0x3c,
      0xb6, 0x70, 0xd0, 0x06, 0xa1, 0xfa, 0x81, 0x82, 0x83, 0x7e, 0x7f, 0x80,
      0x96, 0x73, 0xbe, 0x56, 0x9b, 0x9e, 0x95, 0xd9, 0xf7, 0x02, 0xb9, 0xa4,
      0xde, 0x6a, 0x32, 0x6d, 0xd8, 0x8a, 0x84, 0x72, 0x2a, 0x14, 0x9f, 0x88,
      0xf9, 0xdc, 0x89, 0x9a, 0xfb, 0x7c, 0x2e, 0xc3, 0x8f, 0xb8, 0x65, 0x48,
      0x26, 0xc8, 0x12, 0x4a, 0xce, 0xe7, 0xd2, 0x62, 0x0c, 0xe0, 0x1f, 0xef,
      0x11, 0x75, 0x78, 0x71, 0xa5, 0x8e, 0x76, 0x3d, 0xbd, 0xbc, 0x86, 0x57,
      0x0b, 0x28, 0x2f, 0xa3, 0xda, 0xd4, 0xe4, 0x0f, 0xa9, 0x27, 0x53, 0x04,
      0x1b, 0xfc, 0xac, 0xe6, 0x7a, 0x07, 0xae, 0x63, 0xc5, 0xdb, 0xe2, 0xea,
      0x94, 0x8b, 0xc4, 0xd5, 0x9d, 0xf8, 0x90, 0x6b, 0xb1, 0x0d, 0xd6, 0xeb,
      0xc6, 0x0e, 0xcf, 0xad, 0x08, 0x4e, 0xd7, 0xe3, 0x5d, 0x50, 0x1e, 0xb3,
      0x5b, 0x23, 0x38, 0x34, 0x68, 0x46, 0x03, 0x8c, 0xdd, 0x9c, 0x7d, 0xa0,
      0xcd, 0x1a, 0x41, 0x1c};

  return GFInv[Byte];
}
19973
19974uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm,
19975 bool Inverse) {
19976 unsigned NumBitsInByte = 8;
19977 // Computing the affine transformation
19978 uint8_t RetByte = 0;
19979 for (uint32_t BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
19980 uint8_t AByte =
19981 AQword.lshr(shiftAmt: (7 - static_cast<int32_t>(BitIdx)) * NumBitsInByte)
19982 .getLoBits(numBits: 8)
19983 .getZExtValue();
19984 uint8_t Product;
19985 if (Inverse) {
19986 Product = AByte & GFNIMultiplicativeInverse(Byte: XByte);
19987 } else {
19988 Product = AByte & XByte;
19989 }
19990 uint8_t Parity = 0;
19991
19992 // Dot product in GF(2) uses XOR instead of addition
19993 for (unsigned PBitIdx = 0; PBitIdx != NumBitsInByte; ++PBitIdx) {
19994 Parity = Parity ^ ((Product >> PBitIdx) & 0x1);
19995 }
19996
19997 uint8_t Temp = Imm[BitIdx] ? 1 : 0;
19998 RetByte |= (Temp ^ Parity) << BitIdx;
19999 }
20000 return RetByte;
20001}
20002
/// Multiplies two elements of GF(2^8), i.e. polynomials over GF(2) of degree
/// at most 7, reducing the product modulo x^8 + x^4 + x^3 + x + 1 (0x11B).
uint8_t GFNIMul(uint8_t AByte, uint8_t BByte) {
  // Russian-peasant style multiply: accumulate A shifted for each set bit of
  // B, interleaving the modular reduction one x^8 term at a time instead of
  // reducing a full degree-14 product at the end.
  uint8_t Acc = 0;
  unsigned A = AByte;
  unsigned B = BByte;
  for (unsigned Round = 0; Round != 8; ++Round) {
    if (B & 0x1)
      Acc ^= static_cast<uint8_t>(A);
    B >>= 1;
    // Multiply A by x; if that produces an x^8 term, fold it back in using
    // the reduction polynomial (0x11B with the x^8 bit dropped is 0x1B).
    bool Overflow = (A & 0x80) != 0;
    A = (A << 1) & 0xFF;
    if (Overflow)
      A ^= 0x1B;
  }
  return Acc;
}
20026
/// Complex multiplication (a + ib) * (c + id) following C11 Annex G: compute
/// the textbook formulas first; if both result components come out NaN,
/// attempt to recover a correctly-signed infinite result when an operand or
/// a partial product was infinite.
void HandleComplexComplexMul(APFloat A, APFloat B, APFloat C, APFloat D,
                             APFloat &ResR, APFloat &ResI) {
  // This is an implementation of complex multiplication according to the
  // constraints laid out in C11 Annex G. The implementation uses the
  // following naming scheme:
  //   (a + ib) * (c + id)

  // Textbook formulas: real = ac - bd, imag = ad + bc.
  APFloat AC = A * C;
  APFloat BD = B * D;
  APFloat AD = A * D;
  APFloat BC = B * C;
  ResR = AC - BD;
  ResI = AD + BC;
  if (ResR.isNaN() && ResI.isNaN()) {
    // Both components are NaN. Annex G requires that a product involving an
    // infinity evaluate to an infinity, so "box" the operands (infinite
    // components become +/-1, NaN components become +/-0, signs preserved)
    // and recalculate.
    bool Recalc = false;
    if (A.isInfinity() || B.isInfinity()) {
      // LHS involves an infinity.
      A = APFloat::copySign(Value: APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0),
                            Sign: A);
      B = APFloat::copySign(Value: APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0),
                            Sign: B);
      if (C.isNaN())
        C = APFloat::copySign(Value: APFloat(C.getSemantics()), Sign: C);
      if (D.isNaN())
        D = APFloat::copySign(Value: APFloat(D.getSemantics()), Sign: D);
      Recalc = true;
    }
    if (C.isInfinity() || D.isInfinity()) {
      // RHS involves an infinity; box it symmetrically.
      C = APFloat::copySign(Value: APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0),
                            Sign: C);
      D = APFloat::copySign(Value: APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0),
                            Sign: D);
      if (A.isNaN())
        A = APFloat::copySign(Value: APFloat(A.getSemantics()), Sign: A);
      if (B.isNaN())
        B = APFloat::copySign(Value: APFloat(B.getSemantics()), Sign: B);
      Recalc = true;
    }
    if (!Recalc && (AC.isInfinity() || BD.isInfinity() || AD.isInfinity() ||
                    BC.isInfinity())) {
      // Neither operand was infinite, but a partial product overflowed to
      // infinity: zero out NaN components and recalculate.
      if (A.isNaN())
        A = APFloat::copySign(Value: APFloat(A.getSemantics()), Sign: A);
      if (B.isNaN())
        B = APFloat::copySign(Value: APFloat(B.getSemantics()), Sign: B);
      if (C.isNaN())
        C = APFloat::copySign(Value: APFloat(C.getSemantics()), Sign: C);
      if (D.isNaN())
        D = APFloat::copySign(Value: APFloat(D.getSemantics()), Sign: D);
      Recalc = true;
    }
    if (Recalc) {
      // Scale by infinity so the boxed +/-1 and +/-0 components produce a
      // correctly-signed infinite (or NaN) result.
      ResR = APFloat::getInf(Sem: A.getSemantics()) * (A * C - B * D);
      ResI = APFloat::getInf(Sem: A.getSemantics()) * (A * D + B * C);
    }
  }
}
20082
/// Complex division (a + ib) / (c + id) following C11 Annex G: scale the
/// denominator by a power of two to avoid premature overflow/underflow,
/// divide, and if both result components come out NaN apply the Annex G
/// recovery rules for infinite or zero operands.
void HandleComplexComplexDiv(APFloat A, APFloat B, APFloat C, APFloat D,
                             APFloat &ResR, APFloat &ResI) {
  // This is an implementation of complex division according to the
  // constraints laid out in C11 Annex G. The implementation uses the
  // following naming scheme:
  //   (a + ib) / (c + id)

  // Scale c and d by 2^-ilogb(max(|c|,|d|)) so that c*c + d*d cannot
  // overflow or lose all precision; undo the scaling on the quotients below.
  int DenomLogB = 0;
  APFloat MaxCD = maxnum(A: abs(X: C), B: abs(X: D));
  if (MaxCD.isFinite()) {
    DenomLogB = ilogb(Arg: MaxCD);
    C = scalbn(X: C, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
    D = scalbn(X: D, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
  }
  APFloat Denom = C * C + D * D;
  ResR =
      scalbn(X: (A * C + B * D) / Denom, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
  ResI =
      scalbn(X: (B * C - A * D) / Denom, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
  if (ResR.isNaN() && ResI.isNaN()) {
    // Both components are NaN; try the Annex G special cases.
    if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
      // Nonzero / zero: result is a correctly-signed infinity.
      ResR = APFloat::getInf(Sem: ResR.getSemantics(), Negative: C.isNegative()) * A;
      ResI = APFloat::getInf(Sem: ResR.getSemantics(), Negative: C.isNegative()) * B;
    } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
               D.isFinite()) {
      // Infinite / finite: box the numerator's infinities to +/-1 and scale
      // the quotient back up to infinity.
      A = APFloat::copySign(Value: APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0),
                            Sign: A);
      B = APFloat::copySign(Value: APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0),
                            Sign: B);
      ResR = APFloat::getInf(Sem: ResR.getSemantics()) * (A * C + B * D);
      ResI = APFloat::getInf(Sem: ResI.getSemantics()) * (B * C - A * D);
    } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
      // Finite / infinite: box the denominator's infinities and produce a
      // correctly-signed zero.
      C = APFloat::copySign(Value: APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0),
                            Sign: C);
      D = APFloat::copySign(Value: APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0),
                            Sign: D);
      ResR = APFloat::getZero(Sem: ResR.getSemantics()) * (A * C + B * D);
      ResI = APFloat::getZero(Sem: ResI.getSemantics()) * (B * C - A * D);
    }
  }
}
20124
/// Reduces a rotate amount into the range [0, BitWidth) of \p Value, so that
/// constant evaluation of rotate builtins matches runtime behavior for
/// amounts that are negative or not less than the bit width.
APSInt NormalizeRotateAmount(const APSInt &Value, const APSInt &Amount) {
  // Normalize shift amount to [0, BitWidth) range to match runtime behavior
  APSInt NormAmt = Amount;
  unsigned BitWidth = Value.getBitWidth();
  unsigned AmtBitWidth = NormAmt.getBitWidth();
  if (BitWidth == 1) {
    // Rotating a 1-bit value is always a no-op
    NormAmt = APSInt(APInt(AmtBitWidth, 0), NormAmt.isUnsigned());
  } else if (BitWidth == 2) {
    // For 2-bit values: rotation amount is 0 or 1 based on
    // whether the amount is even or odd. We can't use srem here because
    // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
    NormAmt =
        APSInt(APInt(AmtBitWidth, NormAmt[0] ? 1 : 0), NormAmt.isUnsigned());
  } else {
    // General case: compute NormAmt mod BitWidth. Build the divisor at a
    // width wide enough to represent both operands, widening the amount if
    // necessary (APInt division requires matching widths).
    APInt Divisor;
    if (AmtBitWidth > BitWidth) {
      Divisor = llvm::APInt(AmtBitWidth, BitWidth);
    } else {
      Divisor = llvm::APInt(BitWidth, BitWidth);
      if (AmtBitWidth < BitWidth) {
        NormAmt = NormAmt.extend(width: BitWidth);
      }
    }

    // Normalize to [0, BitWidth)
    if (NormAmt.isSigned()) {
      // srem keeps the sign of the dividend, so a negative remainder must be
      // shifted back into range by adding the divisor once.
      NormAmt = APSInt(NormAmt.srem(RHS: Divisor), /*isUnsigned=*/false);
      if (NormAmt.isNegative()) {
        APSInt SignedDivisor(Divisor, /*isUnsigned=*/false);
        NormAmt += SignedDivisor;
      }
    } else {
      NormAmt = APSInt(NormAmt.urem(RHS: Divisor), /*isUnsigned=*/true);
    }
  }

  return NormAmt;
}
20164
/// Evaluate a binary operator producing a complex value. Handles +, -, * and
/// / for both complex-float and complex-int operands, including the mixed
/// real-op-complex cases that C permits without promoting the real operand.
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // Pointer-to-member, assignment and comma operators are handled generically.
  if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  // Track whether the LHS or RHS is real at the type system level. When this is
  // the case we can simplify our evaluation strategy.
  bool LHSReal = false, RHSReal = false;

  bool LHSOK;
  if (E->getLHS()->getType()->isRealFloatingType()) {
    // Real LHS: evaluate it as a float and widen to (Real + 0i).
    LHSReal = true;
    APFloat &Real = Result.FloatReal;
    LHSOK = EvaluateFloat(E: E->getLHS(), Result&: Real, Info);
    if (LHSOK) {
      Result.makeComplexFloat();
      Result.FloatImag = APFloat(Real.getSemantics());
    }
  } else {
    LHSOK = Visit(S: E->getLHS());
  }
  // Keep evaluating the RHS for its diagnostics/side-effects if the caller
  // wants notes even after a failure.
  if (!LHSOK && !Info.noteFailure())
    return false;

  ComplexValue RHS;
  if (E->getRHS()->getType()->isRealFloatingType()) {
    // Real RHS: same widening as for the LHS above.
    RHSReal = true;
    APFloat &Real = RHS.FloatReal;
    if (!EvaluateFloat(E: E->getRHS(), Result&: Real, Info) || !LHSOK)
      return false;
    RHS.makeComplexFloat();
    RHS.FloatImag = APFloat(Real.getSemantics());
  } else if (!EvaluateComplex(E: E->getRHS(), Result&: RHS, Info) || !LHSOK)
    return false;

  // Usual arithmetic conversions would have promoted at least one side.
  assert(!(LHSReal && RHSReal) &&
         "Cannot have both operands of a complex operation be real.");
  switch (E->getOpcode()) {
  default: return Error(E);
  case BO_Add:
    // Component-wise addition; a real operand contributes nothing to the
    // imaginary part, so copy (or keep) the other side's imaginary part.
    if (Result.isComplexFloat()) {
      Result.getComplexFloatReal().add(RHS: RHS.getComplexFloatReal(),
                                       RM: APFloat::rmNearestTiesToEven);
      if (LHSReal)
        Result.getComplexFloatImag() = RHS.getComplexFloatImag();
      else if (!RHSReal)
        Result.getComplexFloatImag().add(RHS: RHS.getComplexFloatImag(),
                                         RM: APFloat::rmNearestTiesToEven);
    } else {
      Result.getComplexIntReal() += RHS.getComplexIntReal();
      Result.getComplexIntImag() += RHS.getComplexIntImag();
    }
    break;
  case BO_Sub:
    // Component-wise subtraction; real - complex negates the RHS imaginary.
    if (Result.isComplexFloat()) {
      Result.getComplexFloatReal().subtract(RHS: RHS.getComplexFloatReal(),
                                            RM: APFloat::rmNearestTiesToEven);
      if (LHSReal) {
        Result.getComplexFloatImag() = RHS.getComplexFloatImag();
        Result.getComplexFloatImag().changeSign();
      } else if (!RHSReal) {
        Result.getComplexFloatImag().subtract(RHS: RHS.getComplexFloatImag(),
                                              RM: APFloat::rmNearestTiesToEven);
      }
    } else {
      Result.getComplexIntReal() -= RHS.getComplexIntReal();
      Result.getComplexIntImag() -= RHS.getComplexIntImag();
    }
    break;
  case BO_Mul:
    if (Result.isComplexFloat()) {
      // This is an implementation of complex multiplication according to the
      // constraints laid out in C11 Annex G. The implementation uses the
      // following naming scheme:
      //   (a + ib) * (c + id)
      ComplexValue LHS = Result;
      APFloat &A = LHS.getComplexFloatReal();
      APFloat &B = LHS.getComplexFloatImag();
      APFloat &C = RHS.getComplexFloatReal();
      APFloat &D = RHS.getComplexFloatImag();
      APFloat &ResR = Result.getComplexFloatReal();
      APFloat &ResI = Result.getComplexFloatImag();
      if (LHSReal) {
        // Real * complex: scale both components, diagnosing FP traps.
        assert(!RHSReal && "Cannot have two real operands for a complex op!");
        ResR = A;
        ResI = A;
        // ResR = A * C;
        // ResI = A * D;
        if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Mul, RHS: C) ||
            !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Mul, RHS: D))
          return false;
      } else if (RHSReal) {
        // Complex * real: symmetric to the case above.
        // ResR = C * A;
        // ResI = C * B;
        ResR = C;
        ResI = C;
        if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Mul, RHS: A) ||
            !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Mul, RHS: B))
          return false;
      } else {
        // Full complex * complex with Annex G infinity recovery.
        HandleComplexComplexMul(A, B, C, D, ResR, ResI);
      }
    } else {
      // Complex-int multiply: (ac - bd) + i(ad + bc). Copy the LHS first
      // since Result is overwritten component by component.
      ComplexValue LHS = Result;
      Result.getComplexIntReal() =
        (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
         LHS.getComplexIntImag() * RHS.getComplexIntImag());
      Result.getComplexIntImag() =
        (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
         LHS.getComplexIntImag() * RHS.getComplexIntReal());
    }
    break;
  case BO_Div:
    if (Result.isComplexFloat()) {
      // This is an implementation of complex division according to the
      // constraints laid out in C11 Annex G. The implementation uses the
      // following naming scheme:
      //   (a + ib) / (c + id)
      ComplexValue LHS = Result;
      APFloat &A = LHS.getComplexFloatReal();
      APFloat &B = LHS.getComplexFloatImag();
      APFloat &C = RHS.getComplexFloatReal();
      APFloat &D = RHS.getComplexFloatImag();
      APFloat &ResR = Result.getComplexFloatReal();
      APFloat &ResI = Result.getComplexFloatImag();
      if (RHSReal) {
        // Complex / real: divide both components, diagnosing FP traps
        // (e.g. division by zero) via handleFloatFloatBinOp.
        ResR = A;
        ResI = B;
        // ResR = A / C;
        // ResI = B / C;
        if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Div, RHS: C) ||
            !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Div, RHS: C))
          return false;
      } else {
        if (LHSReal) {
          // No real optimizations we can do here, stub out with zero.
          B = APFloat::getZero(Sem: A.getSemantics());
        }
        HandleComplexComplexDiv(A, B, C, D, ResR, ResI);
      }
    } else {
      // Complex-int divide using the standard conjugate formula with
      // denominator c*c + d*d; division by zero is not a constant expression.
      ComplexValue LHS = Result;
      APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
        RHS.getComplexIntImag() * RHS.getComplexIntImag();
      if (Den.isZero())
        return Error(E, D: diag::note_expr_divide_by_zero);

      Result.getComplexIntReal() =
        (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
         LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
      Result.getComplexIntImag() =
        (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
         LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
    }
    break;
  }

  return true;
}
20323
20324bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
20325 // Get the operand value into 'Result'.
20326 if (!Visit(S: E->getSubExpr()))
20327 return false;
20328
20329 switch (E->getOpcode()) {
20330 default:
20331 return Error(E);
20332 case UO_Extension:
20333 return true;
20334 case UO_Plus:
20335 // The result is always just the subexpr.
20336 return true;
20337 case UO_Minus:
20338 if (Result.isComplexFloat()) {
20339 Result.getComplexFloatReal().changeSign();
20340 Result.getComplexFloatImag().changeSign();
20341 }
20342 else {
20343 Result.getComplexIntReal() = -Result.getComplexIntReal();
20344 Result.getComplexIntImag() = -Result.getComplexIntImag();
20345 }
20346 return true;
20347 case UO_Not:
20348 if (Result.isComplexFloat())
20349 Result.getComplexFloatImag().changeSign();
20350 else
20351 Result.getComplexIntImag() = -Result.getComplexIntImag();
20352 return true;
20353 }
20354}
20355
20356bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
20357 if (E->getNumInits() == 2) {
20358 if (E->getType()->isComplexType()) {
20359 Result.makeComplexFloat();
20360 if (!EvaluateFloat(E: E->getInit(Init: 0), Result&: Result.FloatReal, Info))
20361 return false;
20362 if (!EvaluateFloat(E: E->getInit(Init: 1), Result&: Result.FloatImag, Info))
20363 return false;
20364 } else {
20365 Result.makeComplexInt();
20366 if (!EvaluateInteger(E: E->getInit(Init: 0), Result&: Result.IntReal, Info))
20367 return false;
20368 if (!EvaluateInteger(E: E->getInit(Init: 1), Result&: Result.IntImag, Info))
20369 return false;
20370 }
20371 return true;
20372 }
20373 return ExprEvaluatorBaseTy::VisitInitListExpr(E);
20374}
20375
20376bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) {
20377 if (!IsConstantEvaluatedBuiltinCall(E))
20378 return ExprEvaluatorBaseTy::VisitCallExpr(E);
20379
20380 switch (E->getBuiltinCallee()) {
20381 case Builtin::BI__builtin_complex:
20382 Result.makeComplexFloat();
20383 if (!EvaluateFloat(E: E->getArg(Arg: 0), Result&: Result.FloatReal, Info))
20384 return false;
20385 if (!EvaluateFloat(E: E->getArg(Arg: 1), Result&: Result.FloatImag, Info))
20386 return false;
20387 return true;
20388
20389 default:
20390 return false;
20391 }
20392}
20393
20394//===----------------------------------------------------------------------===//
20395// Atomic expression evaluation, essentially just handling the NonAtomicToAtomic
20396// implicit conversion.
20397//===----------------------------------------------------------------------===//
20398
namespace {
/// Evaluator for prvalues of _Atomic type. Its main job is to strip the
/// implicit CK_NonAtomicToAtomic conversion and evaluate the wrapped value,
/// optionally in-place into the storage designated by 'This'.
class AtomicExprEvaluator :
    public ExprEvaluatorBase<AtomicExprEvaluator> {
  // Storage for in-place initialization of class/array atomics; may be null.
  const LValue *This;
  APValue &Result;
public:
  AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result)
      : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}

  bool Success(const APValue &V, const Expr *E) {
    Result = V;
    return true;
  }

  bool ZeroInitialization(const Expr *E) {
    // Zero-initialize via an implicit value-init of the underlying
    // (non-atomic) value type.
    ImplicitValueInitExpr VIE(
        E->getType()->castAs<AtomicType>()->getValueType());
    // For atomic-qualified class (and array) types in C++, initialize the
    // _Atomic-wrapped subobject directly, in-place.
    return This ? EvaluateInPlace(Result, Info, This: *This, E: &VIE)
                : Evaluate(Result, Info, E: &VIE);
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);
    case CK_NullToPointer:
      // Evaluate the operand for side effects, then zero-init the atomic.
      VisitIgnoredValue(E: E->getSubExpr());
      return ZeroInitialization(E);
    case CK_NonAtomicToAtomic:
      // The atomic wrapper is transparent for constant evaluation: just
      // evaluate the underlying non-atomic expression.
      return This ? EvaluateInPlace(Result, Info, This: *This, E: E->getSubExpr())
                  : Evaluate(Result, Info, E: E->getSubExpr());
    }
  }
};
} // end anonymous namespace
20436
20437static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
20438 EvalInfo &Info) {
20439 assert(!E->isValueDependent());
20440 assert(E->isPRValue() && E->getType()->isAtomicType());
20441 return AtomicExprEvaluator(Info, This, Result).Visit(S: E);
20442}
20443
20444//===----------------------------------------------------------------------===//
20445// Void expression evaluation, primarily for a cast to void on the LHS of a
20446// comma operator
20447//===----------------------------------------------------------------------===//
20448
namespace {
/// Evaluator for expressions of void type, which are evaluated only for
/// their side effects (a cast to void, the LHS of a comma operator, assume
/// builtins, operator delete calls, and delete-expressions).
class VoidExprEvaluator
  : public ExprEvaluatorBase<VoidExprEvaluator> {
public:
  VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}

  // There is no value to produce; success is simply 'true'.
  bool Success(const APValue &V, const Expr *e) { return true; }

  bool ZeroInitialization(const Expr *E) { return true; }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);
    case CK_ToVoid:
      // Evaluate the discarded operand for its side effects only.
      VisitIgnoredValue(E: E->getSubExpr());
      return true;
    }
  }

  bool VisitCallExpr(const CallExpr *E) {
    if (!IsConstantEvaluatedBuiltinCall(E))
      return ExprEvaluatorBaseTy::VisitCallExpr(E);

    switch (E->getBuiltinCallee()) {
    case Builtin::BI__assume:
    case Builtin::BI__builtin_assume:
      // The argument is not evaluated!
      return true;

    case Builtin::BI__builtin_operator_delete:
      // Direct calls to the replaceable deallocation function.
      return HandleOperatorDeleteCall(Info, E);

    default:
      return false;
    }
  }

  // Defined out-of-line below: constant evaluation of delete-expressions.
  bool VisitCXXDeleteExpr(const CXXDeleteExpr *E);
};
} // end anonymous namespace
20490
/// Constant-evaluate a delete-expression: verify the selected operator
/// delete is usable in constant evaluation, evaluate the pointer operand,
/// run the destructor, and release the corresponding constexpr heap
/// allocation.
bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
  // We cannot speculatively evaluate a delete expression.
  if (Info.SpeculativeEvaluationDepth)
    return false;

  // Only replaceable global allocation functions may be "called" during
  // constant evaluation.
  FunctionDecl *OperatorDelete = E->getOperatorDelete();
  if (!OperatorDelete
           ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable)
        << isa<CXXMethodDecl>(Val: OperatorDelete) << OperatorDelete;
    return false;
  }

  const Expr *Arg = E->getArgument();

  LValue Pointer;
  if (!EvaluatePointer(E: Arg, Result&: Pointer, Info))
    return false;
  if (Pointer.Designator.Invalid)
    return false;

  // Deleting a null pointer has no effect.
  if (Pointer.isNullPointer()) {
    // This is the only case where we need to produce an extension warning:
    // the only other way we can succeed is if we find a dynamic allocation,
    // and we will have warned when we allocated it in that case.
    if (!Info.getLangOpts().CPlusPlus20)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_new);
    return true;
  }

  // Verify the pointer designates a constexpr dynamic allocation of the
  // matching kind (new vs. new[]).
  std::optional<DynAlloc *> Alloc = CheckDeleteKind(
      Info, E, Pointer, DeallocKind: E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New);
  if (!Alloc)
    return false;
  QualType AllocType = Pointer.Base.getDynamicAllocType();

  // For the non-array case, the designator must be empty if the static type
  // does not have a virtual destructor.
  if (!E->isArrayForm() && Pointer.Designator.Entries.size() != 0 &&
      !hasVirtualDestructor(T: Arg->getType()->getPointeeType())) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_delete_base_nonvirt_dtor)
        << Arg->getType()->getPointeeType() << AllocType;
    return false;
  }

  // For a class type with a virtual destructor, the selected operator delete
  // is the one looked up when building the destructor.
  if (!E->isArrayForm() && !E->isGlobalDelete()) {
    const FunctionDecl *VirtualDelete = getVirtualOperatorDelete(T: AllocType);
    if (VirtualDelete &&
        !VirtualDelete
             ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable)
          << isa<CXXMethodDecl>(Val: VirtualDelete) << VirtualDelete;
      return false;
    }
  }

  // Run the destructor on the allocated object (or array elements).
  if (!HandleDestruction(Info, Loc: E->getExprLoc(), LVBase: Pointer.getLValueBase(),
                         Value&: (*Alloc)->Value, T: AllocType))
    return false;

  if (!Info.HeapAllocs.erase(x: Pointer.Base.dyn_cast<DynamicAllocLValue>())) {
    // The element was already erased. This means the destructor call also
    // deleted the object.
    // FIXME: This probably results in undefined behavior before we get this
    // far, and should be diagnosed elsewhere first.
    Info.FFDiag(E, DiagId: diag::note_constexpr_double_delete);
    return false;
  }

  return true;
}
20565
20566static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
20567 assert(!E->isValueDependent());
20568 assert(E->isPRValue() && E->getType()->isVoidType());
20569 return VoidExprEvaluator(Info).Visit(S: E);
20570}
20571
20572//===----------------------------------------------------------------------===//
20573// Top level Expr::EvaluateAsRValue method.
20574//===----------------------------------------------------------------------===//
20575
/// Evaluate E into Result, dispatching on E's type to the matching evaluator.
/// Returns false (with a diagnostic already produced) if evaluation fails.
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
  assert(!E->isValueDependent());
  // In C, function designators are not lvalues, but we evaluate them as if they
  // are.
  QualType T = E->getType();
  if (E->isGLValue() || T->isFunctionType()) {
    // Glvalues evaluate to an lvalue denoting the object/function.
    LValue LV;
    if (!EvaluateLValue(E, Result&: LV, Info))
      return false;
    LV.moveInto(V&: Result);
  } else if (T->isVectorType()) {
    if (!EvaluateVector(E, Result, Info))
      return false;
  } else if (T->isConstantMatrixType()) {
    if (!EvaluateMatrix(E, Result, Info))
      return false;
  } else if (T->isIntegralOrEnumerationType()) {
    if (!IntExprEvaluator(Info, Result).Visit(S: E))
      return false;
  } else if (T->hasPointerRepresentation()) {
    // Pointers (and references/block pointers) evaluate as lvalues too.
    LValue LV;
    if (!EvaluatePointer(E, Result&: LV, Info))
      return false;
    LV.moveInto(V&: Result);
  } else if (T->isRealFloatingType()) {
    llvm::APFloat F(0.0);
    if (!EvaluateFloat(E, Result&: F, Info))
      return false;
    Result = APValue(F);
  } else if (T->isAnyComplexType()) {
    ComplexValue C;
    if (!EvaluateComplex(E, Result&: C, Info))
      return false;
    C.moveInto(v&: Result);
  } else if (T->isFixedPointType()) {
    if (!FixedPointExprEvaluator(Info, Result).Visit(S: E)) return false;
  } else if (T->isMemberPointerType()) {
    MemberPtr P;
    if (!EvaluateMemberPointer(E, Result&: P, Info))
      return false;
    P.moveInto(V&: Result);
    return true;
  } else if (T->isArrayType()) {
    // Aggregates need a materialized temporary to be evaluated into.
    LValue LV;
    APValue &Value =
        Info.CurrentCall->createTemporary(Key: E, T, Scope: ScopeKind::FullExpression, LV);
    if (!EvaluateArray(E, This: LV, Result&: Value, Info))
      return false;
    Result = Value;
  } else if (T->isRecordType()) {
    LValue LV;
    APValue &Value =
        Info.CurrentCall->createTemporary(Key: E, T, Scope: ScopeKind::FullExpression, LV);
    if (!EvaluateRecord(E, This: LV, Result&: Value, Info))
      return false;
    Result = Value;
  } else if (T->isVoidType()) {
    // Discarded-value expressions: evaluated for side effects only. Not a
    // core constant expression before C++11.
    if (!Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_nonliteral)
          << E->getType();
    if (!EvaluateVoid(E, Info))
      return false;
  } else if (T->isAtomicType()) {
    // Class/array atomics need in-place evaluation like other aggregates;
    // scalar atomics evaluate directly into Result.
    QualType Unqual = T.getAtomicUnqualifiedType();
    if (Unqual->isArrayType() || Unqual->isRecordType()) {
      LValue LV;
      APValue &Value = Info.CurrentCall->createTemporary(
          Key: E, T: Unqual, Scope: ScopeKind::FullExpression, LV);
      if (!EvaluateAtomic(E, This: &LV, Result&: Value, Info))
        return false;
      Result = Value;
    } else {
      if (!EvaluateAtomic(E, This: nullptr, Result, Info))
        return false;
    }
  } else if (Info.getLangOpts().CPlusPlus11) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_nonliteral) << E->getType();
    return false;
  } else {
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  return true;
}
20661
20662/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
20663/// cases, the in-place evaluation is essential, since later initializers for
20664/// an object can indirectly refer to subobjects which were initialized earlier.
20665static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
20666 const Expr *E, bool AllowNonLiteralTypes) {
20667 assert(!E->isValueDependent());
20668
20669 // Normally expressions passed to EvaluateInPlace have a type, but not when
20670 // a VarDecl initializer is evaluated before the untyped ParenListExpr is
20671 // replaced with a CXXConstructExpr. This can happen in LLDB.
20672 if (E->getType().isNull())
20673 return false;
20674
20675 if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, This: &This))
20676 return false;
20677
20678 if (E->isPRValue()) {
20679 // Evaluate arrays and record types in-place, so that later initializers can
20680 // refer to earlier-initialized members of the object.
20681 QualType T = E->getType();
20682 if (T->isArrayType())
20683 return EvaluateArray(E, This, Result, Info);
20684 else if (T->isRecordType())
20685 return EvaluateRecord(E, This, Result, Info);
20686 else if (T->isAtomicType()) {
20687 QualType Unqual = T.getAtomicUnqualifiedType();
20688 if (Unqual->isArrayType() || Unqual->isRecordType())
20689 return EvaluateAtomic(E, This: &This, Result, Info);
20690 }
20691 }
20692
20693 // For any other type, in-place evaluation is unimportant.
20694 return Evaluate(Result, Info, E);
20695}
20696
/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
/// lvalue-to-rvalue cast if it is an lvalue. Also verifies the result is a
/// permitted constant expression and (for the classic evaluator) that no
/// dynamic allocations leak.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
  assert(!E->isValueDependent());

  // Untyped expressions (see EvaluateInPlace) cannot be evaluated.
  if (E->getType().isNull())
    return false;

  if (!CheckLiteralType(Info, E))
    return false;

  // Prefer the bytecode ("new") constant interpreter when it is enabled; it
  // performs the lvalue-to-rvalue conversion itself.
  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluateAsRValue(Parent&: Info, E, Result))
      return false;
    return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result,
                                   Kind: ConstantExprKind::Normal);
  }

  if (!::Evaluate(Result, Info, E))
    return false;

  // Implicit lvalue-to-rvalue cast.
  if (E->isGLValue()) {
    LValue LV;
    LV.setFrom(Ctx: Info.Ctx, V: Result);
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: LV, RVal&: Result))
      return false;
  }

  // Check this core constant expression is a constant expression.
  return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result,
                                 Kind: ConstantExprKind::Normal) &&
         CheckMemoryLeaks(Info);
}
20731
/// Fast path for trivially constant expressions. Returns true if it could
/// decide the question without running the full evaluator, setting IsConst
/// accordingly; returns false to request the slow path.
static bool FastEvaluateAsRValue(const Expr *Exp, APValue &Result,
                                 const ASTContext &Ctx, bool &IsConst) {
  // Fast-path evaluations of integer literals, since we sometimes see files
  // containing vast quantities of these.
  if (const auto *L = dyn_cast<IntegerLiteral>(Val: Exp)) {
    Result =
        APValue(APSInt(L->getValue(), L->getType()->isUnsignedIntegerType()));
    IsConst = true;
    return true;
  }

  if (const auto *L = dyn_cast<CXXBoolLiteralExpr>(Val: Exp)) {
    // Booleans are represented as 1-bit integers.
    Result = APValue(APSInt(APInt(1, L->getValue())));
    IsConst = true;
    return true;
  }

  if (const auto *FL = dyn_cast<FloatingLiteral>(Val: Exp)) {
    Result = APValue(FL->getValue());
    IsConst = true;
    return true;
  }

  if (const auto *L = dyn_cast<CharacterLiteral>(Val: Exp)) {
    Result = APValue(Ctx.MakeIntValue(Value: L->getValue(), Type: L->getType()));
    IsConst = true;
    return true;
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(Val: Exp)) {
    // Reuse a cached evaluation result when available. Lvalue results are
    // not returned from here since they may reference objects that need the
    // full evaluation machinery.
    if (CE->hasAPValueResult()) {
      APValue APV = CE->getAPValueResult();
      if (!APV.isLValue()) {
        Result = std::move(APV);
        IsConst = true;
        return true;
      }
    }

    // The SubExpr is usually just an IntegerLiteral.
    return FastEvaluateAsRValue(Exp: CE->getSubExpr(), Result, Ctx, IsConst);
  }

  // This case should be rare, but we need to check it before we check on
  // the type below.
  if (Exp->getType().isNull()) {
    IsConst = false;
    return true;
  }

  // Not decided here; the caller should run the full evaluator.
  return false;
}
20784
20785static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
20786 Expr::SideEffectsKind SEK) {
20787 return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
20788 (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
20789}
20790
20791static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result,
20792 const ASTContext &Ctx, EvalInfo &Info) {
20793 assert(!E->isValueDependent());
20794 bool IsConst;
20795 if (FastEvaluateAsRValue(Exp: E, Result&: Result.Val, Ctx, IsConst))
20796 return IsConst;
20797
20798 return EvaluateAsRValue(Info, E, Result&: Result.Val);
20799}
20800
20801static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
20802 const ASTContext &Ctx,
20803 Expr::SideEffectsKind AllowSideEffects,
20804 EvalInfo &Info) {
20805 assert(!E->isValueDependent());
20806 if (!E->getType()->isIntegralOrEnumerationType())
20807 return false;
20808
20809 if (!::EvaluateAsRValue(E, Result&: ExprResult, Ctx, Info) ||
20810 !ExprResult.Val.isInt() ||
20811 hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects))
20812 return false;
20813
20814 return true;
20815}
20816
20817static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult,
20818 const ASTContext &Ctx,
20819 Expr::SideEffectsKind AllowSideEffects,
20820 EvalInfo &Info) {
20821 assert(!E->isValueDependent());
20822 if (!E->getType()->isFixedPointType())
20823 return false;
20824
20825 if (!::EvaluateAsRValue(E, Result&: ExprResult, Ctx, Info))
20826 return false;
20827
20828 if (!ExprResult.Val.isFixedPoint() ||
20829 hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects))
20830 return false;
20831
20832 return true;
20833}
20834
/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
/// will be applied to the result.
bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
                            bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsRValue");
  // Evaluate in IgnoreSideEffects mode; delegate to the static helper, which
  // tries the literal fast path before full evaluation.
  EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = InConstantContext;
  return ::EvaluateAsRValue(E: this, Result, Ctx, Info);
}
20849
20850bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx,
20851 bool InConstantContext) const {
20852 assert(!isValueDependent() &&
20853 "Expression evaluator can't be called on a dependent expression.");
20854 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsBooleanCondition");
20855 EvalResult Scratch;
20856 return EvaluateAsRValue(Result&: Scratch, Ctx, InConstantContext) &&
20857 HandleConversionToBool(Val: Scratch.Val, Result);
20858}
20859
/// Evaluate this expression as an integer rvalue. Thin wrapper that builds an
/// IgnoreSideEffects EvalInfo and delegates to the static ::EvaluateAsInt,
/// which enforces the integral-type, integer-result, and side-effect checks.
bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
                         SideEffectsKind AllowSideEffects,
                         bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsInt");
  EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = InConstantContext;
  return ::EvaluateAsInt(E: this, ExprResult&: Result, Ctx, AllowSideEffects, Info);
}
20870
/// Evaluate this expression as a fixed-point rvalue. Thin wrapper that builds
/// an IgnoreSideEffects EvalInfo and delegates to the static
/// ::EvaluateAsFixedPoint, which enforces the type, result-kind, and
/// side-effect checks.
bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
                                SideEffectsKind AllowSideEffects,
                                bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFixedPoint");
  EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = InConstantContext;
  return ::EvaluateAsFixedPoint(E: this, ExprResult&: Result, Ctx, AllowSideEffects, Info);
}
20881
20882bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
20883 SideEffectsKind AllowSideEffects,
20884 bool InConstantContext) const {
20885 assert(!isValueDependent() &&
20886 "Expression evaluator can't be called on a dependent expression.");
20887
20888 if (!getType()->isRealFloatingType())
20889 return false;
20890
20891 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFloat");
20892 EvalResult ExprResult;
20893 if (!EvaluateAsRValue(Result&: ExprResult, Ctx, InConstantContext) ||
20894 !ExprResult.Val.isFloat() ||
20895 hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects))
20896 return false;
20897
20898 Result = ExprResult.Val.getFloat();
20899 return true;
20900}
20901
/// Evaluate this glvalue expression, producing the resulting lvalue in
/// \p Result.Val. The result must additionally satisfy the lvalue
/// constant-expression rules, checked against an lvalue reference to this
/// expression's type.
bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
                            bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsLValue");
  EvalInfo Info(Ctx, Result, EvaluationMode::ConstantFold);
  Info.InConstantContext = InConstantContext;
  LValue LV;
  CheckedTemporaries CheckedTemps;

  // New constant interpreter path: it fills Result.Val directly; the value is
  // converted back into an LValue only to run the constant-expression check.
  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluate(Parent&: Info, E: this, Result&: Result.Val,
                                              Kind: ConstantExprKind::Normal))
      return false;

    LV.setFrom(Ctx, V: Result.Val);
    return CheckLValueConstantExpression(
        Info, Loc: getExprLoc(), Type: Ctx.getLValueReferenceType(T: getType()), LVal: LV,
        Kind: ConstantExprKind::Normal, CheckedTemps);
  }

  // Classic evaluator path: evaluation must succeed, leave no pending
  // cleanups, record no side effects, and pass the lvalue check.
  if (!EvaluateLValue(E: this, Result&: LV, Info) || !Info.discardCleanups() ||
      Result.HasSideEffects ||
      !CheckLValueConstantExpression(Info, Loc: getExprLoc(),
                                     Type: Ctx.getLValueReferenceType(T: getType()), LVal: LV,
                                     Kind: ConstantExprKind::Normal, CheckedTemps))
    return false;

  LV.moveInto(V&: Result.Val);
  return true;
}
20934
/// Evaluate the destruction of an object at \p Base whose current value is
/// \p DestroyedValue. When \p IsConstantDestruction is set, evaluation runs
/// under the strict constant-expression mode; otherwise it only
/// constant-folds. Fails if destruction fails or records side effects.
static bool EvaluateDestruction(const ASTContext &Ctx, APValue::LValueBase Base,
                                APValue DestroyedValue, QualType Type,
                                SourceLocation Loc, Expr::EvalStatus &EStatus,
                                bool IsConstantDestruction) {
  EvalInfo Info(Ctx, EStatus,
                IsConstantDestruction ? EvaluationMode::ConstantExpression
                                      : EvaluationMode::ConstantFold);
  // DestroyedValue is passed by reference below, so the destructor can
  // mutate the copy this function owns.
  Info.setEvaluatingDecl(Base, Value&: DestroyedValue,
                         EDK: EvalInfo::EvaluatingDeclKind::Dtor);
  Info.InConstantContext = IsConstantDestruction;

  // NOTE(review): LVal is set up but never read afterwards in this function —
  // looks like a candidate for removal; confirm LValue::set has no needed
  // side effects before deleting.
  LValue LVal;
  LVal.set(B: Base);

  if (!HandleDestruction(Info, Loc, LVBase: Base, Value&: DestroyedValue, T: Type) ||
      EStatus.HasSideEffects)
    return false;

  if (!Info.discardCleanups())
    llvm_unreachable("Unhandled cleanup; missing full expression marker?");

  return true;
}
20958
/// Evaluate this expression as a constant expression of kind \p Kind,
/// enforcing the full constant-expression rules (and, for class template
/// arguments, constant destruction as well).
bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
                                  ConstantExprKind Kind) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  // Fast path for simple literals; only accept it if a value was produced.
  bool IsConst;
  if (FastEvaluateAsRValue(Exp: this, Result&: Result.Val, Ctx, IsConst) &&
      Result.Val.hasValue())
    return true;

  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsConstantExpr");
  EvaluationMode EM = EvaluationMode::ConstantExpression;
  EvalInfo Info(Ctx, Result, EM);
  Info.InConstantContext = true;

  // New constant interpreter path.
  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluate(Parent&: Info, E: this, Result&: Result.Val, Kind))
      return false;
    return CheckConstantExpression(Info, DiagLoc: getExprLoc(),
                                   Type: getStorageType(Ctx, E: this), Value: Result.Val, Kind);
  }

  // The type of the object we're initializing is 'const T' for a class NTTP.
  QualType T = getType();
  if (Kind == ConstantExprKind::ClassTemplateArgument)
    T.addConst();

  // If we're evaluating a prvalue, fake up a MaterializeTemporaryExpr to
  // represent the result of the evaluation. CheckConstantExpression ensures
  // this doesn't escape.
  MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true);
  APValue::LValueBase Base(&BaseMTE);
  Info.setEvaluatingDecl(Base, Value&: Result.Val);

  LValue LVal;
  LVal.set(B: Base);
  // C++23 [intro.execution]/p5
  // A full-expression is [...] a constant-expression
  // So we need to make sure temporary objects are destroyed after having
  // evaluating the expression (per C++23 [class.temporary]/p4).
  FullExpressionRAII Scope(Info);
  if (!::EvaluateInPlace(Result&: Result.Val, Info, This: LVal, E: this) ||
      Result.HasSideEffects || !Scope.destroy())
    return false;

  if (!Info.discardCleanups())
    llvm_unreachable("Unhandled cleanup; missing full expression marker?");

  // Verify the evaluated value satisfies the constant-expression rules and
  // that no dynamic allocations escaped.
  if (!CheckConstantExpression(Info, DiagLoc: getExprLoc(), Type: getStorageType(Ctx, E: this),
                               Value: Result.Val, Kind))
    return false;
  if (!CheckMemoryLeaks(Info))
    return false;

  // If this is a class template argument, it's required to have constant
  // destruction too.
  if (Kind == ConstantExprKind::ClassTemplateArgument &&
      (!EvaluateDestruction(Ctx, Base, DestroyedValue: Result.Val, Type: T, Loc: getBeginLoc(), EStatus&: Result,
                            IsConstantDestruction: true) ||
       Result.HasSideEffects)) {
    // FIXME: Prefix a note to indicate that the problem is lack of constant
    // destruction.
    return false;
  }

  return true;
}
21025
21026bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
21027 const VarDecl *VD,
21028 SmallVectorImpl<PartialDiagnosticAt> &Notes,
21029 bool IsConstantInitialization) const {
21030 assert(!isValueDependent() &&
21031 "Expression evaluator can't be called on a dependent expression.");
21032 assert(VD && "Need a valid VarDecl");
21033
21034 llvm::TimeTraceScope TimeScope("EvaluateAsInitializer", [&] {
21035 std::string Name;
21036 llvm::raw_string_ostream OS(Name);
21037 VD->printQualifiedName(OS);
21038 return Name;
21039 });
21040
21041 Expr::EvalStatus EStatus;
21042 EStatus.Diag = &Notes;
21043
21044 EvalInfo Info(Ctx, EStatus,
21045 (IsConstantInitialization &&
21046 (Ctx.getLangOpts().CPlusPlus || Ctx.getLangOpts().C23))
21047 ? EvaluationMode::ConstantExpression
21048 : EvaluationMode::ConstantFold);
21049 Info.setEvaluatingDecl(Base: VD, Value);
21050 Info.InConstantContext = IsConstantInitialization;
21051
21052 SourceLocation DeclLoc = VD->getLocation();
21053 QualType DeclTy = VD->getType();
21054
21055 if (Info.EnableNewConstInterp) {
21056 auto &InterpCtx = Ctx.getInterpContext();
21057 if (!InterpCtx.evaluateAsInitializer(Parent&: Info, VD, Init: this, Result&: Value))
21058 return false;
21059
21060 return CheckConstantExpression(Info, DiagLoc: DeclLoc, Type: DeclTy, Value,
21061 Kind: ConstantExprKind::Normal);
21062 } else {
21063 LValue LVal;
21064 LVal.set(B: VD);
21065
21066 {
21067 // C++23 [intro.execution]/p5
21068 // A full-expression is ... an init-declarator ([dcl.decl]) or a
21069 // mem-initializer.
21070 // So we need to make sure temporary objects are destroyed after having
21071 // evaluated the expression (per C++23 [class.temporary]/p4).
21072 //
21073 // FIXME: Otherwise this may break test/Modules/pr68702.cpp because the
21074 // serialization code calls ParmVarDecl::getDefaultArg() which strips the
21075 // outermost FullExpr, such as ExprWithCleanups.
21076 FullExpressionRAII Scope(Info);
21077 if (!EvaluateInPlace(Result&: Value, Info, This: LVal, E: this,
21078 /*AllowNonLiteralTypes=*/true) ||
21079 EStatus.HasSideEffects)
21080 return false;
21081 }
21082
21083 // At this point, any lifetime-extended temporaries are completely
21084 // initialized.
21085 Info.performLifetimeExtension();
21086
21087 if (!Info.discardCleanups())
21088 llvm_unreachable("Unhandled cleanup; missing full expression marker?");
21089 }
21090
21091 return CheckConstantExpression(Info, DiagLoc: DeclLoc, Type: DeclTy, Value,
21092 Kind: ConstantExprKind::Normal) &&
21093 CheckMemoryLeaks(Info);
21094}
21095
/// Evaluate the destruction of this variable, recording diagnostics in
/// \p Notes. On success, marks the variable's evaluated statement as having
/// constant destruction.
bool VarDecl::evaluateDestruction(
    SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
  Expr::EvalStatus EStatus;
  EStatus.Diag = &Notes;

  // Only treat the destruction as constant destruction if we formally have
  // constant initialization (or are usable in a constant expression).
  bool IsConstantDestruction = hasConstantInitialization();

  // Make a copy of the value for the destructor to mutate, if we know it.
  // Otherwise, treat the value as default-initialized; if the destructor works
  // anyway, then the destruction is constant (and must be essentially empty).
  APValue DestroyedValue;
  if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent())
    DestroyedValue = *getEvaluatedValue();
  else if (!handleDefaultInitValue(T: getType(), Result&: DestroyedValue))
    return false;

  if (!EvaluateDestruction(Ctx: getASTContext(), Base: this, DestroyedValue: std::move(DestroyedValue),
                           Type: getType(), Loc: getLocation(), EStatus,
                           IsConstantDestruction) ||
      EStatus.HasSideEffects)
    return false;

  // Cache the outcome on the evaluated statement.
  ensureEvaluatedStmt()->HasConstantDestruction = true;
  return true;
}
21123
21124/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
21125/// constant folded, but discard the result.
21126bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
21127 assert(!isValueDependent() &&
21128 "Expression evaluator can't be called on a dependent expression.");
21129
21130 EvalResult Result;
21131 return EvaluateAsRValue(Result, Ctx, /* in constant context */ InConstantContext: true) &&
21132 !hasUnacceptableSideEffect(Result, SEK);
21133}
21134
/// Evaluate an expression the caller already knows is a constant integer,
/// returning its value. Asserts (rather than reporting failure) if evaluation
/// fails or the result is not an integer.
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstInt");
  EvalResult EVResult;
  EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = true;

  // Result is only consumed by the asserts; cast to void for NDEBUG builds.
  bool Result = ::EvaluateAsRValue(E: this, Result&: EVResult, Ctx, Info);
  (void)Result;
  assert(Result && "Could not evaluate expression");
  assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");

  return EVResult.Val.getInt();
}
21151
/// Like EvaluateKnownConstInt, but additionally checks for undefined behavior
/// (e.g. overflow) during evaluation, emitting any findings into \p Diag.
APSInt Expr::EvaluateKnownConstIntCheckOverflow(
    const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstIntCheckOverflow");
  EvalResult EVResult;
  EVResult.Diag = Diag;
  EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = true;
  // This flag is what distinguishes this entry point from
  // EvaluateKnownConstInt.
  Info.CheckingForUndefinedBehavior = true;

  // Result is only consumed by the asserts; cast to void for NDEBUG builds.
  bool Result = ::EvaluateAsRValue(Info, E: this, Result&: EVResult.Val);
  (void)Result;
  assert(Result && "Could not evaluate expression");
  assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");

  return EVResult.Val.getInt();
}
21171
21172void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
21173 assert(!isValueDependent() &&
21174 "Expression evaluator can't be called on a dependent expression.");
21175
21176 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateForOverflow");
21177 bool IsConst;
21178 EvalResult EVResult;
21179 if (!FastEvaluateAsRValue(Exp: this, Result&: EVResult.Val, Ctx, IsConst)) {
21180 EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects);
21181 Info.CheckingForUndefinedBehavior = true;
21182 (void)::EvaluateAsRValue(Info, E: this, Result&: EVResult.Val);
21183 }
21184}
21185
/// Return whether this result's lvalue has a global (static-storage) base.
/// Must only be called when the result holds an lvalue.
bool Expr::EvalResult::isGlobalLValue() const {
  assert(Val.isLValue());
  return IsGlobalLValue(B: Val.getLValueBase());
}
21190
21191/// isIntegerConstantExpr - this recursive routine will test if an expression is
21192/// an integer constant expression.
21193
21194/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
21195/// comma, etc
21196
21197// CheckICE - This function does the fundamental ICE checking: the returned
21198// ICEDiag contains an ICEKind indicating whether the expression is an ICE.
21199//
21200// Note that to reduce code duplication, this helper does no evaluation
21201// itself; the caller checks whether the expression is evaluatable, and
21202// in the rare cases where CheckICE actually cares about the evaluated
21203// value, it calls into Evaluate.
21204
namespace {

/// Classification result for integer-constant-expression (ICE) checking.
/// NOTE: enumerator order matters — Worst() selects the numerically larger
/// (i.e. more severe) kind.
enum ICEKind {
  /// This expression is an ICE.
  IK_ICE,
  /// This expression is not an ICE, but if it isn't evaluated, it's
  /// a legal subexpression for an ICE. This return value is used to handle
  /// the comma operator in C99 mode, and non-constant subexpressions.
  IK_ICEIfUnevaluated,
  /// This expression is not an ICE, and is not a legal subexpression for one.
  IK_NotICE
};

/// An ICE classification together with the source location to blame when the
/// expression is not an ICE.
struct ICEDiag {
  ICEKind Kind;
  SourceLocation Loc;

  ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {}
};

} // namespace
21226
21227static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); }
21228
21229static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
21230
21231static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
21232 Expr::EvalResult EVResult;
21233 Expr::EvalStatus Status;
21234 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
21235
21236 Info.InConstantContext = true;
21237 if (!::EvaluateAsRValue(E, Result&: EVResult, Ctx, Info) || EVResult.HasSideEffects ||
21238 !EVResult.Val.isInt())
21239 return ICEDiag(IK_NotICE, E->getBeginLoc());
21240
21241 return NoDiag();
21242}
21243
21244static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
21245 assert(!E->isValueDependent() && "Should not see value dependent exprs!");
21246 if (!E->getType()->isIntegralOrEnumerationType())
21247 return ICEDiag(IK_NotICE, E->getBeginLoc());
21248
21249 switch (E->getStmtClass()) {
21250#define ABSTRACT_STMT(Node)
21251#define STMT(Node, Base) case Expr::Node##Class:
21252#define EXPR(Node, Base)
21253#include "clang/AST/StmtNodes.inc"
21254 case Expr::PredefinedExprClass:
21255 case Expr::FloatingLiteralClass:
21256 case Expr::ImaginaryLiteralClass:
21257 case Expr::StringLiteralClass:
21258 case Expr::ArraySubscriptExprClass:
21259 case Expr::MatrixSingleSubscriptExprClass:
21260 case Expr::MatrixSubscriptExprClass:
21261 case Expr::ArraySectionExprClass:
21262 case Expr::OMPArrayShapingExprClass:
21263 case Expr::OMPIteratorExprClass:
21264 case Expr::CompoundAssignOperatorClass:
21265 case Expr::CompoundLiteralExprClass:
21266 case Expr::ExtVectorElementExprClass:
21267 case Expr::MatrixElementExprClass:
21268 case Expr::DesignatedInitExprClass:
21269 case Expr::ArrayInitLoopExprClass:
21270 case Expr::ArrayInitIndexExprClass:
21271 case Expr::NoInitExprClass:
21272 case Expr::DesignatedInitUpdateExprClass:
21273 case Expr::ImplicitValueInitExprClass:
21274 case Expr::ParenListExprClass:
21275 case Expr::VAArgExprClass:
21276 case Expr::AddrLabelExprClass:
21277 case Expr::StmtExprClass:
21278 case Expr::CXXMemberCallExprClass:
21279 case Expr::CUDAKernelCallExprClass:
21280 case Expr::CXXAddrspaceCastExprClass:
21281 case Expr::CXXDynamicCastExprClass:
21282 case Expr::CXXTypeidExprClass:
21283 case Expr::CXXUuidofExprClass:
21284 case Expr::MSPropertyRefExprClass:
21285 case Expr::MSPropertySubscriptExprClass:
21286 case Expr::CXXNullPtrLiteralExprClass:
21287 case Expr::UserDefinedLiteralClass:
21288 case Expr::CXXThisExprClass:
21289 case Expr::CXXThrowExprClass:
21290 case Expr::CXXNewExprClass:
21291 case Expr::CXXDeleteExprClass:
21292 case Expr::CXXPseudoDestructorExprClass:
21293 case Expr::UnresolvedLookupExprClass:
21294 case Expr::RecoveryExprClass:
21295 case Expr::DependentScopeDeclRefExprClass:
21296 case Expr::CXXConstructExprClass:
21297 case Expr::CXXInheritedCtorInitExprClass:
21298 case Expr::CXXStdInitializerListExprClass:
21299 case Expr::CXXBindTemporaryExprClass:
21300 case Expr::ExprWithCleanupsClass:
21301 case Expr::CXXTemporaryObjectExprClass:
21302 case Expr::CXXUnresolvedConstructExprClass:
21303 case Expr::CXXDependentScopeMemberExprClass:
21304 case Expr::UnresolvedMemberExprClass:
21305 case Expr::ObjCStringLiteralClass:
21306 case Expr::ObjCBoxedExprClass:
21307 case Expr::ObjCArrayLiteralClass:
21308 case Expr::ObjCDictionaryLiteralClass:
21309 case Expr::ObjCEncodeExprClass:
21310 case Expr::ObjCMessageExprClass:
21311 case Expr::ObjCSelectorExprClass:
21312 case Expr::ObjCProtocolExprClass:
21313 case Expr::ObjCIvarRefExprClass:
21314 case Expr::ObjCPropertyRefExprClass:
21315 case Expr::ObjCSubscriptRefExprClass:
21316 case Expr::ObjCIsaExprClass:
21317 case Expr::ObjCAvailabilityCheckExprClass:
21318 case Expr::ShuffleVectorExprClass:
21319 case Expr::ConvertVectorExprClass:
21320 case Expr::BlockExprClass:
21321 case Expr::NoStmtClass:
21322 case Expr::OpaqueValueExprClass:
21323 case Expr::PackExpansionExprClass:
21324 case Expr::SubstNonTypeTemplateParmPackExprClass:
21325 case Expr::FunctionParmPackExprClass:
21326 case Expr::AsTypeExprClass:
21327 case Expr::ObjCIndirectCopyRestoreExprClass:
21328 case Expr::MaterializeTemporaryExprClass:
21329 case Expr::PseudoObjectExprClass:
21330 case Expr::AtomicExprClass:
21331 case Expr::LambdaExprClass:
21332 case Expr::CXXFoldExprClass:
21333 case Expr::CoawaitExprClass:
21334 case Expr::DependentCoawaitExprClass:
21335 case Expr::CoyieldExprClass:
21336 case Expr::SYCLUniqueStableNameExprClass:
21337 case Expr::CXXParenListInitExprClass:
21338 case Expr::HLSLOutArgExprClass:
21339 return ICEDiag(IK_NotICE, E->getBeginLoc());
21340
21341 case Expr::MemberExprClass: {
21342 if (Ctx.getLangOpts().C23) {
21343 const Expr *ME = E->IgnoreParenImpCasts();
21344 while (const auto *M = dyn_cast<MemberExpr>(Val: ME)) {
21345 if (M->isArrow())
21346 return ICEDiag(IK_NotICE, E->getBeginLoc());
21347 ME = M->getBase()->IgnoreParenImpCasts();
21348 }
21349 const auto *DRE = dyn_cast<DeclRefExpr>(Val: ME);
21350 if (DRE) {
21351 if (const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl());
21352 VD && VD->isConstexpr())
21353 return CheckEvalInICE(E, Ctx);
21354 }
21355 }
21356 return ICEDiag(IK_NotICE, E->getBeginLoc());
21357 }
21358
21359 case Expr::InitListExprClass: {
21360 // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
21361 // form "T x = { a };" is equivalent to "T x = a;".
21362 // Unless we're initializing a reference, T is a scalar as it is known to be
21363 // of integral or enumeration type.
21364 if (E->isPRValue())
21365 if (cast<InitListExpr>(Val: E)->getNumInits() == 1)
21366 return CheckICE(E: cast<InitListExpr>(Val: E)->getInit(Init: 0), Ctx);
21367 return ICEDiag(IK_NotICE, E->getBeginLoc());
21368 }
21369
21370 case Expr::SizeOfPackExprClass:
21371 case Expr::GNUNullExprClass:
21372 case Expr::SourceLocExprClass:
21373 case Expr::EmbedExprClass:
21374 case Expr::OpenACCAsteriskSizeExprClass:
21375 return NoDiag();
21376
21377 case Expr::PackIndexingExprClass:
21378 return CheckICE(E: cast<PackIndexingExpr>(Val: E)->getSelectedExpr(), Ctx);
21379
21380 case Expr::SubstNonTypeTemplateParmExprClass:
21381 return
21382 CheckICE(E: cast<SubstNonTypeTemplateParmExpr>(Val: E)->getReplacement(), Ctx);
21383
21384 case Expr::ConstantExprClass:
21385 return CheckICE(E: cast<ConstantExpr>(Val: E)->getSubExpr(), Ctx);
21386
21387 case Expr::ParenExprClass:
21388 return CheckICE(E: cast<ParenExpr>(Val: E)->getSubExpr(), Ctx);
21389 case Expr::GenericSelectionExprClass:
21390 return CheckICE(E: cast<GenericSelectionExpr>(Val: E)->getResultExpr(), Ctx);
21391 case Expr::IntegerLiteralClass:
21392 case Expr::FixedPointLiteralClass:
21393 case Expr::CharacterLiteralClass:
21394 case Expr::ObjCBoolLiteralExprClass:
21395 case Expr::CXXBoolLiteralExprClass:
21396 case Expr::CXXScalarValueInitExprClass:
21397 case Expr::TypeTraitExprClass:
21398 case Expr::ConceptSpecializationExprClass:
21399 case Expr::RequiresExprClass:
21400 case Expr::ArrayTypeTraitExprClass:
21401 case Expr::ExpressionTraitExprClass:
21402 case Expr::CXXNoexceptExprClass:
21403 case Expr::CXXReflectExprClass:
21404 return NoDiag();
21405 case Expr::CallExprClass:
21406 case Expr::CXXOperatorCallExprClass: {
21407 // C99 6.6/3 allows function calls within unevaluated subexpressions of
21408 // constant expressions, but they can never be ICEs because an ICE cannot
21409 // contain an operand of (pointer to) function type.
21410 const CallExpr *CE = cast<CallExpr>(Val: E);
21411 if (CE->getBuiltinCallee())
21412 return CheckEvalInICE(E, Ctx);
21413 return ICEDiag(IK_NotICE, E->getBeginLoc());
21414 }
21415 case Expr::CXXRewrittenBinaryOperatorClass:
21416 return CheckICE(E: cast<CXXRewrittenBinaryOperator>(Val: E)->getSemanticForm(),
21417 Ctx);
21418 case Expr::DeclRefExprClass: {
21419 const NamedDecl *D = cast<DeclRefExpr>(Val: E)->getDecl();
21420 if (isa<EnumConstantDecl>(Val: D))
21421 return NoDiag();
21422
21423 // C++ and OpenCL (FIXME: spec reference?) allow reading const-qualified
21424 // integer variables in constant expressions:
21425 //
21426 // C++ 7.1.5.1p2
21427 // A variable of non-volatile const-qualified integral or enumeration
21428 // type initialized by an ICE can be used in ICEs.
21429 //
21430 // We sometimes use CheckICE to check the C++98 rules in C++11 mode. In
21431 // that mode, use of reference variables should not be allowed.
21432 const VarDecl *VD = dyn_cast<VarDecl>(Val: D);
21433 if (VD && VD->isUsableInConstantExpressions(C: Ctx) &&
21434 !VD->getType()->isReferenceType())
21435 return NoDiag();
21436
21437 return ICEDiag(IK_NotICE, E->getBeginLoc());
21438 }
21439 case Expr::UnaryOperatorClass: {
21440 const UnaryOperator *Exp = cast<UnaryOperator>(Val: E);
21441 switch (Exp->getOpcode()) {
21442 case UO_PostInc:
21443 case UO_PostDec:
21444 case UO_PreInc:
21445 case UO_PreDec:
21446 case UO_AddrOf:
21447 case UO_Deref:
21448 case UO_Coawait:
21449 // C99 6.6/3 allows increment and decrement within unevaluated
21450 // subexpressions of constant expressions, but they can never be ICEs
21451 // because an ICE cannot contain an lvalue operand.
21452 return ICEDiag(IK_NotICE, E->getBeginLoc());
21453 case UO_Extension:
21454 case UO_LNot:
21455 case UO_Plus:
21456 case UO_Minus:
21457 case UO_Not:
21458 case UO_Real:
21459 case UO_Imag:
21460 return CheckICE(E: Exp->getSubExpr(), Ctx);
21461 }
21462 llvm_unreachable("invalid unary operator class");
21463 }
21464 case Expr::OffsetOfExprClass: {
21465 // Note that per C99, offsetof must be an ICE. And AFAIK, using
21466 // EvaluateAsRValue matches the proposed gcc behavior for cases like
21467 // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
21468 // compliance: we should warn earlier for offsetof expressions with
21469 // array subscripts that aren't ICEs, and if the array subscripts
21470 // are ICEs, the value of the offsetof must be an integer constant.
21471 return CheckEvalInICE(E, Ctx);
21472 }
21473 case Expr::UnaryExprOrTypeTraitExprClass: {
21474 const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(Val: E);
21475 if ((Exp->getKind() == UETT_SizeOf) &&
21476 Exp->getTypeOfArgument()->isVariableArrayType())
21477 return ICEDiag(IK_NotICE, E->getBeginLoc());
21478 if (Exp->getKind() == UETT_CountOf) {
21479 QualType ArgTy = Exp->getTypeOfArgument();
21480 if (ArgTy->isVariableArrayType()) {
21481 // We need to look whether the array is multidimensional. If it is,
21482 // then we want to check the size expression manually to see whether
21483 // it is an ICE or not.
21484 const auto *VAT = Ctx.getAsVariableArrayType(T: ArgTy);
21485 if (VAT->getElementType()->isArrayType())
21486 // Variable array size expression could be missing (e.g. int a[*][10])
21487 // In that case, it can't be a constant expression.
21488 return VAT->getSizeExpr() ? CheckICE(E: VAT->getSizeExpr(), Ctx)
21489 : ICEDiag(IK_NotICE, E->getBeginLoc());
21490
21491 // Otherwise, this is a regular VLA, which is definitely not an ICE.
21492 return ICEDiag(IK_NotICE, E->getBeginLoc());
21493 }
21494 }
21495 return NoDiag();
21496 }
21497 case Expr::BinaryOperatorClass: {
21498 const BinaryOperator *Exp = cast<BinaryOperator>(Val: E);
21499 switch (Exp->getOpcode()) {
21500 case BO_PtrMemD:
21501 case BO_PtrMemI:
21502 case BO_Assign:
21503 case BO_MulAssign:
21504 case BO_DivAssign:
21505 case BO_RemAssign:
21506 case BO_AddAssign:
21507 case BO_SubAssign:
21508 case BO_ShlAssign:
21509 case BO_ShrAssign:
21510 case BO_AndAssign:
21511 case BO_XorAssign:
21512 case BO_OrAssign:
21513 // C99 6.6/3 allows assignments within unevaluated subexpressions of
21514 // constant expressions, but they can never be ICEs because an ICE cannot
21515 // contain an lvalue operand.
21516 return ICEDiag(IK_NotICE, E->getBeginLoc());
21517
21518 case BO_Mul:
21519 case BO_Div:
21520 case BO_Rem:
21521 case BO_Add:
21522 case BO_Sub:
21523 case BO_Shl:
21524 case BO_Shr:
21525 case BO_LT:
21526 case BO_GT:
21527 case BO_LE:
21528 case BO_GE:
21529 case BO_EQ:
21530 case BO_NE:
21531 case BO_And:
21532 case BO_Xor:
21533 case BO_Or:
21534 case BO_Comma:
21535 case BO_Cmp: {
21536 ICEDiag LHSResult = CheckICE(E: Exp->getLHS(), Ctx);
21537 ICEDiag RHSResult = CheckICE(E: Exp->getRHS(), Ctx);
21538 if (Exp->getOpcode() == BO_Div ||
21539 Exp->getOpcode() == BO_Rem) {
21540 // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
21541 // we don't evaluate one.
21542 if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
21543 llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
21544 if (REval == 0)
21545 return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
21546 if (REval.isSigned() && REval.isAllOnes()) {
21547 llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
21548 if (LEval.isMinSignedValue())
21549 return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
21550 }
21551 }
21552 }
21553 if (Exp->getOpcode() == BO_Comma) {
21554 if (Ctx.getLangOpts().C99) {
21555 // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
21556 // if it isn't evaluated.
21557 if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
21558 return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
21559 } else {
21560 // In both C89 and C++, commas in ICEs are illegal.
21561 return ICEDiag(IK_NotICE, E->getBeginLoc());
21562 }
21563 }
21564 return Worst(A: LHSResult, B: RHSResult);
21565 }
21566 case BO_LAnd:
21567 case BO_LOr: {
21568 ICEDiag LHSResult = CheckICE(E: Exp->getLHS(), Ctx);
21569 ICEDiag RHSResult = CheckICE(E: Exp->getRHS(), Ctx);
21570 if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) {
21571 // Rare case where the RHS has a comma "side-effect"; we need
21572 // to actually check the condition to see whether the side
21573 // with the comma is evaluated.
21574 if ((Exp->getOpcode() == BO_LAnd) !=
21575 (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
21576 return RHSResult;
21577 return NoDiag();
21578 }
21579
21580 return Worst(A: LHSResult, B: RHSResult);
21581 }
21582 }
21583 llvm_unreachable("invalid binary operator kind");
21584 }
21585 case Expr::ImplicitCastExprClass:
21586 case Expr::CStyleCastExprClass:
21587 case Expr::CXXFunctionalCastExprClass:
21588 case Expr::CXXStaticCastExprClass:
21589 case Expr::CXXReinterpretCastExprClass:
21590 case Expr::CXXConstCastExprClass:
21591 case Expr::ObjCBridgedCastExprClass: {
21592 const Expr *SubExpr = cast<CastExpr>(Val: E)->getSubExpr();
21593 if (isa<ExplicitCastExpr>(Val: E)) {
21594 if (const FloatingLiteral *FL
21595 = dyn_cast<FloatingLiteral>(Val: SubExpr->IgnoreParenImpCasts())) {
21596 unsigned DestWidth = Ctx.getIntWidth(T: E->getType());
21597 bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
21598 APSInt IgnoredVal(DestWidth, !DestSigned);
21599 bool Ignored;
21600 // If the value does not fit in the destination type, the behavior is
21601 // undefined, so we are not required to treat it as a constant
21602 // expression.
21603 if (FL->getValue().convertToInteger(Result&: IgnoredVal,
21604 RM: llvm::APFloat::rmTowardZero,
21605 IsExact: &Ignored) & APFloat::opInvalidOp)
21606 return ICEDiag(IK_NotICE, E->getBeginLoc());
21607 return NoDiag();
21608 }
21609 }
21610 switch (cast<CastExpr>(Val: E)->getCastKind()) {
21611 case CK_LValueToRValue:
21612 case CK_AtomicToNonAtomic:
21613 case CK_NonAtomicToAtomic:
21614 case CK_NoOp:
21615 case CK_IntegralToBoolean:
21616 case CK_IntegralCast:
21617 return CheckICE(E: SubExpr, Ctx);
21618 default:
21619 return ICEDiag(IK_NotICE, E->getBeginLoc());
21620 }
21621 }
21622 case Expr::BinaryConditionalOperatorClass: {
21623 const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(Val: E);
21624 ICEDiag CommonResult = CheckICE(E: Exp->getCommon(), Ctx);
21625 if (CommonResult.Kind == IK_NotICE) return CommonResult;
21626 ICEDiag FalseResult = CheckICE(E: Exp->getFalseExpr(), Ctx);
21627 if (FalseResult.Kind == IK_NotICE) return FalseResult;
21628 if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
21629 if (FalseResult.Kind == IK_ICEIfUnevaluated &&
21630 Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
21631 return FalseResult;
21632 }
21633 case Expr::ConditionalOperatorClass: {
21634 const ConditionalOperator *Exp = cast<ConditionalOperator>(Val: E);
21635 // If the condition (ignoring parens) is a __builtin_constant_p call,
21636 // then only the true side is actually considered in an integer constant
21637 // expression, and it is fully evaluated. This is an important GNU
21638 // extension. See GCC PR38377 for discussion.
21639 if (const CallExpr *CallCE
21640 = dyn_cast<CallExpr>(Val: Exp->getCond()->IgnoreParenCasts()))
21641 if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
21642 return CheckEvalInICE(E, Ctx);
21643 ICEDiag CondResult = CheckICE(E: Exp->getCond(), Ctx);
21644 if (CondResult.Kind == IK_NotICE)
21645 return CondResult;
21646
21647 ICEDiag TrueResult = CheckICE(E: Exp->getTrueExpr(), Ctx);
21648 ICEDiag FalseResult = CheckICE(E: Exp->getFalseExpr(), Ctx);
21649
21650 if (TrueResult.Kind == IK_NotICE)
21651 return TrueResult;
21652 if (FalseResult.Kind == IK_NotICE)
21653 return FalseResult;
21654 if (CondResult.Kind == IK_ICEIfUnevaluated)
21655 return CondResult;
21656 if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE)
21657 return NoDiag();
21658 // Rare case where the diagnostics depend on which side is evaluated
21659 // Note that if we get here, CondResult is 0, and at least one of
21660 // TrueResult and FalseResult is non-zero.
21661 if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0)
21662 return FalseResult;
21663 return TrueResult;
21664 }
21665 case Expr::CXXDefaultArgExprClass:
21666 return CheckICE(E: cast<CXXDefaultArgExpr>(Val: E)->getExpr(), Ctx);
21667 case Expr::CXXDefaultInitExprClass:
21668 return CheckICE(E: cast<CXXDefaultInitExpr>(Val: E)->getExpr(), Ctx);
21669 case Expr::ChooseExprClass: {
21670 return CheckICE(E: cast<ChooseExpr>(Val: E)->getChosenSubExpr(), Ctx);
21671 }
21672 case Expr::BuiltinBitCastExprClass: {
21673 if (!checkBitCastConstexprEligibility(Info: nullptr, Ctx, BCE: cast<CastExpr>(Val: E)))
21674 return ICEDiag(IK_NotICE, E->getBeginLoc());
21675 return CheckICE(E: cast<CastExpr>(Val: E)->getSubExpr(), Ctx);
21676 }
21677 }
21678
21679 llvm_unreachable("Invalid StmtClass!");
21680}
21681
21682/// Evaluate an expression as a C++11 integral constant expression.
21683static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
21684 const Expr *E,
21685 llvm::APSInt *Value) {
21686 if (!E->getType()->isIntegralOrUnscopedEnumerationType())
21687 return false;
21688
21689 APValue Result;
21690 if (!E->isCXX11ConstantExpr(Ctx, Result: &Result))
21691 return false;
21692
21693 if (!Result.isInt())
21694 return false;
21695
21696 if (Value) *Value = Result.getInt();
21697 return true;
21698}
21699
21700bool Expr::isIntegerConstantExpr(const ASTContext &Ctx) const {
21701 assert(!isValueDependent() &&
21702 "Expression evaluator can't be called on a dependent expression.");
21703
21704 ExprTimeTraceScope TimeScope(this, Ctx, "isIntegerConstantExpr");
21705
21706 if (Ctx.getLangOpts().CPlusPlus11)
21707 return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, E: this, Value: nullptr);
21708
21709 ICEDiag D = CheckICE(E: this, Ctx);
21710 if (D.Kind != IK_ICE)
21711 return false;
21712 return true;
21713}
21714
21715std::optional<llvm::APSInt>
21716Expr::getIntegerConstantExpr(const ASTContext &Ctx) const {
21717 if (isValueDependent()) {
21718 // Expression evaluator can't succeed on a dependent expression.
21719 return std::nullopt;
21720 }
21721
21722 if (Ctx.getLangOpts().CPlusPlus11) {
21723 APSInt Value;
21724 if (EvaluateCPlusPlus11IntegralConstantExpr(Ctx, E: this, Value: &Value))
21725 return Value;
21726 return std::nullopt;
21727 }
21728
21729 if (!isIntegerConstantExpr(Ctx))
21730 return std::nullopt;
21731
21732 // The only possible side-effects here are due to UB discovered in the
21733 // evaluation (for instance, INT_MAX + 1). In such a case, we are still
21734 // required to treat the expression as an ICE, so we produce the folded
21735 // value.
21736 EvalResult ExprResult;
21737 Expr::EvalStatus Status;
21738 EvalInfo Info(Ctx, Status, EvaluationMode::IgnoreSideEffects);
21739 Info.InConstantContext = true;
21740
21741 if (!::EvaluateAsInt(E: this, ExprResult, Ctx, AllowSideEffects: SE_AllowSideEffects, Info))
21742 llvm_unreachable("ICE cannot be evaluated!");
21743
21744 return ExprResult.Val.getInt();
21745}
21746
21747bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
21748 assert(!isValueDependent() &&
21749 "Expression evaluator can't be called on a dependent expression.");
21750
21751 return CheckICE(E: this, Ctx).Kind == IK_ICE;
21752}
21753
21754bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result) const {
21755 assert(!isValueDependent() &&
21756 "Expression evaluator can't be called on a dependent expression.");
21757
21758 // We support this checking in C++98 mode in order to diagnose compatibility
21759 // issues.
21760 assert(Ctx.getLangOpts().CPlusPlus);
21761
21762 bool IsConst;
21763 APValue Scratch;
21764 if (FastEvaluateAsRValue(Exp: this, Result&: Scratch, Ctx, IsConst) && Scratch.hasValue()) {
21765 if (Result)
21766 *Result = std::move(Scratch);
21767 return true;
21768 }
21769
21770 // Build evaluation settings.
21771 Expr::EvalStatus Status;
21772 SmallVector<PartialDiagnosticAt, 8> Diags;
21773 Status.Diag = &Diags;
21774 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
21775
21776 bool IsConstExpr =
21777 ::EvaluateAsRValue(Info, E: this, Result&: Result ? *Result : Scratch) &&
21778 // FIXME: We don't produce a diagnostic for this, but the callers that
21779 // call us on arbitrary full-expressions should generally not care.
21780 Info.discardCleanups() && !Status.HasSideEffects;
21781
21782 return IsConstExpr && Diags.empty();
21783}
21784
/// Evaluate this expression as if it appeared inside a call to \p Callee,
/// with \p Args bound to the callee's parameters and, optionally, \p This as
/// the implicit object argument (used e.g. for enable_if conditions — see
/// the FIXME below). Returns true and fills \p Value on a clean,
/// side-effect-free evaluation.
bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
                                    const FunctionDecl *Callee,
                                    ArrayRef<const Expr*> Args,
                                    const Expr *This) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  // Record the callee's name for -ftime-trace profiling output.
  llvm::TimeTraceScope TimeScope("EvaluateWithSubstitution", [&] {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    Callee->getNameForDiagnostic(OS, Policy: Ctx.getPrintingPolicy(),
                                 /*Qualified=*/true);
    return Name;
  });

  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpressionUnevaluated);
  Info.InConstantContext = true;

  // Evaluate the implicit object argument, if one was provided. A failed or
  // side-effecting evaluation leaves ThisPtr null rather than aborting the
  // whole substitution.
  LValue ThisVal;
  const LValue *ThisPtr = nullptr;
  if (This) {
#ifndef NDEBUG
    auto *MD = dyn_cast<CXXMethodDecl>(Callee);
    assert(MD && "Don't provide `this` for non-methods.");
    assert(MD->isImplicitObjectMemberFunction() &&
           "Don't provide `this` for methods without an implicit object.");
#endif
    if (!This->isValueDependent() &&
        EvaluateObjectArgument(Info, Object: This, This&: ThisVal) &&
        !Info.EvalStatus.HasSideEffects)
      ThisPtr = &ThisVal;

    // Ignore any side-effects from a failed evaluation. This is safe because
    // they can't interfere with any other argument evaluation.
    Info.EvalStatus.HasSideEffects = false;
  }

  // Evaluate each argument into the fabricated call's parameter slots.
  CallRef Call = Info.CurrentCall->createCall(Callee);
  for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
       I != E; ++I) {
    unsigned Idx = I - Args.begin();
    // Ignore trailing arguments beyond the callee's declared parameters.
    if (Idx >= Callee->getNumParams())
      break;
    const ParmVarDecl *PVD = Callee->getParamDecl(i: Idx);
    if ((*I)->isValueDependent() ||
        !EvaluateCallArg(PVD, Arg: *I, Call, Info) ||
        Info.EvalStatus.HasSideEffects) {
      // If evaluation fails, throw away the argument entirely.
      if (APValue *Slot = Info.getParamSlot(Call, PVD))
        *Slot = APValue();
    }

    // Ignore any side-effects from a failed evaluation. This is safe because
    // they can't interfere with any other argument evaluation.
    Info.EvalStatus.HasSideEffects = false;
  }

  // Parameter cleanups happen in the caller and are not part of this
  // evaluation.
  Info.discardCleanups();
  Info.EvalStatus.HasSideEffects = false;

  // Build fake call to Callee.
  CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, This,
                       Call);
  // FIXME: Missing ExprWithCleanups in enable_if conditions?
  FullExpressionRAII Scope(Info);
  return Evaluate(Result&: Value, Info, E: this) && Scope.destroy() &&
         !Info.EvalStatus.HasSideEffects;
}
21856
/// Check whether the body of \p FD could be evaluated as a constant
/// expression for at least some argument values, appending explanatory
/// diagnostics to \p Diags. Returns true iff no diagnostics were produced.
bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
                                   SmallVectorImpl<
                                     PartialDiagnosticAt> &Diags) {
  // FIXME: It would be useful to check constexpr function templates, but at the
  // moment the constant expression evaluator cannot cope with the non-rigorous
  // ASTs which we build for dependent expressions.
  if (FD->isDependentContext())
    return true;

  // Record the function's name for -ftime-trace profiling output.
  llvm::TimeTraceScope TimeScope("isPotentialConstantExpr", [&] {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    FD->getNameForDiagnostic(OS, Policy: FD->getASTContext().getPrintingPolicy(),
                             /*Qualified=*/true);
    return Name;
  });

  Expr::EvalStatus Status;
  Status.Diag = &Diags;

  EvalInfo Info(FD->getASTContext(), Status,
                EvaluationMode::ConstantExpression);
  Info.InConstantContext = true;
  Info.CheckingPotentialConstantExpression = true;

  // The constexpr VM attempts to compile all methods to bytecode here.
  if (Info.EnableNewConstInterp) {
    Info.Ctx.getInterpContext().isPotentialConstantExpr(Parent&: Info, FD);
    return Diags.empty();
  }

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD);
  const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;

  // Fabricate an arbitrary expression on the stack and pretend that it
  // is a temporary being used as the 'this' pointer.
  LValue This;
  ImplicitValueInitExpr VIE(RD ? Info.Ctx.getCanonicalTagType(TD: RD)
                               : Info.Ctx.IntTy);
  This.set(B: {&VIE, Info.CurrentCall->Index});

  // No concrete arguments: parameters are treated as unknown.
  ArrayRef<const Expr*> Args;

  // We only care about the diagnostics produced, not the resulting value.
  APValue Scratch;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Val: FD)) {
    // Evaluate the call as a constant initializer, to allow the construction
    // of objects of non-literal types.
    Info.setEvaluatingDecl(Base: This.getLValueBase(), Value&: Scratch);
    HandleConstructorCall(E: &VIE, This, Args, Definition: CD, Info, Result&: Scratch);
  } else {
    SourceLocation Loc = FD->getLocation();
    // Only pass the fabricated `this` to implicit-object member functions.
    HandleFunctionCall(
        CallLoc: Loc, Callee: FD, ObjectArg: (MD && MD->isImplicitObjectMemberFunction()) ? &This : nullptr,
        E: &VIE, Args, Call: CallRef(), Body: FD->getBody(), Info, Result&: Scratch,
        /*ResultSlot=*/nullptr);
  }

  return Diags.empty();
}
21916
21917bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
21918 const FunctionDecl *FD,
21919 SmallVectorImpl<
21920 PartialDiagnosticAt> &Diags) {
21921 assert(!E->isValueDependent() &&
21922 "Expression evaluator can't be called on a dependent expression.");
21923
21924 Expr::EvalStatus Status;
21925 Status.Diag = &Diags;
21926
21927 EvalInfo Info(FD->getASTContext(), Status,
21928 EvaluationMode::ConstantExpressionUnevaluated);
21929 Info.InConstantContext = true;
21930 Info.CheckingPotentialConstantExpression = true;
21931
21932 if (Info.EnableNewConstInterp) {
21933 Info.Ctx.getInterpContext().isPotentialConstantExprUnevaluated(Parent&: Info, E, FD);
21934 return Diags.empty();
21935 }
21936
21937 // Fabricate a call stack frame to give the arguments a plausible cover story.
21938 CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr,
21939 /*CallExpr=*/nullptr, CallRef());
21940
21941 APValue ResultScratch;
21942 Evaluate(Result&: ResultScratch, Info, E);
21943 return Diags.empty();
21944}
21945
21946std::optional<uint64_t> Expr::tryEvaluateObjectSize(const ASTContext &Ctx,
21947 unsigned Type) const {
21948 if (!getType()->isPointerType())
21949 return std::nullopt;
21950
21951 Expr::EvalStatus Status;
21952 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
21953 if (Info.EnableNewConstInterp)
21954 return Info.Ctx.getInterpContext().tryEvaluateObjectSize(Parent&: Info, E: this, Kind: Type);
21955 return tryEvaluateBuiltinObjectSize(E: this, Type, Info);
21956}
21957
/// Compute the length of the NUL-terminated character sequence that \p E
/// points to, as __builtin_strlen would. If \p StringResult is non-null,
/// also accumulate the characters (excluding the terminator) into it.
/// Returns std::nullopt if the pointee cannot be read constantly.
static std::optional<uint64_t>
EvaluateBuiltinStrLen(const Expr *E, EvalInfo &Info,
                      std::string *StringResult) {
  // Only pointer-representable prvalues can designate a string.
  if (!E->getType()->hasPointerRepresentation() || !E->isPRValue())
    return std::nullopt;

  LValue String;

  if (!EvaluatePointer(E, Result&: String, Info))
    return std::nullopt;

  QualType CharTy = E->getType()->getPointeeType();

  // Fast path: if it's a string literal, search the string value.
  if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
          Val: String.getLValueBase().dyn_cast<const Expr *>())) {
    StringRef Str = S->getBytes();
    int64_t Off = String.Offset.getQuantity();
    // The offset must be in bounds (pointing at the implicit terminator,
    // i.e. Off == size, yields length 0), and we only handle single-byte
    // character types on this path.
    if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
        S->getCharByteWidth() == 1 &&
        // FIXME: Add fast-path for wchar_t too.
        Info.Ctx.hasSameUnqualifiedType(T1: CharTy, T2: Info.Ctx.CharTy)) {
      Str = Str.substr(Start: Off);

      // Truncate at an embedded NUL, if any; otherwise the whole literal
      // (up to its implicit terminator) is the string.
      StringRef::size_type Pos = Str.find(C: 0);
      if (Pos != StringRef::npos)
        Str = Str.substr(Start: 0, N: Pos);

      if (StringResult)
        *StringResult = Str;
      return Str.size();
    }

    // Fall through to slow path.
  }

  // Slow path: scan the bytes of the string looking for the terminating 0.
  for (uint64_t Strlen = 0; /**/; ++Strlen) {
    APValue Char;
    // Read one character; failure (e.g. out-of-bounds) aborts the scan.
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: CharTy, LVal: String, RVal&: Char) ||
        !Char.isInt())
      return std::nullopt;
    if (!Char.getInt())
      return Strlen;
    else if (StringResult)
      StringResult->push_back(c: Char.getInt().getExtValue());
    // Advance the lvalue to the next array element.
    if (!HandleLValueArrayAdjustment(Info, E, LVal&: String, EltTy: CharTy, Adjustment: 1))
      return std::nullopt;
  }
}
22008
22009std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const {
22010 Expr::EvalStatus Status;
22011 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
22012 std::string StringResult;
22013
22014 if (Info.EnableNewConstInterp) {
22015 if (!Info.Ctx.getInterpContext().evaluateString(Parent&: Info, E: this, Result&: StringResult))
22016 return std::nullopt;
22017 return StringResult;
22018 }
22019
22020 if (EvaluateBuiltinStrLen(E: this, Info, StringResult: &StringResult))
22021 return StringResult;
22022 return std::nullopt;
22023}
22024
/// Shared implementation of Expr::EvaluateCharRangeAsString: evaluate
/// \p SizeExpression to a character count, then read that many characters
/// starting at \p PtrExpression into \p Result, which is either a
/// std::string or an APValue array (selected by the template parameter).
template <typename T>
static bool EvaluateCharRangeAsStringImpl(const Expr *, T &Result,
                                          const Expr *SizeExpression,
                                          const Expr *PtrExpression,
                                          ASTContext &Ctx,
                                          Expr::EvalResult &Status) {
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
  Info.InConstantContext = true;

  // The bytecode interpreter has its own implementation of this evaluation.
  if (Info.EnableNewConstInterp)
    return Info.Ctx.getInterpContext().evaluateCharRange(Info, SizeExpression,
                                                         PtrExpression, Result);

  LValue String;
  FullExpressionRAII Scope(Info);
  APSInt SizeValue;
  if (!::EvaluateInteger(E: SizeExpression, Result&: SizeValue, Info))
    return false;

  uint64_t Size = SizeValue.getZExtValue();

  // FIXME: better protect against invalid or excessive sizes
  if constexpr (std::is_same_v<APValue, T>)
    Result = APValue(APValue::UninitArray{}, Size, Size);
  else {
    // std::string output: pre-allocate when the size looks sane.
    if (Size < Result.max_size())
      Result.reserve(Size);
  }
  if (!::EvaluatePointer(E: PtrExpression, Result&: String, Info))
    return false;

  QualType CharTy = PtrExpression->getType()->getPointeeType();
  // Read the characters one at a time, advancing the lvalue as we go.
  for (uint64_t I = 0; I < Size; ++I) {
    APValue Char;
    if (!handleLValueToRValueConversion(Info, Conv: PtrExpression, Type: CharTy, LVal: String,
                                        RVal&: Char))
      return false;

    if constexpr (std::is_same_v<APValue, T>) {
      Result.getArrayInitializedElt(I) = std::move(Char);
    } else {
      APSInt C = Char.getInt();

      assert(C.getBitWidth() <= 8 &&
             "string element not representable in char");

      Result.push_back(static_cast<char>(C.getExtValue()));
    }

    if (!HandleLValueArrayAdjustment(Info, E: PtrExpression, LVal&: String, EltTy: CharTy, Adjustment: 1))
      return false;
  }

  // Run end-of-full-expression cleanups and check for leaked allocations.
  return Scope.destroy() && CheckMemoryLeaks(Info);
}
22080
/// Evaluate a (size, pointer) expression pair as a character range and
/// collect the characters into \p Result as a std::string.
bool Expr::EvaluateCharRangeAsString(std::string &Result,
                                     const Expr *SizeExpression,
                                     const Expr *PtrExpression, ASTContext &Ctx,
                                     EvalResult &Status) const {
  return EvaluateCharRangeAsStringImpl(this, Result, SizeExpression,
                                       PtrExpression, Ctx, Status);
}
22088
/// Evaluate a (size, pointer) expression pair as a character range and
/// collect the characters into \p Result as an APValue array.
bool Expr::EvaluateCharRangeAsString(APValue &Result,
                                     const Expr *SizeExpression,
                                     const Expr *PtrExpression, ASTContext &Ctx,
                                     EvalResult &Status) const {
  return EvaluateCharRangeAsStringImpl(this, Result, SizeExpression,
                                       PtrExpression, Ctx, Status);
}
22096
22097std::optional<uint64_t> Expr::tryEvaluateStrLen(const ASTContext &Ctx) const {
22098 Expr::EvalStatus Status;
22099 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
22100
22101 if (Info.EnableNewConstInterp)
22102 return Info.Ctx.getInterpContext().evaluateStrlen(Parent&: Info, E: this);
22103 return EvaluateBuiltinStrLen(E: this, Info);
22104}
22105
namespace {
// Subobject handler for findSubobject: any subobject that is successfully
// located is, by construction, within its lifetime, so found() always
// reports true; a failed lookup propagates "unknown" (nullopt).
struct IsWithinLifetimeHandler {
  EvalInfo &Info;
  static constexpr AccessKinds AccessKind = AccessKinds::AK_IsWithinLifetime;
  using result_type = std::optional<bool>;
  std::optional<bool> failed() { return std::nullopt; }
  template <typename T>
  std::optional<bool> found(T &Subobj, QualType SubobjType) {
    return true;
  }
};

// Evaluate a call to __builtin_is_within_lifetime. Returns true/false when
// the lifetime question can be answered, and std::nullopt when evaluation
// fails (a diagnostic may have been emitted via Error below).
std::optional<bool> EvaluateBuiltinIsWithinLifetime(IntExprEvaluator &IEE,
                                                    const CallExpr *E) {
  EvalInfo &Info = IEE.Info;
  // Sometimes this is called during some sorts of constant folding / early
  // evaluation. These are meant for non-constant expressions and are not
  // necessary since this consteval builtin will never be evaluated at runtime.
  // Just fail to evaluate when not in a constant context.
  if (!Info.InConstantContext)
    return std::nullopt;
  assert(E->getBuiltinCallee() == Builtin::BI__builtin_is_within_lifetime);
  const Expr *Arg = E->getArg(Arg: 0);
  if (Arg->isValueDependent())
    return std::nullopt;
  LValue Val;
  if (!EvaluatePointer(E: Arg, Result&: Val, Info))
    return std::nullopt;

  // A pointer with unknown constexpr provenance is assumed to be alive.
  if (Val.allowConstexprUnknown())
    return true;

  // Emit err_invalid_is_within_lifetime with the given reason code,
  // attributing the call to std::is_within_lifetime when we were invoked
  // through that standard-library wrapper.
  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = Info.CurrentCall->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr(Str: "is_within_lifetime");
    }
    Info.CCEDiag(Loc: CalledFromStd ? Info.CurrentCall->getCallRange().getBegin()
                               : E->getExprLoc(),
                 DiagId: diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return std::nullopt;
  };
  // C++2c [meta.const.eval]p4:
  //   During the evaluation of an expression E as a core constant expression, a
  //   call to this function is ill-formed unless p points to an object that is
  //   usable in constant expressions or whose complete object's lifetime began
  //   within E.

  // Make sure it points to an object
  // nullptr does not point to an object
  if (Val.isNullPointer() || Val.getLValueBase().isNull())
    return Error(0);
  QualType T = Val.getLValueBase().getType();
  assert(!T->isFunctionType() &&
         "Pointers to functions should have been typed as function pointers "
         "which would have been rejected earlier");
  assert(T->isObjectType());
  // Hypothetical array element is not an object
  if (Val.getLValueDesignator().isOnePastTheEnd())
    return Error(1);
  assert(Val.getLValueDesignator().isValidSubobject() &&
         "Unchecked case for valid subobject");
  // All other ill-formed values should have failed EvaluatePointer, so the
  // object should be a pointer to an object that is usable in a constant
  // expression or whose complete lifetime began within the expression
  CompleteObject CO =
      findCompleteObject(Info, E, AK: AccessKinds::AK_IsWithinLifetime, LVal: Val, LValType: T);
  // The lifetime hasn't begun yet if we are still evaluating the
  // initializer ([basic.life]p(1.2))
  if (Info.EvaluatingDeclValue && CO.Value == Info.EvaluatingDeclValue)
    return Error(2);

  // No complete object: the pointee is not within its lifetime.
  if (!CO)
    return false;
  // Walk down to the designated subobject; reaching it means it is alive.
  IsWithinLifetimeHandler handler{.Info: Info};
  return findSubobject(Info, E, Obj: CO, Sub: Val.getLValueDesignator(), handler);
}
} // namespace
22189