1//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Expr constant evaluator.
10//
11// Constant expression evaluation produces four main results:
12//
13// * A success/failure flag indicating whether constant folding was successful.
14// This is the 'bool' return value used by most of the code in this file. A
15// 'false' return value indicates that constant folding has failed, and any
16// appropriate diagnostic has already been produced.
17//
18// * An evaluated result, valid only if constant folding has not failed.
19//
20// * A flag indicating if evaluation encountered (unevaluated) side-effects.
21// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
22// where it is possible to determine the evaluated result regardless.
23//
24// * A set of notes indicating why the evaluation was not a constant expression
25// (under the C++11 / C++1y rules only, at the moment), or, if folding failed
26// too, why the expression could not be folded.
27//
28// If we are checking for a potential constant expression, failure to constant
29// fold a potential constant sub-expression will be indicated by a 'false'
30// return value (the expression could not be folded) and no diagnostic (the
31// expression is not necessarily non-constant).
32//
33//===----------------------------------------------------------------------===//
34
35#include "ByteCode/Context.h"
36#include "ByteCode/Frame.h"
37#include "ByteCode/State.h"
38#include "ExprConstShared.h"
39#include "clang/AST/APValue.h"
40#include "clang/AST/ASTContext.h"
41#include "clang/AST/ASTLambda.h"
42#include "clang/AST/Attr.h"
43#include "clang/AST/CXXInheritance.h"
44#include "clang/AST/CharUnits.h"
45#include "clang/AST/CurrentSourceLocExprScope.h"
46#include "clang/AST/Expr.h"
47#include "clang/AST/InferAlloc.h"
48#include "clang/AST/OSLog.h"
49#include "clang/AST/OptionalDiagnostic.h"
50#include "clang/AST/RecordLayout.h"
51#include "clang/AST/StmtVisitor.h"
52#include "clang/AST/Type.h"
53#include "clang/AST/TypeLoc.h"
54#include "clang/Basic/Builtins.h"
55#include "clang/Basic/DiagnosticSema.h"
56#include "clang/Basic/TargetBuiltins.h"
57#include "clang/Basic/TargetInfo.h"
58#include "llvm/ADT/APFixedPoint.h"
59#include "llvm/ADT/Sequence.h"
60#include "llvm/ADT/SmallBitVector.h"
61#include "llvm/ADT/StringExtras.h"
62#include "llvm/Support/Casting.h"
63#include "llvm/Support/Debug.h"
64#include "llvm/Support/SaveAndRestore.h"
65#include "llvm/Support/SipHash.h"
66#include "llvm/Support/TimeProfiler.h"
67#include "llvm/Support/raw_ostream.h"
68#include <cstring>
69#include <functional>
70#include <limits>
71#include <optional>
72
73#define DEBUG_TYPE "exprconstant"
74
75using namespace clang;
76using llvm::APFixedPoint;
77using llvm::APInt;
78using llvm::APSInt;
79using llvm::APFloat;
80using llvm::FixedPointSemantics;
81
82namespace {
83 struct LValue;
84 class CallStackFrame;
85 class EvalInfo;
86
87 using SourceLocExprScopeGuard =
88 CurrentSourceLocExprScope::SourceLocExprScopeGuard;
89
  /// Get the type of the object an LValueBase refers to. For a
  /// constexpr-unknown variable this may be a reference type (see
  /// findMostDerivedSubobject, which strips the reference).
  static QualType getType(APValue::LValueBase B) {
    return B.getType();
  }
93
94 /// Get an LValue path entry, which is known to not be an array index, as a
95 /// field declaration.
96 static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
97 return dyn_cast_or_null<FieldDecl>(Val: E.getAsBaseOrMember().getPointer());
98 }
99 /// Get an LValue path entry, which is known to not be an array index, as a
100 /// base class declaration.
101 static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
102 return dyn_cast_or_null<CXXRecordDecl>(Val: E.getAsBaseOrMember().getPointer());
103 }
  /// Determine whether this LValue path entry for a base class names a virtual
  /// base class.
  static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
    // The integer half of the BaseOrMember pair carries the virtual-base flag.
    return E.getAsBaseOrMember().getInt();
  }
109
110 /// Given an expression, determine the type used to store the result of
111 /// evaluating that expression.
112 static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
113 if (E->isPRValue())
114 return E->getType();
115 return Ctx.getLValueReferenceType(T: E->getType());
116 }
117
118 /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
119 /// This will look through a single cast.
120 ///
121 /// Returns null if we couldn't unwrap a function with alloc_size.
122 static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
123 if (!E->getType()->isPointerType())
124 return nullptr;
125
126 E = E->IgnoreParens();
127 // If we're doing a variable assignment from e.g. malloc(N), there will
128 // probably be a cast of some kind. In exotic cases, we might also see a
129 // top-level ExprWithCleanups. Ignore them either way.
130 if (const auto *FE = dyn_cast<FullExpr>(Val: E))
131 E = FE->getSubExpr()->IgnoreParens();
132
133 if (const auto *Cast = dyn_cast<CastExpr>(Val: E))
134 E = Cast->getSubExpr()->IgnoreParens();
135
136 if (const auto *CE = dyn_cast<CallExpr>(Val: E))
137 return CE->getCalleeAllocSizeAttr() ? CE : nullptr;
138 return nullptr;
139 }
140
141 /// Determines whether or not the given Base contains a call to a function
142 /// with the alloc_size attribute.
143 static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
144 const auto *E = Base.dyn_cast<const Expr *>();
145 return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
146 }
147
148 /// Determines whether the given kind of constant expression is only ever
149 /// used for name mangling. If so, it's permitted to reference things that we
150 /// can't generate code for (in particular, dllimported functions).
151 static bool isForManglingOnly(ConstantExprKind Kind) {
152 switch (Kind) {
153 case ConstantExprKind::Normal:
154 case ConstantExprKind::ClassTemplateArgument:
155 case ConstantExprKind::ImmediateInvocation:
156 // Note that non-type template arguments of class type are emitted as
157 // template parameter objects.
158 return false;
159
160 case ConstantExprKind::NonClassTemplateArgument:
161 return true;
162 }
163 llvm_unreachable("unknown ConstantExprKind");
164 }
165
166 static bool isTemplateArgument(ConstantExprKind Kind) {
167 switch (Kind) {
168 case ConstantExprKind::Normal:
169 case ConstantExprKind::ImmediateInvocation:
170 return false;
171
172 case ConstantExprKind::ClassTemplateArgument:
173 case ConstantExprKind::NonClassTemplateArgument:
174 return true;
175 }
176 llvm_unreachable("unknown ConstantExprKind");
177 }
178
  /// The bound to claim that an array of unknown bound has.
  /// The value in MostDerivedArraySize is undefined in this case. So, set it
  /// to an arbitrary value that's likely to loudly break things if it's used.
  /// (max()/2 presumably leaves headroom so nearby index arithmetic does not
  /// wrap — confirm before relying on it.)
  static const uint64_t AssumedSizeForUnsizedArray =
      std::numeric_limits<uint64_t>::max() / 2;
184
185 /// Determines if an LValue with the given LValueBase will have an unsized
186 /// array in its designator.
187 /// Find the path length and type of the most-derived subobject in the given
188 /// path, and find the size of the containing array, if any.
189 static unsigned
190 findMostDerivedSubobject(const ASTContext &Ctx, APValue::LValueBase Base,
191 ArrayRef<APValue::LValuePathEntry> Path,
192 uint64_t &ArraySize, QualType &Type, bool &IsArray,
193 bool &FirstEntryIsUnsizedArray) {
194 // This only accepts LValueBases from APValues, and APValues don't support
195 // arrays that lack size info.
196 assert(!isBaseAnAllocSizeCall(Base) &&
197 "Unsized arrays shouldn't appear here");
198 unsigned MostDerivedLength = 0;
199 // The type of Base is a reference type if the base is a constexpr-unknown
200 // variable. In that case, look through the reference type.
201 Type = getType(B: Base).getNonReferenceType();
202
203 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
204 if (Type->isArrayType()) {
205 const ArrayType *AT = Ctx.getAsArrayType(T: Type);
206 Type = AT->getElementType();
207 MostDerivedLength = I + 1;
208 IsArray = true;
209
210 if (auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) {
211 ArraySize = CAT->getZExtSize();
212 } else {
213 assert(I == 0 && "unexpected unsized array designator");
214 FirstEntryIsUnsizedArray = true;
215 ArraySize = AssumedSizeForUnsizedArray;
216 }
217 } else if (Type->isAnyComplexType()) {
218 const ComplexType *CT = Type->castAs<ComplexType>();
219 Type = CT->getElementType();
220 ArraySize = 2;
221 MostDerivedLength = I + 1;
222 IsArray = true;
223 } else if (const auto *VT = Type->getAs<VectorType>()) {
224 Type = VT->getElementType();
225 ArraySize = VT->getNumElements();
226 MostDerivedLength = I + 1;
227 IsArray = true;
228 } else if (const FieldDecl *FD = getAsField(E: Path[I])) {
229 Type = FD->getType();
230 ArraySize = 0;
231 MostDerivedLength = I + 1;
232 IsArray = false;
233 } else {
234 // Path[I] describes a base class.
235 ArraySize = 0;
236 IsArray = false;
237 }
238 }
239 return MostDerivedLength;
240 }
241
242 /// A path from a glvalue to a subobject of that glvalue.
243 struct SubobjectDesignator {
244 /// True if the subobject was named in a manner not supported by C++11. Such
245 /// lvalues can still be folded, but they are not core constant expressions
246 /// and we cannot perform lvalue-to-rvalue conversions on them.
247 LLVM_PREFERRED_TYPE(bool)
248 unsigned Invalid : 1;
249
250 /// Is this a pointer one past the end of an object?
251 LLVM_PREFERRED_TYPE(bool)
252 unsigned IsOnePastTheEnd : 1;
253
254 /// Indicator of whether the first entry is an unsized array.
255 LLVM_PREFERRED_TYPE(bool)
256 unsigned FirstEntryIsAnUnsizedArray : 1;
257
258 /// Indicator of whether the most-derived object is an array element.
259 LLVM_PREFERRED_TYPE(bool)
260 unsigned MostDerivedIsArrayElement : 1;
261
262 /// The length of the path to the most-derived object of which this is a
263 /// subobject.
264 unsigned MostDerivedPathLength : 28;
265
266 /// The size of the array of which the most-derived object is an element.
267 /// This will always be 0 if the most-derived object is not an array
268 /// element. 0 is not an indicator of whether or not the most-derived object
269 /// is an array, however, because 0-length arrays are allowed.
270 ///
271 /// If the current array is an unsized array, the value of this is
272 /// undefined.
273 uint64_t MostDerivedArraySize;
274 /// The type of the most derived object referred to by this address.
275 QualType MostDerivedType;
276
277 typedef APValue::LValuePathEntry PathEntry;
278
279 /// The entries on the path from the glvalue to the designated subobject.
280 SmallVector<PathEntry, 8> Entries;
281
282 SubobjectDesignator() : Invalid(true) {}
283
284 explicit SubobjectDesignator(QualType T)
285 : Invalid(false), IsOnePastTheEnd(false),
286 FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
287 MostDerivedPathLength(0), MostDerivedArraySize(0),
288 MostDerivedType(T.isNull() ? QualType() : T.getNonReferenceType()) {}
289
290 SubobjectDesignator(const ASTContext &Ctx, const APValue &V)
291 : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
292 FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
293 MostDerivedPathLength(0), MostDerivedArraySize(0) {
294 assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
295 if (!Invalid) {
296 IsOnePastTheEnd = V.isLValueOnePastTheEnd();
297 llvm::append_range(C&: Entries, R: V.getLValuePath());
298 if (V.getLValueBase()) {
299 bool IsArray = false;
300 bool FirstIsUnsizedArray = false;
301 MostDerivedPathLength = findMostDerivedSubobject(
302 Ctx, Base: V.getLValueBase(), Path: V.getLValuePath(), ArraySize&: MostDerivedArraySize,
303 Type&: MostDerivedType, IsArray, FirstEntryIsUnsizedArray&: FirstIsUnsizedArray);
304 MostDerivedIsArrayElement = IsArray;
305 FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
306 }
307 }
308 }
309
310 void truncate(ASTContext &Ctx, APValue::LValueBase Base,
311 unsigned NewLength) {
312 if (Invalid)
313 return;
314
315 assert(Base && "cannot truncate path for null pointer");
316 assert(NewLength <= Entries.size() && "not a truncation");
317
318 if (NewLength == Entries.size())
319 return;
320 Entries.resize(N: NewLength);
321
322 bool IsArray = false;
323 bool FirstIsUnsizedArray = false;
324 MostDerivedPathLength = findMostDerivedSubobject(
325 Ctx, Base, Path: Entries, ArraySize&: MostDerivedArraySize, Type&: MostDerivedType, IsArray,
326 FirstEntryIsUnsizedArray&: FirstIsUnsizedArray);
327 MostDerivedIsArrayElement = IsArray;
328 FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
329 }
330
331 void setInvalid() {
332 Invalid = true;
333 Entries.clear();
334 }
335
336 /// Determine whether the most derived subobject is an array without a
337 /// known bound.
338 bool isMostDerivedAnUnsizedArray() const {
339 assert(!Invalid && "Calling this makes no sense on invalid designators");
340 return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
341 }
342
343 /// Determine what the most derived array's size is. Results in an assertion
344 /// failure if the most derived array lacks a size.
345 uint64_t getMostDerivedArraySize() const {
346 assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
347 return MostDerivedArraySize;
348 }
349
350 /// Determine whether this is a one-past-the-end pointer.
351 bool isOnePastTheEnd() const {
352 assert(!Invalid);
353 if (IsOnePastTheEnd)
354 return true;
355 if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
356 Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
357 MostDerivedArraySize)
358 return true;
359 return false;
360 }
361
362 /// Get the range of valid index adjustments in the form
363 /// {maximum value that can be subtracted from this pointer,
364 /// maximum value that can be added to this pointer}
365 std::pair<uint64_t, uint64_t> validIndexAdjustments() {
366 if (Invalid || isMostDerivedAnUnsizedArray())
367 return {0, 0};
368
369 // [expr.add]p4: For the purposes of these operators, a pointer to a
370 // nonarray object behaves the same as a pointer to the first element of
371 // an array of length one with the type of the object as its element type.
372 bool IsArray = MostDerivedPathLength == Entries.size() &&
373 MostDerivedIsArrayElement;
374 uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
375 : (uint64_t)IsOnePastTheEnd;
376 uint64_t ArraySize =
377 IsArray ? getMostDerivedArraySize() : (uint64_t)1;
378 return {ArrayIndex, ArraySize - ArrayIndex};
379 }
380
381 /// Check that this refers to a valid subobject.
382 bool isValidSubobject() const {
383 if (Invalid)
384 return false;
385 return !isOnePastTheEnd();
386 }
387 /// Check that this refers to a valid subobject, and if not, produce a
388 /// relevant diagnostic and set the designator as invalid.
389 bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
390
391 /// Get the type of the designated object.
392 QualType getType(ASTContext &Ctx) const {
393 assert(!Invalid && "invalid designator has no subobject type");
394 return MostDerivedPathLength == Entries.size()
395 ? MostDerivedType
396 : Ctx.getCanonicalTagType(TD: getAsBaseClass(E: Entries.back()));
397 }
398
399 /// Update this designator to refer to the first element within this array.
400 void addArrayUnchecked(const ConstantArrayType *CAT) {
401 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: 0));
402
403 // This is a most-derived object.
404 MostDerivedType = CAT->getElementType();
405 MostDerivedIsArrayElement = true;
406 MostDerivedArraySize = CAT->getZExtSize();
407 MostDerivedPathLength = Entries.size();
408 }
409 /// Update this designator to refer to the first element within the array of
410 /// elements of type T. This is an array of unknown size.
411 void addUnsizedArrayUnchecked(QualType ElemTy) {
412 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: 0));
413
414 MostDerivedType = ElemTy;
415 MostDerivedIsArrayElement = true;
416 // The value in MostDerivedArraySize is undefined in this case. So, set it
417 // to an arbitrary value that's likely to loudly break things if it's
418 // used.
419 MostDerivedArraySize = AssumedSizeForUnsizedArray;
420 MostDerivedPathLength = Entries.size();
421 }
422 /// Update this designator to refer to the given base or member of this
423 /// object.
424 void addDeclUnchecked(const Decl *D, bool Virtual = false) {
425 Entries.push_back(Elt: APValue::BaseOrMemberType(D, Virtual));
426
427 // If this isn't a base class, it's a new most-derived object.
428 if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D)) {
429 MostDerivedType = FD->getType();
430 MostDerivedIsArrayElement = false;
431 MostDerivedArraySize = 0;
432 MostDerivedPathLength = Entries.size();
433 }
434 }
435 /// Update this designator to refer to the given complex component.
436 void addComplexUnchecked(QualType EltTy, bool Imag) {
437 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: Imag));
438
439 // This is technically a most-derived object, though in practice this
440 // is unlikely to matter.
441 MostDerivedType = EltTy;
442 MostDerivedIsArrayElement = true;
443 MostDerivedArraySize = 2;
444 MostDerivedPathLength = Entries.size();
445 }
446
447 void addVectorElementUnchecked(QualType EltTy, uint64_t Size,
448 uint64_t Idx) {
449 Entries.push_back(Elt: PathEntry::ArrayIndex(Index: Idx));
450 MostDerivedType = EltTy;
451 MostDerivedPathLength = Entries.size();
452 MostDerivedArraySize = 0;
453 MostDerivedIsArrayElement = false;
454 }
455
456 void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
457 void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
458 const APSInt &N);
459 /// Add N to the address of this subobject.
460 void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N, const LValue &LV);
461 };
462
  /// A scope at the end of which an object can need to be destroyed.
  enum class ScopeKind {
    Block,          ///< A block (compound statement) scope.
    FullExpression, ///< The end of a full-expression.
    Call            ///< The duration of a function call.
  };
469
470 /// A reference to a particular call and its arguments.
471 struct CallRef {
472 CallRef() : OrigCallee(), CallIndex(0), Version() {}
473 CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version)
474 : OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {}
475
476 explicit operator bool() const { return OrigCallee; }
477
478 /// Get the parameter that the caller initialized, corresponding to the
479 /// given parameter in the callee.
480 const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const {
481 return OrigCallee ? OrigCallee->getParamDecl(i: PVD->getFunctionScopeIndex())
482 : PVD;
483 }
484
485 /// The callee at the point where the arguments were evaluated. This might
486 /// be different from the actual callee (a different redeclaration, or a
487 /// virtual override), but this function's parameters are the ones that
488 /// appear in the parameter map.
489 const FunctionDecl *OrigCallee;
490 /// The call index of the frame that holds the argument values.
491 unsigned CallIndex;
492 /// The version of the parameters corresponding to this call.
493 unsigned Version;
494 };
495
496 /// A stack frame in the constexpr call stack.
497 class CallStackFrame : public interp::Frame {
498 public:
499 EvalInfo &Info;
500
501 /// Parent - The caller of this stack frame.
502 CallStackFrame *Caller;
503
504 /// Callee - The function which was called.
505 const FunctionDecl *Callee;
506
507 /// This - The binding for the this pointer in this call, if any.
508 const LValue *This;
509
510 /// CallExpr - The syntactical structure of member function calls
511 const Expr *CallExpr;
512
513 /// Information on how to find the arguments to this call. Our arguments
514 /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
515 /// key and this value as the version.
516 CallRef Arguments;
517
518 /// Source location information about the default argument or default
519 /// initializer expression we're evaluating, if any.
520 CurrentSourceLocExprScope CurSourceLocExprScope;
521
522 // Note that we intentionally use std::map here so that references to
523 // values are stable.
524 typedef std::pair<const void *, unsigned> MapKeyTy;
525 typedef std::map<MapKeyTy, APValue> MapTy;
526 /// Temporaries - Temporary lvalues materialized within this stack frame.
527 MapTy Temporaries;
528
529 /// CallRange - The source range of the call expression for this call.
530 SourceRange CallRange;
531
532 /// Index - The call index of this call.
533 unsigned Index;
534
535 /// The stack of integers for tracking version numbers for temporaries.
536 SmallVector<unsigned, 2> TempVersionStack = {1};
537 unsigned CurTempVersion = TempVersionStack.back();
538
539 unsigned getTempVersion() const { return TempVersionStack.back(); }
540
541 void pushTempVersion() {
542 TempVersionStack.push_back(Elt: ++CurTempVersion);
543 }
544
545 void popTempVersion() {
546 TempVersionStack.pop_back();
547 }
548
549 CallRef createCall(const FunctionDecl *Callee) {
550 return {Callee, Index, ++CurTempVersion};
551 }
552
553 // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
554 // on the overall stack usage of deeply-recursing constexpr evaluations.
555 // (We should cache this map rather than recomputing it repeatedly.)
556 // But let's try this and see how it goes; we can look into caching the map
557 // as a later change.
558
559 /// LambdaCaptureFields - Mapping from captured variables/this to
560 /// corresponding data members in the closure class.
561 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
562 FieldDecl *LambdaThisCaptureField = nullptr;
563
564 CallStackFrame(EvalInfo &Info, SourceRange CallRange,
565 const FunctionDecl *Callee, const LValue *This,
566 const Expr *CallExpr, CallRef Arguments);
567 ~CallStackFrame();
568
569 // Return the temporary for Key whose version number is Version.
570 APValue *getTemporary(const void *Key, unsigned Version) {
571 MapKeyTy KV(Key, Version);
572 auto LB = Temporaries.lower_bound(x: KV);
573 if (LB != Temporaries.end() && LB->first == KV)
574 return &LB->second;
575 return nullptr;
576 }
577
578 // Return the current temporary for Key in the map.
579 APValue *getCurrentTemporary(const void *Key) {
580 auto UB = Temporaries.upper_bound(x: MapKeyTy(Key, UINT_MAX));
581 if (UB != Temporaries.begin() && std::prev(x: UB)->first.first == Key)
582 return &std::prev(x: UB)->second;
583 return nullptr;
584 }
585
586 // Return the version number of the current temporary for Key.
587 unsigned getCurrentTemporaryVersion(const void *Key) const {
588 auto UB = Temporaries.upper_bound(x: MapKeyTy(Key, UINT_MAX));
589 if (UB != Temporaries.begin() && std::prev(x: UB)->first.first == Key)
590 return std::prev(x: UB)->first.second;
591 return 0;
592 }
593
594 /// Allocate storage for an object of type T in this stack frame.
595 /// Populates LV with a handle to the created object. Key identifies
596 /// the temporary within the stack frame, and must not be reused without
597 /// bumping the temporary version number.
598 template<typename KeyT>
599 APValue &createTemporary(const KeyT *Key, QualType T,
600 ScopeKind Scope, LValue &LV);
601
602 /// Allocate storage for a parameter of a function call made in this frame.
603 APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
604
605 void describe(llvm::raw_ostream &OS) const override;
606
607 Frame *getCaller() const override { return Caller; }
608 SourceRange getCallRange() const override { return CallRange; }
609 const FunctionDecl *getCallee() const override { return Callee; }
610
611 bool isStdFunction() const {
612 for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
613 if (DC->isStdNamespace())
614 return true;
615 return false;
616 }
617
618 /// Whether we're in a context where [[msvc::constexpr]] evaluation is
619 /// permitted. See MSConstexprDocs for description of permitted contexts.
620 bool CanEvalMSConstexpr = false;
621
622 private:
623 APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
624 ScopeKind Scope);
625 };
626
627 /// Temporarily override 'this'.
628 class ThisOverrideRAII {
629 public:
630 ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
631 : Frame(Frame), OldThis(Frame.This) {
632 if (Enable)
633 Frame.This = NewThis;
634 }
635 ~ThisOverrideRAII() {
636 Frame.This = OldThis;
637 }
638 private:
639 CallStackFrame &Frame;
640 const LValue *OldThis;
641 };
642
643 // A shorthand time trace scope struct, prints source range, for example
644 // {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}}
645 class ExprTimeTraceScope {
646 public:
647 ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name)
648 : TimeScope(Name, [E, &Ctx] {
649 return E->getSourceRange().printToString(SM: Ctx.getSourceManager());
650 }) {}
651
652 private:
653 llvm::TimeTraceScope TimeScope;
654 };
655
  /// RAII object used to change the current ability of
  /// [[msvc::constexpr]] evaluation.
  struct MSConstexprContextRAII {
    CallStackFrame &Frame;
    // Previous value of Frame.CanEvalMSConstexpr, restored on destruction.
    bool OldValue;
    explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value)
        : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) {
      Frame.CanEvalMSConstexpr = Value;
    }

    ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; }
  };
668}
669
670static bool HandleDestruction(EvalInfo &Info, const Expr *E,
671 const LValue &This, QualType ThisType);
672static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
673 APValue::LValueBase LVBase, APValue &Value,
674 QualType T);
675
676namespace {
677 /// A cleanup, and a flag indicating whether it is lifetime-extended.
678 class Cleanup {
679 llvm::PointerIntPair<APValue*, 2, ScopeKind> Value;
680 APValue::LValueBase Base;
681 QualType T;
682
683 public:
684 Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
685 ScopeKind Scope)
686 : Value(Val, Scope), Base(Base), T(T) {}
687
688 /// Determine whether this cleanup should be performed at the end of the
689 /// given kind of scope.
690 bool isDestroyedAtEndOf(ScopeKind K) const {
691 return (int)Value.getInt() >= (int)K;
692 }
693 bool endLifetime(EvalInfo &Info, bool RunDestructors) {
694 if (RunDestructors) {
695 SourceLocation Loc;
696 if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
697 Loc = VD->getLocation();
698 else if (const Expr *E = Base.dyn_cast<const Expr*>())
699 Loc = E->getExprLoc();
700 return HandleDestruction(Info, Loc, LVBase: Base, Value&: *Value.getPointer(), T);
701 }
702 *Value.getPointer() = APValue();
703 return true;
704 }
705
706 bool hasSideEffect() {
707 return T.isDestructedType();
708 }
709 };
710
711 /// A reference to an object whose construction we are currently evaluating.
712 struct ObjectUnderConstruction {
713 APValue::LValueBase Base;
714 ArrayRef<APValue::LValuePathEntry> Path;
715 friend bool operator==(const ObjectUnderConstruction &LHS,
716 const ObjectUnderConstruction &RHS) {
717 return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
718 }
719 friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
720 return llvm::hash_combine(args: Obj.Base, args: Obj.Path);
721 }
722 };
  /// The phase of construction or destruction an object is in; tracked per
  /// object in EvalInfo::ObjectsUnderConstruction and advanced by
  /// EvaluatingConstructorRAII / EvaluatingDestructorRAII.
  enum class ConstructionPhase {
    None,
    /// Base-class subobjects are being constructed.
    Bases,
    /// All bases are constructed.
    AfterBases,
    /// All fields are constructed.
    AfterFields,
    /// The destructor is running.
    Destroying,
    /// The destructor is destroying base-class subobjects.
    DestroyingBases
  };
731}
732
733namespace llvm {
734template<> struct DenseMapInfo<ObjectUnderConstruction> {
735 using Base = DenseMapInfo<APValue::LValueBase>;
736 static ObjectUnderConstruction getEmptyKey() {
737 return {.Base: Base::getEmptyKey(), .Path: {}}; }
738 static ObjectUnderConstruction getTombstoneKey() {
739 return {.Base: Base::getTombstoneKey(), .Path: {}};
740 }
741 static unsigned getHashValue(const ObjectUnderConstruction &Object) {
742 return hash_value(Obj: Object);
743 }
744 static bool isEqual(const ObjectUnderConstruction &LHS,
745 const ObjectUnderConstruction &RHS) {
746 return LHS == RHS;
747 }
748};
749}
750
751namespace {
752 /// A dynamically-allocated heap object.
753 struct DynAlloc {
754 /// The value of this heap-allocated object.
755 APValue Value;
756 /// The allocating expression; used for diagnostics. Either a CXXNewExpr
757 /// or a CallExpr (the latter is for direct calls to operator new inside
758 /// std::allocator<T>::allocate).
759 const Expr *AllocExpr = nullptr;
760
761 enum Kind {
762 New,
763 ArrayNew,
764 StdAllocator
765 };
766
767 /// Get the kind of the allocation. This must match between allocation
768 /// and deallocation.
769 Kind getKind() const {
770 if (auto *NE = dyn_cast<CXXNewExpr>(Val: AllocExpr))
771 return NE->isArray() ? ArrayNew : New;
772 assert(isa<CallExpr>(AllocExpr));
773 return StdAllocator;
774 }
775 };
776
  /// Strict weak ordering for DynamicAllocLValue keys, by allocation index;
  /// used by EvalInfo::HeapAllocs.
  struct DynAllocOrder {
    bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
      return L.getIndex() < R.getIndex();
    }
  };
782
783 /// EvalInfo - This is a private struct used by the evaluator to capture
784 /// information about a subexpression as it is folded. It retains information
785 /// about the AST context, but also maintains information about the folded
786 /// expression.
787 ///
788 /// If an expression could be evaluated, it is still possible it is not a C
789 /// "integer constant expression" or constant expression. If not, this struct
790 /// captures information about how and why not.
791 ///
792 /// One bit of information passed *into* the request for constant folding
793 /// indicates whether the subexpression is "evaluated" or not according to C
794 /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
795 /// evaluate the expression regardless of what the RHS is, but C only allows
796 /// certain things in certain situations.
class EvalInfo final : public interp::State {
public:
  /// CurrentCall - The top of the constexpr call stack.
  CallStackFrame *CurrentCall;

  /// CallStackDepth - The number of calls in the call stack right now.
  unsigned CallStackDepth;

  /// NextCallIndex - The next call index to assign.
  unsigned NextCallIndex;

  /// StepsLeft - The remaining number of evaluation steps we're permitted
  /// to perform. This is essentially a limit for the number of statements
  /// we will evaluate.
  unsigned StepsLeft;

  /// Enable the experimental new constant interpreter. If an expression is
  /// not supported by the interpreter, an error is triggered.
  bool EnableNewConstInterp;

  /// BottomFrame - The frame in which evaluation started. This must be
  /// initialized after CurrentCall and CallStackDepth.
  CallStackFrame BottomFrame;

  /// A stack of values whose lifetimes end at the end of some surrounding
  /// evaluation frame.
  llvm::SmallVector<Cleanup, 16> CleanupStack;

  /// EvaluatingDecl - This is the declaration whose initializer is being
  /// evaluated, if any.
  APValue::LValueBase EvaluatingDecl;

  enum class EvaluatingDeclKind {
    None,
    /// We're evaluating the construction of EvaluatingDecl.
    Ctor,
    /// We're evaluating the destruction of EvaluatingDecl.
    Dtor,
  };
  EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;

  /// EvaluatingDeclValue - This is the value being constructed for the
  /// declaration whose initializer is being evaluated, if any.
  APValue *EvaluatingDeclValue;

  /// Stack of loops and 'switch' statements which we're currently
  /// breaking/continuing; null entries are used to mark unlabeled
  /// break/continue.
  SmallVector<const Stmt *> BreakContinueStack;

  /// Set of objects that are currently being constructed.
  llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
      ObjectsUnderConstruction;

  /// Current heap allocations, along with the location where each was
  /// allocated. We use std::map here because we need stable addresses
  /// for the stored APValues.
  std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;

  /// The number of heap allocations performed so far in this evaluation.
  unsigned NumHeapAllocs = 0;

  /// RAII object that marks an object as under construction (in
  /// ObjectsUnderConstruction) for the duration of a constructor call, and
  /// tracks which construction phase the object is in.
  struct EvaluatingConstructorRAII {
    EvalInfo &EI;
    ObjectUnderConstruction Object;
    // Whether we inserted the entry (false if the object was already being
    // constructed); only the inserting RAII erases it on destruction.
    bool DidInsert;
    EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
                              bool HasBases)
        : EI(EI), Object(Object) {
      DidInsert =
          EI.ObjectsUnderConstruction
              .insert(KV: {Object, HasBases ? ConstructionPhase::Bases
                                          : ConstructionPhase::AfterBases})
              .second;
    }
    void finishedConstructingBases() {
      EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
    }
    void finishedConstructingFields() {
      EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
    }
    ~EvaluatingConstructorRAII() {
      if (DidInsert) EI.ObjectsUnderConstruction.erase(Val: Object);
    }
  };

  /// RAII object that marks an object as being destroyed (in
  /// ObjectsUnderConstruction) for the duration of a destructor call.
  struct EvaluatingDestructorRAII {
    EvalInfo &EI;
    ObjectUnderConstruction Object;
    // See EvaluatingConstructorRAII::DidInsert.
    bool DidInsert;
    EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
        : EI(EI), Object(Object) {
      DidInsert = EI.ObjectsUnderConstruction
                      .insert(KV: {Object, ConstructionPhase::Destroying})
                      .second;
    }
    void startedDestroyingBases() {
      EI.ObjectsUnderConstruction[Object] =
          ConstructionPhase::DestroyingBases;
    }
    ~EvaluatingDestructorRAII() {
      if (DidInsert)
        EI.ObjectsUnderConstruction.erase(Val: Object);
    }
  };

  /// Determine whether the object designated by (Base, Path) is currently
  /// being constructed or destroyed, and if so, in which phase.
  /// Returns a default-constructed ConstructionPhase if it is not.
  ConstructionPhase
  isEvaluatingCtorDtor(APValue::LValueBase Base,
                       ArrayRef<APValue::LValuePathEntry> Path) {
    return ObjectsUnderConstruction.lookup(Val: {.Base: Base, .Path: Path});
  }

  /// If we're currently speculatively evaluating, the outermost call stack
  /// depth at which we can mutate state, otherwise 0.
  unsigned SpeculativeEvaluationDepth = 0;

  /// The current array initialization index, if we're performing array
  /// initialization.
  uint64_t ArrayInitIndex = -1;

  EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
      : State(const_cast<ASTContext &>(C), S), CurrentCall(nullptr),
        CallStackDepth(0), NextCallIndex(1),
        StepsLeft(C.getLangOpts().ConstexprStepLimit),
        EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
        BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr,
                    /*This=*/nullptr,
                    /*CallExpr=*/nullptr, CallRef()),
        EvaluatingDecl((const ValueDecl *)nullptr),
        EvaluatingDeclValue(nullptr) {
    // EvalMode is presumably a member of the State base class (it is not
    // declared here); set it after the bases and members are initialized.
    EvalMode = Mode;
  }

  ~EvalInfo() {
    discardCleanups();
  }

  /// Record that we are evaluating the initializer (construction) or the
  /// destruction of the given declaration, storing the result into Value.
  void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
                         EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
    EvaluatingDecl = Base;
    IsEvaluatingDecl = EDK;
    EvaluatingDeclValue = &Value;
  }

  /// Check whether making another constexpr call at Loc is permitted,
  /// diagnosing call-index wraparound and call-depth overflow.
  bool CheckCallLimit(SourceLocation Loc) {
    // Don't perform any constexpr calls (other than the call we're checking)
    // when checking a potential constant expression.
    if (checkingPotentialConstantExpression() && CallStackDepth > 1)
      return false;
    if (NextCallIndex == 0) {
      // NextCallIndex has wrapped around.
      FFDiag(Loc, DiagId: diag::note_constexpr_call_limit_exceeded);
      return false;
    }
    if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
      return true;
    FFDiag(Loc, DiagId: diag::note_constexpr_depth_limit_exceeded)
        << getLangOpts().ConstexprCallDepth;
    return false;
  }

  /// Check that an array with ElemCount elements (whose extent is expressed
  /// in BitWidth bits) is within the implementation's representation and
  /// resource limits. Emits diagnostics only if Diag is true.
  bool CheckArraySize(SourceLocation Loc, unsigned BitWidth,
                      uint64_t ElemCount, bool Diag) {
    // FIXME: GH63562
    // APValue stores array extents as unsigned,
    // so anything that is greater that unsigned would overflow when
    // constructing the array, we catch this here.
    if (BitWidth > ConstantArrayType::getMaxSizeBits(Context: Ctx) ||
        ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) {
      if (Diag)
        FFDiag(Loc, DiagId: diag::note_constexpr_new_too_large) << ElemCount;
      return false;
    }

    // FIXME: GH63562
    // Arrays allocate an APValue per element.
    // We use the number of constexpr steps as a proxy for the maximum size
    // of arrays to avoid exhausting the system resources, as initialization
    // of each element is likely to take some number of steps anyway.
    uint64_t Limit = getLangOpts().ConstexprStepLimit;
    if (Limit != 0 && ElemCount > Limit) {
      if (Diag)
        FFDiag(Loc, DiagId: diag::note_constexpr_new_exceeds_limits)
            << ElemCount << Limit;
      return false;
    }
    return true;
  }

  /// Find the still-active call frame with the given index, along with its
  /// depth in the call stack. Returns {nullptr, 0} if that call has already
  /// returned.
  std::pair<CallStackFrame *, unsigned>
  getCallFrameAndDepth(unsigned CallIndex) {
    assert(CallIndex && "no call index in getCallFrameAndDepth");
    // We will eventually hit BottomFrame, which has Index 1, so Frame can't
    // be null in this loop.
    unsigned Depth = CallStackDepth;
    CallStackFrame *Frame = CurrentCall;
    while (Frame->Index > CallIndex) {
      Frame = Frame->Caller;
      --Depth;
    }
    if (Frame->Index == CallIndex)
      return {Frame, Depth};
    return {nullptr, 0};
  }

  /// Account for one evaluation step, diagnosing and returning false once
  /// the step limit is exhausted. A limit of 0 means "unlimited".
  bool nextStep(const Stmt *S) {
    if (getLangOpts().ConstexprStepLimit == 0)
      return true;

    if (!StepsLeft) {
      FFDiag(Loc: S->getBeginLoc(), DiagId: diag::note_constexpr_step_limit_exceeded);
      return false;
    }
    --StepsLeft;
    return true;
  }

  APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);

  /// Look up the dynamic allocation designated by DA, if it is still live.
  std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) {
    std::optional<DynAlloc *> Result;
    auto It = HeapAllocs.find(x: DA);
    if (It != HeapAllocs.end())
      Result = &It->second;
    return Result;
  }

  /// Get the allocated storage for the given parameter of the given call.
  APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) {
    CallStackFrame *Frame = getCallFrameAndDepth(CallIndex: Call.CallIndex).first;
    return Frame ? Frame->getTemporary(Key: Call.getOrigParam(PVD), Version: Call.Version)
                 : nullptr;
  }

  /// Information about a stack frame for std::allocator<T>::[de]allocate.
  struct StdAllocatorCaller {
    unsigned FrameIndex;
    QualType ElemType;
    const Expr *Call;
    explicit operator bool() const { return FrameIndex != 0; };
  };

  /// Walk the call stack looking for an active call to a member function
  /// named FnName on a std::allocator<T> specialization. Used to recognize
  /// allocation/deallocation performed on behalf of std::allocator.
  StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
    for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
         Call = Call->Caller) {
      const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Val: Call->Callee);
      if (!MD)
        continue;
      const IdentifierInfo *FnII = MD->getIdentifier();
      if (!FnII || !FnII->isStr(Str: FnName))
        continue;

      const auto *CTSD =
          dyn_cast<ClassTemplateSpecializationDecl>(Val: MD->getParent());
      if (!CTSD)
        continue;

      const IdentifierInfo *ClassII = CTSD->getIdentifier();
      const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
      if (CTSD->isInStdNamespace() && ClassII &&
          ClassII->isStr(Str: "allocator") && TAL.size() >= 1 &&
          TAL[0].getKind() == TemplateArgument::Type)
        return {.FrameIndex: Call->Index, .ElemType: TAL[0].getAsType(), .Call: Call->CallExpr};
    }

    return {};
  }

  void performLifetimeExtension() {
    // Disable the cleanups for lifetime-extended temporaries.
    llvm::erase_if(C&: CleanupStack, P: [](Cleanup &C) {
      return !C.isDestroyedAtEndOf(K: ScopeKind::FullExpression);
    });
  }

  /// Throw away any remaining cleanups at the end of evaluation. If any
  /// cleanups would have had a side-effect, note that as an unmodeled
  /// side-effect and return false. Otherwise, return true.
  bool discardCleanups() {
    for (Cleanup &C : CleanupStack) {
      if (C.hasSideEffect() && !noteSideEffect()) {
        CleanupStack.clear();
        return false;
      }
    }
    CleanupStack.clear();
    return true;
  }

private:
  // interp::State overrides, exposing this evaluator's stack to the shared
  // diagnostic machinery.
  const interp::Frame *getCurrentFrame() override { return CurrentCall; }
  const interp::Frame *getBottomFrame() const override { return &BottomFrame; }

  unsigned getCallStackDepth() override { return CallStackDepth; }
  bool stepsLeft() const override { return StepsLeft > 0; }

public:
  /// Notes that we failed to evaluate an expression that other expressions
  /// directly depend on, and determine if we should keep evaluating. This
  /// should only be called if we actually intend to keep evaluating.
  ///
  /// Call noteSideEffect() instead if we may be able to ignore the value that
  /// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
  ///
  /// (Foo(), 1) // use noteSideEffect
  /// (Foo() || true) // use noteSideEffect
  /// Foo() + 1 // use noteFailure
  [[nodiscard]] bool noteFailure() {
    // Failure when evaluating some expression often means there is some
    // subexpression whose evaluation was skipped. Therefore, (because we
    // don't track whether we skipped an expression when unwinding after an
    // evaluation failure) every evaluation failure that bubbles up from a
    // subexpression implies that a side-effect has potentially happened. We
    // skip setting the HasSideEffects flag to true until we decide to
    // continue evaluating after that point, which happens here.
    bool KeepGoing = keepEvaluatingAfterFailure();
    EvalStatus.HasSideEffects |= KeepGoing;
    return KeepGoing;
  }

  /// RAII object that zeroes ArrayInitIndex for the scope of an array
  /// initialization loop, restoring the enclosing loop's index afterwards.
  /// Implicitly converts to a mutable reference to the current index.
  class ArrayInitLoopIndex {
    EvalInfo &Info;
    uint64_t OuterIndex;

  public:
    ArrayInitLoopIndex(EvalInfo &Info)
        : Info(Info), OuterIndex(Info.ArrayInitIndex) {
      Info.ArrayInitIndex = 0;
    }
    ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }

    operator uint64_t&() { return Info.ArrayInitIndex; }
  };
};
1131
/// Object used to treat all foldable expressions as constant expressions.
///
/// While Enabled, switches the evaluator into ConstantFold mode; on
/// destruction it restores the previous mode and, if this object was the
/// first producer of diagnostics and evaluation had no side-effects,
/// discards the diagnostics produced during folding.
struct FoldConstant {
  EvalInfo &Info;
  bool Enabled;
  // True iff a diagnostic buffer exists and was empty (with no side-effects
  // recorded) when we started; only then may the destructor clear it.
  bool HadNoPriorDiags;
  EvaluationMode OldMode;

  explicit FoldConstant(EvalInfo &Info, bool Enabled)
    : Info(Info),
      Enabled(Enabled),
      HadNoPriorDiags(Info.EvalStatus.Diag &&
                      Info.EvalStatus.Diag->empty() &&
                      !Info.EvalStatus.HasSideEffects),
      OldMode(Info.EvalMode) {
    if (Enabled)
      Info.EvalMode = EvaluationMode::ConstantFold;
  }
  /// Prevent the destructor from discarding diagnostics produced while this
  /// object was active.
  void keepDiagnostics() { Enabled = false; }
  ~FoldConstant() {
    if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
        !Info.EvalStatus.HasSideEffects)
      Info.EvalStatus.Diag->clear();
    Info.EvalMode = OldMode;
  }
};
1157
1158 /// RAII object used to set the current evaluation mode to ignore
1159 /// side-effects.
1160 struct IgnoreSideEffectsRAII {
1161 EvalInfo &Info;
1162 EvaluationMode OldMode;
1163 explicit IgnoreSideEffectsRAII(EvalInfo &Info)
1164 : Info(Info), OldMode(Info.EvalMode) {
1165 Info.EvalMode = EvaluationMode::IgnoreSideEffects;
1166 }
1167
1168 ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
1169 };
1170
/// RAII object used to optionally suppress diagnostics and side-effects from
/// a speculative evaluation.
///
/// While active, diagnostics are redirected into an optional caller-provided
/// buffer and SpeculativeEvaluationDepth marks the call depth below which
/// state must not be mutated. The saved status is restored on destruction.
/// Movable but not copyable: a moved-from object gives up responsibility
/// for restoring the state.
class SpeculativeEvaluationRAII {
  // Null when this object is inactive (default-constructed or moved-from).
  EvalInfo *Info = nullptr;
  Expr::EvalStatus OldStatus;
  unsigned OldSpeculativeEvaluationDepth = 0;

  // Take over Other's saved state and deactivate Other so only one of the
  // two objects restores it.
  void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
    Info = Other.Info;
    OldStatus = Other.OldStatus;
    OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
    Other.Info = nullptr;
  }

  // Restore the saved status unless this object is inactive.
  void maybeRestoreState() {
    if (!Info)
      return;

    Info->EvalStatus = OldStatus;
    Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
  }

public:
  SpeculativeEvaluationRAII() = default;

  SpeculativeEvaluationRAII(
      EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
      : Info(&Info), OldStatus(Info.EvalStatus),
        OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
    Info.EvalStatus.Diag = NewDiag;
    Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
  }

  SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
  SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
    moveFromAndCancel(Other: std::move(Other));
  }

  SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
    // Restore our own saved state before taking over Other's.
    maybeRestoreState();
    moveFromAndCancel(Other: std::move(Other));
    return *this;
  }

  ~SpeculativeEvaluationRAII() { maybeRestoreState(); }
};
1217
/// RAII object wrapping a full-expression or block scope, and handling
/// the ending of the lifetime of temporaries created within it.
template<ScopeKind Kind>
class ScopeRAII {
  EvalInfo &Info;
  // Size of the cleanup stack on entry; set to UINT_MAX once destroy() has
  // run so the destructor does not run cleanups a second time.
  unsigned OldStackSize;
public:
  ScopeRAII(EvalInfo &Info)
      : Info(Info), OldStackSize(Info.CleanupStack.size()) {
    // Push a new temporary version. This is needed to distinguish between
    // temporaries created in different iterations of a loop.
    Info.CurrentCall->pushTempVersion();
  }
  /// End the lifetimes of this scope's temporaries now, running their
  /// destructors unless RunDestructors is false. Returns false if ending a
  /// lifetime failed.
  bool destroy(bool RunDestructors = true) {
    bool OK = cleanup(Info, RunDestructors, OldStackSize);
    OldStackSize = std::numeric_limits<unsigned>::max();
    return OK;
  }
  ~ScopeRAII() {
    // If destroy() was not called explicitly, discard this scope's cleanups
    // without running destructors.
    if (OldStackSize != std::numeric_limits<unsigned>::max())
      destroy(RunDestructors: false);
    // The cleanup body lives in the static 'cleanup' method to encourage the
    // compiler to inline away instances of this class.
    Info.CurrentCall->popTempVersion();
  }
private:
  static bool cleanup(EvalInfo &Info, bool RunDestructors,
                      unsigned OldStackSize) {
    assert(OldStackSize <= Info.CleanupStack.size() &&
           "running cleanups out of order?");

    // Run all cleanups for a block scope, and non-lifetime-extended cleanups
    // for a full-expression scope.
    bool Success = true;
    // Iterate in reverse: temporaries are destroyed in the opposite order of
    // their creation.
    for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
      if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(K: Kind)) {
        if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
          Success = false;
          break;
        }
      }
    }

    // Compact any retained cleanups.
    auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
    if (Kind != ScopeKind::Block)
      NewEnd =
          std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) {
            return C.isDestroyedAtEndOf(K: Kind);
          });
    Info.CleanupStack.erase(CS: NewEnd, CE: Info.CleanupStack.end());
    return Success;
  }
};
typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII;
typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII;
typedef ScopeRAII<ScopeKind::Call> CallScopeRAII;
1275}
1276
1277bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
1278 CheckSubobjectKind CSK) {
1279 if (Invalid)
1280 return false;
1281 if (isOnePastTheEnd()) {
1282 Info.CCEDiag(E, DiagId: diag::note_constexpr_past_end_subobject)
1283 << CSK;
1284 setInvalid();
1285 return false;
1286 }
1287 // Note, we do not diagnose if isMostDerivedAnUnsizedArray(), because there
1288 // must actually be at least one array element; even a VLA cannot have a
1289 // bound of zero. And if our index is nonzero, we already had a CCEDiag.
1290 return true;
1291}
1292
/// Diagnose pointer arithmetic on a pointer into an array of unknown bound.
void SubobjectDesignator::diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info,
                                                                const Expr *E) {
  Info.CCEDiag(E, DiagId: diag::note_constexpr_unsized_array_indexed);
  // Do not set the designator as invalid: we can represent this situation,
  // and correct handling of __builtin_object_size requires us to do so.
}
1299
1300void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
1301 const Expr *E,
1302 const APSInt &N) {
1303 // If we're complaining, we must be able to statically determine the size of
1304 // the most derived array.
1305 if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
1306 Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index)
1307 << N << /*array*/ 0
1308 << static_cast<unsigned>(getMostDerivedArraySize());
1309 else
1310 Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index)
1311 << N << /*non-array*/ 1;
1312 setInvalid();
1313}
1314
/// Push a new frame onto the constexpr call stack, assigning it the next
/// call index and linking it to the current frame as its caller.
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceRange CallRange,
                               const FunctionDecl *Callee, const LValue *This,
                               const Expr *CallExpr, CallRef Call)
    : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
      CallExpr(CallExpr), Arguments(Call), CallRange(CallRange),
      Index(Info.NextCallIndex++) {
  // The members above capture the old top of stack; only now make this the
  // current frame.
  Info.CurrentCall = this;
  ++Info.CallStackDepth;
}
1324
/// Pop this frame off the constexpr call stack. Frames must be destroyed in
/// strict LIFO order.
CallStackFrame::~CallStackFrame() {
  assert(Info.CurrentCall == this && "calls retired out of order");
  --Info.CallStackDepth;
  Info.CurrentCall = Caller;
}
1330
1331static bool isRead(AccessKinds AK) {
1332 return AK == AK_Read || AK == AK_ReadObjectRepresentation ||
1333 AK == AK_IsWithinLifetime || AK == AK_Dereference;
1334}
1335
/// Is this access kind one that modifies the object? The switch is
/// deliberately exhaustive (no default) so that adding a new AccessKinds
/// enumerator triggers a compiler warning here.
static bool isModification(AccessKinds AK) {
  switch (AK) {
  case AK_Read:
  case AK_ReadObjectRepresentation:
  case AK_MemberCall:
  case AK_DynamicCast:
  case AK_TypeId:
  case AK_IsWithinLifetime:
  case AK_Dereference:
    return false;
  case AK_Assign:
  case AK_Increment:
  case AK_Decrement:
  case AK_Construct:
  case AK_Destroy:
    return true;
  }
  llvm_unreachable("unknown access kind");
}
1355
1356static bool isAnyAccess(AccessKinds AK) {
1357 return isRead(AK) || isModification(AK);
1358}
1359
1360/// Is this an access per the C++ definition?
1361static bool isFormalAccess(AccessKinds AK) {
1362 return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy &&
1363 AK != AK_IsWithinLifetime && AK != AK_Dereference;
1364}
1365
/// Is this kind of access valid on an indeterminate object value?
/// The switch is deliberately exhaustive (no default) so new enumerators
/// must be classified here.
static bool isValidIndeterminateAccess(AccessKinds AK) {
  switch (AK) {
  case AK_Read:
  case AK_Increment:
  case AK_Decrement:
  case AK_Dereference:
    // These need the object's value.
    return false;

  case AK_IsWithinLifetime:
  case AK_ReadObjectRepresentation:
  case AK_Assign:
  case AK_Construct:
  case AK_Destroy:
    // Construction and destruction don't need the value.
    return true;

  case AK_MemberCall:
  case AK_DynamicCast:
  case AK_TypeId:
    // These aren't really meaningful on scalars.
    return true;
  }
  llvm_unreachable("unknown access kind");
}
1392
1393namespace {
1394 struct ComplexValue {
1395 private:
1396 bool IsInt;
1397
1398 public:
1399 APSInt IntReal, IntImag;
1400 APFloat FloatReal, FloatImag;
1401
1402 ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}
1403
1404 void makeComplexFloat() { IsInt = false; }
1405 bool isComplexFloat() const { return !IsInt; }
1406 APFloat &getComplexFloatReal() { return FloatReal; }
1407 APFloat &getComplexFloatImag() { return FloatImag; }
1408
1409 void makeComplexInt() { IsInt = true; }
1410 bool isComplexInt() const { return IsInt; }
1411 APSInt &getComplexIntReal() { return IntReal; }
1412 APSInt &getComplexIntImag() { return IntImag; }
1413
1414 void moveInto(APValue &v) const {
1415 if (isComplexFloat())
1416 v = APValue(FloatReal, FloatImag);
1417 else
1418 v = APValue(IntReal, IntImag);
1419 }
1420 void setFrom(const APValue &v) {
1421 assert(v.isComplexFloat() || v.isComplexInt());
1422 if (v.isComplexFloat()) {
1423 makeComplexFloat();
1424 FloatReal = v.getComplexFloatReal();
1425 FloatImag = v.getComplexFloatImag();
1426 } else {
1427 makeComplexInt();
1428 IntReal = v.getComplexIntReal();
1429 IntImag = v.getComplexIntImag();
1430 }
1431 }
1432 };
1433
  /// An lvalue under evaluation: a base object plus a byte offset and a
  /// designator describing the path to the designated subobject.
  struct LValue {
    APValue::LValueBase Base;
    CharUnits Offset;
    SubobjectDesignator Designator;
    bool IsNullPtr : 1;
    // True if Base is one of the few invalid-base expression forms we
    // tolerate (see set() below).
    bool InvalidBase : 1;
    // P2280R4 track if we have an unknown reference or pointer.
    bool AllowConstexprUnknown = false;

    const APValue::LValueBase getLValueBase() const { return Base; }
    bool allowConstexprUnknown() const { return AllowConstexprUnknown; }
    CharUnits &getLValueOffset() { return Offset; }
    const CharUnits &getLValueOffset() const { return Offset; }
    SubobjectDesignator &getLValueDesignator() { return Designator; }
    const SubobjectDesignator &getLValueDesignator() const { return Designator;}
    bool isNullPointer() const { return IsNullPtr;}

    unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
    unsigned getLValueVersion() const { return Base.getVersion(); }

    /// Store this lvalue into an APValue. An invalid designator becomes an
    /// APValue with no lvalue path.
    void moveInto(APValue &V) const {
      if (Designator.Invalid)
        V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
      else {
        assert(!InvalidBase && "APValues can't handle invalid LValue bases");
        V = APValue(Base, Offset, Designator.Entries,
                    Designator.IsOnePastTheEnd, IsNullPtr);
      }
      if (AllowConstexprUnknown)
        V.setConstexprUnknown();
    }
    /// Initialize this lvalue from an lvalue APValue.
    void setFrom(const ASTContext &Ctx, const APValue &V) {
      assert(V.isLValue() && "Setting LValue from a non-LValue?");
      Base = V.getLValueBase();
      Offset = V.getLValueOffset();
      InvalidBase = false;
      Designator = SubobjectDesignator(Ctx, V);
      IsNullPtr = V.isNullPointer();
      AllowConstexprUnknown = V.allowConstexprUnknown();
    }

    /// Point this lvalue at the start of base object B, with an empty
    /// designator. BInvalid marks B as one of the tolerated invalid bases.
    void set(APValue::LValueBase B, bool BInvalid = false) {
#ifndef NDEBUG
      // We only allow a few types of invalid bases. Enforce that here.
      if (BInvalid) {
        const auto *E = B.get<const Expr *>();
        assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
               "Unexpected type of invalid base");
      }
#endif

      Base = B;
      Offset = CharUnits::fromQuantity(Quantity: 0);
      InvalidBase = BInvalid;
      Designator = SubobjectDesignator(getType(B));
      IsNullPtr = false;
      AllowConstexprUnknown = false;
    }

    /// Make this lvalue the null pointer value of type PointerTy, using the
    /// target's representation of null.
    void setNull(ASTContext &Ctx, QualType PointerTy) {
      Base = (const ValueDecl *)nullptr;
      Offset =
          CharUnits::fromQuantity(Quantity: Ctx.getTargetNullPointerValue(QT: PointerTy));
      InvalidBase = false;
      Designator = SubobjectDesignator(PointerTy->getPointeeType());
      IsNullPtr = true;
      AllowConstexprUnknown = false;
    }

    void setInvalid(APValue::LValueBase B, unsigned I = 0) {
      set(B, BInvalid: true);
    }

    std::string toString(ASTContext &Ctx, QualType T) const {
      APValue Printable;
      moveInto(V&: Printable);
      return Printable.getAsString(Ctx, Ty: T);
    }

  private:
    // Check that this LValue is not based on a null pointer. If it is, produce
    // a diagnostic and mark the designator as invalid.
    template <typename GenDiagType>
    bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
      if (Designator.Invalid)
        return false;
      if (IsNullPtr) {
        GenDiag();
        Designator.setInvalid();
        return false;
      }
      return true;
    }

  public:
    bool checkNullPointer(EvalInfo &Info, const Expr *E,
                          CheckSubobjectKind CSK) {
      return checkNullPointerDiagnosingWith(GenDiag: [&Info, E, CSK] {
        Info.CCEDiag(E, DiagId: diag::note_constexpr_null_subobject) << CSK;
      });
    }

    bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
                                       AccessKinds AK) {
      return checkNullPointerDiagnosingWith(GenDiag: [&Info, E, AK] {
        // A bare dereference of null gets its own note; other access kinds
        // share a generic "access of null" note parameterized by AK.
        if (AK == AccessKinds::AK_Dereference)
          Info.FFDiag(E, DiagId: diag::note_constexpr_dereferencing_null);
        else
          Info.FFDiag(E, DiagId: diag::note_constexpr_access_null) << AK;
      });
    }

    // Check this LValue refers to an object. If not, set the designator to be
    // invalid and emit a diagnostic.
    bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
      return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
             Designator.checkSubobject(Info, E, CSK);
    }

    /// Extend the designator with a base class or field of the current
    /// object.
    void addDecl(EvalInfo &Info, const Expr *E,
                 const Decl *D, bool Virtual = false) {
      if (checkSubobject(Info, E, CSK: isa<FieldDecl>(Val: D) ? CSK_Field : CSK_Base))
        Designator.addDeclUnchecked(D, Virtual);
    }
    /// Enter an array of unknown bound; only supported as the first (and
    /// only) designator entry.
    void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
      if (!Designator.Entries.empty()) {
        Info.CCEDiag(E, DiagId: diag::note_constexpr_unsupported_unsized_array);
        Designator.setInvalid();
        return;
      }
      if (checkSubobject(Info, E, CSK: CSK_ArrayToPointer)) {
        assert(getType(Base).getNonReferenceType()->isPointerType() ||
               getType(Base).getNonReferenceType()->isArrayType());
        Designator.FirstEntryIsAnUnsizedArray = true;
        Designator.addUnsizedArrayUnchecked(ElemTy);
      }
    }
    void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
      if (checkSubobject(Info, E, CSK: CSK_ArrayToPointer))
        Designator.addArrayUnchecked(CAT);
    }
    /// Designate the real or imaginary part of a _Complex object.
    void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
      if (checkSubobject(Info, E, CSK: Imag ? CSK_Imag : CSK_Real))
        Designator.addComplexUnchecked(EltTy, Imag);
    }
    void addVectorElement(EvalInfo &Info, const Expr *E, QualType EltTy,
                          uint64_t Size, uint64_t Idx) {
      if (checkSubobject(Info, E, CSK: CSK_VectorElement))
        Designator.addVectorElementUnchecked(EltTy, Size, Idx);
    }
    void clearIsNullPointer() {
      IsNullPtr = false;
    }
    /// Adjust this lvalue by Index elements of size ElementSize, updating
    /// both the byte offset and the designator's array index.
    void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
                              const APSInt &Index, CharUnits ElementSize) {
      // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
      // but we're not required to diagnose it and it's valid in C++.)
      if (!Index)
        return;

      // Compute the new offset in the appropriate width, wrapping at 64 bits.
      // FIXME: When compiling for a 32-bit target, we should use 32-bit
      // offsets.
      uint64_t Offset64 = Offset.getQuantity();
      uint64_t ElemSize64 = ElementSize.getQuantity();
      uint64_t Index64 = Index.extOrTrunc(width: 64).getZExtValue();
      Offset = CharUnits::fromQuantity(Quantity: Offset64 + ElemSize64 * Index64);

      if (checkNullPointer(Info, E, CSK: CSK_ArrayIndex))
        Designator.adjustIndex(Info, E, N: Index, LV: *this);
      clearIsNullPointer();
    }
    void adjustOffset(CharUnits N) {
      Offset += N;
      if (N.getQuantity())
        clearIsNullPointer();
    }
  };
1612
  /// A pointer-to-member value: the referenced declaration plus the
  /// base/derived path adjustments applied by casts.
  struct MemberPtr {
    // Default-constructed: a null member pointer (getDecl() == nullptr).
    MemberPtr() {}
    explicit MemberPtr(const ValueDecl *Decl)
        : DeclAndIsDerivedMember(Decl, false) {}

    /// The member or (direct or indirect) field referred to by this member
    /// pointer, or 0 if this is a null member pointer.
    const ValueDecl *getDecl() const {
      return DeclAndIsDerivedMember.getPointer();
    }
    /// Is this actually a member of some type derived from the relevant class?
    bool isDerivedMember() const {
      return DeclAndIsDerivedMember.getInt();
    }
    /// Get the class which the declaration actually lives in.
    const CXXRecordDecl *getContainingRecord() const {
      return cast<CXXRecordDecl>(
          Val: DeclAndIsDerivedMember.getPointer()->getDeclContext());
    }

    /// Store this member pointer into an APValue.
    void moveInto(APValue &V) const {
      V = APValue(getDecl(), isDerivedMember(), Path);
    }
    /// Initialize this member pointer from a member-pointer APValue.
    void setFrom(const APValue &V) {
      assert(V.isMemberPointer());
      DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
      DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
      Path.clear();
      llvm::append_range(C&: Path, R: V.getMemberPointerPath());
    }

    /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
    /// whether the member is a member of some class derived from the class type
    /// of the member pointer.
    llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
    /// Path - The path of base/derived classes from the member declaration's
    /// class (exclusive) to the class type of the member pointer (inclusive).
    SmallVector<const CXXRecordDecl*, 4> Path;

    /// Perform a cast towards the class of the Decl (either up or down the
    /// hierarchy). Pops the last path entry after verifying it matches Class.
    bool castBack(const CXXRecordDecl *Class) {
      assert(!Path.empty());
      const CXXRecordDecl *Expected;
      if (Path.size() >= 2)
        Expected = Path[Path.size() - 2];
      else
        Expected = getContainingRecord();
      if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
        // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
        // if B does not contain the original member and is not a base or
        // derived class of the class containing the original member, the result
        // of the cast is undefined.
        // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
        // (D::*). We consider that to be a language defect.
        return false;
      }
      Path.pop_back();
      return true;
    }
    /// Perform a base-to-derived member pointer cast.
    bool castToDerived(const CXXRecordDecl *Derived) {
      if (!getDecl())
        return true;
      if (!isDerivedMember()) {
        Path.push_back(Elt: Derived);
        return true;
      }
      if (!castBack(Class: Derived))
        return false;
      // Once the path is fully unwound, the member is no longer a member of
      // a derived class.
      if (Path.empty())
        DeclAndIsDerivedMember.setInt(false);
      return true;
    }
    /// Perform a derived-to-base member pointer cast.
    bool castToBase(const CXXRecordDecl *Base) {
      if (!getDecl())
        return true;
      if (Path.empty())
        DeclAndIsDerivedMember.setInt(true);
      if (isDerivedMember()) {
        Path.push_back(Elt: Base);
        return true;
      }
      return castBack(Class: Base);
    }
  };
1700
1701 /// Compare two member pointers, which are assumed to be of the same type.
1702 static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
1703 if (!LHS.getDecl() || !RHS.getDecl())
1704 return !LHS.getDecl() && !RHS.getDecl();
1705 if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
1706 return false;
1707 return LHS.Path == RHS.Path;
1708 }
1709}
1710
/// Adjust the最 most-derived array index of this designator by N, diagnosing
/// and invalidating the designator if the adjustment goes out of bounds.
void SubobjectDesignator::adjustIndex(EvalInfo &Info, const Expr *E, APSInt N,
                                      const LValue &LV) {
  if (Invalid || !N)
    return;
  // Truncate N to 64 bits; two's-complement wraparound makes this correct
  // for negative adjustments as well.
  uint64_t TruncatedN = N.extOrTrunc(width: 64).getZExtValue();
  if (isMostDerivedAnUnsizedArray()) {
    diagnoseUnsizedArrayPointerArithmetic(Info, E);
    // Can't verify -- trust that the user is doing the right thing (or if
    // not, trust that the caller will catch the bad behavior).
    // FIXME: Should we reject if this overflows, at least?
    Entries.back() =
        PathEntry::ArrayIndex(Index: Entries.back().getAsArrayIndex() + TruncatedN);
    return;
  }

  // [expr.add]p4: For the purposes of these operators, a pointer to a
  // nonarray object behaves the same as a pointer to the first element of
  // an array of length one with the type of the object as its element type.
  bool IsArray =
      MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement;
  uint64_t ArrayIndex =
      IsArray ? Entries.back().getAsArrayIndex() : (uint64_t)IsOnePastTheEnd;
  uint64_t ArraySize = IsArray ? getMostDerivedArraySize() : (uint64_t)1;

  // Bounds check: the resulting index must lie in [0, ArraySize] (one past
  // the end is representable but not dereferenceable).
  if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
    if (!Info.checkingPotentialConstantExpression() ||
        !LV.AllowConstexprUnknown) {
      // Calculate the actual index in a wide enough type, so we can include
      // it in the note.
      N = N.extend(width: std::max<unsigned>(a: N.getBitWidth() + 1, b: 65));
      (llvm::APInt &)N += ArrayIndex;
      assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
      diagnosePointerArithmetic(Info, E, N);
    }
    setInvalid();
    return;
  }

  ArrayIndex += TruncatedN;
  assert(ArrayIndex <= ArraySize &&
         "bounds check succeeded for out-of-bounds index");

  if (IsArray)
    Entries.back() = PathEntry::ArrayIndex(Index: ArrayIndex);
  else
    // For a non-array object, the only representable states are "at the
    // object" (index 0) and "one past the end" (index 1).
    IsOnePastTheEnd = (ArrayIndex != 0);
}
1758
1759static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
1760static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
1761 const LValue &This, const Expr *E,
1762 bool AllowNonLiteralTypes = false);
1763static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
1764 bool InvalidBaseOK = false);
1765static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
1766 bool InvalidBaseOK = false);
1767static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
1768 EvalInfo &Info);
1769static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
1770static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
1771static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
1772 EvalInfo &Info);
1773static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
1774static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
1775static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
1776 EvalInfo &Info);
1777static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
1778static std::optional<uint64_t>
1779EvaluateBuiltinStrLen(const Expr *E, EvalInfo &Info,
1780 std::string *StringResult = nullptr);
1781
1782/// Evaluate an integer or fixed point expression into an APResult.
1783static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
1784 EvalInfo &Info);
1785
1786/// Evaluate only a fixed point expression into an APResult.
1787static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
1788 EvalInfo &Info);
1789
1790//===----------------------------------------------------------------------===//
1791// Misc utilities
1792//===----------------------------------------------------------------------===//
1793
1794/// Negate an APSInt in place, converting it to a signed form if necessary, and
1795/// preserving its value (by extending by up to one bit as needed).
1796static void negateAsSigned(APSInt &Int) {
1797 if (Int.isUnsigned() || Int.isMinSignedValue()) {
1798 Int = Int.extend(width: Int.getBitWidth() + 1);
1799 Int.setIsSigned(true);
1800 }
1801 Int = -Int;
1802}
1803
1804template<typename KeyT>
1805APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
1806 ScopeKind Scope, LValue &LV) {
1807 unsigned Version = getTempVersion();
1808 APValue::LValueBase Base(Key, Index, Version);
1809 LV.set(B: Base);
1810 return createLocal(Base, Key, T, Scope);
1811}
1812
1813/// Allocate storage for a parameter of a function call made in this frame.
1814APValue &CallStackFrame::createParam(CallRef Args, const ParmVarDecl *PVD,
1815 LValue &LV) {
1816 assert(Args.CallIndex == Index && "creating parameter in wrong frame");
1817 APValue::LValueBase Base(PVD, Index, Args.Version);
1818 LV.set(B: Base);
1819 // We always destroy parameters at the end of the call, even if we'd allow
1820 // them to live to the end of the full-expression at runtime, in order to
1821 // give portable results and match other compilers.
1822 return createLocal(Base, Key: PVD, T: PVD->getType(), Scope: ScopeKind::Call);
1823}
1824
1825APValue &CallStackFrame::createLocal(APValue::LValueBase Base, const void *Key,
1826 QualType T, ScopeKind Scope) {
1827 assert(Base.getCallIndex() == Index && "lvalue for wrong frame");
1828 unsigned Version = Base.getVersion();
1829 APValue &Result = Temporaries[MapKeyTy(Key, Version)];
1830 assert(Result.isAbsent() && "local created multiple times");
1831
1832 // If we're creating a local immediately in the operand of a speculative
1833 // evaluation, don't register a cleanup to be run outside the speculative
1834 // evaluation context, since we won't actually be able to initialize this
1835 // object.
1836 if (Index <= Info.SpeculativeEvaluationDepth) {
1837 if (T.isDestructedType())
1838 Info.noteSideEffect();
1839 } else {
1840 Info.CleanupStack.push_back(Elt: Cleanup(&Result, Base, T, Scope));
1841 }
1842 return Result;
1843}
1844
1845APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
1846 if (NumHeapAllocs > DynamicAllocLValue::getMaxIndex()) {
1847 FFDiag(E, DiagId: diag::note_constexpr_heap_alloc_limit_exceeded);
1848 return nullptr;
1849 }
1850
1851 DynamicAllocLValue DA(NumHeapAllocs++);
1852 LV.set(B: APValue::LValueBase::getDynamicAlloc(LV: DA, Type: T));
1853 auto Result = HeapAllocs.emplace(args: std::piecewise_construct,
1854 args: std::forward_as_tuple(args&: DA), args: std::tuple<>());
1855 assert(Result.second && "reused a heap alloc index?");
1856 Result.first->second.AllocExpr = E;
1857 return &Result.first->second.Value;
1858}
1859
/// Produce a string describing the given constexpr call, in the form
/// "object.function(arg1, arg2)", for use in diagnostics.
void CallStackFrame::describe(raw_ostream &Out) const {
  bool IsMemberCall = false;
  bool ExplicitInstanceParam = false;
  // Non-static member functions other than constructors are printed with
  // their object argument in front of the name.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: Callee)) {
    IsMemberCall = !isa<CXXConstructorDecl>(Val: MD) && !MD->isStatic();
    ExplicitInstanceParam = MD->isExplicitObjectMemberFunction();
  }

  if (!IsMemberCall)
    Callee->getNameForDiagnostic(OS&: Out, Policy: Info.Ctx.getPrintingPolicy(),
                                 /*Qualified=*/false);

  if (This && IsMemberCall) {
    // Prefer to print the object expression as it appeared in the source,
    // when the originating call expression is available.
    if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(Val: CallExpr)) {
      const Expr *Object = MCE->getImplicitObjectArgument();
      Object->printPretty(OS&: Out, /*Helper=*/nullptr, Policy: Info.Ctx.getPrintingPolicy(),
                          /*Indentation=*/0);
      // Use '->' for a pointer object argument, '.' otherwise.
      if (Object->getType()->isPointerType())
        Out << "->";
      else
        Out << ".";
    } else if (const auto *OCE =
                   dyn_cast_if_present<CXXOperatorCallExpr>(Val: CallExpr)) {
      // For an operator call, the object is the first argument.
      OCE->getArg(Arg: 0)->printPretty(OS&: Out, /*Helper=*/nullptr,
                                   Policy: Info.Ctx.getPrintingPolicy(),
                                   /*Indentation=*/0);
      Out << ".";
    } else {
      // Fall back to printing the evaluated value of 'this'.
      APValue Val;
      This->moveInto(V&: Val);
      Val.printPretty(
          OS&: Out, Ctx: Info.Ctx,
          Ty: Info.Ctx.getLValueReferenceType(T: This->Designator.MostDerivedType));
      Out << ".";
    }
    Callee->getNameForDiagnostic(OS&: Out, Policy: Info.Ctx.getPrintingPolicy(),
                                 /*Qualified=*/false);
  }

  Out << '(';

  // Print each argument value; slice(1) skips the explicit object parameter
  // when there is one, so it is not repeated in the argument list.
  llvm::ListSeparator Comma;
  for (const ParmVarDecl *Param :
       Callee->parameters().slice(N: ExplicitInstanceParam)) {
    Out << Comma;
    const APValue *V = Info.getParamSlot(Call: Arguments, PVD: Param);
    if (V)
      V->printPretty(OS&: Out, Ctx: Info.Ctx, Ty: Param->getType());
    else
      // No stored value for this parameter (e.g. not tracked); print a
      // placeholder.
      Out << "<...>";
  }

  Out << ')';
}
1915
1916/// Evaluate an expression to see if it had side-effects, and discard its
1917/// result.
1918/// \return \c true if the caller should keep evaluating.
1919static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
1920 assert(!E->isValueDependent());
1921 APValue Scratch;
1922 if (!Evaluate(Result&: Scratch, Info, E))
1923 // We don't need the value, but we might have skipped a side effect here.
1924 return Info.noteSideEffect();
1925 return true;
1926}
1927
1928/// Should this call expression be treated as forming an opaque constant?
1929static bool IsOpaqueConstantCall(const CallExpr *E) {
1930 unsigned Builtin = E->getBuiltinCallee();
1931 return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
1932 Builtin == Builtin::BI__builtin___NSStringMakeConstantString ||
1933 Builtin == Builtin::BI__builtin_ptrauth_sign_constant ||
1934 Builtin == Builtin::BI__builtin_function_start);
1935}
1936
1937static bool IsOpaqueConstantCall(const LValue &LVal) {
1938 const auto *BaseExpr =
1939 llvm::dyn_cast_if_present<CallExpr>(Val: LVal.Base.dyn_cast<const Expr *>());
1940 return BaseExpr && IsOpaqueConstantCall(E: BaseExpr);
1941}
1942
/// Determine whether the given lvalue base may appear in an address constant
/// expression: a null base, an entity with static storage duration, or one of
/// the expression forms enumerated below.
static bool IsGlobalLValue(APValue::LValueBase B) {
  // C++11 [expr.const]p3 An address constant expression is a prvalue core
  // constant expression of pointer type that evaluates to...

  // ... a null pointer value, or a prvalue core constant expression of type
  // std::nullptr_t.
  if (!B)
    return true;

  if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
    // ... the address of an object with static storage duration,
    if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
      return VD->hasGlobalStorage();
    // Template parameter objects are treated as global.
    if (isa<TemplateParamObjectDecl>(Val: D))
      return true;
    // ... the address of a function,
    // ... the address of a GUID [MS extension],
    // ... the address of an unnamed global constant
    return isa<FunctionDecl, MSGuidDecl, UnnamedGlobalConstantDecl>(Val: D);
  }

  // typeid objects and dynamic allocations are treated as global here.
  if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
    return true;

  // Otherwise the base is an expression; whether it is global depends on the
  // expression's form.
  const Expr *E = B.get<const Expr*>();
  switch (E->getStmtClass()) {
  default:
    return false;
  case Expr::CompoundLiteralExprClass: {
    const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(Val: E);
    return CLE->isFileScope() && CLE->isLValue();
  }
  case Expr::MaterializeTemporaryExprClass:
    // A materialized temporary might have been lifetime-extended to static
    // storage duration.
    return cast<MaterializeTemporaryExpr>(Val: E)->getStorageDuration() == SD_Static;
  // A string literal has static storage duration.
  case Expr::StringLiteralClass:
  case Expr::PredefinedExprClass:
  case Expr::ObjCStringLiteralClass:
  case Expr::ObjCEncodeExprClass:
    return true;
  case Expr::ObjCBoxedExprClass:
    return cast<ObjCBoxedExpr>(Val: E)->isExpressibleAsConstantInitializer();
  case Expr::CallExprClass:
    return IsOpaqueConstantCall(E: cast<CallExpr>(Val: E));
  // For GCC compatibility, &&label has static storage duration.
  case Expr::AddrLabelExprClass:
    return true;
  // A Block literal expression may be used as the initialization value for
  // Block variables at global or local static scope.
  case Expr::BlockExprClass:
    return !cast<BlockExpr>(Val: E)->getBlockDecl()->hasCaptures();
  // The APValue generated from a __builtin_source_location will be emitted as a
  // literal.
  case Expr::SourceLocExprClass:
    return true;
  case Expr::ImplicitValueInitExprClass:
    // FIXME:
    // We can never form an lvalue with an implicit value initialization as its
    // base through expression evaluation, so these only appear in one case: the
    // implicit variable declaration we invent when checking whether a constexpr
    // constructor can produce a constant expression. We must assume that such
    // an expression might be a global lvalue.
    return true;
  }
}
2010
2011static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
2012 return LVal.Base.dyn_cast<const ValueDecl*>();
2013}
2014
// Information about an LValueBase that is some kind of string.
struct LValueBaseString {
  // Owning storage, used only when the base is an ObjCEncodeExpr whose
  // encoding string is computed on demand; Bytes then refers into it.
  std::string ObjCEncodeStorage;
  // The string's character data (the null terminator is not included).
  StringRef Bytes;
  // Width of a single character of the string, in bytes.
  int CharWidth;
};
2021
2022// Gets the lvalue base of LVal as a string.
2023static bool GetLValueBaseAsString(const EvalInfo &Info, const LValue &LVal,
2024 LValueBaseString &AsString) {
2025 const auto *BaseExpr = LVal.Base.dyn_cast<const Expr *>();
2026 if (!BaseExpr)
2027 return false;
2028
2029 // For ObjCEncodeExpr, we need to compute and store the string.
2030 if (const auto *EE = dyn_cast<ObjCEncodeExpr>(Val: BaseExpr)) {
2031 Info.Ctx.getObjCEncodingForType(T: EE->getEncodedType(),
2032 S&: AsString.ObjCEncodeStorage);
2033 AsString.Bytes = AsString.ObjCEncodeStorage;
2034 AsString.CharWidth = 1;
2035 return true;
2036 }
2037
2038 // Otherwise, we have a StringLiteral.
2039 const auto *Lit = dyn_cast<StringLiteral>(Val: BaseExpr);
2040 if (const auto *PE = dyn_cast<PredefinedExpr>(Val: BaseExpr))
2041 Lit = PE->getFunctionName();
2042
2043 if (!Lit)
2044 return false;
2045
2046 AsString.Bytes = Lit->getBytes();
2047 AsString.CharWidth = Lit->getCharByteWidth();
2048 return true;
2049}
2050
// Determine whether two string literals potentially overlap. This will be the
// case if they agree on the values of all the bytes on the overlapping region
// between them.
//
// The overlapping region is the portion of the two string literals that must
// overlap in memory if the pointers actually point to the same address at
// runtime. For example, if LHS is "abcdef" + 3 and RHS is "cdef\0gh" + 1 then
// the overlapping region is "cdef\0", which in this case does agree, so the
// strings are potentially overlapping. Conversely, for "foobar" + 3 versus
// "bazbar" + 3, the overlapping region contains all of both strings, so they
// are not potentially overlapping, even though they agree from the given
// addresses onwards.
//
// See open core issue CWG2765 which is discussing the desired rule here.
static bool ArePotentiallyOverlappingStringLiterals(const EvalInfo &Info,
                                                    const LValue &LHS,
                                                    const LValue &RHS) {
  // Only string-like bases can be compared this way.
  LValueBaseString LHSString, RHSString;
  if (!GetLValueBaseAsString(Info, LVal: LHS, AsString&: LHSString) ||
      !GetLValueBaseAsString(Info, LVal: RHS, AsString&: RHSString))
    return false;

  // This is the byte offset to the location of the first character of LHS
  // within RHS. We don't need to look at the characters of one string that
  // would appear before the start of the other string if they were merged.
  CharUnits Offset = RHS.Offset - LHS.Offset;
  if (Offset.isNegative()) {
    // LHS starts after RHS: drop LHS bytes preceding the start of RHS.
    if (LHSString.Bytes.size() < (size_t)-Offset.getQuantity())
      return false;
    LHSString.Bytes = LHSString.Bytes.drop_front(N: -Offset.getQuantity());
  } else {
    // RHS starts after LHS (or at the same point): drop RHS bytes preceding
    // the start of LHS.
    if (RHSString.Bytes.size() < (size_t)Offset.getQuantity())
      return false;
    RHSString.Bytes = RHSString.Bytes.drop_front(N: Offset.getQuantity());
  }

  bool LHSIsLonger = LHSString.Bytes.size() > RHSString.Bytes.size();
  StringRef Longer = LHSIsLonger ? LHSString.Bytes : RHSString.Bytes;
  StringRef Shorter = LHSIsLonger ? RHSString.Bytes : LHSString.Bytes;
  int ShorterCharWidth = (LHSIsLonger ? RHSString : LHSString).CharWidth;

  // The null terminator isn't included in the string data, so check for it
  // manually. If the longer string doesn't have a null terminator where the
  // shorter string ends, they aren't potentially overlapping.
  for (int NullByte : llvm::seq(Size: ShorterCharWidth)) {
    if (Shorter.size() + NullByte >= Longer.size())
      break;
    if (Longer[Shorter.size() + NullByte])
      return false;
  }

  // Otherwise, they're potentially overlapping if and only if the overlapping
  // region is the same.
  return Shorter == Longer.take_front(N: Shorter.size());
}
2106
2107static bool IsWeakLValue(const LValue &Value) {
2108 const ValueDecl *Decl = GetLValueBaseDecl(LVal: Value);
2109 return Decl && Decl->isWeak();
2110}
2111
2112static bool isZeroSized(const LValue &Value) {
2113 const ValueDecl *Decl = GetLValueBaseDecl(LVal: Value);
2114 if (isa_and_nonnull<VarDecl>(Val: Decl)) {
2115 QualType Ty = Decl->getType();
2116 if (Ty->isArrayType())
2117 return Ty->isIncompleteType() ||
2118 Decl->getASTContext().getTypeSize(T: Ty) == 0;
2119 }
2120 return false;
2121}
2122
2123static bool HasSameBase(const LValue &A, const LValue &B) {
2124 if (!A.getLValueBase())
2125 return !B.getLValueBase();
2126 if (!B.getLValueBase())
2127 return false;
2128
2129 if (A.getLValueBase().getOpaqueValue() !=
2130 B.getLValueBase().getOpaqueValue())
2131 return false;
2132
2133 return A.getLValueCallIndex() == B.getLValueCallIndex() &&
2134 A.getLValueVersion() == B.getLValueVersion();
2135}
2136
/// Emit a note pointing at the source location of the given lvalue base
/// (the declaration, temporary, or dynamic allocation it refers to), when
/// such a location can be determined.
static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
  assert(Base && "no location for a null lvalue");
  const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();

  // For a parameter, find the corresponding call stack frame (if it still
  // exists), and point at the parameter of the function definition we actually
  // invoked.
  if (auto *PVD = dyn_cast_or_null<ParmVarDecl>(Val: VD)) {
    unsigned Idx = PVD->getFunctionScopeIndex();
    for (CallStackFrame *F = Info.CurrentCall; F; F = F->Caller) {
      // Match on both call index and version to find the exact activation
      // this parameter storage belongs to.
      if (F->Arguments.CallIndex == Base.getCallIndex() &&
          F->Arguments.Version == Base.getVersion() && F->Callee &&
          Idx < F->Callee->getNumParams()) {
        VD = F->Callee->getParamDecl(i: Idx);
        break;
      }
    }
  }

  if (VD)
    Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
  else if (const Expr *E = Base.dyn_cast<const Expr*>())
    Info.Note(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_temporary_here);
  else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
    // FIXME: Produce a note for dangling pointers too.
    if (std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA))
      Info.Note(Loc: (*Alloc)->AllocExpr->getExprLoc(),
                DiagId: diag::note_constexpr_dynamic_alloc_here);
  }

  // We have no information to show for a typeid(T) object.
}
2169
/// The two levels of checking applied to an evaluation result: full
/// constant-expression validity, or merely that every subobject has been
/// initialized.
enum class CheckEvaluationResultKind {
  ConstantExpression,
  FullyInitialized,
};

/// Materialized temporaries that we've already checked to determine if they're
/// initialized by a constant expression.
using CheckedTemporaries =
    llvm::SmallPtrSet<const MaterializeTemporaryExpr *, 8>;
2179
2180static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
2181 EvalInfo &Info, SourceLocation DiagLoc,
2182 QualType Type, const APValue &Value,
2183 ConstantExprKind Kind,
2184 const FieldDecl *SubobjectDecl,
2185 CheckedTemporaries &CheckedTemps);
2186
2187/// Check that this reference or pointer core constant expression is a valid
2188/// value for an address or reference constant expression. Return true if we
2189/// can fold this expression, whether or not it's a constant expression.
2190static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
2191 QualType Type, const LValue &LVal,
2192 ConstantExprKind Kind,
2193 CheckedTemporaries &CheckedTemps) {
2194 bool IsReferenceType = Type->isReferenceType();
2195
2196 APValue::LValueBase Base = LVal.getLValueBase();
2197 const SubobjectDesignator &Designator = LVal.getLValueDesignator();
2198
2199 const Expr *BaseE = Base.dyn_cast<const Expr *>();
2200 const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl*>();
2201
2202 // Additional restrictions apply in a template argument. We only enforce the
2203 // C++20 restrictions here; additional syntactic and semantic restrictions
2204 // are applied elsewhere.
2205 if (isTemplateArgument(Kind)) {
2206 int InvalidBaseKind = -1;
2207 StringRef Ident;
2208 if (Base.is<TypeInfoLValue>())
2209 InvalidBaseKind = 0;
2210 else if (isa_and_nonnull<StringLiteral>(Val: BaseE))
2211 InvalidBaseKind = 1;
2212 else if (isa_and_nonnull<MaterializeTemporaryExpr>(Val: BaseE) ||
2213 isa_and_nonnull<LifetimeExtendedTemporaryDecl>(Val: BaseVD))
2214 InvalidBaseKind = 2;
2215 else if (auto *PE = dyn_cast_or_null<PredefinedExpr>(Val: BaseE)) {
2216 InvalidBaseKind = 3;
2217 Ident = PE->getIdentKindName();
2218 }
2219
2220 if (InvalidBaseKind != -1) {
2221 Info.FFDiag(Loc, DiagId: diag::note_constexpr_invalid_template_arg)
2222 << IsReferenceType << !Designator.Entries.empty() << InvalidBaseKind
2223 << Ident;
2224 return false;
2225 }
2226 }
2227
2228 if (auto *FD = dyn_cast_or_null<FunctionDecl>(Val: BaseVD);
2229 FD && FD->isImmediateFunction()) {
2230 Info.FFDiag(Loc, DiagId: diag::note_consteval_address_accessible)
2231 << !Type->isAnyPointerType();
2232 Info.Note(Loc: FD->getLocation(), DiagId: diag::note_declared_at);
2233 return false;
2234 }
2235
2236 // Check that the object is a global. Note that the fake 'this' object we
2237 // manufacture when checking potential constant expressions is conservatively
2238 // assumed to be global here.
2239 if (!IsGlobalLValue(B: Base)) {
2240 if (Info.getLangOpts().CPlusPlus11) {
2241 Info.FFDiag(Loc, DiagId: diag::note_constexpr_non_global, ExtraNotes: 1)
2242 << IsReferenceType << !Designator.Entries.empty() << !!BaseVD
2243 << BaseVD;
2244 auto *VarD = dyn_cast_or_null<VarDecl>(Val: BaseVD);
2245 if (VarD && VarD->isConstexpr()) {
2246 // Non-static local constexpr variables have unintuitive semantics:
2247 // constexpr int a = 1;
2248 // constexpr const int *p = &a;
2249 // ... is invalid because the address of 'a' is not constant. Suggest
2250 // adding a 'static' in this case.
2251 Info.Note(Loc: VarD->getLocation(), DiagId: diag::note_constexpr_not_static)
2252 << VarD
2253 << FixItHint::CreateInsertion(InsertionLoc: VarD->getBeginLoc(), Code: "static ");
2254 } else {
2255 NoteLValueLocation(Info, Base);
2256 }
2257 } else {
2258 Info.FFDiag(Loc);
2259 }
2260 // Don't allow references to temporaries to escape.
2261 return false;
2262 }
2263 assert((Info.checkingPotentialConstantExpression() ||
2264 LVal.getLValueCallIndex() == 0) &&
2265 "have call index for global lvalue");
2266
2267 if (LVal.allowConstexprUnknown()) {
2268 if (BaseVD) {
2269 Info.FFDiag(Loc, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << BaseVD;
2270 NoteLValueLocation(Info, Base);
2271 } else {
2272 Info.FFDiag(Loc);
2273 }
2274 return false;
2275 }
2276
2277 if (Base.is<DynamicAllocLValue>()) {
2278 Info.FFDiag(Loc, DiagId: diag::note_constexpr_dynamic_alloc)
2279 << IsReferenceType << !Designator.Entries.empty();
2280 NoteLValueLocation(Info, Base);
2281 return false;
2282 }
2283
2284 if (BaseVD) {
2285 if (const VarDecl *Var = dyn_cast<const VarDecl>(Val: BaseVD)) {
2286 // Check if this is a thread-local variable.
2287 if (Var->getTLSKind())
2288 // FIXME: Diagnostic!
2289 return false;
2290
2291 // A dllimport variable never acts like a constant, unless we're
2292 // evaluating a value for use only in name mangling, and unless it's a
2293 // static local. For the latter case, we'd still need to evaluate the
2294 // constant expression in case we're inside a (inlined) function.
2295 if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>() &&
2296 !Var->isStaticLocal())
2297 return false;
2298
2299 // In CUDA/HIP device compilation, only device side variables have
2300 // constant addresses.
2301 if (Info.getLangOpts().CUDA && Info.getLangOpts().CUDAIsDevice &&
2302 Info.Ctx.CUDAConstantEvalCtx.NoWrongSidedVars) {
2303 if ((!Var->hasAttr<CUDADeviceAttr>() &&
2304 !Var->hasAttr<CUDAConstantAttr>() &&
2305 !Var->getType()->isCUDADeviceBuiltinSurfaceType() &&
2306 !Var->getType()->isCUDADeviceBuiltinTextureType()) ||
2307 Var->hasAttr<HIPManagedAttr>())
2308 return false;
2309 }
2310 }
2311 if (const auto *FD = dyn_cast<const FunctionDecl>(Val: BaseVD)) {
2312 // __declspec(dllimport) must be handled very carefully:
2313 // We must never initialize an expression with the thunk in C++.
2314 // Doing otherwise would allow the same id-expression to yield
2315 // different addresses for the same function in different translation
2316 // units. However, this means that we must dynamically initialize the
2317 // expression with the contents of the import address table at runtime.
2318 //
2319 // The C language has no notion of ODR; furthermore, it has no notion of
2320 // dynamic initialization. This means that we are permitted to
2321 // perform initialization with the address of the thunk.
2322 if (Info.getLangOpts().CPlusPlus && !isForManglingOnly(Kind) &&
2323 FD->hasAttr<DLLImportAttr>())
2324 // FIXME: Diagnostic!
2325 return false;
2326 }
2327 } else if (const auto *MTE =
2328 dyn_cast_or_null<MaterializeTemporaryExpr>(Val: BaseE)) {
2329 if (CheckedTemps.insert(Ptr: MTE).second) {
2330 QualType TempType = getType(B: Base);
2331 if (TempType.isDestructedType()) {
2332 Info.FFDiag(Loc: MTE->getExprLoc(),
2333 DiagId: diag::note_constexpr_unsupported_temporary_nontrivial_dtor)
2334 << TempType;
2335 return false;
2336 }
2337
2338 APValue *V = MTE->getOrCreateValue(MayCreate: false);
2339 assert(V && "evasluation result refers to uninitialised temporary");
2340 if (!CheckEvaluationResult(CERK: CheckEvaluationResultKind::ConstantExpression,
2341 Info, DiagLoc: MTE->getExprLoc(), Type: TempType, Value: *V, Kind,
2342 /*SubobjectDecl=*/nullptr, CheckedTemps))
2343 return false;
2344 }
2345 }
2346
2347 // Allow address constant expressions to be past-the-end pointers. This is
2348 // an extension: the standard requires them to point to an object.
2349 if (!IsReferenceType)
2350 return true;
2351
2352 // A reference constant expression must refer to an object.
2353 if (!Base) {
2354 // FIXME: diagnostic
2355 Info.CCEDiag(Loc);
2356 return true;
2357 }
2358
2359 // Does this refer one past the end of some object?
2360 if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
2361 Info.FFDiag(Loc, DiagId: diag::note_constexpr_past_end, ExtraNotes: 1)
2362 << !Designator.Entries.empty() << !!BaseVD << BaseVD;
2363 NoteLValueLocation(Info, Base);
2364 }
2365
2366 return true;
2367}
2368
2369/// Member pointers are constant expressions unless they point to a
2370/// non-virtual dllimport member function.
2371static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
2372 SourceLocation Loc,
2373 QualType Type,
2374 const APValue &Value,
2375 ConstantExprKind Kind) {
2376 const ValueDecl *Member = Value.getMemberPointerDecl();
2377 const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Val: Member);
2378 if (!FD)
2379 return true;
2380 if (FD->isImmediateFunction()) {
2381 Info.FFDiag(Loc, DiagId: diag::note_consteval_address_accessible) << /*pointer*/ 0;
2382 Info.Note(Loc: FD->getLocation(), DiagId: diag::note_declared_at);
2383 return false;
2384 }
2385 return isForManglingOnly(Kind) || FD->isVirtual() ||
2386 !FD->hasAttr<DLLImportAttr>();
2387}
2388
/// Check that this core constant expression is of literal type, and if not,
/// produce an appropriate diagnostic.
///
/// \param This when non-null, the object whose (possibly non-literal)
///        constructor is being invoked; exempted from the check per C++1y.
static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
                             const LValue *This = nullptr) {
  // The restriction to literal types does not exist in C++23 anymore.
  if (Info.getLangOpts().CPlusPlus23)
    return true;

  // Only prvalues of non-literal type are restricted.
  if (!E->isPRValue() || E->getType()->isLiteralType(Ctx: Info.Ctx))
    return true;

  // C++1y: A constant initializer for an object o [...] may also invoke
  // constexpr constructors for o and its subobjects even if those objects
  // are of non-literal class types.
  //
  // C++11 missed this detail for aggregates, so classes like this:
  //   struct foo_t { union { int i; volatile int j; } u; };
  // are not (obviously) initializable like so:
  //   __attribute__((__require_constant_initialization__))
  //   static const foo_t x = {{0}};
  // because "i" is a subobject with non-literal initialization (due to the
  // volatile member of the union). See:
  //   http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677
  // Therefore, we use the C++1y behavior.
  if (This && Info.EvaluatingDecl == This->getLValueBase())
    return true;

  // Prvalue constant expressions must be of literal types.
  if (Info.getLangOpts().CPlusPlus11)
    Info.FFDiag(E, DiagId: diag::note_constexpr_nonliteral)
      << E->getType();
  else
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
  return false;
}
2424
/// Recursively check an evaluated value and all of its subobjects: every
/// subobject must be initialized and, when CERK is ConstantExpression,
/// lvalues and member pointers in the value must themselves be valid
/// constant-expression values. Diagnoses and returns false on failure.
///
/// \param SubobjectDecl the field currently being checked, for diagnostics;
///        null at the top level and for base-class subobjects.
static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
                                  EvalInfo &Info, SourceLocation DiagLoc,
                                  QualType Type, const APValue &Value,
                                  ConstantExprKind Kind,
                                  const FieldDecl *SubobjectDecl,
                                  CheckedTemporaries &CheckedTemps) {
  // An absent value means the (sub)object was never initialized.
  if (!Value.hasValue()) {
    if (SubobjectDecl) {
      Info.FFDiag(Loc: DiagLoc, DiagId: diag::note_constexpr_uninitialized)
          << /*(name)*/ 1 << SubobjectDecl;
      Info.Note(Loc: SubobjectDecl->getLocation(),
                DiagId: diag::note_constexpr_subobject_declared_here);
    } else {
      Info.FFDiag(Loc: DiagLoc, DiagId: diag::note_constexpr_uninitialized)
          << /*of type*/ 0 << Type;
    }
    return false;
  }

  // We allow _Atomic(T) to be initialized from anything that T can be
  // initialized from.
  if (const AtomicType *AT = Type->getAs<AtomicType>())
    Type = AT->getValueType();

  // Core issue 1454: For a literal constant expression of array or class type,
  // each subobject of its value shall have been initialized by a constant
  // expression.
  if (Value.isArray()) {
    QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
    // Check each explicitly-initialized element, then the filler (which
    // covers all remaining elements) if there is one.
    for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
      if (!CheckEvaluationResult(CERK, Info, DiagLoc, Type: EltTy,
                                 Value: Value.getArrayInitializedElt(I), Kind,
                                 SubobjectDecl, CheckedTemps))
        return false;
    }
    if (!Value.hasArrayFiller())
      return true;
    return CheckEvaluationResult(CERK, Info, DiagLoc, Type: EltTy,
                                 Value: Value.getArrayFiller(), Kind, SubobjectDecl,
                                 CheckedTemps);
  }
  // For a union, only the active member (if any) is checked.
  if (Value.isUnion() && Value.getUnionField()) {
    return CheckEvaluationResult(
        CERK, Info, DiagLoc, Type: Value.getUnionField()->getType(),
        Value: Value.getUnionValue(), Kind, SubobjectDecl: Value.getUnionField(), CheckedTemps);
  }
  if (Value.isStruct()) {
    auto *RD = Type->castAsRecordDecl();
    // Check base-class subobjects first (C++ only), then the fields.
    if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      unsigned BaseIndex = 0;
      for (const CXXBaseSpecifier &BS : CD->bases()) {
        const APValue &BaseValue = Value.getStructBase(i: BaseIndex);
        if (!BaseValue.hasValue()) {
          SourceLocation TypeBeginLoc = BS.getBaseTypeLoc();
          Info.FFDiag(Loc: TypeBeginLoc, DiagId: diag::note_constexpr_uninitialized_base)
              << BS.getType() << SourceRange(TypeBeginLoc, BS.getEndLoc());
          return false;
        }
        if (!CheckEvaluationResult(CERK, Info, DiagLoc, Type: BS.getType(), Value: BaseValue,
                                   Kind, /*SubobjectDecl=*/nullptr,
                                   CheckedTemps))
          return false;
        ++BaseIndex;
      }
    }
    for (const auto *I : RD->fields()) {
      // Unnamed bit-fields are not subobjects and need no initialization.
      if (I->isUnnamedBitField())
        continue;

      if (!CheckEvaluationResult(CERK, Info, DiagLoc, Type: I->getType(),
                                 Value: Value.getStructField(i: I->getFieldIndex()), Kind,
                                 SubobjectDecl: I, CheckedTemps))
        return false;
    }
  }

  // Lvalues and member pointers get the extra constant-expression checks
  // only in ConstantExpression mode.
  if (Value.isLValue() &&
      CERK == CheckEvaluationResultKind::ConstantExpression) {
    LValue LVal;
    LVal.setFrom(Ctx: Info.Ctx, V: Value);
    return CheckLValueConstantExpression(Info, Loc: DiagLoc, Type, LVal, Kind,
                                         CheckedTemps);
  }

  if (Value.isMemberPointer() &&
      CERK == CheckEvaluationResultKind::ConstantExpression)
    return CheckMemberPointerConstantExpression(Info, Loc: DiagLoc, Type, Value, Kind);

  // Everything else is fine.
  return true;
}
2516
2517/// Check that this core constant expression value is a valid value for a
2518/// constant expression. If not, report an appropriate diagnostic. Does not
2519/// check that the expression is of literal type.
2520static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
2521 QualType Type, const APValue &Value,
2522 ConstantExprKind Kind) {
2523 // Nothing to check for a constant expression of type 'cv void'.
2524 if (Type->isVoidType())
2525 return true;
2526
2527 CheckedTemporaries CheckedTemps;
2528 return CheckEvaluationResult(CERK: CheckEvaluationResultKind::ConstantExpression,
2529 Info, DiagLoc, Type, Value, Kind,
2530 /*SubobjectDecl=*/nullptr, CheckedTemps);
2531}
2532
2533/// Check that this evaluated value is fully-initialized and can be loaded by
2534/// an lvalue-to-rvalue conversion.
2535static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
2536 QualType Type, const APValue &Value) {
2537 CheckedTemporaries CheckedTemps;
2538 return CheckEvaluationResult(
2539 CERK: CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
2540 Kind: ConstantExprKind::Normal, /*SubobjectDecl=*/nullptr, CheckedTemps);
2541}
2542
2543/// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
2544/// "the allocated storage is deallocated within the evaluation".
2545static bool CheckMemoryLeaks(EvalInfo &Info) {
2546 if (!Info.HeapAllocs.empty()) {
2547 // We can still fold to a constant despite a compile-time memory leak,
2548 // so long as the heap allocation isn't referenced in the result (we check
2549 // that in CheckConstantExpression).
2550 Info.CCEDiag(E: Info.HeapAllocs.begin()->second.AllocExpr,
2551 DiagId: diag::note_constexpr_memory_leak)
2552 << unsigned(Info.HeapAllocs.size() - 1);
2553 }
2554 return true;
2555}
2556
static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
  // A null base expression indicates a null pointer.  These are always
  // evaluatable, and they are false unless the offset is nonzero.
  if (!Value.getLValueBase()) {
    // TODO: Should a non-null pointer with an offset of zero evaluate to true?
    Result = !Value.getLValueOffset().isZero();
    return true;
  }

  // We have a non-null base. These are generally known to be true, but if it's
  // a weak declaration it can be null at runtime.
  Result = true;
  const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
  // A weak declaration's address may turn out to be null at load time, so its
  // truth value cannot be determined at compile time.
  return !Decl || !Decl->isWeak();
}
2572
/// Convert the evaluated value \p Val to a boolean, storing the outcome in
/// \p Result. Returns false, without producing a diagnostic, if the value has
/// no compile-time boolean interpretation.
static bool HandleConversionToBool(const APValue &Val, bool &Result) {
  // TODO: This function should produce notes if it fails.
  switch (Val.getKind()) {
  case APValue::None:
  case APValue::Indeterminate:
    // Absent or indeterminate values cannot be converted.
    return false;
  case APValue::Int:
    Result = Val.getInt().getBoolValue();
    return true;
  case APValue::FixedPoint:
    Result = Val.getFixedPoint().getBoolValue();
    return true;
  case APValue::Float:
    Result = !Val.getFloat().isZero();
    return true;
  case APValue::ComplexInt:
    // A complex value is true if either component is nonzero.
    Result = Val.getComplexIntReal().getBoolValue() ||
             Val.getComplexIntImag().getBoolValue();
    return true;
  case APValue::ComplexFloat:
    Result = !Val.getComplexFloatReal().isZero() ||
             !Val.getComplexFloatImag().isZero();
    return true;
  case APValue::LValue:
    // Pointer truth depends on whether the pointer is null; see
    // EvalPointerValueAsBool for the weak-declaration caveat.
    return EvalPointerValueAsBool(Value: Val, Result);
  case APValue::MemberPointer:
    // A member pointer to a weak member may be null at runtime, so its truth
    // value is not a constant.
    if (Val.getMemberPointerDecl() && Val.getMemberPointerDecl()->isWeak()) {
      return false;
    }
    // Otherwise a member pointer is true iff it is non-null (has a decl).
    Result = Val.getMemberPointerDecl();
    return true;
  case APValue::Vector:
  case APValue::Array:
  case APValue::Struct:
  case APValue::Union:
  case APValue::AddrLabelDiff:
    // Aggregate and address-label-difference values have no boolean
    // conversion.
    return false;
  }

  llvm_unreachable("unknown APValue kind");
}
2614
2615static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
2616 EvalInfo &Info) {
2617 assert(!E->isValueDependent());
2618 assert(E->isPRValue() && "missing lvalue-to-rvalue conv in bool condition");
2619 APValue Val;
2620 if (!Evaluate(Result&: Val, Info, E))
2621 return false;
2622 return HandleConversionToBool(Val, Result);
2623}
2624
2625template<typename T>
2626static bool HandleOverflow(EvalInfo &Info, const Expr *E,
2627 const T &SrcValue, QualType DestType) {
2628 Info.CCEDiag(E, DiagId: diag::note_constexpr_overflow)
2629 << SrcValue << DestType;
2630 return Info.noteUndefinedBehavior();
2631}
2632
/// Cast the floating-point value \p Value of type \p SrcType to the integer
/// type \p DestType, storing the result in \p Result. The conversion
/// truncates toward zero; a value outside the destination's range is treated
/// as overflow.
static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
                                 QualType SrcType, const APFloat &Value,
                                 QualType DestType, APSInt &Result) {
  unsigned DestWidth = Info.Ctx.getIntWidth(T: DestType);
  // Determine whether we are converting to unsigned or signed.
  bool DestSigned = DestType->isSignedIntegerOrEnumerationType();

  Result = APSInt(DestWidth, !DestSigned);
  bool ignored;
  // convertToInteger reports opInvalidOp when the truncated value cannot be
  // represented in the destination type; diagnose that as overflow.
  if (Value.convertToInteger(Result, RM: llvm::APFloat::rmTowardZero, IsExact: &ignored)
      & APFloat::opInvalidOp)
    return HandleOverflow(Info, E, SrcValue: Value, DestType);
  return true;
}
2647
2648/// Get rounding mode to use in evaluation of the specified expression.
2649///
2650/// If rounding mode is unknown at compile time, still try to evaluate the
2651/// expression. If the result is exact, it does not depend on rounding mode.
2652/// So return "tonearest" mode instead of "dynamic".
2653static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) {
2654 llvm::RoundingMode RM =
2655 E->getFPFeaturesInEffect(LO: Info.getLangOpts()).getRoundingMode();
2656 if (RM == llvm::RoundingMode::Dynamic)
2657 RM = llvm::RoundingMode::NearestTiesToEven;
2658 return RM;
2659}
2660
/// Check if the given evaluation result is allowed for constant evaluation.
///
/// \param St the APFloat status flags produced by the operation just
///        performed on \p E.
/// \returns false (with a diagnostic) when the result depends on a dynamic
///          rounding mode or would raise an FP exception that the current FP
///          environment does not ignore.
static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
                                     APFloat::opStatus St) {
  // In a constant context, assume that any dynamic rounding mode or FP
  // exception state matches the default floating-point environment.
  if (Info.InConstantContext)
    return true;

  FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.getLangOpts());
  if ((St & APFloat::opInexact) &&
      FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
    // Inexact result means that it depends on rounding mode. If the requested
    // mode is dynamic, the evaluation cannot be made in compile time.
    Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_rounding);
    return false;
  }

  // Any non-clean status under strict FP semantics (dynamic rounding, traps
  // not ignored, or FENV access) makes the result environment-dependent.
  if ((St != APFloat::opOK) &&
      (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
       FPO.getExceptionMode() != LangOptions::FPE_Ignore ||
       FPO.getAllowFEnvAccess())) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict);
    return false;
  }

  if ((St & APFloat::opStatus::opInvalidOp) &&
      FPO.getExceptionMode() != LangOptions::FPE_Ignore) {
    // There is no usefully definable result.
    Info.FFDiag(E);
    return false;
  }

  // FIXME: if:
  // - evaluation triggered other FP exception, and
  // - exception mode is not "ignore", and
  // - the expression being evaluated is not a part of global variable
  //   initializer,
  // the evaluation probably need to be rejected.
  return true;
}
2701
2702static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
2703 QualType SrcType, QualType DestType,
2704 APFloat &Result) {
2705 assert((isa<CastExpr>(E) || isa<CompoundAssignOperator>(E) ||
2706 isa<ConvertVectorExpr>(E)) &&
2707 "HandleFloatToFloatCast has been checked with only CastExpr, "
2708 "CompoundAssignOperator and ConvertVectorExpr. Please either validate "
2709 "the new expression or address the root cause of this usage.");
2710 llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
2711 APFloat::opStatus St;
2712 APFloat Value = Result;
2713 bool ignored;
2714 St = Result.convert(ToSemantics: Info.Ctx.getFloatTypeSemantics(T: DestType), RM, losesInfo: &ignored);
2715 return checkFloatingPointResult(Info, E, St);
2716}
2717
/// Cast the integer value \p Value of type \p SrcType to the integer type
/// \p DestType, extending or truncating to the destination width. A cast to
/// bool yields the value's truth value rather than its low bit.
static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
                                 QualType DestType, QualType SrcType,
                                 const APSInt &Value) {
  unsigned DestWidth = Info.Ctx.getIntWidth(T: DestType);
  // Figure out if this is a truncate, extend or noop cast.
  // If the input is signed, do a sign extend, noop, or truncate.
  APSInt Result = Value.extOrTrunc(width: DestWidth);
  Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
  // Boolean conversion tests the whole value, not just the low bit; Result
  // already has the destination's width and signedness at this point.
  if (DestType->isBooleanType())
    Result = Value.getBoolValue();
  return Result;
}
2730
/// Cast the integer value \p Value of type \p SrcType to the floating-point
/// type \p DestType, storing the result in \p Result using the rounding mode
/// in effect for \p E. Returns false if the resulting status flags are not
/// acceptable in the current FP environment.
static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
                                 const FPOptions FPO,
                                 QualType SrcType, const APSInt &Value,
                                 QualType DestType, APFloat &Result) {
  Result = APFloat(Info.Ctx.getFloatTypeSemantics(T: DestType), 1);
  llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
  APFloat::opStatus St = Result.convertFromAPInt(Input: Value, IsSigned: Value.isSigned(), RM);
  return checkFloatingPointResult(Info, E, St);
}
2740
/// Truncate \p Value so it models a store to (and reload from) bit-field
/// \p FD: truncate to the field's bit width, then extend back to the original
/// width. Values wider than the field therefore wrap as they would in the
/// stored representation.
static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
                                  APValue &Value, const FieldDecl *FD) {
  assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");

  if (!Value.isInt()) {
    // Trying to store a pointer-cast-to-integer into a bitfield.
    // FIXME: In this case, we should provide the diagnostic for casting
    // a pointer to an integer.
    assert(Value.isLValue() && "integral value neither int nor lvalue?");
    Info.FFDiag(E);
    return false;
  }

  APSInt &Int = Value.getInt();
  unsigned OldBitWidth = Int.getBitWidth();
  unsigned NewBitWidth = FD->getBitWidthValue();
  // Only narrow when the field is smaller; trunc+extend re-sign-extends (or
  // zero-extends) from the field's width.
  if (NewBitWidth < OldBitWidth)
    Int = Int.trunc(width: NewBitWidth).extend(width: OldBitWidth);
  return true;
}
2761
/// Perform the given integer operation, which is known to need at most BitWidth
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
template<typename Operation>
static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
                                 const APSInt &LHS, const APSInt &RHS,
                                 unsigned BitWidth, Operation Op,
                                 APSInt &Result) {
  // Unsigned arithmetic wraps; there is no overflow to detect.
  if (LHS.isUnsigned()) {
    Result = Op(LHS, RHS);
    return true;
  }

  // Compute in BitWidth bits (wide enough to be exact per the contract),
  // then check whether the value survives truncation back to the original
  // width.
  APSInt Value(Op(LHS.extend(width: BitWidth), RHS.extend(width: BitWidth)), false);
  Result = Value.trunc(width: LHS.getBitWidth());
  if (Result.extend(width: BitWidth) != Value && !E->getType().isWrapType()) {
    // When only checking for undefined behavior, emit a warning and continue
    // via HandleOverflow rather than failing outright.
    if (Info.checkingForUndefinedBehavior())
      Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                       DiagID: diag::warn_integer_constant_overflow)
          << toString(I: Result, Radix: 10, Signed: Result.isSigned(), /*formatAsCLiteral=*/false,
                      /*UpperCase=*/true, /*InsertSeparators=*/true)
          << E->getType() << E->getSourceRange();
    return HandleOverflow(Info, E, SrcValue: Value, DestType: E->getType());
  }
  return true;
}
2788
/// Perform the given binary integer operation.
///
/// \param RHS taken by value because the shift cases may clamp or negate it.
/// \param Result receives the value on success.
/// Note: the shift cases jump between each other via 'goto' to implement the
/// constant-folding treatment of negative shift amounts as opposite shifts.
static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
                              const APSInt &LHS, BinaryOperatorKind Opcode,
                              APSInt RHS, APSInt &Result) {
  bool HandleOverflowResult = true;
  switch (Opcode) {
  default:
    Info.FFDiag(E);
    return false;
  case BO_Mul:
    // A product needs at most the sum of the operand widths.
    return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() * 2,
                                Op: std::multiplies<APSInt>(), Result);
  case BO_Add:
    // Addition and subtraction need at most one extra bit.
    return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() + 1,
                                Op: std::plus<APSInt>(), Result);
  case BO_Sub:
    return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() + 1,
                                Op: std::minus<APSInt>(), Result);
  case BO_And: Result = LHS & RHS; return true;
  case BO_Xor: Result = LHS ^ RHS; return true;
  case BO_Or: Result = LHS | RHS; return true;
  case BO_Div:
  case BO_Rem:
    if (RHS == 0) {
      Info.FFDiag(E, DiagId: diag::note_expr_divide_by_zero)
          << E->getRHS()->getSourceRange();
      return false;
    }
    // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports
    // this operation and gives the two's complement result.
    if (RHS.isNegative() && RHS.isAllOnes() && LHS.isSigned() &&
        LHS.isMinSignedValue())
      HandleOverflowResult = HandleOverflow(
          Info, E, SrcValue: -LHS.extend(width: LHS.getBitWidth() + 1), DestType: E->getType());
    Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
    return HandleOverflowResult;
  case BO_Shl: {
    if (Info.getLangOpts().OpenCL)
      // OpenCL 6.3j: shift values are effectively % word size of LHS.
      RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
                                static_cast<uint64_t>(LHS.getBitWidth() - 1)),
                    RHS.isUnsigned());
    else if (RHS.isSigned() && RHS.isNegative()) {
      // During constant-folding, a negative shift is an opposite shift. Such
      // a shift is not a constant expression.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHS;
      if (!Info.noteUndefinedBehavior())
        return false;
      RHS = -RHS;
      goto shift_right;
    }
  shift_left:
    // C++11 [expr.shift]p1: Shift width must be less than the bit width of
    // the shifted type.
    unsigned SA = (unsigned) RHS.getLimitedValue(Limit: LHS.getBitWidth()-1);
    if (SA != RHS) {
      // The shift amount was clamped, i.e. it was >= the bit width.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift)
          << RHS << E->getType() << LHS.getBitWidth();
      if (!Info.noteUndefinedBehavior())
        return false;
    } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) {
      // C++11 [expr.shift]p2: A signed left shift must have a non-negative
      // operand, and must not overflow the corresponding unsigned type.
      // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
      // E1 x 2^E2 module 2^N.
      if (LHS.isNegative()) {
        Info.CCEDiag(E, DiagId: diag::note_constexpr_lshift_of_negative) << LHS;
        if (!Info.noteUndefinedBehavior())
          return false;
      } else if (LHS.countl_zero() < SA) {
        // Shifting out set bits discards information.
        Info.CCEDiag(E, DiagId: diag::note_constexpr_lshift_discards);
        if (!Info.noteUndefinedBehavior())
          return false;
      }
    }
    Result = LHS << SA;
    return true;
  }
  case BO_Shr: {
    if (Info.getLangOpts().OpenCL)
      // OpenCL 6.3j: shift values are effectively % word size of LHS.
      RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
                                static_cast<uint64_t>(LHS.getBitWidth() - 1)),
                    RHS.isUnsigned());
    else if (RHS.isSigned() && RHS.isNegative()) {
      // During constant-folding, a negative shift is an opposite shift. Such a
      // shift is not a constant expression.
      Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHS;
      if (!Info.noteUndefinedBehavior())
        return false;
      RHS = -RHS;
      goto shift_left;
    }
  shift_right:
    // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
    // shifted type.
    unsigned SA = (unsigned) RHS.getLimitedValue(Limit: LHS.getBitWidth()-1);
    if (SA != RHS) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift)
          << RHS << E->getType() << LHS.getBitWidth();
      if (!Info.noteUndefinedBehavior())
        return false;
    }

    Result = LHS >> SA;
    return true;
  }

  case BO_LT: Result = LHS < RHS; return true;
  case BO_GT: Result = LHS > RHS; return true;
  case BO_LE: Result = LHS <= RHS; return true;
  case BO_GE: Result = LHS >= RHS; return true;
  case BO_EQ: Result = LHS == RHS; return true;
  case BO_NE: Result = LHS != RHS; return true;
  case BO_Cmp:
    llvm_unreachable("BO_Cmp should be handled elsewhere");
  }
}
2907
/// Perform the given binary floating-point operation, in-place, on LHS.
///
/// Uses the rounding mode in effect for \p E. Division by zero and NaN
/// results are noted as (potential) undefined behavior per [expr.pre]p4, and
/// the final status flags are validated against the FP environment.
static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E,
                                  APFloat &LHS, BinaryOperatorKind Opcode,
                                  const APFloat &RHS) {
  llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
  APFloat::opStatus St;
  switch (Opcode) {
  default:
    Info.FFDiag(E);
    return false;
  case BO_Mul:
    St = LHS.multiply(RHS, RM);
    break;
  case BO_Add:
    St = LHS.add(RHS, RM);
    break;
  case BO_Sub:
    St = LHS.subtract(RHS, RM);
    break;
  case BO_Div:
    // [expr.mul]p4:
    //   If the second operand of / or % is zero the behavior is undefined.
    if (RHS.isZero())
      Info.CCEDiag(E, DiagId: diag::note_expr_divide_by_zero);
    St = LHS.divide(RHS, RM);
    break;
  }

  // [expr.pre]p4:
  //   If during the evaluation of an expression, the result is not
  //   mathematically defined [...], the behavior is undefined.
  // FIXME: C++ rules require us to not conform to IEEE 754 here.
  if (LHS.isNaN()) {
    Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << LHS.isNaN();
    return Info.noteUndefinedBehavior();
  }

  return checkFloatingPointResult(Info, E, St);
}
2947
2948static bool handleLogicalOpForVector(const APInt &LHSValue,
2949 BinaryOperatorKind Opcode,
2950 const APInt &RHSValue, APInt &Result) {
2951 bool LHS = (LHSValue != 0);
2952 bool RHS = (RHSValue != 0);
2953
2954 if (Opcode == BO_LAnd)
2955 Result = LHS && RHS;
2956 else
2957 Result = LHS || RHS;
2958 return true;
2959}
2960static bool handleLogicalOpForVector(const APFloat &LHSValue,
2961 BinaryOperatorKind Opcode,
2962 const APFloat &RHSValue, APInt &Result) {
2963 bool LHS = !LHSValue.isZero();
2964 bool RHS = !RHSValue.isZero();
2965
2966 if (Opcode == BO_LAnd)
2967 Result = LHS && RHS;
2968 else
2969 Result = LHS || RHS;
2970 return true;
2971}
2972
2973static bool handleLogicalOpForVector(const APValue &LHSValue,
2974 BinaryOperatorKind Opcode,
2975 const APValue &RHSValue, APInt &Result) {
2976 // The result is always an int type, however operands match the first.
2977 if (LHSValue.getKind() == APValue::Int)
2978 return handleLogicalOpForVector(LHSValue: LHSValue.getInt(), Opcode,
2979 RHSValue: RHSValue.getInt(), Result);
2980 assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
2981 return handleLogicalOpForVector(LHSValue: LHSValue.getFloat(), Opcode,
2982 RHSValue: RHSValue.getFloat(), Result);
2983}
2984
/// Evaluate a single element-wise vector comparison, storing the result mask
/// in \p Result: all-ones (-1) for true, zero for false, matching the
/// hardware convention for vector comparisons.
template <typename APTy>
static bool
handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode,
                               const APTy &RHSValue, APInt &Result) {
  switch (Opcode) {
  default:
    llvm_unreachable("unsupported binary operator");
  case BO_EQ:
    Result = (LHSValue == RHSValue);
    break;
  case BO_NE:
    Result = (LHSValue != RHSValue);
    break;
  case BO_LT:
    Result = (LHSValue < RHSValue);
    break;
  case BO_GT:
    Result = (LHSValue > RHSValue);
    break;
  case BO_LE:
    Result = (LHSValue <= RHSValue);
    break;
  case BO_GE:
    Result = (LHSValue >= RHSValue);
    break;
  }

  // The boolean operations on these vector types use an instruction that
  // results in a mask of '-1' for the 'truth' value. Ensure that we negate 1
  // to -1 to make sure that we produce the correct value.
  Result.negate();

  return true;
}
3019
3020static bool handleCompareOpForVector(const APValue &LHSValue,
3021 BinaryOperatorKind Opcode,
3022 const APValue &RHSValue, APInt &Result) {
3023 // The result is always an int type, however operands match the first.
3024 if (LHSValue.getKind() == APValue::Int)
3025 return handleCompareOpForVectorHelper(LHSValue: LHSValue.getInt(), Opcode,
3026 RHSValue: RHSValue.getInt(), Result);
3027 assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
3028 return handleCompareOpForVectorHelper(LHSValue: LHSValue.getFloat(), Opcode,
3029 RHSValue: RHSValue.getFloat(), Result);
3030}
3031
// Perform binary operations for vector types, in place on the LHS.
// Each element pair is evaluated with the scalar helper that matches the
// element type and the operator class (logical / comparison / arithmetic).
static bool handleVectorVectorBinOp(EvalInfo &Info, const BinaryOperator *E,
                                    BinaryOperatorKind Opcode,
                                    APValue &LHSValue,
                                    const APValue &RHSValue) {
  assert(Opcode != BO_PtrMemD && Opcode != BO_PtrMemI &&
         "Operation not supported on vector types");

  const auto *VT = E->getType()->castAs<VectorType>();
  unsigned NumElements = VT->getNumElements();
  QualType EltTy = VT->getElementType();

  // In the cases (typically C as I've observed) where we aren't evaluating
  // constexpr but are checking for cases where the LHS isn't yet evaluatable,
  // just give up.
  if (!LHSValue.isVector()) {
    assert(LHSValue.isLValue() &&
           "A vector result that isn't a vector OR uncalculated LValue");
    Info.FFDiag(E);
    return false;
  }

  assert(LHSValue.getVectorLength() == NumElements &&
         RHSValue.getVectorLength() == NumElements && "Different vector sizes");

  SmallVector<APValue, 4> ResultElements;

  for (unsigned EltNum = 0; EltNum < NumElements; ++EltNum) {
    APValue LHSElt = LHSValue.getVectorElt(I: EltNum);
    APValue RHSElt = RHSValue.getVectorElt(I: EltNum);

    if (EltTy->isIntegerType()) {
      // The result element carries the element type's width and signedness.
      APSInt EltResult{Info.Ctx.getIntWidth(T: EltTy),
                       EltTy->isUnsignedIntegerType()};
      bool Success = true;

      if (BinaryOperator::isLogicalOp(Opc: Opcode))
        Success = handleLogicalOpForVector(LHSValue: LHSElt, Opcode, RHSValue: RHSElt, Result&: EltResult);
      else if (BinaryOperator::isComparisonOp(Opc: Opcode))
        Success = handleCompareOpForVector(LHSValue: LHSElt, Opcode, RHSValue: RHSElt, Result&: EltResult);
      else
        Success = handleIntIntBinOp(Info, E, LHS: LHSElt.getInt(), Opcode,
                                    RHS: RHSElt.getInt(), Result&: EltResult);

      if (!Success) {
        Info.FFDiag(E);
        return false;
      }
      ResultElements.emplace_back(Args&: EltResult);

    } else if (EltTy->isFloatingType()) {
      assert(LHSElt.getKind() == APValue::Float &&
             RHSElt.getKind() == APValue::Float &&
             "Mismatched LHS/RHS/Result Type");
      // handleFloatFloatBinOp operates in place on a copy of the LHS element.
      APFloat LHSFloat = LHSElt.getFloat();

      if (!handleFloatFloatBinOp(Info, E, LHS&: LHSFloat, Opcode,
                                 RHS: RHSElt.getFloat())) {
        Info.FFDiag(E);
        return false;
      }

      ResultElements.emplace_back(Args&: LHSFloat);
    }
  }

  // Replace the LHS with the element-wise results.
  LHSValue = APValue(ResultElements.data(), ResultElements.size());
  return true;
}
3101
/// Cast an lvalue referring to a base subobject to a derived class, by
/// truncating the lvalue's path to the given length.
///
/// \param TruncatedType the record type the truncated path designates.
/// \param TruncatedElements the number of designator entries to keep; must be
///        at least the most-derived-object path length.
static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
                               const RecordDecl *TruncatedType,
                               unsigned TruncatedElements) {
  SubobjectDesignator &D = Result.Designator;

  // Check we actually point to a derived class object.
  if (TruncatedElements == D.Entries.size())
    return true;
  assert(TruncatedElements >= D.MostDerivedPathLength &&
         "not casting to a derived class");
  if (!Result.checkSubobject(Info, E, CSK: CSK_Derived))
    return false;

  // Truncate the path to the subobject, and remove any derived-to-base offsets.
  const RecordDecl *RD = TruncatedType;
  for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
    if (RD->isInvalidDecl()) return false;
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);
    const CXXRecordDecl *Base = getAsBaseClass(E: D.Entries[I]);
    // Undo the offset this path entry contributed, which depends on whether
    // the step was through a virtual or a non-virtual base.
    if (isVirtualBaseClass(E: D.Entries[I]))
      Result.Offset -= Layout.getVBaseClassOffset(VBase: Base);
    else
      Result.Offset -= Layout.getBaseClassOffset(Base);
    RD = Base;
  }
  D.Entries.resize(N: TruncatedElements);
  return true;
}
3132
3133static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
3134 const CXXRecordDecl *Derived,
3135 const CXXRecordDecl *Base,
3136 const ASTRecordLayout *RL = nullptr) {
3137 if (!RL) {
3138 if (Derived->isInvalidDecl()) return false;
3139 RL = &Info.Ctx.getASTRecordLayout(D: Derived);
3140 }
3141
3142 Obj.addDecl(Info, E, D: Base, /*Virtual*/ false);
3143 Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
3144 return true;
3145}
3146
/// Update \p Obj to designate the base-class subobject named by \p Base. A
/// non-virtual base is a simple path extension; a virtual base requires first
/// casting down to the most-derived object, because a virtual base's offset
/// is a property of the most-derived type.
static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
                             const CXXRecordDecl *DerivedDecl,
                             const CXXBaseSpecifier *Base) {
  const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();

  if (!Base->isVirtual())
    return HandleLValueDirectBase(Info, E, Obj, Derived: DerivedDecl, Base: BaseDecl);

  SubobjectDesignator &D = Obj.Designator;
  if (D.Invalid)
    return false;

  // Extract most-derived object and corresponding type.
  // FIXME: After implementing P2280R4 it became possible to get references
  // here. We do MostDerivedType->getAsCXXRecordDecl() in several other
  // locations and if we see crashes in those locations in the future
  // it may make more sense to move this fix into Lvalue::set.
  DerivedDecl = D.MostDerivedType.getNonReferenceType()->getAsCXXRecordDecl();
  if (!CastToDerivedClass(Info, E, Result&: Obj, TruncatedType: DerivedDecl, TruncatedElements: D.MostDerivedPathLength))
    return false;

  // Find the virtual base class.
  if (DerivedDecl->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: DerivedDecl);
  Obj.addDecl(Info, E, D: BaseDecl, /*Virtual*/ true);
  Obj.getLValueOffset() += Layout.getVBaseClassOffset(VBase: BaseDecl);
  return true;
}
3175
3176static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
3177 QualType Type, LValue &Result) {
3178 for (CastExpr::path_const_iterator PathI = E->path_begin(),
3179 PathE = E->path_end();
3180 PathI != PathE; ++PathI) {
3181 if (!HandleLValueBase(Info, E, Obj&: Result, DerivedDecl: Type->getAsCXXRecordDecl(),
3182 Base: *PathI))
3183 return false;
3184 Type = (*PathI)->getType();
3185 }
3186 return true;
3187}
3188
3189/// Cast an lvalue referring to a derived class to a known base subobject.
3190static bool CastToBaseClass(EvalInfo &Info, const Expr *E, LValue &Result,
3191 const CXXRecordDecl *DerivedRD,
3192 const CXXRecordDecl *BaseRD) {
3193 CXXBasePaths Paths(/*FindAmbiguities=*/false,
3194 /*RecordPaths=*/true, /*DetectVirtual=*/false);
3195 if (!DerivedRD->isDerivedFrom(Base: BaseRD, Paths))
3196 llvm_unreachable("Class must be derived from the passed in base class!");
3197
3198 for (CXXBasePathElement &Elem : Paths.front())
3199 if (!HandleLValueBase(Info, E, Obj&: Result, DerivedDecl: Elem.Class, Base: Elem.Base))
3200 return false;
3201 return true;
3202}
3203
3204/// Update LVal to refer to the given field, which must be a member of the type
3205/// currently described by LVal.
3206static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
3207 const FieldDecl *FD,
3208 const ASTRecordLayout *RL = nullptr) {
3209 if (!RL) {
3210 if (FD->getParent()->isInvalidDecl()) return false;
3211 RL = &Info.Ctx.getASTRecordLayout(D: FD->getParent());
3212 }
3213
3214 unsigned I = FD->getFieldIndex();
3215 LVal.addDecl(Info, E, D: FD);
3216 LVal.adjustOffset(N: Info.Ctx.toCharUnitsFromBits(BitSize: RL->getFieldOffset(FieldNo: I)));
3217 return true;
3218}
3219
3220/// Update LVal to refer to the given indirect field.
3221static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
3222 LValue &LVal,
3223 const IndirectFieldDecl *IFD) {
3224 for (const auto *C : IFD->chain())
3225 if (!HandleLValueMember(Info, E, LVal, FD: cast<FieldDecl>(Val: C)))
3226 return false;
3227 return true;
3228}
3229
/// Selects which notion of "size" HandleSizeof computes: the full size
/// (ASTContext::getTypeSizeInChars) or the data size
/// (ASTContext::getTypeInfoDataSizeInChars).
enum class SizeOfType {
  SizeOf,
  DataSizeOf,
};
3234
/// Get the size of the given type in char units.
///
/// \param SOT whether to compute the full size or the data size.
/// \returns false (with a diagnostic) for dependent and non-constant-size
///          types such as VLAs.
static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type,
                         CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf) {
  // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
  // extension.
  if (Type->isVoidType() || Type->isFunctionType()) {
    Size = CharUnits::One();
    return true;
  }

  if (Type->isDependentType()) {
    Info.FFDiag(Loc);
    return false;
  }

  if (!Type->isConstantSizeType()) {
    // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2.
    // FIXME: Better diagnostic.
    Info.FFDiag(Loc);
    return false;
  }

  if (SOT == SizeOfType::SizeOf)
    Size = Info.Ctx.getTypeSizeInChars(T: Type);
  else
    Size = Info.Ctx.getTypeInfoDataSizeInChars(T: Type).Width;
  return true;
}
3263
3264/// Update a pointer value to model pointer arithmetic.
3265/// \param Info - Information about the ongoing evaluation.
3266/// \param E - The expression being evaluated, for diagnostic purposes.
3267/// \param LVal - The pointer value to be updated.
3268/// \param EltTy - The pointee type represented by LVal.
3269/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
3270static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
3271 LValue &LVal, QualType EltTy,
3272 APSInt Adjustment) {
3273 CharUnits SizeOfPointee;
3274 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfPointee))
3275 return false;
3276
3277 LVal.adjustOffsetAndIndex(Info, E, Index: Adjustment, ElementSize: SizeOfPointee);
3278 return true;
3279}
3280
3281static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
3282 LValue &LVal, QualType EltTy,
3283 int64_t Adjustment) {
3284 return HandleLValueArrayAdjustment(Info, E, LVal, EltTy,
3285 Adjustment: APSInt::get(X: Adjustment));
3286}
3287
3288/// Update an lvalue to refer to a component of a complex number.
3289/// \param Info - Information about the ongoing evaluation.
3290/// \param LVal - The lvalue to be updated.
3291/// \param EltTy - The complex number's component type.
3292/// \param Imag - False for the real component, true for the imaginary.
3293static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
3294 LValue &LVal, QualType EltTy,
3295 bool Imag) {
3296 if (Imag) {
3297 CharUnits SizeOfComponent;
3298 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfComponent))
3299 return false;
3300 LVal.Offset += SizeOfComponent;
3301 }
3302 LVal.addComplex(Info, E, EltTy, Imag);
3303 return true;
3304}
3305
3306static bool HandleLValueVectorElement(EvalInfo &Info, const Expr *E,
3307 LValue &LVal, QualType EltTy,
3308 uint64_t Size, uint64_t Idx) {
3309 if (Idx) {
3310 CharUnits SizeOfElement;
3311 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfElement))
3312 return false;
3313 LVal.Offset += SizeOfElement * Idx;
3314 }
3315 LVal.addVectorElement(Info, E, EltTy, Size, Idx);
3316 return true;
3317}
3318
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
/// \param E An expression to be used when printing diagnostics.
/// \param VD The variable whose initializer should be obtained.
/// \param Version The version of the variable within the frame.
/// \param Frame The frame in which the variable was created. Must be null
/// if this variable is not local to the evaluation.
/// \param Result Filled in with a pointer to the value of the variable.
///        May be set to null when the variable is treated as
///        constexpr-unknown under C++23 [expr.const]p8.
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
                                const VarDecl *VD, CallStackFrame *Frame,
                                unsigned Version, APValue *&Result) {
  // C++23 [expr.const]p8 If we have a reference type allow unknown references
  // and pointers.
  bool AllowConstexprUnknown =
      Info.getLangOpts().CPlusPlus23 && VD->getType()->isReferenceType();

  // The lvalue base identifying this variable; for a local, the frame index
  // and version distinguish distinct instances of the same declaration.
  APValue::LValueBase Base(VD, Frame ? Frame->Index : 0, Version);

  // Shared exit path: reject an uninitialized reference (diagnosing unless we
  // are merely checking a potential constant expression), or, when
  // constexpr-unknown references are permitted and the variable is not local,
  // signal "unknown" to the caller by clearing Result.
  auto CheckUninitReference = [&](bool IsLocalVariable) {
    if (!Result || (!Result->hasValue() && VD->getType()->isReferenceType())) {
      // C++23 [expr.const]p8
      // ... For such an object that is not usable in constant expressions, the
      // dynamic type of the object is constexpr-unknown. For such a reference
      // that is not usable in constant expressions, the reference is treated
      // as binding to an unspecified object of the referenced type whose
      // lifetime and that of all subobjects includes the entire constant
      // evaluation and whose dynamic type is constexpr-unknown.
      //
      // Variables that are part of the current evaluation are not
      // constexpr-unknown.
      if (!AllowConstexprUnknown || IsLocalVariable) {
        if (!Info.checkingPotentialConstantExpression())
          Info.FFDiag(E, DiagId: diag::note_constexpr_use_uninit_reference);
        return false;
      }
      Result = nullptr;
    }
    return true;
  };

  // If this is a local variable, dig out its value.
  if (Frame) {
    Result = Frame->getTemporary(Key: VD, Version);
    if (Result)
      return CheckUninitReference(/*IsLocalVariable=*/true);

    if (!isa<ParmVarDecl>(Val: VD)) {
      // Assume variables referenced within a lambda's call operator that were
      // not declared within the call operator are captures and during checking
      // of a potential constant expression, assume they are unknown constant
      // expressions.
      assert(isLambdaCallOperator(Frame->Callee) &&
             (VD->getDeclContext() != Frame->Callee || VD->isInitCapture()) &&
             "missing value for local variable");
      if (Info.checkingPotentialConstantExpression())
        return false;

      llvm_unreachable(
          "A variable in a frame should either be a local or a parameter");
    }
  }

  // If we're currently evaluating the initializer of this declaration, use that
  // in-flight value.
  if (Info.EvaluatingDecl == Base) {
    Result = Info.EvaluatingDeclValue;
    return CheckUninitReference(/*IsLocalVariable=*/false);
  }

  // P2280R4 struck the restriction that variable of reference type lifetime
  // should begin within the evaluation of E
  // Used to be C++20 [expr.const]p5.12.2:
  // ... its lifetime began within the evaluation of E;
  if (isa<ParmVarDecl>(Val: VD)) {
    // A parameter with no known value: treat it as constexpr-unknown if
    // permitted, otherwise diagnose and fail.
    if (AllowConstexprUnknown) {
      Result = nullptr;
      return true;
    }

    // Assume parameters of a potential constant expression are usable in
    // constant expressions.
    if (!Info.checkingPotentialConstantExpression() ||
        !Info.CurrentCall->Callee ||
        !Info.CurrentCall->Callee->Equals(DC: VD->getDeclContext())) {
      if (Info.getLangOpts().CPlusPlus11) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_function_param_value_unknown)
            << VD;
        NoteLValueLocation(Info, Base);
      } else {
        Info.FFDiag(E);
      }
    }
    return false;
  }

  // Don't try to dig out an initializer for a value-dependent use site.
  if (E->isValueDependent())
    return false;

  // Dig out the initializer, and use the declaration which it's attached to.
  // FIXME: We should eventually check whether the variable has a reachable
  // initializing declaration.
  const Expr *Init = VD->getAnyInitializer(D&: VD);
  // P2280R4 struck the restriction that variable of reference type should have
  // a preceding initialization.
  // Used to be C++20 [expr.const]p5.12:
  // ... reference has a preceding initialization and either ...
  if (!Init && !AllowConstexprUnknown) {
    // Don't diagnose during potential constant expression checking; an
    // initializer might be added later.
    if (!Info.checkingPotentialConstantExpression()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_unknown, ExtraNotes: 1)
          << VD;
      NoteLValueLocation(Info, Base);
    }
    return false;
  }

  // P2280R4 struck the initialization requirement for variables of reference
  // type so we can no longer assume we have an Init.
  // Used to be C++20 [expr.const]p5.12:
  // ... reference has a preceding initialization and either ...
  if (Init && Init->isValueDependent()) {
    // The DeclRefExpr is not value-dependent, but the variable it refers to
    // has a value-dependent initializer. This should only happen in
    // constant-folding cases, where the variable is not actually of a suitable
    // type for use in a constant expression (otherwise the DeclRefExpr would
    // have been value-dependent too), so diagnose that.
    assert(!VD->mightBeUsableInConstantExpressions(Info.Ctx));
    if (!Info.checkingPotentialConstantExpression()) {
      Info.FFDiag(E, DiagId: Info.getLangOpts().CPlusPlus11
                         ? diag::note_constexpr_ltor_non_constexpr
                         : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
          << VD << VD->getType();
      NoteLValueLocation(Info, Base);
    }
    return false;
  }

  // Check that we can fold the initializer. In C++, we will have already done
  // this in the cases where it matters for conformance.
  // P2280R4 struck the initialization requirement for variables of reference
  // type so we can no longer assume we have an Init.
  // Used to be C++20 [expr.const]p5.12:
  // ... reference has a preceding initialization and either ...
  if (Init && !VD->evaluateValue() && !AllowConstexprUnknown) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << VD;
    NoteLValueLocation(Info, Base);
    return false;
  }

  // Check that the variable is actually usable in constant expressions. For a
  // const integral variable or a reference, we might have a non-constant
  // initializer that we can nonetheless evaluate the initializer for. Such
  // variables are not usable in constant expressions. In C++98, the
  // initializer also syntactically needs to be an ICE.
  //
  // FIXME: We don't diagnose cases that aren't potentially usable in constant
  // expressions here; doing so would regress diagnostics for things like
  // reading from a volatile constexpr variable.
  if ((Info.getLangOpts().CPlusPlus && !VD->hasConstantInitialization() &&
       VD->mightBeUsableInConstantExpressions(C: Info.Ctx) &&
       !AllowConstexprUnknown) ||
      ((Info.getLangOpts().CPlusPlus || Info.getLangOpts().OpenCL) &&
       !Info.getLangOpts().CPlusPlus11 && !VD->hasICEInitializer(Context: Info.Ctx))) {
    // CCEDiag: rejects strict constant expressions but still permits folding.
    if (Init) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << VD;
      NoteLValueLocation(Info, Base);
    } else {
      Info.CCEDiag(E);
    }
  }

  // Never use the initializer of a weak variable, not even for constant
  // folding. We can't be sure that this is the definition that will be used.
  if (VD->isWeak()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_weak) << VD;
    NoteLValueLocation(Info, Base);
    return false;
  }

  // Use the value cached by evaluateValue() above (or by an earlier use).
  Result = VD->getEvaluatedValue();

  // With no evaluated value, we can only proceed in the constexpr-unknown
  // case (CheckUninitReference then clears Result).
  if (!Result && !AllowConstexprUnknown)
    return false;

  return CheckUninitReference(/*IsLocalVariable=*/false);
}
3507
3508/// Get the base index of the given base class within an APValue representing
3509/// the given derived class.
3510static unsigned getBaseIndex(const CXXRecordDecl *Derived,
3511 const CXXRecordDecl *Base) {
3512 Base = Base->getCanonicalDecl();
3513 unsigned Index = 0;
3514 for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
3515 E = Derived->bases_end(); I != E; ++I, ++Index) {
3516 if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
3517 return Index;
3518 }
3519
3520 llvm_unreachable("base class missing from derived class's bases list");
3521}
3522
3523/// Extract the value of a character from a string literal.
3524static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
3525 uint64_t Index) {
3526 assert(!isa<SourceLocExpr>(Lit) &&
3527 "SourceLocExpr should have already been converted to a StringLiteral");
3528
3529 // FIXME: Support MakeStringConstant
3530 if (const auto *ObjCEnc = dyn_cast<ObjCEncodeExpr>(Val: Lit)) {
3531 std::string Str;
3532 Info.Ctx.getObjCEncodingForType(T: ObjCEnc->getEncodedType(), S&: Str);
3533 assert(Index <= Str.size() && "Index too large");
3534 return APSInt::getUnsigned(X: Str.c_str()[Index]);
3535 }
3536
3537 if (auto PE = dyn_cast<PredefinedExpr>(Val: Lit))
3538 Lit = PE->getFunctionName();
3539 const StringLiteral *S = cast<StringLiteral>(Val: Lit);
3540 const ConstantArrayType *CAT =
3541 Info.Ctx.getAsConstantArrayType(T: S->getType());
3542 assert(CAT && "string literal isn't an array");
3543 QualType CharType = CAT->getElementType();
3544 assert(CharType->isIntegerType() && "unexpected character type");
3545 APSInt Value(Info.Ctx.getTypeSize(T: CharType),
3546 CharType->isUnsignedIntegerType());
3547 if (Index < S->getLength())
3548 Value = S->getCodeUnit(i: Index);
3549 return Value;
3550}
3551
3552// Expand a string literal into an array of characters.
3553//
3554// FIXME: This is inefficient; we should probably introduce something similar
3555// to the LLVM ConstantDataArray to make this cheaper.
3556static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
3557 APValue &Result,
3558 QualType AllocType = QualType()) {
3559 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
3560 T: AllocType.isNull() ? S->getType() : AllocType);
3561 assert(CAT && "string literal isn't an array");
3562 QualType CharType = CAT->getElementType();
3563 assert(CharType->isIntegerType() && "unexpected character type");
3564
3565 unsigned Elts = CAT->getZExtSize();
3566 Result = APValue(APValue::UninitArray(),
3567 std::min(a: S->getLength(), b: Elts), Elts);
3568 APSInt Value(Info.Ctx.getTypeSize(T: CharType),
3569 CharType->isUnsignedIntegerType());
3570 if (Result.hasArrayFiller())
3571 Result.getArrayFiller() = APValue(Value);
3572 for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) {
3573 Value = S->getCodeUnit(i: I);
3574 Result.getArrayInitializedElt(I) = APValue(Value);
3575 }
3576}
3577
3578// Expand an array so that it has more than Index filled elements.
3579static void expandArray(APValue &Array, unsigned Index) {
3580 unsigned Size = Array.getArraySize();
3581 assert(Index < Size);
3582
3583 // Always at least double the number of elements for which we store a value.
3584 unsigned OldElts = Array.getArrayInitializedElts();
3585 unsigned NewElts = std::max(a: Index+1, b: OldElts * 2);
3586 NewElts = std::min(a: Size, b: std::max(a: NewElts, b: 8u));
3587
3588 // Copy the data across.
3589 APValue NewValue(APValue::UninitArray(), NewElts, Size);
3590 for (unsigned I = 0; I != OldElts; ++I)
3591 NewValue.getArrayInitializedElt(I).swap(RHS&: Array.getArrayInitializedElt(I));
3592 for (unsigned I = OldElts; I != NewElts; ++I)
3593 NewValue.getArrayInitializedElt(I) = Array.getArrayFiller();
3594 if (NewValue.hasArrayFiller())
3595 NewValue.getArrayFiller() = Array.getArrayFiller();
3596 Array.swap(RHS&: NewValue);
3597}
3598
3599// Expand an indeterminate vector to materialize all elements.
3600static void expandVector(APValue &Vec, unsigned NumElements) {
3601 assert(Vec.isIndeterminate());
3602 SmallVector<APValue, 4> Elts(NumElements, APValue::IndeterminateValue());
3603 Vec = APValue(Elts.data(), Elts.size());
3604}
3605
3606/// Determine whether a type would actually be read by an lvalue-to-rvalue
3607/// conversion. If it's of class type, we may assume that the copy operation
3608/// is trivial. Note that this is never true for a union type with fields
3609/// (because the copy always "reads" the active member) and always true for
3610/// a non-class type.
3611static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD);
3612static bool isReadByLvalueToRvalueConversion(QualType T) {
3613 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3614 return !RD || isReadByLvalueToRvalueConversion(RD);
3615}
3616static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) {
3617 // FIXME: A trivial copy of a union copies the object representation, even if
3618 // the union is empty.
3619 if (RD->isUnion())
3620 return !RD->field_empty();
3621 if (RD->isEmpty())
3622 return false;
3623
3624 for (auto *Field : RD->fields())
3625 if (!Field->isUnnamedBitField() &&
3626 isReadByLvalueToRvalueConversion(T: Field->getType()))
3627 return true;
3628
3629 for (auto &BaseSpec : RD->bases())
3630 if (isReadByLvalueToRvalueConversion(T: BaseSpec.getType()))
3631 return true;
3632
3633 return false;
3634}
3635
3636/// Diagnose an attempt to read from any unreadable field within the specified
3637/// type, which might be a class type.
3638static bool diagnoseMutableFields(EvalInfo &Info, const Expr *E, AccessKinds AK,
3639 QualType T) {
3640 CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
3641 if (!RD)
3642 return false;
3643
3644 if (!RD->hasMutableFields())
3645 return false;
3646
3647 for (auto *Field : RD->fields()) {
3648 // If we're actually going to read this field in some way, then it can't
3649 // be mutable. If we're in a union, then assigning to a mutable field
3650 // (even an empty one) can change the active member, so that's not OK.
3651 // FIXME: Add core issue number for the union case.
3652 if (Field->isMutable() &&
3653 (RD->isUnion() || isReadByLvalueToRvalueConversion(T: Field->getType()))) {
3654 Info.FFDiag(E, DiagId: diag::note_constexpr_access_mutable, ExtraNotes: 1) << AK << Field;
3655 Info.Note(Loc: Field->getLocation(), DiagId: diag::note_declared_at);
3656 return true;
3657 }
3658
3659 if (diagnoseMutableFields(Info, E, AK, T: Field->getType()))
3660 return true;
3661 }
3662
3663 for (auto &BaseSpec : RD->bases())
3664 if (diagnoseMutableFields(Info, E, AK, T: BaseSpec.getType()))
3665 return true;
3666
3667 // All mutable fields were empty, and thus not actually read.
3668 return false;
3669}
3670
/// Determine whether the object denoted by \p Base began its lifetime within
/// the current constant evaluation.
///
/// \param MutableSubobject True when asking about a mutable subobject rather
///        than the complete object; during constant destruction, mutable
///        subobjects are NOT considered to start within the evaluation.
static bool lifetimeStartedInEvaluation(EvalInfo &Info,
                                        APValue::LValueBase Base,
                                        bool MutableSubobject = false) {
  // A temporary or transient heap allocation we created.
  if (Base.getCallIndex() || Base.is<DynamicAllocLValue>())
    return true;

  switch (Info.IsEvaluatingDecl) {
  case EvalInfo::EvaluatingDeclKind::None:
    return false;

  case EvalInfo::EvaluatingDeclKind::Ctor:
    // The variable whose initializer we're evaluating.
    if (Info.EvaluatingDecl == Base)
      return true;

    // A temporary lifetime-extended by the variable whose initializer we're
    // evaluating.
    if (auto *BaseE = Base.dyn_cast<const Expr *>())
      if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(Val: BaseE))
        return Info.EvaluatingDecl == BaseMTE->getExtendingDecl();
    return false;

  case EvalInfo::EvaluatingDeclKind::Dtor:
    // C++2a [expr.const]p6:
    //   [during constant destruction] the lifetime of a and its non-mutable
    //   subobjects (but not its mutable subobjects) [are] considered to start
    //   within e.
    if (MutableSubobject || Base != Info.EvaluatingDecl)
      return false;
    // FIXME: We can meaningfully extend this to cover non-const objects, but
    // we will need special handling: we should be able to access only
    // subobjects of such objects that are themselves declared const.
    QualType T = getType(B: Base);
    return T.isConstQualified() || T->isReferenceType();
  }

  llvm_unreachable("unknown evaluating decl kind");
}
3710
3711static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT,
3712 SourceLocation CallLoc = {}) {
3713 return Info.CheckArraySize(
3714 Loc: CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc,
3715 BitWidth: CAT->getNumAddressingBits(Context: Info.Ctx), ElemCount: CAT->getZExtSize(),
3716 /*Diag=*/true);
3717}
3718
/// Convert one scalar value (bool, integer, or real floating-point) from
/// \p SourceTy to \p DestTy, storing the converted value in \p Result.
/// Returns false, with a diagnostic, if the conversion fails or is not one of
/// the supported scalar-to-scalar combinations.
static bool handleScalarCast(EvalInfo &Info, const FPOptions FPO, const Expr *E,
                             QualType SourceTy, QualType DestTy,
                             APValue const &Original, APValue &Result) {
  // boolean must be checked before integer
  // since IsIntegerType() is true for bool
  if (SourceTy->isBooleanType()) {
    if (DestTy->isBooleanType()) {
      Result = Original;
      return true;
    }
    if (DestTy->isIntegerType() || DestTy->isRealFloatingType()) {
      bool BoolResult;
      if (!HandleConversionToBool(Val: Original, Result&: BoolResult))
        return false;
      uint64_t IntResult = BoolResult;
      // bool -> float goes through an intermediate 64-bit unsigned integer;
      // the int-to-float step happens in the next if-block.
      QualType IntType = DestTy->isIntegerType()
                             ? DestTy
                             : Info.Ctx.getIntTypeForBitwidth(DestWidth: 64, Signed: false);
      Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: IntType));
    }
    if (DestTy->isRealFloatingType()) {
      // Second step of bool -> float: Result holds the 0/1 integer produced
      // above; convert it to the destination floating type.
      APValue Result2 = APValue(APFloat(0.0));
      if (!HandleIntToFloatCast(Info, E, FPO,
                                SrcType: Info.Ctx.getIntTypeForBitwidth(DestWidth: 64, Signed: false),
                                Value: Result.getInt(), DestType: DestTy, Result&: Result2.getFloat()))
        return false;
      Result = Result2;
    }
    return true;
  }
  if (SourceTy->isIntegerType()) {
    if (DestTy->isRealFloatingType()) {
      Result = APValue(APFloat(0.0));
      return HandleIntToFloatCast(Info, E, FPO, SrcType: SourceTy, Value: Original.getInt(),
                                  DestType: DestTy, Result&: Result.getFloat());
    }
    if (DestTy->isBooleanType()) {
      bool BoolResult;
      if (!HandleConversionToBool(Val: Original, Result&: BoolResult))
        return false;
      uint64_t IntResult = BoolResult;
      Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: DestTy));
      return true;
    }
    if (DestTy->isIntegerType()) {
      Result = APValue(
          HandleIntToIntCast(Info, E, DestType: DestTy, SrcType: SourceTy, Value: Original.getInt()));
      return true;
    }
  } else if (SourceTy->isRealFloatingType()) {
    if (DestTy->isRealFloatingType()) {
      // Convert in place: copy the value, then re-round it to the
      // destination's semantics.
      Result = Original;
      return HandleFloatToFloatCast(Info, E, SrcType: SourceTy, DestType: DestTy,
                                    Result&: Result.getFloat());
    }
    if (DestTy->isBooleanType()) {
      bool BoolResult;
      if (!HandleConversionToBool(Val: Original, Result&: BoolResult))
        return false;
      uint64_t IntResult = BoolResult;
      Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: DestTy));
      return true;
    }
    if (DestTy->isIntegerType()) {
      Result = APValue(APSInt());
      return HandleFloatToIntCast(Info, E, SrcType: SourceTy, Value: Original.getFloat(),
                                  DestType: DestTy, Result&: Result.getInt());
    }
  }

  // Unsupported source/destination combination.
  Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
  return false;
}
3792
// do the heavy lifting for casting to aggregate types
// because we have to deal with bitfields specially
//
// Walks the destination type depth-first with an explicit work list and
// consumes the pre-flattened source Elements/ElTypes one scalar at a time,
// casting each into the corresponding destination slot.
static bool constructAggregate(EvalInfo &Info, const FPOptions FPO,
                               const Expr *E, APValue &Result,
                               QualType ResultType,
                               SmallVectorImpl<APValue> &Elements,
                               SmallVectorImpl<QualType> &ElTypes) {

  // Each work item is (destination slot, its type, bit-field width or 0).
  SmallVector<std::tuple<APValue *, QualType, unsigned>> WorkList = {
      {&Result, ResultType, 0}};

  // Index of the next source scalar to consume.
  unsigned ElI = 0;
  while (!WorkList.empty() && ElI < Elements.size()) {
    auto [Res, Type, BitWidth] = WorkList.pop_back_val();

    if (Type->isRealFloatingType()) {
      if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: Type, Original: Elements[ElI],
                            Result&: *Res))
        return false;
      ElI++;
      continue;
    }
    if (Type->isIntegerType()) {
      if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: Type, Original: Elements[ElI],
                            Result&: *Res))
        return false;
      if (BitWidth > 0) {
        // Destination is a bit-field: truncate to the declared width, then
        // extend back so the APSInt keeps its full storage width.
        if (!Res->isInt())
          return false;
        APSInt &Int = Res->getInt();
        unsigned OldBitWidth = Int.getBitWidth();
        unsigned NewBitWidth = BitWidth;
        if (NewBitWidth < OldBitWidth)
          Int = Int.trunc(width: NewBitWidth).extend(width: OldBitWidth);
      }
      ElI++;
      continue;
    }
    if (Type->isVectorType()) {
      // A vector consumes one source scalar per lane.
      QualType ElTy = Type->castAs<VectorType>()->getElementType();
      unsigned NumEl = Type->castAs<VectorType>()->getNumElements();
      SmallVector<APValue> Vals(NumEl);
      for (unsigned I = 0; I < NumEl; ++I) {
        if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: ElTy, Original: Elements[ElI],
                              Result&: Vals[I]))
          return false;
        ElI++;
      }
      *Res = APValue(Vals.data(), NumEl);
      continue;
    }
    if (Type->isConstantArrayType()) {
      QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))
                          ->getElementType();
      uint64_t Size =
          cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))->getZExtSize();
      *Res = APValue(APValue::UninitArray(), Size, Size);
      // Push in reverse so elements pop off the work list in index order.
      for (int64_t I = Size - 1; I > -1; --I)
        WorkList.emplace_back(Args: &Res->getArrayInitializedElt(I), Args&: ElTy, Args: 0u);
      continue;
    }
    if (Type->isRecordType()) {
      const RecordDecl *RD = Type->getAsRecordDecl();

      unsigned NumBases = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD))
        NumBases = CXXRD->getNumBases();

      *Res = APValue(APValue::UninitStruct(), NumBases, RD->getNumFields());

      SmallVector<std::tuple<APValue *, QualType, unsigned>> ReverseList;
      // we need to traverse backwards
      // Visit the base classes.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
        if (CXXRD->getNumBases() > 0) {
          // Only a single base class is supported here.
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          ReverseList.emplace_back(Args: &Res->getStructBase(i: 0), Args: BS.getType(), Args: 0u);
        }
      }

      // Visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        unsigned FDBW = 0;
        // Unnamed bit-fields receive no value from the source sequence.
        if (FD->isUnnamedBitField())
          continue;
        if (FD->isBitField()) {
          FDBW = FD->getBitWidthValue();
        }

        ReverseList.emplace_back(Args: &Res->getStructField(i: FD->getFieldIndex()),
                                 Args: FD->getType(), Args&: FDBW);
      }

      // Reverse before appending so the base pops first, then the fields in
      // declaration order.
      std::reverse(first: ReverseList.begin(), last: ReverseList.end());
      llvm::append_range(C&: WorkList, R&: ReverseList);
      continue;
    }
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  return true;
}
3896
3897static bool handleElementwiseCast(EvalInfo &Info, const Expr *E,
3898 const FPOptions FPO,
3899 SmallVectorImpl<APValue> &Elements,
3900 SmallVectorImpl<QualType> &SrcTypes,
3901 SmallVectorImpl<QualType> &DestTypes,
3902 SmallVectorImpl<APValue> &Results) {
3903
3904 assert((Elements.size() == SrcTypes.size()) &&
3905 (Elements.size() == DestTypes.size()));
3906
3907 for (unsigned I = 0, ESz = Elements.size(); I < ESz; ++I) {
3908 APValue Original = Elements[I];
3909 QualType SourceTy = SrcTypes[I];
3910 QualType DestTy = DestTypes[I];
3911
3912 if (!handleScalarCast(Info, FPO, E, SourceTy, DestTy, Original, Result&: Results[I]))
3913 return false;
3914 }
3915 return true;
3916}
3917
3918static unsigned elementwiseSize(EvalInfo &Info, QualType BaseTy) {
3919
3920 SmallVector<QualType> WorkList = {BaseTy};
3921
3922 unsigned Size = 0;
3923 while (!WorkList.empty()) {
3924 QualType Type = WorkList.pop_back_val();
3925 if (Type->isRealFloatingType() || Type->isIntegerType() ||
3926 Type->isBooleanType()) {
3927 ++Size;
3928 continue;
3929 }
3930 if (Type->isVectorType()) {
3931 unsigned NumEl = Type->castAs<VectorType>()->getNumElements();
3932 Size += NumEl;
3933 continue;
3934 }
3935 if (Type->isConstantArrayType()) {
3936 QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))
3937 ->getElementType();
3938 uint64_t ArrSize =
3939 cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))->getZExtSize();
3940 for (uint64_t I = 0; I < ArrSize; ++I) {
3941 WorkList.push_back(Elt: ElTy);
3942 }
3943 continue;
3944 }
3945 if (Type->isRecordType()) {
3946 const RecordDecl *RD = Type->getAsRecordDecl();
3947
3948 // Visit the base classes.
3949 if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
3950 if (CXXRD->getNumBases() > 0) {
3951 assert(CXXRD->getNumBases() == 1);
3952 const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
3953 WorkList.push_back(Elt: BS.getType());
3954 }
3955 }
3956
3957 // visit the fields.
3958 for (FieldDecl *FD : RD->fields()) {
3959 if (FD->isUnnamedBitField())
3960 continue;
3961 WorkList.push_back(Elt: FD->getType());
3962 }
3963 continue;
3964 }
3965 }
3966 return Size;
3967}
3968
3969static bool hlslAggSplatHelper(EvalInfo &Info, const Expr *E, APValue &SrcVal,
3970 QualType &SrcTy) {
3971 SrcTy = E->getType();
3972
3973 if (!Evaluate(Result&: SrcVal, Info, E))
3974 return false;
3975
3976 assert((SrcVal.isFloat() || SrcVal.isInt() ||
3977 (SrcVal.isVector() && SrcVal.getVectorLength() == 1)) &&
3978 "Not a valid HLSLAggregateSplatCast.");
3979
3980 if (SrcVal.isVector()) {
3981 assert(SrcTy->isVectorType() && "Type mismatch.");
3982 SrcTy = SrcTy->castAs<VectorType>()->getElementType();
3983 SrcVal = SrcVal.getVectorElt(I: 0);
3984 }
3985 return true;
3986}
3987
/// Flatten \p Value (of type \p BaseTy) into at most \p Size scalar values
/// appended to \p Elements, with their types appended in parallel to
/// \p Types. Aggregates are walked depth-first: base class first, then fields
/// in declaration order; arrays element by element. This is the source-side
/// counterpart of constructAggregate and produces the same ordering.
static bool flattenAPValue(EvalInfo &Info, const Expr *E, APValue Value,
                           QualType BaseTy, SmallVectorImpl<APValue> &Elements,
                           SmallVectorImpl<QualType> &Types, unsigned Size) {

  SmallVector<std::pair<APValue, QualType>> WorkList = {{Value, BaseTy}};
  unsigned Populated = 0;
  while (!WorkList.empty() && Populated < Size) {
    auto [Work, Type] = WorkList.pop_back_val();

    if (Work.isFloat() || Work.isInt()) {
      // A scalar: emit it directly.
      Elements.push_back(Elt: Work);
      Types.push_back(Elt: Type);
      Populated++;
      continue;
    }
    if (Work.isVector()) {
      assert(Type->isVectorType() && "Type mismatch.");
      QualType ElTy = Type->castAs<VectorType>()->getElementType();
      // Emit each lane, stopping early once Size scalars are collected.
      for (unsigned I = 0; I < Work.getVectorLength() && Populated < Size;
           I++) {
        Elements.push_back(Elt: Work.getVectorElt(I));
        Types.push_back(Elt: ElTy);
        Populated++;
      }
      continue;
    }
    if (Work.isArray()) {
      assert(Type->isConstantArrayType() && "Type mismatch.");
      QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))
                          ->getElementType();
      // Push in reverse so elements pop off the work list in index order.
      for (int64_t I = Work.getArraySize() - 1; I > -1; --I) {
        WorkList.emplace_back(Args&: Work.getArrayInitializedElt(I), Args&: ElTy);
      }
      continue;
    }

    if (Work.isStruct()) {
      assert(Type->isRecordType() && "Type mismatch.");

      const RecordDecl *RD = Type->getAsRecordDecl();

      SmallVector<std::pair<APValue, QualType>> ReverseList;
      // Visit the fields.
      for (FieldDecl *FD : RD->fields()) {
        // Unnamed bit-fields are not part of the flattened value.
        if (FD->isUnnamedBitField())
          continue;
        ReverseList.emplace_back(Args&: Work.getStructField(i: FD->getFieldIndex()),
                                 Args: FD->getType());
      }

      std::reverse(first: ReverseList.begin(), last: ReverseList.end());
      llvm::append_range(C&: WorkList, R&: ReverseList);

      // Visit the base classes.
      // Pushed after the fields so the base pops (and flattens) first.
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
        if (CXXRD->getNumBases() > 0) {
          assert(CXXRD->getNumBases() == 1);
          const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0];
          const APValue &Base = Work.getStructBase(i: 0);

          // Can happen in error cases.
          if (!Base.isStruct())
            return false;

          WorkList.emplace_back(Args: Base, Args: BS.getType());
        }
      }
      continue;
    }
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  return true;
}
4062
namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
struct CompleteObject {
  /// The identity of the object.
  APValue::LValueBase Base;
  /// The value of the complete object.
  APValue *Value;
  /// The type of the complete object.
  QualType Type;

  // Default-constructed handles are invalid: Type stays null, so
  // operator bool below yields false.
  CompleteObject() : Value(nullptr) {}
  CompleteObject(APValue::LValueBase Base, APValue *Value, QualType Type)
      : Base(Base), Value(Value), Type(Type) {}

  /// Whether an access of kind \p AK to this object is permitted to touch
  /// its mutable members.
  bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const {
    // If this isn't a "real" access (eg, if it's just accessing the type
    // info), allow it. We assume the type doesn't change dynamically for
    // subobjects of constexpr objects (even though we'd hit UB here if it
    // did). FIXME: Is this right?
    if (!isAnyAccess(AK))
      return true;

    // In C++14 onwards, it is permitted to read a mutable member whose
    // lifetime began within the evaluation.
    // FIXME: Should we also allow this in C++11?
    if (!Info.getLangOpts().CPlusPlus14 &&
        AK != AccessKinds::AK_IsWithinLifetime)
      return false;
    return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/true);
  }

  /// True if this handle refers to an actual object.
  explicit operator bool() const { return !Type.isNull(); }
};
} // end anonymous namespace
4098
4099static QualType getSubobjectType(QualType ObjType, QualType SubobjType,
4100 bool IsMutable = false) {
4101 // C++ [basic.type.qualifier]p1:
4102 // - A const object is an object of type const T or a non-mutable subobject
4103 // of a const object.
4104 if (ObjType.isConstQualified() && !IsMutable)
4105 SubobjType.addConst();
4106 // - A volatile object is an object of type const T or a subobject of a
4107 // volatile object.
4108 if (ObjType.isVolatileQualified())
4109 SubobjType.addVolatile();
4110 return SubobjType;
4111}
4112
4113/// Find the designated sub-object of an rvalue.
4114template <typename SubobjectHandler>
4115static typename SubobjectHandler::result_type
4116findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
4117 const SubobjectDesignator &Sub, SubobjectHandler &handler) {
4118 if (Sub.Invalid)
4119 // A diagnostic will have already been produced.
4120 return handler.failed();
4121 if (Sub.isOnePastTheEnd() || Sub.isMostDerivedAnUnsizedArray()) {
4122 if (Info.getLangOpts().CPlusPlus11)
4123 Info.FFDiag(E, DiagId: Sub.isOnePastTheEnd()
4124 ? diag::note_constexpr_access_past_end
4125 : diag::note_constexpr_access_unsized_array)
4126 << handler.AccessKind;
4127 else
4128 Info.FFDiag(E);
4129 return handler.failed();
4130 }
4131
4132 APValue *O = Obj.Value;
4133 QualType ObjType = Obj.Type;
4134 const FieldDecl *LastField = nullptr;
4135 const FieldDecl *VolatileField = nullptr;
4136
4137 // Walk the designator's path to find the subobject.
4138 for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
4139 // Reading an indeterminate value is undefined, but assigning over one is OK.
4140 if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
4141 (O->isIndeterminate() &&
4142 !isValidIndeterminateAccess(handler.AccessKind))) {
4143 // Object has ended lifetime.
4144 // If I is non-zero, some subobject (member or array element) of a
4145 // complete object has ended its lifetime, so this is valid for
4146 // IsWithinLifetime, resulting in false.
4147 if (I != 0 && handler.AccessKind == AK_IsWithinLifetime)
4148 return false;
4149 if (!Info.checkingPotentialConstantExpression())
4150 Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit)
4151 << handler.AccessKind << O->isIndeterminate()
4152 << E->getSourceRange();
4153 return handler.failed();
4154 }
4155
4156 // C++ [class.ctor]p5, C++ [class.dtor]p5:
4157 // const and volatile semantics are not applied on an object under
4158 // {con,de}struction.
4159 if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) &&
4160 ObjType->isRecordType() &&
4161 Info.isEvaluatingCtorDtor(
4162 Base: Obj.Base, Path: ArrayRef(Sub.Entries.begin(), Sub.Entries.begin() + I)) !=
4163 ConstructionPhase::None) {
4164 ObjType = Info.Ctx.getCanonicalType(T: ObjType);
4165 ObjType.removeLocalConst();
4166 ObjType.removeLocalVolatile();
4167 }
4168
4169 // If this is our last pass, check that the final object type is OK.
4170 if (I == N || (I == N - 1 && ObjType->isAnyComplexType())) {
4171 // Accesses to volatile objects are prohibited.
4172 if (ObjType.isVolatileQualified() && isFormalAccess(handler.AccessKind)) {
4173 if (Info.getLangOpts().CPlusPlus) {
4174 int DiagKind;
4175 SourceLocation Loc;
4176 const NamedDecl *Decl = nullptr;
4177 if (VolatileField) {
4178 DiagKind = 2;
4179 Loc = VolatileField->getLocation();
4180 Decl = VolatileField;
4181 } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) {
4182 DiagKind = 1;
4183 Loc = VD->getLocation();
4184 Decl = VD;
4185 } else {
4186 DiagKind = 0;
4187 if (auto *E = Obj.Base.dyn_cast<const Expr *>())
4188 Loc = E->getExprLoc();
4189 }
4190 Info.FFDiag(E, DiagId: diag::note_constexpr_access_volatile_obj, ExtraNotes: 1)
4191 << handler.AccessKind << DiagKind << Decl;
4192 Info.Note(Loc, DiagId: diag::note_constexpr_volatile_here) << DiagKind;
4193 } else {
4194 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
4195 }
4196 return handler.failed();
4197 }
4198
4199 // If we are reading an object of class type, there may still be more
4200 // things we need to check: if there are any mutable subobjects, we
4201 // cannot perform this read. (This only happens when performing a trivial
4202 // copy or assignment.)
4203 if (ObjType->isRecordType() &&
4204 !Obj.mayAccessMutableMembers(Info, AK: handler.AccessKind) &&
4205 diagnoseMutableFields(Info, E, handler.AccessKind, ObjType))
4206 return handler.failed();
4207 }
4208
4209 if (I == N) {
4210 if (!handler.found(*O, ObjType))
4211 return false;
4212
4213 // If we modified a bit-field, truncate it to the right width.
4214 if (isModification(handler.AccessKind) &&
4215 LastField && LastField->isBitField() &&
4216 !truncateBitfieldValue(Info, E, Value&: *O, FD: LastField))
4217 return false;
4218
4219 return true;
4220 }
4221
4222 LastField = nullptr;
4223 if (ObjType->isArrayType()) {
4224 // Next subobject is an array element.
4225 const ArrayType *AT = Info.Ctx.getAsArrayType(T: ObjType);
4226 assert((isa<ConstantArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
4227 "vla in literal type?");
4228 uint64_t Index = Sub.Entries[I].getAsArrayIndex();
4229 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT);
4230 CAT && CAT->getSize().ule(RHS: Index)) {
4231 // Note, it should not be possible to form a pointer with a valid
4232 // designator which points more than one past the end of the array.
4233 if (Info.getLangOpts().CPlusPlus11)
4234 Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end)
4235 << handler.AccessKind;
4236 else
4237 Info.FFDiag(E);
4238 return handler.failed();
4239 }
4240
4241 ObjType = AT->getElementType();
4242
4243 if (O->getArrayInitializedElts() > Index)
4244 O = &O->getArrayInitializedElt(I: Index);
4245 else if (!isRead(handler.AccessKind)) {
4246 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT);
4247 CAT && !CheckArraySize(Info, CAT, CallLoc: E->getExprLoc()))
4248 return handler.failed();
4249
4250 expandArray(Array&: *O, Index);
4251 O = &O->getArrayInitializedElt(I: Index);
4252 } else
4253 O = &O->getArrayFiller();
4254 } else if (ObjType->isAnyComplexType()) {
4255 // Next subobject is a complex number.
4256 uint64_t Index = Sub.Entries[I].getAsArrayIndex();
4257 if (Index > 1) {
4258 if (Info.getLangOpts().CPlusPlus11)
4259 Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end)
4260 << handler.AccessKind;
4261 else
4262 Info.FFDiag(E);
4263 return handler.failed();
4264 }
4265
4266 ObjType = getSubobjectType(
4267 ObjType, SubobjType: ObjType->castAs<ComplexType>()->getElementType());
4268
4269 assert(I == N - 1 && "extracting subobject of scalar?");
4270 if (O->isComplexInt()) {
4271 return handler.found(Index ? O->getComplexIntImag()
4272 : O->getComplexIntReal(), ObjType);
4273 } else {
4274 assert(O->isComplexFloat());
4275 return handler.found(Index ? O->getComplexFloatImag()
4276 : O->getComplexFloatReal(), ObjType);
4277 }
4278 } else if (const auto *VT = ObjType->getAs<VectorType>()) {
4279 uint64_t Index = Sub.Entries[I].getAsArrayIndex();
4280 unsigned NumElements = VT->getNumElements();
4281 if (Index == NumElements) {
4282 if (Info.getLangOpts().CPlusPlus11)
4283 Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end)
4284 << handler.AccessKind;
4285 else
4286 Info.FFDiag(E);
4287 return handler.failed();
4288 }
4289
4290 if (Index > NumElements) {
4291 Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index)
4292 << Index << /*array*/ 0 << NumElements;
4293 return handler.failed();
4294 }
4295
4296 ObjType = VT->getElementType();
4297 assert(I == N - 1 && "extracting subobject of scalar?");
4298
4299 if (O->isIndeterminate()) {
4300 if (isRead(handler.AccessKind)) {
4301 Info.FFDiag(E);
4302 return handler.failed();
4303 }
4304 expandVector(Vec&: *O, NumElements);
4305 }
4306 assert(O->isVector() && "unexpected object during vector element access");
4307 return handler.found(O->getVectorElt(I: Index), ObjType);
4308 } else if (const FieldDecl *Field = getAsField(E: Sub.Entries[I])) {
4309 if (Field->isMutable() &&
4310 !Obj.mayAccessMutableMembers(Info, AK: handler.AccessKind)) {
4311 Info.FFDiag(E, DiagId: diag::note_constexpr_access_mutable, ExtraNotes: 1)
4312 << handler.AccessKind << Field;
4313 Info.Note(Loc: Field->getLocation(), DiagId: diag::note_declared_at);
4314 return handler.failed();
4315 }
4316
4317 // Next subobject is a class, struct or union field.
4318 RecordDecl *RD = ObjType->castAsCanonical<RecordType>()->getDecl();
4319 if (RD->isUnion()) {
4320 const FieldDecl *UnionField = O->getUnionField();
4321 if (!UnionField ||
4322 UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
4323 if (I == N - 1 && handler.AccessKind == AK_Construct) {
4324 // Placement new onto an inactive union member makes it active.
4325 O->setUnion(Field, Value: APValue());
4326 } else {
4327 // Pointer to/into inactive union member: Not within lifetime
4328 if (handler.AccessKind == AK_IsWithinLifetime)
4329 return false;
4330 // FIXME: If O->getUnionValue() is absent, report that there's no
4331 // active union member rather than reporting the prior active union
4332 // member. We'll need to fix nullptr_t to not use APValue() as its
4333 // representation first.
4334 Info.FFDiag(E, DiagId: diag::note_constexpr_access_inactive_union_member)
4335 << handler.AccessKind << Field << !UnionField << UnionField;
4336 return handler.failed();
4337 }
4338 }
4339 O = &O->getUnionValue();
4340 } else
4341 O = &O->getStructField(i: Field->getFieldIndex());
4342
4343 ObjType = getSubobjectType(ObjType, SubobjType: Field->getType(), IsMutable: Field->isMutable());
4344 LastField = Field;
4345 if (Field->getType().isVolatileQualified())
4346 VolatileField = Field;
4347 } else {
4348 // Next subobject is a base class.
4349 const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
4350 const CXXRecordDecl *Base = getAsBaseClass(E: Sub.Entries[I]);
4351 O = &O->getStructBase(i: getBaseIndex(Derived, Base));
4352
4353 ObjType = getSubobjectType(ObjType, SubobjType: Info.Ctx.getCanonicalTagType(TD: Base));
4354 }
4355 }
4356}
4357
4358namespace {
4359struct ExtractSubobjectHandler {
4360 EvalInfo &Info;
4361 const Expr *E;
4362 APValue &Result;
4363 const AccessKinds AccessKind;
4364
4365 typedef bool result_type;
4366 bool failed() { return false; }
4367 bool found(APValue &Subobj, QualType SubobjType) {
4368 Result = Subobj;
4369 if (AccessKind == AK_ReadObjectRepresentation)
4370 return true;
4371 return CheckFullyInitialized(Info, DiagLoc: E->getExprLoc(), Type: SubobjType, Value: Result);
4372 }
4373 bool found(APSInt &Value, QualType SubobjType) {
4374 Result = APValue(Value);
4375 return true;
4376 }
4377 bool found(APFloat &Value, QualType SubobjType) {
4378 Result = APValue(Value);
4379 return true;
4380 }
4381};
4382} // end anonymous namespace
4383
4384/// Extract the designated sub-object of an rvalue.
4385static bool extractSubobject(EvalInfo &Info, const Expr *E,
4386 const CompleteObject &Obj,
4387 const SubobjectDesignator &Sub, APValue &Result,
4388 AccessKinds AK = AK_Read) {
4389 assert(AK == AK_Read || AK == AK_ReadObjectRepresentation);
4390 ExtractSubobjectHandler Handler = {.Info: Info, .E: E, .Result: Result, .AccessKind: AK};
4391 return findSubobject(Info, E, Obj, Sub, handler&: Handler);
4392}
4393
4394namespace {
4395struct ModifySubobjectHandler {
4396 EvalInfo &Info;
4397 APValue &NewVal;
4398 const Expr *E;
4399
4400 typedef bool result_type;
4401 static const AccessKinds AccessKind = AK_Assign;
4402
4403 bool checkConst(QualType QT) {
4404 // Assigning to a const object has undefined behavior.
4405 if (QT.isConstQualified()) {
4406 Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
4407 return false;
4408 }
4409 return true;
4410 }
4411
4412 bool failed() { return false; }
4413 bool found(APValue &Subobj, QualType SubobjType) {
4414 if (!checkConst(QT: SubobjType))
4415 return false;
4416 // We've been given ownership of NewVal, so just swap it in.
4417 Subobj.swap(RHS&: NewVal);
4418 return true;
4419 }
4420 bool found(APSInt &Value, QualType SubobjType) {
4421 if (!checkConst(QT: SubobjType))
4422 return false;
4423 if (!NewVal.isInt()) {
4424 // Maybe trying to write a cast pointer value into a complex?
4425 Info.FFDiag(E);
4426 return false;
4427 }
4428 Value = NewVal.getInt();
4429 return true;
4430 }
4431 bool found(APFloat &Value, QualType SubobjType) {
4432 if (!checkConst(QT: SubobjType))
4433 return false;
4434 Value = NewVal.getFloat();
4435 return true;
4436 }
4437};
4438} // end anonymous namespace
4439
// Out-of-line definition of the in-class-initialized static constant,
// required (pre-C++17) in case AccessKind is odr-used.
const AccessKinds ModifySubobjectHandler::AccessKind;
4441
4442/// Update the designated sub-object of an rvalue to the given value.
4443static bool modifySubobject(EvalInfo &Info, const Expr *E,
4444 const CompleteObject &Obj,
4445 const SubobjectDesignator &Sub,
4446 APValue &NewVal) {
4447 ModifySubobjectHandler Handler = { .Info: Info, .NewVal: NewVal, .E: E };
4448 return findSubobject(Info, E, Obj, Sub, handler&: Handler);
4449}
4450
4451/// Find the position where two subobject designators diverge, or equivalently
4452/// the length of the common initial subsequence.
4453static unsigned FindDesignatorMismatch(QualType ObjType,
4454 const SubobjectDesignator &A,
4455 const SubobjectDesignator &B,
4456 bool &WasArrayIndex) {
4457 unsigned I = 0, N = std::min(a: A.Entries.size(), b: B.Entries.size());
4458 for (/**/; I != N; ++I) {
4459 if (!ObjType.isNull() &&
4460 (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
4461 // Next subobject is an array element.
4462 if (A.Entries[I].getAsArrayIndex() != B.Entries[I].getAsArrayIndex()) {
4463 WasArrayIndex = true;
4464 return I;
4465 }
4466 if (ObjType->isAnyComplexType())
4467 ObjType = ObjType->castAs<ComplexType>()->getElementType();
4468 else
4469 ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
4470 } else {
4471 if (A.Entries[I].getAsBaseOrMember() !=
4472 B.Entries[I].getAsBaseOrMember()) {
4473 WasArrayIndex = false;
4474 return I;
4475 }
4476 if (const FieldDecl *FD = getAsField(E: A.Entries[I]))
4477 // Next subobject is a field.
4478 ObjType = FD->getType();
4479 else
4480 // Next subobject is a base class.
4481 ObjType = QualType();
4482 }
4483 }
4484 WasArrayIndex = false;
4485 return I;
4486}
4487
4488/// Determine whether the given subobject designators refer to elements of the
4489/// same array object.
4490static bool AreElementsOfSameArray(QualType ObjType,
4491 const SubobjectDesignator &A,
4492 const SubobjectDesignator &B) {
4493 if (A.Entries.size() != B.Entries.size())
4494 return false;
4495
4496 bool IsArray = A.MostDerivedIsArrayElement;
4497 if (IsArray && A.MostDerivedPathLength != A.Entries.size())
4498 // A is a subobject of the array element.
4499 return false;
4500
4501 // If A (and B) designates an array element, the last entry will be the array
4502 // index. That doesn't have to match. Otherwise, we're in the 'implicit array
4503 // of length 1' case, and the entire path must match.
4504 bool WasArrayIndex;
4505 unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
4506 return CommonLength >= A.Entries.size() - IsArray;
4507}
4508
/// Find the complete object to which an LValue refers.
///
/// \param Info - Information about the ongoing evaluation.
/// \param E - The expression being evaluated, for diagnostics.
/// \param AK - The kind of access being performed (read, assign, construct,
///        dereference, ...); several checks below are access-kind specific.
/// \param LVal - The lvalue whose underlying complete object is wanted.
/// \param LValType - The declared type of the lvalue.
/// \returns the complete object (base, value storage, and base type), or an
///        invalid CompleteObject on failure with a diagnostic produced.
static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
                                         AccessKinds AK, const LValue &LVal,
                                         QualType LValType) {
  // An lvalue with an invalid base can never designate an object.
  if (LVal.InvalidBase) {
    Info.FFDiag(E);
    return CompleteObject();
  }

  // Null base: access through a null pointer. Dereference gets its own,
  // more specific diagnostic.
  if (!LVal.Base) {
    if (AK == AccessKinds::AK_Dereference)
      Info.FFDiag(E, DiagId: diag::note_constexpr_dereferencing_null);
    else
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_null) << AK;
    return CompleteObject();
  }

  // If the object lives in a (constexpr) call frame, look the frame up. A
  // nonzero call index with no matching live frame means the call has
  // returned and the object's lifetime has ended.
  CallStackFrame *Frame = nullptr;
  unsigned Depth = 0;
  if (LVal.getLValueCallIndex()) {
    std::tie(args&: Frame, args&: Depth) =
        Info.getCallFrameAndDepth(CallIndex: LVal.getLValueCallIndex());
    if (!Frame) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit, ExtraNotes: 1)
          << AK << /*Indeterminate=*/false << E->getSourceRange();
      NoteLValueLocation(Info, Base: LVal.Base);
      return CompleteObject();
    }
  }

  bool IsAccess = isAnyAccess(AK);

  // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
  // is not a constant expression (even if the object is non-volatile). We also
  // apply this rule to C++98, in order to conform to the expected 'volatile'
  // semantics.
  if (isFormalAccess(AK) && LValType.isVolatileQualified()) {
    if (Info.getLangOpts().CPlusPlus)
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_volatile_type)
          << AK << LValType;
    else
      Info.FFDiag(E);
    return CompleteObject();
  }

  // Compute value storage location and type of base object.
  APValue *BaseVal = nullptr;
  QualType BaseType = getType(B: LVal.Base);

  if (Info.getLangOpts().CPlusPlus14 && LVal.Base == Info.EvaluatingDecl &&
      lifetimeStartedInEvaluation(Info, Base: LVal.Base)) {
    // This is the object whose initializer we're evaluating, so its lifetime
    // started in the current evaluation.
    BaseVal = Info.EvaluatingDeclValue;
  } else if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl *>()) {
    // Allow reading from a GUID declaration.
    if (auto *GD = dyn_cast<MSGuidDecl>(Val: D)) {
      if (isModification(AK)) {
        // All the remaining cases do not permit modification of the object.
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      }
      // The GUID's APValue is created lazily; if it is absent, its layout is
      // not representable and we cannot read it.
      APValue &V = GD->getAsAPValue();
      if (V.isAbsent()) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_unsupported_layout)
            << GD->getType();
        return CompleteObject();
      }
      return CompleteObject(LVal.Base, &V, GD->getType());
    }

    // Allow reading the APValue from an UnnamedGlobalConstantDecl.
    if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(Val: D)) {
      if (isModification(AK)) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      }
      return CompleteObject(LVal.Base, const_cast<APValue *>(&GCD->getValue()),
                            GCD->getType());
    }

    // Allow reading from template parameter objects.
    if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: D)) {
      if (isModification(AK)) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      }
      return CompleteObject(LVal.Base, const_cast<APValue *>(&TPO->getValue()),
                            TPO->getType());
    }

    // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
    // In C++11, constexpr, non-volatile variables initialized with constant
    // expressions are constant expressions too. Inside constexpr functions,
    // parameters are constant expressions even if they're non-const.
    // In C++1y, objects local to a constant expression (those with a Frame) are
    // both readable and writable inside constant expressions.
    // In C, such things can also be folded, although they are not ICEs.
    const VarDecl *VD = dyn_cast<VarDecl>(Val: D);
    if (VD) {
      // Prefer the defining declaration, which carries the initializer.
      if (const VarDecl *VDef = VD->getDefinition(C&: Info.Ctx))
        VD = VDef;
    }
    if (!VD || VD->isInvalidDecl()) {
      Info.FFDiag(E);
      return CompleteObject();
    }

    bool IsConstant = BaseType.isConstant(Ctx: Info.Ctx);
    bool ConstexprVar = false;
    // Track whether the declaration whose initializer we're evaluating is
    // itself a constexpr variable (relevant for the C23 check below).
    if (const auto *VD = dyn_cast_if_present<VarDecl>(
            Val: Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()))
      ConstexprVar = VD->isConstexpr();

    // Unless we're looking at a local variable or argument in a constexpr call,
    // the variable we're reading must be const (unless we are binding to a
    // reference).
    if (AK != clang::AK_Dereference && !Frame) {
      if (IsAccess && isa<ParmVarDecl>(Val: VD)) {
        // Access of a parameter that's not associated with a frame isn't going
        // to work out, but we can leave it to evaluateVarDeclInit to provide a
        // suitable diagnostic.
      } else if (Info.getLangOpts().CPlusPlus14 &&
                 lifetimeStartedInEvaluation(Info, Base: LVal.Base)) {
        // OK, we can read and modify an object if we're in the process of
        // evaluating its initializer, because its lifetime began in this
        // evaluation.
      } else if (isModification(AK)) {
        // All the remaining cases do not permit modification of the object.
        Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global);
        return CompleteObject();
      } else if (VD->isConstexpr()) {
        // OK, we can read this variable.
      } else if (Info.getLangOpts().C23 && ConstexprVar) {
        // C23: a constexpr variable's initializer may not refer to
        // non-constexpr objects.
        Info.FFDiag(E);
        return CompleteObject();
      } else if (BaseType->isIntegralOrEnumerationType()) {
        if (!IsConstant) {
          if (!IsAccess)
            return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
          if (Info.getLangOpts().CPlusPlus) {
            Info.FFDiag(E, DiagId: diag::note_constexpr_ltor_non_const_int, ExtraNotes: 1) << VD;
            Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
          } else {
            Info.FFDiag(E);
          }
          return CompleteObject();
        }
      } else if (!IsAccess) {
        // Not an access: hand back the base without a value; callers that
        // don't need the value (e.g. pointer arithmetic) can proceed.
        return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
      } else if ((IsConstant || BaseType->isReferenceType()) &&
                 Info.checkingPotentialConstantExpression() &&
                 BaseType->isLiteralType(Ctx: Info.Ctx) && !VD->hasDefinition()) {
        // This variable might end up being constexpr. Don't diagnose it yet.
      } else if (IsConstant) {
        // Keep evaluating to see what we can do. In particular, we support
        // folding of const floating-point types, in order to make static const
        // data members of such types (supported as an extension) more useful.
        if (Info.getLangOpts().CPlusPlus) {
          Info.CCEDiag(E, DiagId: Info.getLangOpts().CPlusPlus11
                              ? diag::note_constexpr_ltor_non_constexpr
                              : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
              << VD << BaseType;
          Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
        } else {
          Info.CCEDiag(E);
        }
      } else {
        // Never allow reading a non-const value.
        if (Info.getLangOpts().CPlusPlus) {
          Info.FFDiag(E, DiagId: Info.getLangOpts().CPlusPlus11
                             ? diag::note_constexpr_ltor_non_constexpr
                             : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
              << VD << BaseType;
          Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
        } else {
          Info.FFDiag(E);
        }
        return CompleteObject();
      }
    }

    // When binding to a reference, the variable does not need to be constexpr
    // or have constant initalization.
    if (AK != clang::AK_Dereference &&
        !evaluateVarDeclInit(Info, E, VD, Frame, Version: LVal.getLValueVersion(),
                             Result&: BaseVal))
      return CompleteObject();
    // If evaluateVarDeclInit sees a constexpr-unknown variable, it returns
    // a null BaseVal. Any constexpr-unknown variable seen here is an error:
    // we can't access a constexpr-unknown object.
    if (AK != clang::AK_Dereference && !BaseVal) {
      if (!Info.checkingPotentialConstantExpression()) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_access_unknown_variable, ExtraNotes: 1)
            << AK << VD;
        Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
      }
      return CompleteObject();
    }
  } else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) {
    // Object created by a (constexpr) new-expression. If the allocation is
    // gone, it was deleted during this evaluation.
    std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
    if (!Alloc) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_access_deleted_object) << AK;
      return CompleteObject();
    }
    return CompleteObject(LVal.Base, &(*Alloc)->Value,
                          LVal.Base.getDynamicAllocType());
  }
  // When binding to a reference, the variable does not need to be
  // within its lifetime.
  else if (AK != clang::AK_Dereference) {
    const Expr *Base = LVal.Base.dyn_cast<const Expr*>();

    if (!Frame) {
      if (const MaterializeTemporaryExpr *MTE =
              dyn_cast_or_null<MaterializeTemporaryExpr>(Val: Base)) {
        assert(MTE->getStorageDuration() == SD_Static &&
               "should have a frame for a non-global materialized temporary");

        // C++20 [expr.const]p4: [DR2126]
        //   An object or reference is usable in constant expressions if it is
        //   - a temporary object of non-volatile const-qualified literal type
        //     whose lifetime is extended to that of a variable that is usable
        //     in constant expressions
        //
        // C++20 [expr.const]p5:
        //  an lvalue-to-rvalue conversion [is not allowed unless it applies to]
        //   - a non-volatile glvalue that refers to an object that is usable
        //     in constant expressions, or
        //   - a non-volatile glvalue of literal type that refers to a
        //     non-volatile object whose lifetime began within the evaluation
        //     of E;
        //
        // C++11 misses the 'began within the evaluation of e' check and
        // instead allows all temporaries, including things like:
        //   int &&r = 1;
        //   int x = ++r;
        //   constexpr int k = r;
        // Therefore we use the C++14-onwards rules in C++11 too.
        //
        // Note that temporaries whose lifetimes began while evaluating a
        // variable's constructor are not usable while evaluating the
        // corresponding destructor, not even if they're of const-qualified
        // types.
        if (!MTE->isUsableInConstantExpressions(Context: Info.Ctx) &&
            !lifetimeStartedInEvaluation(Info, Base: LVal.Base)) {
          if (!IsAccess)
            return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
          Info.FFDiag(E, DiagId: diag::note_constexpr_access_static_temporary, ExtraNotes: 1) << AK;
          Info.Note(Loc: MTE->getExprLoc(), DiagId: diag::note_constexpr_temporary_here);
          return CompleteObject();
        }

        BaseVal = MTE->getOrCreateValue(MayCreate: false);
        assert(BaseVal && "got reference to unevaluated temporary");
      } else if (const CompoundLiteralExpr *CLE =
                     dyn_cast_or_null<CompoundLiteralExpr>(Val: Base)) {
        // According to GCC info page:
        //
        // 6.28 Compound Literals
        //
        // As an optimization, G++ sometimes gives array compound literals
        // longer lifetimes: when the array either appears outside a function or
        // has a const-qualified type. If foo and its initializer had elements
        // of type char *const rather than char *, or if foo were a global
        // variable, the array would have static storage duration. But it is
        // probably safest just to avoid the use of array compound literals in
        // C++ code.
        //
        // Obey that rule by checking constness for converted array types.
        if (QualType CLETy = CLE->getType(); CLETy->isArrayType() &&
            !LValType->isArrayType() &&
            !CLETy.isConstant(Ctx: Info.Ctx)) {
          Info.FFDiag(E);
          Info.Note(Loc: CLE->getExprLoc(), DiagId: diag::note_declared_at);
          return CompleteObject();
        }

        BaseVal = &CLE->getStaticValue();
      } else {
        if (!IsAccess)
          return CompleteObject(LVal.getLValueBase(), nullptr, BaseType);
        // Render the lvalue itself into the diagnostic so the user can see
        // what object could not be read.
        APValue Val;
        LVal.moveInto(V&: Val);
        Info.FFDiag(E, DiagId: diag::note_constexpr_access_unreadable_object)
            << AK
            << Val.getAsString(Ctx: Info.Ctx,
                               Ty: Info.Ctx.getLValueReferenceType(T: LValType));
        NoteLValueLocation(Info, Base: LVal.Base);
        return CompleteObject();
      }
    } else if (AK != clang::AK_Dereference) {
      // NOTE(review): this AK check appears redundant — the enclosing
      // 'else if' above already guarantees AK != AK_Dereference here.
      BaseVal = Frame->getTemporary(Key: Base, Version: LVal.Base.getVersion());
      assert(BaseVal && "missing value for temporary");
    }
  }

  // In C++14, we can't safely access any mutable state when we might be
  // evaluating after an unmodeled side effect. Parameters are modeled as state
  // in the caller, but aren't visible once the call returns, so they can be
  // modified in a speculatively-evaluated call.
  //
  // FIXME: Not all local state is mutable. Allow local constant subobjects
  // to be read here (but take care with 'mutable' fields).
  unsigned VisibleDepth = Depth;
  if (llvm::isa_and_nonnull<ParmVarDecl>(
          Val: LVal.Base.dyn_cast<const ValueDecl *>()))
    ++VisibleDepth;
  if ((Frame && Info.getLangOpts().CPlusPlus14 &&
       Info.EvalStatus.HasSideEffects) ||
      (isModification(AK) && VisibleDepth < Info.SpeculativeEvaluationDepth))
    return CompleteObject();

  return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType);
}
4824
/// Perform an lvalue-to-rvalue conversion on the given glvalue. This
/// can also be used for 'lvalue-to-lvalue' conversions for looking up the
/// glvalue referred to by an entity of reference type.
///
/// \param Info - Information about the ongoing evaluation.
/// \param Conv - The expression for which we are performing the conversion.
///               Used for diagnostics.
/// \param Type - The type of the glvalue (before stripping cv-qualifiers in the
///               case of a non-class type).
/// \param LVal - The glvalue on which we are attempting to perform this action.
/// \param RVal - The produced value will be placed here.
/// \param WantObjectRepresentation - If true, we're looking for the object
///               representation rather than the value, and in particular,
///               there is no requirement that the result be fully initialized.
/// \returns true on success; false (with a diagnostic) otherwise.
static bool
handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
                               const LValue &LVal, APValue &RVal,
                               bool WantObjectRepresentation = false) {
  if (LVal.Designator.Invalid)
    return false;

  // Check for special cases where there is no existing APValue to look at.
  const Expr *Base = LVal.Base.dyn_cast<const Expr*>();

  AccessKinds AK =
      WantObjectRepresentation ? AK_ReadObjectRepresentation : AK_Read;

  // String-literal fast path only applies to non-volatile, non-local bases.
  if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) {
    if (isa<StringLiteral>(Val: Base) || isa<PredefinedExpr>(Val: Base)) {
      // Special-case character extraction so we don't have to construct an
      // APValue for the whole string.
      assert(LVal.Designator.Entries.size() <= 1 &&
             "Can only read characters from string literals");
      if (LVal.Designator.Entries.empty()) {
        // Fail for now for LValue to RValue conversion of an array.
        // (This shouldn't show up in C/C++, but it could be triggered by a
        // weird EvaluateAsRValue call from a tool.)
        Info.FFDiag(E: Conv);
        return false;
      }
      if (LVal.Designator.isOnePastTheEnd()) {
        // Reading the one-past-the-end position is out of bounds.
        if (Info.getLangOpts().CPlusPlus11)
          Info.FFDiag(E: Conv, DiagId: diag::note_constexpr_access_past_end) << AK;
        else
          Info.FFDiag(E: Conv);
        return false;
      }
      uint64_t CharIndex = LVal.Designator.Entries[0].getAsArrayIndex();
      RVal = APValue(extractStringLiteralCharacter(Info, Lit: Base, Index: CharIndex));
      return true;
    }
  }

  // General case: locate the complete object, then extract the designated
  // subobject from its stored APValue.
  CompleteObject Obj = findCompleteObject(Info, E: Conv, AK, LVal, LValType: Type);
  return Obj && extractSubobject(Info, E: Conv, Obj, Sub: LVal.Designator, Result&: RVal, AK);
}
4881
4882static bool hlslElementwiseCastHelper(EvalInfo &Info, const Expr *E,
4883 QualType DestTy,
4884 SmallVectorImpl<APValue> &SrcVals,
4885 SmallVectorImpl<QualType> &SrcTypes) {
4886 APValue Val;
4887 if (!Evaluate(Result&: Val, Info, E))
4888 return false;
4889
4890 // must be dealing with a record
4891 if (Val.isLValue()) {
4892 LValue LVal;
4893 LVal.setFrom(Ctx: Info.Ctx, V: Val);
4894 if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal, RVal&: Val))
4895 return false;
4896 }
4897
4898 unsigned NEls = elementwiseSize(Info, BaseTy: DestTy);
4899 // flatten the source
4900 if (!flattenAPValue(Info, E, Value: Val, BaseTy: E->getType(), Elements&: SrcVals, Types&: SrcTypes, Size: NEls))
4901 return false;
4902
4903 return true;
4904}
4905
4906/// Perform an assignment of Val to LVal. Takes ownership of Val.
4907static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
4908 QualType LValType, APValue &Val) {
4909 if (LVal.Designator.Invalid)
4910 return false;
4911
4912 if (!Info.getLangOpts().CPlusPlus14) {
4913 Info.FFDiag(E);
4914 return false;
4915 }
4916
4917 CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Assign, LVal, LValType);
4918 return Obj && modifySubobject(Info, E, Obj, Sub: LVal.Designator, NewVal&: Val);
4919}
4920
4921namespace {
4922struct CompoundAssignSubobjectHandler {
4923 EvalInfo &Info;
4924 const CompoundAssignOperator *E;
4925 QualType PromotedLHSType;
4926 BinaryOperatorKind Opcode;
4927 const APValue &RHS;
4928
4929 static const AccessKinds AccessKind = AK_Assign;
4930
4931 typedef bool result_type;
4932
4933 bool checkConst(QualType QT) {
4934 // Assigning to a const object has undefined behavior.
4935 if (QT.isConstQualified()) {
4936 Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
4937 return false;
4938 }
4939 return true;
4940 }
4941
4942 bool failed() { return false; }
4943 bool found(APValue &Subobj, QualType SubobjType) {
4944 switch (Subobj.getKind()) {
4945 case APValue::Int:
4946 return found(Value&: Subobj.getInt(), SubobjType);
4947 case APValue::Float:
4948 return found(Value&: Subobj.getFloat(), SubobjType);
4949 case APValue::ComplexInt:
4950 case APValue::ComplexFloat:
4951 // FIXME: Implement complex compound assignment.
4952 Info.FFDiag(E);
4953 return false;
4954 case APValue::LValue:
4955 return foundPointer(Subobj, SubobjType);
4956 case APValue::Vector:
4957 return foundVector(Value&: Subobj, SubobjType);
4958 case APValue::Indeterminate:
4959 Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit)
4960 << /*read of=*/0 << /*uninitialized object=*/1
4961 << E->getLHS()->getSourceRange();
4962 return false;
4963 default:
4964 // FIXME: can this happen?
4965 Info.FFDiag(E);
4966 return false;
4967 }
4968 }
4969
4970 bool foundVector(APValue &Value, QualType SubobjType) {
4971 if (!checkConst(QT: SubobjType))
4972 return false;
4973
4974 if (!SubobjType->isVectorType()) {
4975 Info.FFDiag(E);
4976 return false;
4977 }
4978 return handleVectorVectorBinOp(Info, E, Opcode, LHSValue&: Value, RHSValue: RHS);
4979 }
4980
4981 bool found(APSInt &Value, QualType SubobjType) {
4982 if (!checkConst(QT: SubobjType))
4983 return false;
4984
4985 if (!SubobjType->isIntegerType()) {
4986 // We don't support compound assignment on integer-cast-to-pointer
4987 // values.
4988 Info.FFDiag(E);
4989 return false;
4990 }
4991
4992 if (RHS.isInt()) {
4993 APSInt LHS =
4994 HandleIntToIntCast(Info, E, DestType: PromotedLHSType, SrcType: SubobjType, Value);
4995 if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS: RHS.getInt(), Result&: LHS))
4996 return false;
4997 Value = HandleIntToIntCast(Info, E, DestType: SubobjType, SrcType: PromotedLHSType, Value: LHS);
4998 return true;
4999 } else if (RHS.isFloat()) {
5000 const FPOptions FPO = E->getFPFeaturesInEffect(
5001 LO: Info.Ctx.getLangOpts());
5002 APFloat FValue(0.0);
5003 return HandleIntToFloatCast(Info, E, FPO, SrcType: SubobjType, Value,
5004 DestType: PromotedLHSType, Result&: FValue) &&
5005 handleFloatFloatBinOp(Info, E, LHS&: FValue, Opcode, RHS: RHS.getFloat()) &&
5006 HandleFloatToIntCast(Info, E, SrcType: PromotedLHSType, Value: FValue, DestType: SubobjType,
5007 Result&: Value);
5008 }
5009
5010 Info.FFDiag(E);
5011 return false;
5012 }
5013 bool found(APFloat &Value, QualType SubobjType) {
5014 return checkConst(QT: SubobjType) &&
5015 HandleFloatToFloatCast(Info, E, SrcType: SubobjType, DestType: PromotedLHSType,
5016 Result&: Value) &&
5017 handleFloatFloatBinOp(Info, E, LHS&: Value, Opcode, RHS: RHS.getFloat()) &&
5018 HandleFloatToFloatCast(Info, E, SrcType: PromotedLHSType, DestType: SubobjType, Result&: Value);
5019 }
  // Apply the compound assignment to a pointer subobject. Only
  // pointer += integer and pointer -= integer are supported.
  bool foundPointer(APValue &Subobj, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    QualType PointeeType;
    if (const PointerType *PT = SubobjType->getAs<PointerType>())
      PointeeType = PT->getPointeeType();

    // Reject non-pointer subobjects, non-integer offsets, and any operator
    // other than += / -=.
    if (PointeeType.isNull() || !RHS.isInt() ||
        (Opcode != BO_Add && Opcode != BO_Sub)) {
      Info.FFDiag(E);
      return false;
    }

    APSInt Offset = RHS.getInt();
    // Subtraction is handled as addition of the negated offset.
    if (Opcode == BO_Sub)
      negateAsSigned(Int&: Offset);

    // Perform the adjustment on an LValue copy, then store it back.
    LValue LVal;
    LVal.setFrom(Ctx: Info.Ctx, V: Subobj);
    if (!HandleLValueArrayAdjustment(Info, E, LVal, EltTy: PointeeType, Adjustment: Offset))
      return false;
    LVal.moveInto(V&: Subobj);
    return true;
  }
5045};
5046} // end anonymous namespace
5047
// Out-of-line definition of the handler's static 'AccessKind' member
// (presumably needed for odr-use before C++17 inline variables — declared in
// the struct above).
const AccessKinds CompoundAssignSubobjectHandler::AccessKind;
5049
/// Perform a compound assignment of LVal <op>= RVal.
///
/// \param LVal - The object being assigned to.
/// \param LValType - The declared type of the LHS.
/// \param PromotedLValType - The type the LHS is promoted to for the
///        computation.
/// \param RVal - The already-evaluated right-hand operand.
static bool handleCompoundAssignment(EvalInfo &Info,
                                     const CompoundAssignOperator *E,
                                     const LValue &LVal, QualType LValType,
                                     QualType PromotedLValType,
                                     BinaryOperatorKind Opcode,
                                     const APValue &RVal) {
  if (LVal.Designator.Invalid)
    return false;

  // Modifying an object during constant evaluation requires C++14's relaxed
  // constexpr rules.
  if (!Info.getLangOpts().CPlusPlus14) {
    Info.FFDiag(E);
    return false;
  }

  // Locate the complete object being assigned to, then walk down to the
  // designated subobject and apply the operation there via the handler.
  CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Assign, LVal, LValType);
  CompoundAssignSubobjectHandler Handler = { .Info: Info, .E: E, .PromotedLHSType: PromotedLValType, .Opcode: Opcode,
                                             .RHS: RVal };
  return Obj && findSubobject(Info, E, Obj, Sub: LVal.Designator, handler&: Handler);
}
5070
5071namespace {
/// Subobject handler that applies a pre/post increment or decrement to the
/// designated subobject, optionally stashing the previous value (for
/// post-inc/dec) into *Old.
struct IncDecSubobjectHandler {
  EvalInfo &Info;
  const UnaryOperator *E;
  AccessKinds AccessKind; // AK_Increment or AK_Decrement.
  APValue *Old;           // If non-null, receives the pre-modification value.

  typedef bool result_type;

  // Reject modification of const-qualified subobjects.
  bool checkConst(QualType QT) {
    // Assigning to a const object has undefined behavior.
    if (QT.isConstQualified()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
      return false;
    }
    return true;
  }

  bool failed() { return false; }

  // Generic entry point: dispatch on the APValue representation of the
  // subobject. For complex values, ++/-- applies to the real component only.
  bool found(APValue &Subobj, QualType SubobjType) {
    // Stash the old value. Also clear Old, so we don't clobber it later
    // if we're post-incrementing a complex.
    if (Old) {
      *Old = Subobj;
      Old = nullptr;
    }

    switch (Subobj.getKind()) {
    case APValue::Int:
      return found(Value&: Subobj.getInt(), SubobjType);
    case APValue::Float:
      return found(Value&: Subobj.getFloat(), SubobjType);
    case APValue::ComplexInt:
      return found(Value&: Subobj.getComplexIntReal(),
                   SubobjType: SubobjType->castAs<ComplexType>()->getElementType()
                       .withCVRQualifiers(CVR: SubobjType.getCVRQualifiers()));
    case APValue::ComplexFloat:
      return found(Value&: Subobj.getComplexFloatReal(),
                   SubobjType: SubobjType->castAs<ComplexType>()->getElementType()
                       .withCVRQualifiers(CVR: SubobjType.getCVRQualifiers()));
    case APValue::LValue:
      return foundPointer(Subobj, SubobjType);
    default:
      // FIXME: can this happen?
      Info.FFDiag(E);
      return false;
    }
  }

  // Increment/decrement an integer subobject, diagnosing signed overflow.
  bool found(APSInt &Value, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    if (!SubobjType->isIntegerType()) {
      // We don't support increment / decrement on integer-cast-to-pointer
      // values.
      Info.FFDiag(E);
      return false;
    }

    if (Old) *Old = APValue(Value);

    // bool arithmetic promotes to int, and the conversion back to bool
    // doesn't reduce mod 2^n, so special-case it.
    if (SubobjType->isBooleanType()) {
      if (AccessKind == AK_Increment)
        Value = 1;
      else
        Value = !Value;
      return true;
    }

    bool WasNegative = Value.isNegative();
    if (AccessKind == AK_Increment) {
      ++Value;

      // Overflow: a non-negative value wrapped to negative.
      if (!WasNegative && Value.isNegative() && E->canOverflow()) {
        APSInt ActualValue(Value, /*IsUnsigned*/true);
        return HandleOverflow(Info, E, SrcValue: ActualValue, DestType: SubobjType);
      }
    } else {
      --Value;

      // Underflow: a negative value wrapped to non-negative. Widen by one
      // bit and set the new top bit to reconstruct the mathematically
      // correct (negative) value for the diagnostic.
      if (WasNegative && !Value.isNegative() && E->canOverflow()) {
        unsigned BitWidth = Value.getBitWidth();
        APSInt ActualValue(Value.sext(width: BitWidth + 1), /*IsUnsigned*/false);
        ActualValue.setBit(BitWidth);
        return HandleOverflow(Info, E, SrcValue: ActualValue, DestType: SubobjType);
      }
    }
    return true;
  }

  // Increment/decrement a floating-point subobject by 1.0, honoring the
  // rounding mode currently in effect and checking the FP status flags.
  bool found(APFloat &Value, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    if (Old) *Old = APValue(Value);

    APFloat One(Value.getSemantics(), 1);
    llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
    APFloat::opStatus St;
    if (AccessKind == AK_Increment)
      St = Value.add(RHS: One, RM);
    else
      St = Value.subtract(RHS: One, RM);
    return checkFloatingPointResult(Info, E, St);
  }

  // Increment/decrement a pointer subobject: an array adjustment of +/-1
  // element.
  bool foundPointer(APValue &Subobj, QualType SubobjType) {
    if (!checkConst(QT: SubobjType))
      return false;

    QualType PointeeType;
    if (const PointerType *PT = SubobjType->getAs<PointerType>())
      PointeeType = PT->getPointeeType();
    else {
      Info.FFDiag(E);
      return false;
    }

    LValue LVal;
    LVal.setFrom(Ctx: Info.Ctx, V: Subobj);
    if (!HandleLValueArrayAdjustment(Info, E, LVal, EltTy: PointeeType,
                                     Adjustment: AccessKind == AK_Increment ? 1 : -1))
      return false;
    LVal.moveInto(V&: Subobj);
    return true;
  }
};
5198} // end anonymous namespace
5199
5200/// Perform an increment or decrement on LVal.
5201static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
5202 QualType LValType, bool IsIncrement, APValue *Old) {
5203 if (LVal.Designator.Invalid)
5204 return false;
5205
5206 if (!Info.getLangOpts().CPlusPlus14) {
5207 Info.FFDiag(E);
5208 return false;
5209 }
5210
5211 AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
5212 CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
5213 IncDecSubobjectHandler Handler = {.Info: Info, .E: cast<UnaryOperator>(Val: E), .AccessKind: AK, .Old: Old};
5214 return Obj && findSubobject(Info, E, Obj, Sub: LVal.Designator, handler&: Handler);
5215}
5216
5217/// Build an lvalue for the object argument of a member function call.
5218static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
5219 LValue &This) {
5220 if (Object->getType()->isPointerType() && Object->isPRValue())
5221 return EvaluatePointer(E: Object, Result&: This, Info);
5222
5223 if (Object->isGLValue())
5224 return EvaluateLValue(E: Object, Result&: This, Info);
5225
5226 if (Object->getType()->isLiteralType(Ctx: Info.Ctx))
5227 return EvaluateTemporary(E: Object, Result&: This, Info);
5228
5229 if (Object->getType()->isRecordType() && Object->isPRValue())
5230 return EvaluateTemporary(E: Object, Result&: This, Info);
5231
5232 Info.FFDiag(E: Object, DiagId: diag::note_constexpr_nonliteral) << Object->getType();
5233 return false;
5234}
5235
/// HandleMemberPointerAccess - Evaluate a member access operation and build an
/// lvalue referring to the result.
///
/// \param Info - Information about the ongoing evaluation.
/// \param LVType - The type of the object expression at the base of the
///        access.
/// \param LV - An lvalue referring to the base of the member pointer.
/// \param RHS - The member pointer expression.
/// \param IncludeMember - Specifies whether the member itself is included in
///        the resulting LValue subobject designator. This is not possible when
///        creating a bound member function.
/// \return The field or method declaration to which the member pointer refers,
///         or 0 if evaluation fails.
static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
                                                  QualType LVType,
                                                  LValue &LV,
                                                  const Expr *RHS,
                                                  bool IncludeMember = true) {
  MemberPtr MemPtr;
  if (!EvaluateMemberPointer(E: RHS, Result&: MemPtr, Info))
    return nullptr;

  // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
  // member value, the behavior is undefined.
  if (!MemPtr.getDecl()) {
    // FIXME: Specific diagnostic.
    Info.FFDiag(E: RHS);
    return nullptr;
  }

  if (MemPtr.isDerivedMember()) {
    // This is a member of some derived class. Truncate LV appropriately.
    // The end of the derived-to-base path for the base object must match the
    // derived-to-base path for the member pointer.
    // C++23 [expr.mptr.oper]p4:
    //   If the result of E1 is an object [...] whose most derived object does
    //   not contain the member to which E2 refers, the behavior is undefined.
    if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
        LV.Designator.Entries.size()) {
      Info.FFDiag(E: RHS);
      return nullptr;
    }
    unsigned PathLengthToMember =
        LV.Designator.Entries.size() - MemPtr.Path.size();
    // Each base class on the member pointer's path must match the
    // corresponding tail entry of the lvalue's designator.
    for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
      const CXXRecordDecl *LVDecl = getAsBaseClass(
          E: LV.Designator.Entries[PathLengthToMember + I]);
      const CXXRecordDecl *MPDecl = MemPtr.Path[I];
      if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) {
        Info.FFDiag(E: RHS);
        return nullptr;
      }
    }
    // MemPtr.Path only contains the base classes of the class directly
    // containing the member E2. It is still necessary to check that the class
    // directly containing the member E2 lies on the derived-to-base path of E1
    // to avoid incorrectly permitting member pointer access into a sibling
    // class of the class containing the member E2. If this class would
    // correspond to the most-derived class of E1, it either isn't contained in
    // LV.Designator.Entries or the corresponding entry refers to an array
    // element instead. Therefore get the most derived class directly in this
    // case. Otherwise the previous entry should correpond to this class.
    const CXXRecordDecl *LastLVDecl =
        (PathLengthToMember > LV.Designator.MostDerivedPathLength)
            ? getAsBaseClass(E: LV.Designator.Entries[PathLengthToMember - 1])
            : LV.Designator.MostDerivedType->getAsCXXRecordDecl();
    const CXXRecordDecl *LastMPDecl = MemPtr.getContainingRecord();
    if (LastLVDecl->getCanonicalDecl() != LastMPDecl->getCanonicalDecl()) {
      Info.FFDiag(E: RHS);
      return nullptr;
    }

    // Truncate the lvalue to the appropriate derived class.
    if (!CastToDerivedClass(Info, E: RHS, Result&: LV, TruncatedType: MemPtr.getContainingRecord(),
                            TruncatedElements: PathLengthToMember))
      return nullptr;
  } else if (!MemPtr.Path.empty()) {
    // Extend the LValue path with the member pointer's path.
    LV.Designator.Entries.reserve(N: LV.Designator.Entries.size() +
                                  MemPtr.Path.size() + IncludeMember);

    // Walk down to the appropriate base class.
    if (const PointerType *PT = LVType->getAs<PointerType>())
      LVType = PT->getPointeeType();
    const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
    assert(RD && "member pointer access on non-class-type expression");
    // The first class in the path is that of the lvalue.
    // The path is stored derived-first, so walk it in reverse.
    for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
      const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
      if (!HandleLValueDirectBase(Info, E: RHS, Obj&: LV, Derived: RD, Base))
        return nullptr;
      RD = Base;
    }
    // Finally cast to the class containing the member.
    if (!HandleLValueDirectBase(Info, E: RHS, Obj&: LV, Derived: RD,
                                Base: MemPtr.getContainingRecord()))
      return nullptr;
  }

  // Add the member. Note that we cannot build bound member functions here.
  if (IncludeMember) {
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: MemPtr.getDecl())) {
      if (!HandleLValueMember(Info, E: RHS, LVal&: LV, FD))
        return nullptr;
    } else if (const IndirectFieldDecl *IFD =
                   dyn_cast<IndirectFieldDecl>(Val: MemPtr.getDecl())) {
      if (!HandleLValueIndirectMember(Info, E: RHS, LVal&: LV, IFD))
        return nullptr;
    } else {
      llvm_unreachable("can't construct reference to bound member function");
    }
  }

  return MemPtr.getDecl();
}
5349
5350static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
5351 const BinaryOperator *BO,
5352 LValue &LV,
5353 bool IncludeMember = true) {
5354 assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
5355
5356 if (!EvaluateObjectArgument(Info, Object: BO->getLHS(), This&: LV)) {
5357 if (Info.noteFailure()) {
5358 MemberPtr MemPtr;
5359 EvaluateMemberPointer(E: BO->getRHS(), Result&: MemPtr, Info);
5360 }
5361 return nullptr;
5362 }
5363
5364 return HandleMemberPointerAccess(Info, LVType: BO->getLHS()->getType(), LV,
5365 RHS: BO->getRHS(), IncludeMember);
5366}
5367
/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
/// the provided lvalue, which currently refers to the base object.
static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
                                    LValue &Result) {
  SubobjectDesignator &D = Result.Designator;
  if (D.Invalid || !Result.checkNullPointer(Info, E, CSK: CSK_Derived))
    return false;

  // For a pointer cast, the relevant class type is the pointee.
  QualType TargetQT = E->getType();
  if (const PointerType *PT = TargetQT->getAs<PointerType>())
    TargetQT = PT->getPointeeType();

  // Shared failure path: diagnose (unless suppressed for a potential constant
  // expression on a constexpr-unknown value) and fail.
  auto InvalidCast = [&]() {
    if (!Info.checkingPotentialConstantExpression() ||
        !Result.AllowConstexprUnknown) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_downcast)
          << D.MostDerivedType << TargetQT;
    }
    return false;
  };

  // Check this cast lands within the final derived-to-base subobject path.
  if (D.MostDerivedPathLength + E->path_size() > D.Entries.size())
    return InvalidCast();

  // Check the type of the final cast. We don't need to check the path,
  // since a cast can only be formed if the path is unique.
  unsigned NewEntriesSize = D.Entries.size() - E->path_size();
  const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
  const CXXRecordDecl *FinalType;
  if (NewEntriesSize == D.MostDerivedPathLength)
    FinalType = D.MostDerivedType->getAsCXXRecordDecl();
  else
    FinalType = getAsBaseClass(E: D.Entries[NewEntriesSize - 1]);
  if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl())
    return InvalidCast();

  // Truncate the lvalue to the appropriate derived class.
  return CastToDerivedClass(Info, E, Result, TruncatedType: TargetType, TruncatedElements: NewEntriesSize);
}
5408
/// Get the value to use for a default-initialized object of type T.
/// Return false if it encounters something invalid.
static bool handleDefaultInitValue(QualType T, APValue &Result) {
  bool Success = true;

  // If there is already a value present don't overwrite it.
  if (!Result.isAbsent())
    return true;

  if (auto *RD = T->getAsCXXRecordDecl()) {
    if (RD->isInvalidDecl()) {
      Result = APValue();
      return false;
    }
    // A default-initialized union has no active member.
    if (RD->isUnion()) {
      Result = APValue((const FieldDecl *)nullptr);
      return true;
    }
    Result =
        APValue(APValue::UninitStruct(), RD->getNumBases(), RD->getNumFields());

    // Recursively default-initialize each base-class subobject...
    unsigned Index = 0;
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
                                                  End = RD->bases_end();
         I != End; ++I, ++Index)
      Success &=
          handleDefaultInitValue(T: I->getType(), Result&: Result.getStructBase(i: Index));

    // ...and each field. Unnamed bit-fields are not members and get no value.
    for (const auto *I : RD->fields()) {
      if (I->isUnnamedBitField())
        continue;
      Success &= handleDefaultInitValue(
          T: I->getType(), Result&: Result.getStructField(i: I->getFieldIndex()));
    }
    return Success;
  }

  if (auto *AT =
          dyn_cast_or_null<ConstantArrayType>(Val: T->getAsArrayTypeUnsafe())) {
    // Represent the array compactly: zero explicit elements plus one filler
    // value shared by all elements.
    Result = APValue(APValue::UninitArray(), 0, AT->getZExtSize());
    if (Result.hasArrayFiller())
      Success &=
          handleDefaultInitValue(T: AT->getElementType(), Result&: Result.getArrayFiller());

    return Success;
  }

  // Anything else (scalars, etc.) is left with an indeterminate value.
  Result = APValue::IndeterminateValue();
  return true;
}
5459
namespace {
/// The outcome of evaluating a single statement.
enum EvalStmtResult {
  /// Evaluation failed.
  ESR_Failed,
  /// Hit a 'return' statement.
  ESR_Returned,
  /// Evaluation succeeded.
  ESR_Succeeded,
  /// Hit a 'continue' statement.
  ESR_Continue,
  /// Hit a 'break' statement.
  ESR_Break,
  /// Still scanning for 'case' or 'default' statement.
  ESR_CaseNotFound
};
} // end anonymous namespace
/// Evaluates the initializer of a reference.
///
/// \param D - The declaration being initialized; must have reference type.
/// \param Init - The initializer; must be a glvalue.
/// \param Result - Receives the lvalue the reference binds to.
/// \param Val - Receives the APValue form of the result.
static bool EvaluateInitForDeclOfReferenceType(EvalInfo &Info,
                                               const ValueDecl *D,
                                               const Expr *Init, LValue &Result,
                                               APValue &Val) {
  assert(Init->isGLValue() && D->getType()->isReferenceType());
  // A reference is an lvalue.
  if (!EvaluateLValue(E: Init, Result, Info))
    return false;
  // [C++26][decl.ref]
  // The object designated by such a glvalue can be outside its lifetime
  // Because a null pointer value or a pointer past the end of an object
  // does not point to an object, a reference in a well-defined program cannot
  // refer to such things;
  if (!Result.Designator.Invalid && Result.Designator.isOnePastTheEnd()) {
    Info.FFDiag(E: Init, DiagId: diag::note_constexpr_access_past_end) << AK_Dereference;
    return false;
  }

  // Save the result.
  Result.moveInto(V&: Val);
  return true;
}
5499
/// Evaluate a local variable declaration: create its value slot in the
/// current call frame and run its initializer (or default-initialize).
static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
  if (VD->isInvalidDecl())
    return false;
  // We don't need to evaluate the initializer for a static local.
  if (!VD->hasLocalStorage())
    return true;

  LValue Result;
  APValue &Val = Info.CurrentCall->createTemporary(Key: VD, T: VD->getType(),
                                                   Scope: ScopeKind::Block, LV&: Result);

  const Expr *InitE = VD->getInit();
  if (!InitE) {
    // No initializer. A dependent type can only be noted as a side effect;
    // otherwise default-initialize the value.
    if (VD->getType()->isDependentType())
      return Info.noteSideEffect();
    return handleDefaultInitValue(T: VD->getType(), Result&: Val);
  }
  if (InitE->isValueDependent())
    return false;

  // For references to objects, check they do not designate a one-past-the-end
  // object.
  if (VD->getType()->isReferenceType()) {
    return EvaluateInitForDeclOfReferenceType(Info, D: VD, Init: InitE, Result, Val);
  } else if (!EvaluateInPlace(Result&: Val, Info, This: Result, E: InitE)) {
    // Wipe out any partially-computed value, to allow tracking that this
    // evaluation failed.
    Val = APValue();
    return false;
  }

  return true;
}
5533
5534static bool EvaluateDecompositionDeclInit(EvalInfo &Info,
5535 const DecompositionDecl *DD);
5536
5537static bool EvaluateDecl(EvalInfo &Info, const Decl *D,
5538 bool EvaluateConditionDecl = false) {
5539 bool OK = true;
5540 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
5541 OK &= EvaluateVarDecl(Info, VD);
5542
5543 if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(Val: D);
5544 EvaluateConditionDecl && DD)
5545 OK &= EvaluateDecompositionDeclInit(Info, DD);
5546
5547 return OK;
5548}
5549
5550static bool EvaluateDecompositionDeclInit(EvalInfo &Info,
5551 const DecompositionDecl *DD) {
5552 bool OK = true;
5553 for (auto *BD : DD->flat_bindings())
5554 if (auto *VD = BD->getHoldingVar())
5555 OK &= EvaluateDecl(Info, D: VD, /*EvaluateConditionDecl=*/true);
5556
5557 return OK;
5558}
5559
5560static bool MaybeEvaluateDeferredVarDeclInit(EvalInfo &Info,
5561 const VarDecl *VD) {
5562 if (auto *DD = dyn_cast_if_present<DecompositionDecl>(Val: VD)) {
5563 if (!EvaluateDecompositionDeclInit(Info, DD))
5564 return false;
5565 }
5566 return true;
5567}
5568
5569static bool EvaluateDependentExpr(const Expr *E, EvalInfo &Info) {
5570 assert(E->isValueDependent());
5571 if (Info.noteSideEffect())
5572 return true;
5573 assert(E->containsErrors() && "valid value-dependent expression should never "
5574 "reach invalid code path.");
5575 return false;
5576}
5577
/// Evaluate a condition (either a variable declaration or an expression).
static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
                         const Expr *Cond, bool &Result) {
  if (Cond->isValueDependent())
    return false;
  // The condition is a full-expression; its temporaries die with this scope.
  FullExpressionRAII Scope(Info);
  if (CondDecl && !EvaluateDecl(Info, D: CondDecl))
    return false;
  if (!EvaluateAsBooleanCondition(E: Cond, Result, Info))
    return false;
  // If the condition declared a decomposition, its holding variables are
  // initialized only after the condition itself has been evaluated.
  if (!MaybeEvaluateDeferredVarDeclInit(Info, VD: CondDecl))
    return false;
  return Scope.destroy();
}
5592
namespace {
/// A location where the result (returned value) of evaluating a
/// statement should be stored.
struct StmtResult {
  /// The APValue that should be filled in with the returned value.
  APValue &Value;
  /// The location containing the result, if any (used to support RVO).
  const LValue *Slot;
};

/// RAII object that pushes a new temporary version onto a call frame on
/// construction and pops it on destruction.
struct TempVersionRAII {
  CallStackFrame &Frame;

  TempVersionRAII(CallStackFrame &Frame) : Frame(Frame) {
    Frame.pushTempVersion();
  }

  ~TempVersionRAII() {
    Frame.popTempVersion();
  }
};

} // end anonymous namespace
5616
5617static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
5618 const Stmt *S,
5619 const SwitchCase *SC = nullptr);
5620
/// Helper to implement named break/continue. Returns 'true' if the evaluation
/// result should be propagated up. Otherwise, it sets the evaluation result
/// to either Continue to continue the current loop, or Succeeded to break it.
///
/// \param LoopOrSwitch - The loop or switch statement whose body just
///        finished evaluating.
/// \param Scopes - Scopes to unwind (destroy) if the result propagates past
///        this statement.
static bool ShouldPropagateBreakContinue(EvalInfo &Info,
                                         const Stmt *LoopOrSwitch,
                                         ArrayRef<BlockScopeRAII *> Scopes,
                                         EvalStmtResult &ESR) {
  bool IsSwitch = isa<SwitchStmt>(Val: LoopOrSwitch);

  // For loops, map Succeeded to Continue so we don't have to check for both.
  if (!IsSwitch && ESR == ESR_Succeeded) {
    ESR = ESR_Continue;
    return false;
  }

  // Anything other than break/continue just propagates unchanged.
  if (ESR != ESR_Break && ESR != ESR_Continue)
    return false;

  // Are we breaking out of or continuing this statement?
  // A 'continue' cannot target a switch, so a switch only consumes breaks.
  bool CanBreakOrContinue = !IsSwitch || ESR == ESR_Break;
  // NOTE(review): a null stack entry appears to denote an unlabeled
  // break/continue, which targets the innermost construct — confirm against
  // where BreakContinueStack is pushed.
  const Stmt *StackTop = Info.BreakContinueStack.back();
  if (CanBreakOrContinue && (StackTop == nullptr || StackTop == LoopOrSwitch)) {
    Info.BreakContinueStack.pop_back();
    if (ESR == ESR_Break)
      ESR = ESR_Succeeded;
    return false;
  }

  // We're not. Propagate the result up, destroying the scopes we unwind
  // through; a failed destruction converts the result into failure.
  for (BlockScopeRAII *S : Scopes) {
    if (!S->destroy()) {
      ESR = ESR_Failed;
      break;
    }
  }
  return true;
}
5658
5659/// Evaluate the body of a loop, and translate the result as appropriate.
5660static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info,
5661 const Stmt *Body,
5662 const SwitchCase *Case = nullptr) {
5663 BlockScopeRAII Scope(Info);
5664
5665 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Body, SC: Case);
5666 if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy())
5667 ESR = ESR_Failed;
5668
5669 return ESR;
5670}
5671
/// Evaluate a switch statement.
static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
                                     const SwitchStmt *SS) {
  BlockScopeRAII Scope(Info);

  // Evaluate the switch condition.
  APSInt Value;
  {
    // C++17 init-statement, if present, is evaluated first.
    if (const Stmt *Init = SS->getInit()) {
      EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init);
      if (ESR != ESR_Succeeded) {
        if (ESR != ESR_Failed && !Scope.destroy())
          ESR = ESR_Failed;
        return ESR;
      }
    }

    FullExpressionRAII CondScope(Info);
    if (SS->getConditionVariable() &&
        !EvaluateDecl(Info, D: SS->getConditionVariable()))
      return ESR_Failed;
    if (SS->getCond()->isValueDependent()) {
      // We don't know what the value is, and which branch should jump to.
      EvaluateDependentExpr(E: SS->getCond(), Info);
      return ESR_Failed;
    }
    if (!EvaluateInteger(E: SS->getCond(), Result&: Value, Info))
      return ESR_Failed;

    // Holding variables of a decomposed condition variable are initialized
    // only after the condition is evaluated.
    if (!MaybeEvaluateDeferredVarDeclInit(Info, VD: SS->getConditionVariable()))
      return ESR_Failed;

    if (!CondScope.destroy())
      return ESR_Failed;
  }

  // Find the switch case corresponding to the value of the condition.
  // FIXME: Cache this lookup.
  const SwitchCase *Found = nullptr;
  for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    // Remember 'default' but keep scanning in case an exact match follows.
    if (isa<DefaultStmt>(Val: SC)) {
      Found = SC;
      continue;
    }

    // A case may be a single value (LHS only) or a GNU case range (LHS..RHS).
    const CaseStmt *CS = cast<CaseStmt>(Val: SC);
    const Expr *LHS = CS->getLHS();
    const Expr *RHS = CS->getRHS();
    if (LHS->isValueDependent() || (RHS && RHS->isValueDependent()))
      return ESR_Failed;
    APSInt LHSValue = LHS->EvaluateKnownConstInt(Ctx: Info.Ctx);
    APSInt RHSValue = RHS ? RHS->EvaluateKnownConstInt(Ctx: Info.Ctx) : LHSValue;
    if (LHSValue <= Value && Value <= RHSValue) {
      Found = SC;
      break;
    }
  }

  // No matching case and no default: the switch does nothing.
  if (!Found)
    return Scope.destroy() ? ESR_Succeeded : ESR_Failed;

  // Search the switch body for the switch case and evaluate it from there.
  EvalStmtResult ESR = EvaluateStmt(Result, Info, S: SS->getBody(), SC: Found);
  if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy())
    return ESR_Failed;
  if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: SS, /*Scopes=*/{}, ESR))
    return ESR;

  switch (ESR) {
  case ESR_Break:
    llvm_unreachable("Should have been converted to Succeeded");
  case ESR_Succeeded:
  case ESR_Continue:
  case ESR_Failed:
  case ESR_Returned:
    return ESR;
  case ESR_CaseNotFound:
    // This can only happen if the switch case is nested within a statement
    // expression. We have no intention of supporting that.
    Info.FFDiag(Loc: Found->getBeginLoc(),
                DiagId: diag::note_constexpr_stmt_expr_unsupported);
    return ESR_Failed;
  }
  llvm_unreachable("Invalid EvalStmtResult!");
}
5758
5759static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) {
5760 // An expression E is a core constant expression unless the evaluation of E
5761 // would evaluate one of the following: [C++23] - a control flow that passes
5762 // through a declaration of a variable with static or thread storage duration
5763 // unless that variable is usable in constant expressions.
5764 if (VD->isLocalVarDecl() && VD->isStaticLocal() &&
5765 !VD->isUsableInConstantExpressions(C: Info.Ctx)) {
5766 Info.CCEDiag(Loc: VD->getLocation(), DiagId: diag::note_constexpr_static_local)
5767 << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD;
5768 return false;
5769 }
5770 return true;
5771}
5772
5773// Evaluate a statement.
5774static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
5775 const Stmt *S, const SwitchCase *Case) {
5776 if (!Info.nextStep(S))
5777 return ESR_Failed;
5778
5779 // If we're hunting down a 'case' or 'default' label, recurse through
5780 // substatements until we hit the label.
5781 if (Case) {
5782 switch (S->getStmtClass()) {
5783 case Stmt::CompoundStmtClass:
5784 // FIXME: Precompute which substatement of a compound statement we
5785 // would jump to, and go straight there rather than performing a
5786 // linear scan each time.
5787 case Stmt::LabelStmtClass:
5788 case Stmt::AttributedStmtClass:
5789 case Stmt::DoStmtClass:
5790 break;
5791
5792 case Stmt::CaseStmtClass:
5793 case Stmt::DefaultStmtClass:
5794 if (Case == S)
5795 Case = nullptr;
5796 break;
5797
5798 case Stmt::IfStmtClass: {
5799 // FIXME: Precompute which side of an 'if' we would jump to, and go
5800 // straight there rather than scanning both sides.
5801 const IfStmt *IS = cast<IfStmt>(Val: S);
5802
5803 // Wrap the evaluation in a block scope, in case it's a DeclStmt
5804 // preceded by our switch label.
5805 BlockScopeRAII Scope(Info);
5806
5807 // Step into the init statement in case it brings an (uninitialized)
5808 // variable into scope.
5809 if (const Stmt *Init = IS->getInit()) {
5810 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init, Case);
5811 if (ESR != ESR_CaseNotFound) {
5812 assert(ESR != ESR_Succeeded);
5813 return ESR;
5814 }
5815 }
5816
5817 // Condition variable must be initialized if it exists.
5818 // FIXME: We can skip evaluating the body if there's a condition
5819 // variable, as there can't be any case labels within it.
5820 // (The same is true for 'for' statements.)
5821
5822 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: IS->getThen(), Case);
5823 if (ESR == ESR_Failed)
5824 return ESR;
5825 if (ESR != ESR_CaseNotFound)
5826 return Scope.destroy() ? ESR : ESR_Failed;
5827 if (!IS->getElse())
5828 return ESR_CaseNotFound;
5829
5830 ESR = EvaluateStmt(Result, Info, S: IS->getElse(), Case);
5831 if (ESR == ESR_Failed)
5832 return ESR;
5833 if (ESR != ESR_CaseNotFound)
5834 return Scope.destroy() ? ESR : ESR_Failed;
5835 return ESR_CaseNotFound;
5836 }
5837
5838 case Stmt::WhileStmtClass: {
5839 EvalStmtResult ESR =
5840 EvaluateLoopBody(Result, Info, Body: cast<WhileStmt>(Val: S)->getBody(), Case);
5841 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: S, /*Scopes=*/{}, ESR))
5842 return ESR;
5843 if (ESR != ESR_Continue)
5844 return ESR;
5845 break;
5846 }
5847
5848 case Stmt::ForStmtClass: {
5849 const ForStmt *FS = cast<ForStmt>(Val: S);
5850 BlockScopeRAII Scope(Info);
5851
5852 // Step into the init statement in case it brings an (uninitialized)
5853 // variable into scope.
5854 if (const Stmt *Init = FS->getInit()) {
5855 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init, Case);
5856 if (ESR != ESR_CaseNotFound) {
5857 assert(ESR != ESR_Succeeded);
5858 return ESR;
5859 }
5860 }
5861
5862 EvalStmtResult ESR =
5863 EvaluateLoopBody(Result, Info, Body: FS->getBody(), Case);
5864 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, /*Scopes=*/{}, ESR))
5865 return ESR;
5866 if (ESR != ESR_Continue)
5867 return ESR;
5868 if (const auto *Inc = FS->getInc()) {
5869 if (Inc->isValueDependent()) {
5870 if (!EvaluateDependentExpr(E: Inc, Info))
5871 return ESR_Failed;
5872 } else {
5873 FullExpressionRAII IncScope(Info);
5874 if (!EvaluateIgnoredValue(Info, E: Inc) || !IncScope.destroy())
5875 return ESR_Failed;
5876 }
5877 }
5878 break;
5879 }
5880
5881 case Stmt::DeclStmtClass: {
5882 // Start the lifetime of any uninitialized variables we encounter. They
5883 // might be used by the selected branch of the switch.
5884 const DeclStmt *DS = cast<DeclStmt>(Val: S);
5885 for (const auto *D : DS->decls()) {
5886 if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
5887 if (!CheckLocalVariableDeclaration(Info, VD))
5888 return ESR_Failed;
5889 if (VD->hasLocalStorage() && !VD->getInit())
5890 if (!EvaluateVarDecl(Info, VD))
5891 return ESR_Failed;
5892 // FIXME: If the variable has initialization that can't be jumped
5893 // over, bail out of any immediately-surrounding compound-statement
5894 // too. There can't be any case labels here.
5895 }
5896 }
5897 return ESR_CaseNotFound;
5898 }
5899
5900 default:
5901 return ESR_CaseNotFound;
5902 }
5903 }
5904
5905 switch (S->getStmtClass()) {
5906 default:
5907 if (const Expr *E = dyn_cast<Expr>(Val: S)) {
5908 if (E->isValueDependent()) {
5909 if (!EvaluateDependentExpr(E, Info))
5910 return ESR_Failed;
5911 } else {
5912 // Don't bother evaluating beyond an expression-statement which couldn't
5913 // be evaluated.
5914 // FIXME: Do we need the FullExpressionRAII object here?
5915 // VisitExprWithCleanups should create one when necessary.
5916 FullExpressionRAII Scope(Info);
5917 if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy())
5918 return ESR_Failed;
5919 }
5920 return ESR_Succeeded;
5921 }
5922
5923 Info.FFDiag(Loc: S->getBeginLoc()) << S->getSourceRange();
5924 return ESR_Failed;
5925
5926 case Stmt::NullStmtClass:
5927 return ESR_Succeeded;
5928
5929 case Stmt::DeclStmtClass: {
5930 const DeclStmt *DS = cast<DeclStmt>(Val: S);
5931 for (const auto *D : DS->decls()) {
5932 const VarDecl *VD = dyn_cast_or_null<VarDecl>(Val: D);
5933 if (VD && !CheckLocalVariableDeclaration(Info, VD))
5934 return ESR_Failed;
5935 // Each declaration initialization is its own full-expression.
5936 FullExpressionRAII Scope(Info);
5937 if (!EvaluateDecl(Info, D, /*EvaluateConditionDecl=*/true) &&
5938 !Info.noteFailure())
5939 return ESR_Failed;
5940 if (!Scope.destroy())
5941 return ESR_Failed;
5942 }
5943 return ESR_Succeeded;
5944 }
5945
5946 case Stmt::ReturnStmtClass: {
5947 const Expr *RetExpr = cast<ReturnStmt>(Val: S)->getRetValue();
5948 FullExpressionRAII Scope(Info);
5949 if (RetExpr && RetExpr->isValueDependent()) {
5950 EvaluateDependentExpr(E: RetExpr, Info);
5951 // We know we returned, but we don't know what the value is.
5952 return ESR_Failed;
5953 }
5954 if (RetExpr &&
5955 !(Result.Slot
5956 ? EvaluateInPlace(Result&: Result.Value, Info, This: *Result.Slot, E: RetExpr)
5957 : Evaluate(Result&: Result.Value, Info, E: RetExpr)))
5958 return ESR_Failed;
5959 return Scope.destroy() ? ESR_Returned : ESR_Failed;
5960 }
5961
5962 case Stmt::CompoundStmtClass: {
5963 BlockScopeRAII Scope(Info);
5964
5965 const CompoundStmt *CS = cast<CompoundStmt>(Val: S);
5966 for (const auto *BI : CS->body()) {
5967 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: BI, Case);
5968 if (ESR == ESR_Succeeded)
5969 Case = nullptr;
5970 else if (ESR != ESR_CaseNotFound) {
5971 if (ESR != ESR_Failed && !Scope.destroy())
5972 return ESR_Failed;
5973 return ESR;
5974 }
5975 }
5976 if (Case)
5977 return ESR_CaseNotFound;
5978 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
5979 }
5980
5981 case Stmt::IfStmtClass: {
5982 const IfStmt *IS = cast<IfStmt>(Val: S);
5983
5984 // Evaluate the condition, as either a var decl or as an expression.
5985 BlockScopeRAII Scope(Info);
5986 if (const Stmt *Init = IS->getInit()) {
5987 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init);
5988 if (ESR != ESR_Succeeded) {
5989 if (ESR != ESR_Failed && !Scope.destroy())
5990 return ESR_Failed;
5991 return ESR;
5992 }
5993 }
5994 bool Cond;
5995 if (IS->isConsteval()) {
5996 Cond = IS->isNonNegatedConsteval();
5997 // If we are not in a constant context, if consteval should not evaluate
5998 // to true.
5999 if (!Info.InConstantContext)
6000 Cond = !Cond;
6001 } else if (!EvaluateCond(Info, CondDecl: IS->getConditionVariable(), Cond: IS->getCond(),
6002 Result&: Cond))
6003 return ESR_Failed;
6004
6005 if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
6006 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: SubStmt);
6007 if (ESR != ESR_Succeeded) {
6008 if (ESR != ESR_Failed && !Scope.destroy())
6009 return ESR_Failed;
6010 return ESR;
6011 }
6012 }
6013 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
6014 }
6015
6016 case Stmt::WhileStmtClass: {
6017 const WhileStmt *WS = cast<WhileStmt>(Val: S);
6018 while (true) {
6019 BlockScopeRAII Scope(Info);
6020 bool Continue;
6021 if (!EvaluateCond(Info, CondDecl: WS->getConditionVariable(), Cond: WS->getCond(),
6022 Result&: Continue))
6023 return ESR_Failed;
6024 if (!Continue)
6025 break;
6026
6027 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: WS->getBody());
6028 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: WS, Scopes: &Scope, ESR))
6029 return ESR;
6030
6031 if (ESR != ESR_Continue) {
6032 if (ESR != ESR_Failed && !Scope.destroy())
6033 return ESR_Failed;
6034 return ESR;
6035 }
6036 if (!Scope.destroy())
6037 return ESR_Failed;
6038 }
6039 return ESR_Succeeded;
6040 }
6041
6042 case Stmt::DoStmtClass: {
6043 const DoStmt *DS = cast<DoStmt>(Val: S);
6044 bool Continue;
6045 do {
6046 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: DS->getBody(), Case);
6047 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: DS, /*Scopes=*/{}, ESR))
6048 return ESR;
6049 if (ESR != ESR_Continue)
6050 return ESR;
6051 Case = nullptr;
6052
6053 if (DS->getCond()->isValueDependent()) {
6054 EvaluateDependentExpr(E: DS->getCond(), Info);
6055 // Bailout as we don't know whether to keep going or terminate the loop.
6056 return ESR_Failed;
6057 }
6058 FullExpressionRAII CondScope(Info);
6059 if (!EvaluateAsBooleanCondition(E: DS->getCond(), Result&: Continue, Info) ||
6060 !CondScope.destroy())
6061 return ESR_Failed;
6062 } while (Continue);
6063 return ESR_Succeeded;
6064 }
6065
6066 case Stmt::ForStmtClass: {
6067 const ForStmt *FS = cast<ForStmt>(Val: S);
6068 BlockScopeRAII ForScope(Info);
6069 if (FS->getInit()) {
6070 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getInit());
6071 if (ESR != ESR_Succeeded) {
6072 if (ESR != ESR_Failed && !ForScope.destroy())
6073 return ESR_Failed;
6074 return ESR;
6075 }
6076 }
6077 while (true) {
6078 BlockScopeRAII IterScope(Info);
6079 bool Continue = true;
6080 if (FS->getCond() && !EvaluateCond(Info, CondDecl: FS->getConditionVariable(),
6081 Cond: FS->getCond(), Result&: Continue))
6082 return ESR_Failed;
6083
6084 if (!Continue) {
6085 if (!IterScope.destroy())
6086 return ESR_Failed;
6087 break;
6088 }
6089
6090 EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: FS->getBody());
6091 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, Scopes: {&IterScope, &ForScope}, ESR))
6092 return ESR;
6093 if (ESR != ESR_Continue) {
6094 if (ESR != ESR_Failed && (!IterScope.destroy() || !ForScope.destroy()))
6095 return ESR_Failed;
6096 return ESR;
6097 }
6098
6099 if (const auto *Inc = FS->getInc()) {
6100 if (Inc->isValueDependent()) {
6101 if (!EvaluateDependentExpr(E: Inc, Info))
6102 return ESR_Failed;
6103 } else {
6104 FullExpressionRAII IncScope(Info);
6105 if (!EvaluateIgnoredValue(Info, E: Inc) || !IncScope.destroy())
6106 return ESR_Failed;
6107 }
6108 }
6109
6110 if (!IterScope.destroy())
6111 return ESR_Failed;
6112 }
6113 return ForScope.destroy() ? ESR_Succeeded : ESR_Failed;
6114 }
6115
6116 case Stmt::CXXForRangeStmtClass: {
6117 const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(Val: S);
6118 BlockScopeRAII Scope(Info);
6119
6120 // Evaluate the init-statement if present.
6121 if (FS->getInit()) {
6122 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getInit());
6123 if (ESR != ESR_Succeeded) {
6124 if (ESR != ESR_Failed && !Scope.destroy())
6125 return ESR_Failed;
6126 return ESR;
6127 }
6128 }
6129
6130 // Initialize the __range variable.
6131 EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getRangeStmt());
6132 if (ESR != ESR_Succeeded) {
6133 if (ESR != ESR_Failed && !Scope.destroy())
6134 return ESR_Failed;
6135 return ESR;
6136 }
6137
6138 // In error-recovery cases it's possible to get here even if we failed to
6139 // synthesize the __begin and __end variables.
6140 if (!FS->getBeginStmt() || !FS->getEndStmt() || !FS->getCond())
6141 return ESR_Failed;
6142
6143 // Create the __begin and __end iterators.
6144 ESR = EvaluateStmt(Result, Info, S: FS->getBeginStmt());
6145 if (ESR != ESR_Succeeded) {
6146 if (ESR != ESR_Failed && !Scope.destroy())
6147 return ESR_Failed;
6148 return ESR;
6149 }
6150 ESR = EvaluateStmt(Result, Info, S: FS->getEndStmt());
6151 if (ESR != ESR_Succeeded) {
6152 if (ESR != ESR_Failed && !Scope.destroy())
6153 return ESR_Failed;
6154 return ESR;
6155 }
6156
6157 while (true) {
6158 // Condition: __begin != __end.
6159 {
6160 if (FS->getCond()->isValueDependent()) {
6161 EvaluateDependentExpr(E: FS->getCond(), Info);
6162 // We don't know whether to keep going or terminate the loop.
6163 return ESR_Failed;
6164 }
6165 bool Continue = true;
6166 FullExpressionRAII CondExpr(Info);
6167 if (!EvaluateAsBooleanCondition(E: FS->getCond(), Result&: Continue, Info))
6168 return ESR_Failed;
6169 if (!Continue)
6170 break;
6171 }
6172
6173 // User's variable declaration, initialized by *__begin.
6174 BlockScopeRAII InnerScope(Info);
6175 ESR = EvaluateStmt(Result, Info, S: FS->getLoopVarStmt());
6176 if (ESR != ESR_Succeeded) {
6177 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy()))
6178 return ESR_Failed;
6179 return ESR;
6180 }
6181
6182 // Loop body.
6183 ESR = EvaluateLoopBody(Result, Info, Body: FS->getBody());
6184 if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, Scopes: {&InnerScope, &Scope}, ESR))
6185 return ESR;
6186 if (ESR != ESR_Continue) {
6187 if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy()))
6188 return ESR_Failed;
6189 return ESR;
6190 }
6191 if (FS->getInc()->isValueDependent()) {
6192 if (!EvaluateDependentExpr(E: FS->getInc(), Info))
6193 return ESR_Failed;
6194 } else {
6195 // Increment: ++__begin
6196 if (!EvaluateIgnoredValue(Info, E: FS->getInc()))
6197 return ESR_Failed;
6198 }
6199
6200 if (!InnerScope.destroy())
6201 return ESR_Failed;
6202 }
6203
6204 return Scope.destroy() ? ESR_Succeeded : ESR_Failed;
6205 }
6206
6207 case Stmt::SwitchStmtClass:
6208 return EvaluateSwitch(Result, Info, SS: cast<SwitchStmt>(Val: S));
6209
6210 case Stmt::ContinueStmtClass:
6211 case Stmt::BreakStmtClass: {
6212 auto *B = cast<LoopControlStmt>(Val: S);
6213 Info.BreakContinueStack.push_back(Elt: B->getNamedLoopOrSwitch());
6214 return isa<ContinueStmt>(Val: S) ? ESR_Continue : ESR_Break;
6215 }
6216
6217 case Stmt::LabelStmtClass:
6218 return EvaluateStmt(Result, Info, S: cast<LabelStmt>(Val: S)->getSubStmt(), Case);
6219
6220 case Stmt::AttributedStmtClass: {
6221 const auto *AS = cast<AttributedStmt>(Val: S);
6222 const auto *SS = AS->getSubStmt();
6223 MSConstexprContextRAII ConstexprContext(
6224 *Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(container: AS->getAttrs()) &&
6225 isa<ReturnStmt>(Val: SS));
6226
6227 auto LO = Info.Ctx.getLangOpts();
6228 if (LO.CXXAssumptions && !LO.MSVCCompat) {
6229 for (auto *Attr : AS->getAttrs()) {
6230 auto *AA = dyn_cast<CXXAssumeAttr>(Val: Attr);
6231 if (!AA)
6232 continue;
6233
6234 auto *Assumption = AA->getAssumption();
6235 if (Assumption->isValueDependent())
6236 return ESR_Failed;
6237
6238 if (Assumption->HasSideEffects(Ctx: Info.Ctx))
6239 continue;
6240
6241 bool Value;
6242 if (!EvaluateAsBooleanCondition(E: Assumption, Result&: Value, Info))
6243 return ESR_Failed;
6244 if (!Value) {
6245 Info.CCEDiag(Loc: Assumption->getExprLoc(),
6246 DiagId: diag::note_constexpr_assumption_failed);
6247 return ESR_Failed;
6248 }
6249 }
6250 }
6251
6252 return EvaluateStmt(Result, Info, S: SS, Case);
6253 }
6254
6255 case Stmt::CaseStmtClass:
6256 case Stmt::DefaultStmtClass:
6257 return EvaluateStmt(Result, Info, S: cast<SwitchCase>(Val: S)->getSubStmt(), Case);
6258 case Stmt::CXXTryStmtClass:
6259 // Evaluate try blocks by evaluating all sub statements.
6260 return EvaluateStmt(Result, Info, S: cast<CXXTryStmt>(Val: S)->getTryBlock(), Case);
6261 }
6262}
6263
/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
/// default constructor. If so, we'll fold it whether or not it's marked as
/// constexpr. If it is marked as constexpr, we will never implicitly define it,
/// so we need special handling.
///
/// \param Loc the location of the call, for diagnostics.
/// \param CD the constructor being called.
/// \param IsValueInitialization true if this call is part of
///        value-initialization, which does not actually invoke a trivial
///        default constructor.
/// \returns true if \p CD is a trivial default constructor (possibly after
///          emitting a CCE diagnostic explaining why the call is still not a
///          core constant expression); false if the caller should evaluate
///          the constructor call normally.
static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
                                           const CXXConstructorDecl *CD,
                                           bool IsValueInitialization) {
  if (!CD->isTrivial() || !CD->isDefaultConstructor())
    return false;

  // Value-initialization does not call a trivial default constructor, so such a
  // call is a core constant expression whether or not the constructor is
  // constexpr.
  if (!CD->isConstexpr() && !IsValueInitialization) {
    if (Info.getLangOpts().CPlusPlus11) {
      // FIXME: If DiagDecl is an implicitly-declared special member function,
      // we should be much more explicit about why it's not constexpr.
      Info.CCEDiag(Loc, DiagId: diag::note_constexpr_invalid_function, ExtraNotes: 1)
          << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
      Info.Note(Loc: CD->getLocation(), DiagId: diag::note_declared_at);
    } else {
      Info.CCEDiag(Loc, DiagId: diag::note_invalid_subexpr_in_const_expr);
    }
  }
  return true;
}
6290
/// CheckConstexprFunction - Check that a function can be called in a constant
/// expression.
///
/// \param CallLoc location of the call, used for diagnostics.
/// \param Declaration the function named by the call expression.
/// \param Definition the definition that would be evaluated, if any.
/// \param Body the body of \p Definition, if it has one.
/// \returns true if the call may be evaluated; false otherwise, in which case
///          a diagnostic has been produced (except when we are only checking
///          a potential constant expression, where silence means "not
///          necessarily non-constant").
static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
                                   const FunctionDecl *Declaration,
                                   const FunctionDecl *Definition,
                                   const Stmt *Body) {
  // Potential constant expressions can contain calls to declared, but not yet
  // defined, constexpr functions.
  if (Info.checkingPotentialConstantExpression() && !Definition &&
      Declaration->isConstexpr())
    return false;

  // Bail out if the function declaration itself is invalid. We will
  // have produced a relevant diagnostic while parsing it, so just
  // note the problematic sub-expression.
  if (Declaration->isInvalidDecl()) {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  // DR1872: An instantiated virtual constexpr function can't be called in a
  // constant expression (prior to C++20). We can still constant-fold such a
  // call.
  if (!Info.Ctx.getLangOpts().CPlusPlus20 && isa<CXXMethodDecl>(Val: Declaration) &&
      cast<CXXMethodDecl>(Val: Declaration)->isVirtual())
    Info.CCEDiag(Loc: CallLoc, DiagId: diag::note_constexpr_virtual_call);

  if (Definition && Definition->isInvalidDecl()) {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  // Can we evaluate this function call?
  if (Definition && Body &&
      (Definition->isConstexpr() || (Info.CurrentCall->CanEvalMSConstexpr &&
                                     Definition->hasAttr<MSConstexprAttr>())))
    return true;

  // The call can't be evaluated; pick the best declaration to diagnose
  // against (the definition if we have one).
  const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
  // Special note for the assert() macro, as the normal error message falsely
  // implies we cannot use an assertion during constant evaluation.
  if (CallLoc.isMacroID() && DiagDecl->getIdentifier()) {
    // FIXME: Instead of checking for an implementation-defined function,
    // check and evaluate the assert() macro.
    StringRef Name = DiagDecl->getName();
    bool AssertFailed =
        Name == "__assert_rtn" || Name == "__assert_fail" || Name == "_wassert";
    if (AssertFailed) {
      Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_assert_failed);
      return false;
    }
  }

  if (Info.getLangOpts().CPlusPlus11) {
    // If this function is not constexpr because it is an inherited
    // non-constexpr constructor, diagnose that directly.
    auto *CD = dyn_cast<CXXConstructorDecl>(Val: DiagDecl);
    if (CD && CD->isInheritingConstructor()) {
      auto *Inherited = CD->getInheritedConstructor().getConstructor();
      if (!Inherited->isConstexpr())
        DiagDecl = CD = Inherited;
    }

    // FIXME: If DiagDecl is an implicitly-declared special member function
    // or an inheriting constructor, we should be much more explicit about why
    // it's not constexpr.
    if (CD && CD->isInheritingConstructor())
      Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_invalid_inhctor, ExtraNotes: 1)
          << CD->getInheritedConstructor().getConstructor()->getParent();
    else
      Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_invalid_function, ExtraNotes: 1)
          << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
    Info.Note(Loc: DiagDecl->getLocation(), DiagId: diag::note_declared_at);
  } else {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr);
  }
  return false;
}
6369
namespace {
/// Subobject handler used by checkDynamicType. We only need to know whether
/// walking to the designated subobject succeeds (findSubobject performs the
/// access checks); the subobject's value itself is irrelevant, so every
/// 'found' overload trivially reports success.
struct CheckDynamicTypeHandler {
  AccessKinds AccessKind;
  typedef bool result_type;
  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) { return true; }
  bool found(APSInt &Value, QualType SubobjType) { return true; }
  bool found(APFloat &Value, QualType SubobjType) { return true; }
};
} // end anonymous namespace
6380
/// Check that we can access the notional vptr of an object / determine its
/// dynamic type.
///
/// \param This lvalue designating the object being accessed.
/// \param AK the kind of access being performed, for diagnostics.
/// \param Polymorphic whether the operation needs to read the notional
///        'vptr', i.e. requires a known dynamic type.
static bool checkDynamicType(EvalInfo &Info, const Expr *E, const LValue &This,
                             AccessKinds AK, bool Polymorphic) {
  if (This.Designator.Invalid)
    return false;

  CompleteObject Obj = findCompleteObject(Info, E, AK, LVal: This, LValType: QualType());

  if (!Obj)
    return false;

  if (!Obj.Value) {
    // The object is not usable in constant expressions, so we can't inspect
    // its value to see if it's in-lifetime or what the active union members
    // are. We can still check for a one-past-the-end lvalue.
    if (This.Designator.isOnePastTheEnd() ||
        This.Designator.isMostDerivedAnUnsizedArray()) {
      Info.FFDiag(E, DiagId: This.Designator.isOnePastTheEnd()
                         ? diag::note_constexpr_access_past_end
                         : diag::note_constexpr_access_unsized_array)
          << AK;
      return false;
    } else if (Polymorphic) {
      // Conservatively refuse to perform a polymorphic operation if we would
      // not be able to read a notional 'vptr' value.
      if (!Info.checkingPotentialConstantExpression() ||
          !This.AllowConstexprUnknown) {
        APValue Val;
        This.moveInto(V&: Val);
        QualType StarThisType =
            Info.Ctx.getLValueReferenceType(T: This.Designator.getType(Ctx&: Info.Ctx));
        Info.FFDiag(E, DiagId: diag::note_constexpr_polymorphic_unknown_dynamic_type)
            << AK << Val.getAsString(Ctx: Info.Ctx, Ty: StarThisType);
      }
      return false;
    }
    return true;
  }

  // Walk to the designated subobject so findSubobject can enforce the usual
  // access rules (lifetime, active union member, etc.) along the path.
  CheckDynamicTypeHandler Handler{.AccessKind: AK};
  return Obj && findSubobject(Info, E, Obj, Sub: This.Designator, handler&: Handler);
}
6424
6425/// Check that the pointee of the 'this' pointer in a member function call is
6426/// either within its lifetime or in its period of construction or destruction.
6427static bool
6428checkNonVirtualMemberCallThisPointer(EvalInfo &Info, const Expr *E,
6429 const LValue &This,
6430 const CXXMethodDecl *NamedMember) {
6431 return checkDynamicType(
6432 Info, E, This,
6433 AK: isa<CXXDestructorDecl>(Val: NamedMember) ? AK_Destroy : AK_MemberCall, Polymorphic: false);
6434}
6435
/// The result of computing an object's dynamic type: which class in the
/// lvalue's path is currently the most-derived in-lifetime object, and where
/// in the path it sits (see ComputeDynamicType).
struct DynamicType {
  /// The dynamic class type of the object.
  const CXXRecordDecl *Type;
  /// The corresponding path length in the lvalue.
  unsigned PathLength;
};
6442
/// Get the class type designated by the first \p PathLength entries of
/// \p Designator: the most-derived type itself when \p PathLength equals the
/// most-derived path length, otherwise the base class named by the last
/// consumed path entry.
static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator,
                                             unsigned PathLength) {
  assert(PathLength >= Designator.MostDerivedPathLength && PathLength <=
      Designator.Entries.size() && "invalid path length");
  return (PathLength == Designator.MostDerivedPathLength)
             ? Designator.MostDerivedType->getAsCXXRecordDecl()
             : getAsBaseClass(E: Designator.Entries[PathLength - 1]);
}
6451
/// Determine the dynamic type of an object.
///
/// \param This lvalue designating the object.
/// \param AK the kind of polymorphic operation being performed, used for
///        access checking and diagnostics.
/// \returns the dynamic class type and its position in the lvalue path, or
///          std::nullopt (with a diagnostic) if no dynamic type can be
///          determined.
static std::optional<DynamicType> ComputeDynamicType(EvalInfo &Info,
                                                     const Expr *E,
                                                     LValue &This,
                                                     AccessKinds AK) {
  // If we don't have an lvalue denoting an object of class type, there is no
  // meaningful dynamic type. (We consider objects of non-class type to have no
  // dynamic type.)
  if (!checkDynamicType(Info, E, This, AK,
                        Polymorphic: AK != AK_TypeId || This.AllowConstexprUnknown))
    return std::nullopt;

  if (This.Designator.Invalid)
    return std::nullopt;

  // Refuse to compute a dynamic type in the presence of virtual bases. This
  // shouldn't happen other than in constant-folding situations, since literal
  // types can't have virtual bases.
  //
  // Note that consumers of DynamicType assume that the type has no virtual
  // bases, and will need modifications if this restriction is relaxed.
  const CXXRecordDecl *Class =
      This.Designator.MostDerivedType->getAsCXXRecordDecl();
  if (!Class || Class->getNumVBases()) {
    Info.FFDiag(E);
    return std::nullopt;
  }

  // Walk the path from the most-derived object outward; the dynamic type is
  // the first enclosing object that is not currently constructing or
  // destroying its base classes.
  // FIXME: For very deep class hierarchies, it might be beneficial to use a
  // binary search here instead. But the overwhelmingly common case is that
  // we're not in the middle of a constructor, so it probably doesn't matter
  // in practice.
  ArrayRef<APValue::LValuePathEntry> Path = This.Designator.Entries;
  for (unsigned PathLength = This.Designator.MostDerivedPathLength;
       PathLength <= Path.size(); ++PathLength) {
    switch (Info.isEvaluatingCtorDtor(Base: This.getLValueBase(),
                                      Path: Path.slice(N: 0, M: PathLength))) {
    case ConstructionPhase::Bases:
    case ConstructionPhase::DestroyingBases:
      // We're constructing or destroying a base class. This is not the dynamic
      // type.
      break;

    case ConstructionPhase::None:
    case ConstructionPhase::AfterBases:
    case ConstructionPhase::AfterFields:
    case ConstructionPhase::Destroying:
      // We've finished constructing the base classes and not yet started
      // destroying them again, so this is the dynamic type.
      return DynamicType{.Type: getBaseClassType(Designator&: This.Designator, PathLength),
                         .PathLength: PathLength};
    }
  }

  // CWG issue 1517: we're constructing a base class of the object described by
  // 'This', so that object has not yet begun its period of construction and
  // any polymorphic operation on it results in undefined behavior.
  Info.FFDiag(E);
  return std::nullopt;
}
6512
/// Perform virtual dispatch.
///
/// \param This lvalue for the object the member is invoked on; on success it
///        is adjusted to designate the subobject of the final overrider's
///        class.
/// \param Found the statically-named member function.
/// \param CovariantAdjustmentPath filled with the chain of return types to
///        adjust through when the overrider has a covariant return type.
/// \returns the final overrider to call, or nullptr (with a diagnostic) if
///          the virtual call cannot be evaluated.
static const CXXMethodDecl *HandleVirtualDispatch(
    EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found,
    llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) {
  std::optional<DynamicType> DynType = ComputeDynamicType(
      Info, E, This,
      AK: isa<CXXDestructorDecl>(Val: Found) ? AK_Destroy : AK_MemberCall);
  if (!DynType)
    return nullptr;

  // Find the final overrider. It must be declared in one of the classes on the
  // path from the dynamic type to the static type.
  // FIXME: If we ever allow literal types to have virtual base classes, that
  // won't be true.
  const CXXMethodDecl *Callee = Found;
  unsigned PathLength = DynType->PathLength;
  for (/**/; PathLength <= This.Designator.Entries.size(); ++PathLength) {
    const CXXRecordDecl *Class = getBaseClassType(Designator&: This.Designator, PathLength);
    const CXXMethodDecl *Overrider =
        Found->getCorrespondingMethodDeclaredInClass(RD: Class, MayBeBase: false);
    if (Overrider) {
      Callee = Overrider;
      break;
    }
  }

  // C++2a [class.abstract]p6:
  //   the effect of making a virtual call to a pure virtual function [...] is
  //   undefined
  if (Callee->isPureVirtual()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_pure_virtual_call, ExtraNotes: 1) << Callee;
    Info.Note(Loc: Callee->getLocation(), DiagId: diag::note_declared_at);
    return nullptr;
  }

  // If necessary, walk the rest of the path to determine the sequence of
  // covariant adjustment steps to apply.
  if (!Info.Ctx.hasSameUnqualifiedType(T1: Callee->getReturnType(),
                                       T2: Found->getReturnType())) {
    CovariantAdjustmentPath.push_back(Elt: Callee->getReturnType());
    for (unsigned CovariantPathLength = PathLength + 1;
         CovariantPathLength != This.Designator.Entries.size();
         ++CovariantPathLength) {
      const CXXRecordDecl *NextClass =
          getBaseClassType(Designator&: This.Designator, PathLength: CovariantPathLength);
      const CXXMethodDecl *Next =
          Found->getCorrespondingMethodDeclaredInClass(RD: NextClass, MayBeBase: false);
      // Only record a step when the declared return type actually changes.
      if (Next && !Info.Ctx.hasSameUnqualifiedType(
                      T1: Next->getReturnType(), T2: CovariantAdjustmentPath.back()))
        CovariantAdjustmentPath.push_back(Elt: Next->getReturnType());
    }
    if (!Info.Ctx.hasSameUnqualifiedType(T1: Found->getReturnType(),
                                         T2: CovariantAdjustmentPath.back()))
      CovariantAdjustmentPath.push_back(Elt: Found->getReturnType());
  }

  // Perform 'this' adjustment.
  if (!CastToDerivedClass(Info, E, Result&: This, TruncatedType: Callee->getParent(), TruncatedElements: PathLength))
    return nullptr;

  return Callee;
}
6575
/// Perform the adjustment from a value returned by a virtual function to
/// a value of the statically expected type, which may be a pointer or
/// reference to a base class of the returned type.
///
/// \param Result in/out: the returned pointer or reference value.
/// \param Path the chain of return types recorded by HandleVirtualDispatch;
///        Path[0] is the overrider's return type and each subsequent entry is
///        a base-class return type to adjust through.
static bool HandleCovariantReturnAdjustment(EvalInfo &Info, const Expr *E,
                                            APValue &Result,
                                            ArrayRef<QualType> Path) {
  assert(Result.isLValue() &&
         "unexpected kind of APValue for covariant return");
  // A null pointer stays null; no base-class adjustment is needed.
  if (Result.isNullPointer())
    return true;

  LValue LVal;
  LVal.setFrom(Ctx: Info.Ctx, V: Result);

  const CXXRecordDecl *OldClass = Path[0]->getPointeeCXXRecordDecl();
  for (unsigned I = 1; I != Path.size(); ++I) {
    const CXXRecordDecl *NewClass = Path[I]->getPointeeCXXRecordDecl();
    assert(OldClass && NewClass && "unexpected kind of covariant return");
    // Upcast one step along the recorded chain (skip no-op steps).
    if (OldClass != NewClass &&
        !CastToBaseClass(Info, E, Result&: LVal, DerivedRD: OldClass, BaseRD: NewClass))
      return false;
    OldClass = NewClass;
  }

  LVal.moveInto(V&: Result);
  return true;
}
6603
6604/// Determine whether \p Base, which is known to be a direct base class of
6605/// \p Derived, is a public base class.
6606static bool isBaseClassPublic(const CXXRecordDecl *Derived,
6607 const CXXRecordDecl *Base) {
6608 for (const CXXBaseSpecifier &BaseSpec : Derived->bases()) {
6609 auto *BaseClass = BaseSpec.getType()->getAsCXXRecordDecl();
6610 if (BaseClass && declaresSameEntity(D1: BaseClass, D2: Base))
6611 return BaseSpec.getAccessSpecifier() == AS_public;
6612 }
6613 llvm_unreachable("Base is not a direct base of Derived");
6614}
6615
/// Apply the given dynamic cast operation on the provided lvalue.
///
/// This implements the hard case of dynamic_cast, requiring a "runtime check"
/// to find a suitable target subobject.
///
/// \param Ptr in/out: the operand lvalue; on success it designates the
///        resulting subobject (or is set to null for a failed pointer cast).
static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
                              LValue &Ptr) {
  // We can't do anything with a non-symbolic pointer value.
  SubobjectDesignator &D = Ptr.Designator;
  if (D.Invalid)
    return false;

  // C++ [expr.dynamic.cast]p6:
  //   If v is a null pointer value, the result is a null pointer value.
  if (Ptr.isNullPointer() && !E->isGLValue())
    return true;

  // For all the other cases, we need the pointer to point to an object within
  // its lifetime / period of construction / destruction, and we need to know
  // its dynamic type.
  std::optional<DynamicType> DynType =
      ComputeDynamicType(Info, E, This&: Ptr, AK: AK_DynamicCast);
  if (!DynType)
    return false;

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void", then the result is a pointer to the most
  //   derived object
  if (E->getType()->isVoidPointerType())
    return CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: DynType->Type, TruncatedElements: DynType->PathLength);

  const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl();
  assert(C && "dynamic_cast target is not void pointer nor class");
  CanQualType CQT = Info.Ctx.getCanonicalTagType(TD: C);

  // On failure: a pointer cast yields a null pointer; a reference cast is
  // diagnosed (it would throw std::bad_cast at runtime). Paths is null when
  // called from phase 1 (hierarchy walk not performed yet).
  auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
    // C++ [expr.dynamic.cast]p9:
    if (!E->isGLValue()) {
      // The value of a failed cast to pointer type is the null pointer value
      // of the required result type.
      Ptr.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
      return true;
    }

    // A failed cast to reference type throws [...] std::bad_cast.
    unsigned DiagKind;
    if (!Paths && (declaresSameEntity(D1: DynType->Type, D2: C) ||
                   DynType->Type->isDerivedFrom(Base: C)))
      DiagKind = 0;
    else if (!Paths || Paths->begin() == Paths->end())
      DiagKind = 1;
    else if (Paths->isAmbiguous(BaseType: CQT))
      DiagKind = 2;
    else {
      assert(Paths->front().Access != AS_public && "why did the cast fail?");
      DiagKind = 3;
    }
    Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_cast_to_reference_failed)
        << DiagKind << Ptr.Designator.getType(Ctx&: Info.Ctx)
        << Info.Ctx.getCanonicalTagType(TD: DynType->Type)
        << E->getType().getUnqualifiedType();
    return false;
  };

  // Runtime check, phase 1:
  //   Walk from the base subobject towards the derived object looking for the
  //   target type.
  for (int PathLength = Ptr.Designator.Entries.size();
       PathLength >= (int)DynType->PathLength; --PathLength) {
    const CXXRecordDecl *Class = getBaseClassType(Designator&: Ptr.Designator, PathLength);
    if (declaresSameEntity(D1: Class, D2: C))
      return CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: Class, TruncatedElements: PathLength);
    // We can only walk across public inheritance edges.
    if (PathLength > (int)DynType->PathLength &&
        !isBaseClassPublic(Derived: getBaseClassType(Designator&: Ptr.Designator, PathLength: PathLength - 1),
                           Base: Class))
      return RuntimeCheckFailed(nullptr);
  }

  // Runtime check, phase 2:
  //   Search the dynamic type for an unambiguous public base of type C.
  CXXBasePaths Paths(/*FindAmbiguities=*/true,
                     /*RecordPaths=*/true, /*DetectVirtual=*/false);
  if (DynType->Type->isDerivedFrom(Base: C, Paths) && !Paths.isAmbiguous(BaseType: CQT) &&
      Paths.front().Access == AS_public) {
    // Downcast to the dynamic type...
    if (!CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: DynType->Type, TruncatedElements: DynType->PathLength))
      return false;
    // ... then upcast to the chosen base class subobject.
    for (CXXBasePathElement &Elem : Paths.front())
      if (!HandleLValueBase(Info, E, Obj&: Ptr, DerivedDecl: Elem.Class, Base: Elem.Base))
        return false;
    return true;
  }

  // Otherwise, the runtime check fails.
  return RuntimeCheckFailed(&Paths);
}
6713
namespace {
/// Subobject handler that makes \p Field the active member of a union object
/// and begins its lifetime, per C++20 [class.union]p5. Used by
/// MaybeHandleUnionActiveMemberChange when an assignment's left-hand side
/// involves union member accesses.
struct StartLifetimeOfUnionMemberHandler {
  EvalInfo &Info;
  /// The assignment's left-hand side expression, for diagnostics.
  const Expr *LHSExpr;
  /// The union member whose lifetime should begin.
  const FieldDecl *Field;
  /// Whether the union object itself is still being initialized.
  bool DuringInit;
  bool Failed = false;
  static const AccessKinds AccessKind = AK_Assign;

  typedef bool result_type;
  bool failed() { return Failed; }
  bool found(APValue &Subobj, QualType SubobjType) {
    // We are supposed to perform no initialization but begin the lifetime of
    // the object. We interpret that as meaning to do what default
    // initialization of the object would do if all constructors involved were
    // trivial:
    //  * All base, non-variant member, and array element subobjects' lifetimes
    //    begin
    //  * No variant members' lifetimes begin
    //  * All scalar subobjects whose lifetimes begin have indeterminate values
    assert(SubobjType->isUnionType());
    if (declaresSameEntity(D1: Subobj.getUnionField(), D2: Field)) {
      // This union member is already active. If it's also in-lifetime, there's
      // nothing to do.
      if (Subobj.getUnionValue().hasValue())
        return true;
    } else if (DuringInit) {
      // We're currently in the process of initializing a different union
      // member. If we carried on, that initialization would attempt to
      // store to an inactive union member, resulting in undefined behavior.
      Info.FFDiag(E: LHSExpr,
                  DiagId: diag::note_constexpr_union_member_change_during_init);
      return false;
    }
    // Switch the active member to Field, giving it the value that
    // trivial default initialization would produce.
    APValue Result;
    Failed = !handleDefaultInitValue(T: Field->getType(), Result);
    Subobj.setUnion(Field, Value: Result);
    return true;
  }
  bool found(APSInt &Value, QualType SubobjType) {
    llvm_unreachable("wrong value kind for union object");
  }
  bool found(APFloat &Value, QualType SubobjType) {
    llvm_unreachable("wrong value kind for union object");
  }
};
} // end anonymous namespace
6761
// Out-of-line definition of the in-class-initialized static data member,
// required for ODR-uses prior to C++17 inline-variable semantics.
const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind;
6763
/// Handle a builtin simple-assignment or a call to a trivial assignment
/// operator whose left-hand side might involve a union member access. If it
/// does, implicitly start the lifetime of any accessed union elements per
/// C++20 [class.union]5.
///
/// \param LHSExpr The syntactic left-hand side, walked structurally to find
///        union member accesses.
/// \param LHS The evaluated lvalue being assigned to; its designator is used
///        to locate the union subobjects to update.
static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
                                               const Expr *LHSExpr,
                                               const LValue &LHS) {
  if (LHS.InvalidBase || LHS.Designator.Invalid)
    return false;

  // Each entry pairs the designator length at which a union member access
  // occurs with that member. Collected outermost-first while walking LHSExpr
  // from the outside in; applied in reverse order below.
  llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths;
  // C++ [class.union]p5:
  //   define the set S(E) of subexpressions of E as follows:
  unsigned PathLength = LHS.Designator.Entries.size();
  for (const Expr *E = LHSExpr; E != nullptr;) {
    // -- If E is of the form A.B, S(E) contains the elements of S(A)...
    if (auto *ME = dyn_cast<MemberExpr>(Val: E)) {
      auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl());
      // Note that we can't implicitly start the lifetime of a reference,
      // so we don't need to proceed any further if we reach one.
      if (!FD || FD->getType()->isReferenceType())
        break;

      // ... and also contains A.B if B names a union member ...
      if (FD->getParent()->isUnion()) {
        // ... of a non-class, non-array type, or of a class type with a
        // trivial default constructor that is not deleted, or an array of
        // such types.
        auto *RD =
            FD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
        if (!RD || RD->hasTrivialDefaultConstructor())
          UnionPathLengths.push_back(Elt: {PathLength - 1, FD});
      }

      E = ME->getBase();
      --PathLength;
      // The designator entry at this depth must name the same member.
      assert(declaresSameEntity(FD,
                                LHS.Designator.Entries[PathLength]
                                    .getAsBaseOrMember().getPointer()));

      // -- If E is of the form A[B] and is interpreted as a built-in array
      //    subscripting operator, S(E) is [S(the array operand, if any)].
    } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: E)) {
      // Step over an ArrayToPointerDecay implicit cast.
      auto *Base = ASE->getBase()->IgnoreImplicit();
      if (!Base->getType()->isArrayType())
        break;

      E = Base;
      --PathLength;

    } else if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E)) {
      // Step over a derived-to-base conversion.
      E = ICE->getSubExpr();
      if (ICE->getCastKind() == CK_NoOp)
        continue;
      if (ICE->getCastKind() != CK_DerivedToBase &&
          ICE->getCastKind() != CK_UncheckedDerivedToBase)
        break;
      // Walk path backwards as we walk up from the base to the derived class.
      for (const CXXBaseSpecifier *Elt : llvm::reverse(C: ICE->path())) {
        if (Elt->isVirtual()) {
          // A class with virtual base classes never has a trivial default
          // constructor, so S(E) is empty in this case.
          E = nullptr;
          break;
        }

        --PathLength;
        assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(),
                                  LHS.Designator.Entries[PathLength]
                                      .getAsBaseOrMember().getPointer()));
      }

      // -- Otherwise, S(E) is empty.
    } else {
      break;
    }
  }

  // Common case: no unions' lifetimes are started.
  if (UnionPathLengths.empty())
    return true;

  //   if modification of X [would access an inactive union member], an object
  //   of the type of X is implicitly created
  CompleteObject Obj =
      findCompleteObject(Info, E: LHSExpr, AK: AK_Assign, LVal: LHS, LValType: LHSExpr->getType());
  if (!Obj)
    return false;
  // Start lifetimes innermost-first (reverse of collection order).
  for (std::pair<unsigned, const FieldDecl *> LengthAndField :
           llvm::reverse(C&: UnionPathLengths)) {
    // Form a designator for the union object.
    SubobjectDesignator D = LHS.Designator;
    D.truncate(Ctx&: Info.Ctx, Base: LHS.Base, NewLength: LengthAndField.first);

    bool DuringInit = Info.isEvaluatingCtorDtor(Base: LHS.Base, Path: D.Entries) ==
                      ConstructionPhase::AfterBases;
    StartLifetimeOfUnionMemberHandler StartLifetime{
        .Info: Info, .LHSExpr: LHSExpr, .Field: LengthAndField.second, .DuringInit: DuringInit};
    if (!findSubobject(Info, E: LHSExpr, Obj, Sub: D, handler&: StartLifetime))
      return false;
  }

  return true;
}
6870
/// Evaluate a single call argument in place into its parameter slot (or into
/// a call-scoped temporary for a variadic argument).
///
/// \param PVD The parameter being initialized, or null for an argument that
///        matches a variadic function's ellipsis.
/// \param NonNull If true, a null pointer value is diagnosed as non-constant
///        (the parameter carries __attribute__((nonnull))).
/// \param EvaluatedArg If non-null, receives a pointer to the evaluated
///        argument value on success.
static bool EvaluateCallArg(const ParmVarDecl *PVD, const Expr *Arg,
                            CallRef Call, EvalInfo &Info, bool NonNull = false,
                            APValue **EvaluatedArg = nullptr) {
  LValue LV;
  // Create the parameter slot and register its destruction. For a vararg
  // argument, create a temporary.
  // FIXME: For calling conventions that destroy parameters in the callee,
  // should we consider performing destruction when the function returns
  // instead?
  APValue &V = PVD ? Info.CurrentCall->createParam(Args: Call, PVD, LV)
                   : Info.CurrentCall->createTemporary(Key: Arg, T: Arg->getType(),
                                                       Scope: ScopeKind::Call, LV);
  if (!EvaluateInPlace(Result&: V, Info, This: LV, E: Arg))
    return false;

  // Passing a null pointer to an __attribute__((nonnull)) parameter results in
  // undefined behavior, so is non-constant.
  if (NonNull && V.isLValue() && V.isNullPointer()) {
    Info.CCEDiag(E: Arg, DiagId: diag::note_non_null_attribute_failed);
    return false;
  }

  if (EvaluatedArg)
    *EvaluatedArg = &V;

  return true;
}
6898
6899/// Evaluate the arguments to a function call.
6900static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call,
6901 EvalInfo &Info, const FunctionDecl *Callee,
6902 bool RightToLeft = false,
6903 LValue *ObjectArg = nullptr) {
6904 bool Success = true;
6905 llvm::SmallBitVector ForbiddenNullArgs;
6906 if (Callee->hasAttr<NonNullAttr>()) {
6907 ForbiddenNullArgs.resize(N: Args.size());
6908 for (const auto *Attr : Callee->specific_attrs<NonNullAttr>()) {
6909 if (!Attr->args_size()) {
6910 ForbiddenNullArgs.set();
6911 break;
6912 } else
6913 for (auto Idx : Attr->args()) {
6914 unsigned ASTIdx = Idx.getASTIndex();
6915 if (ASTIdx >= Args.size())
6916 continue;
6917 ForbiddenNullArgs[ASTIdx] = true;
6918 }
6919 }
6920 }
6921 for (unsigned I = 0; I < Args.size(); I++) {
6922 unsigned Idx = RightToLeft ? Args.size() - I - 1 : I;
6923 const ParmVarDecl *PVD =
6924 Idx < Callee->getNumParams() ? Callee->getParamDecl(i: Idx) : nullptr;
6925 bool NonNull = !ForbiddenNullArgs.empty() && ForbiddenNullArgs[Idx];
6926 APValue *That = nullptr;
6927 if (!EvaluateCallArg(PVD, Arg: Args[Idx], Call, Info, NonNull, EvaluatedArg: &That)) {
6928 // If we're checking for a potential constant expression, evaluate all
6929 // initializers even if some of them fail.
6930 if (!Info.noteFailure())
6931 return false;
6932 Success = false;
6933 }
6934 if (PVD && PVD->isExplicitObjectParameter() && That && That->isLValue())
6935 ObjectArg->setFrom(Ctx: Info.Ctx, V: *That);
6936 }
6937 return Success;
6938}
6939
6940/// Perform a trivial copy from Param, which is the parameter of a copy or move
6941/// constructor or assignment operator.
6942static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param,
6943 const Expr *E, APValue &Result,
6944 bool CopyObjectRepresentation) {
6945 // Find the reference argument.
6946 CallStackFrame *Frame = Info.CurrentCall;
6947 APValue *RefValue = Info.getParamSlot(Call: Frame->Arguments, PVD: Param);
6948 if (!RefValue) {
6949 Info.FFDiag(E);
6950 return false;
6951 }
6952
6953 // Copy out the contents of the RHS object.
6954 LValue RefLValue;
6955 RefLValue.setFrom(Ctx: Info.Ctx, V: *RefValue);
6956 return handleLValueToRValueConversion(
6957 Info, Conv: E, Type: Param->getType().getNonReferenceType(), LVal: RefLValue, RVal&: Result,
6958 WantObjectRepresentation: CopyObjectRepresentation);
6959}
6960
6961/// Evaluate a function call.
6962static bool HandleFunctionCall(SourceLocation CallLoc,
6963 const FunctionDecl *Callee,
6964 const LValue *ObjectArg, const Expr *E,
6965 ArrayRef<const Expr *> Args, CallRef Call,
6966 const Stmt *Body, EvalInfo &Info,
6967 APValue &Result, const LValue *ResultSlot) {
6968 if (!Info.CheckCallLimit(Loc: CallLoc))
6969 return false;
6970
6971 CallStackFrame Frame(Info, E->getSourceRange(), Callee, ObjectArg, E, Call);
6972
6973 // For a trivial copy or move assignment, perform an APValue copy. This is
6974 // essential for unions, where the operations performed by the assignment
6975 // operator cannot be represented as statements.
6976 //
6977 // Skip this for non-union classes with no fields; in that case, the defaulted
6978 // copy/move does not actually read the object.
6979 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: Callee);
6980 if (MD && MD->isDefaulted() &&
6981 (MD->getParent()->isUnion() ||
6982 (MD->isTrivial() &&
6983 isReadByLvalueToRvalueConversion(RD: MD->getParent())))) {
6984 unsigned ExplicitOffset = MD->isExplicitObjectMemberFunction() ? 1 : 0;
6985 assert(ObjectArg &&
6986 (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()));
6987 APValue RHSValue;
6988 if (!handleTrivialCopy(Info, Param: MD->getParamDecl(i: 0), E: Args[0], Result&: RHSValue,
6989 CopyObjectRepresentation: MD->getParent()->isUnion()))
6990 return false;
6991
6992 LValue Obj;
6993 if (!handleAssignment(Info, E: Args[ExplicitOffset], LVal: *ObjectArg,
6994 LValType: MD->getFunctionObjectParameterReferenceType(),
6995 Val&: RHSValue))
6996 return false;
6997 ObjectArg->moveInto(V&: Result);
6998 return true;
6999 } else if (MD && isLambdaCallOperator(MD)) {
7000 // We're in a lambda; determine the lambda capture field maps unless we're
7001 // just constexpr checking a lambda's call operator. constexpr checking is
7002 // done before the captures have been added to the closure object (unless
7003 // we're inferring constexpr-ness), so we don't have access to them in this
7004 // case. But since we don't need the captures to constexpr check, we can
7005 // just ignore them.
7006 if (!Info.checkingPotentialConstantExpression())
7007 MD->getParent()->getCaptureFields(Captures&: Frame.LambdaCaptureFields,
7008 ThisCapture&: Frame.LambdaThisCaptureField);
7009 }
7010
7011 StmtResult Ret = {.Value: Result, .Slot: ResultSlot};
7012 EvalStmtResult ESR = EvaluateStmt(Result&: Ret, Info, S: Body);
7013 if (ESR == ESR_Succeeded) {
7014 if (Callee->getReturnType()->isVoidType())
7015 return true;
7016 Info.FFDiag(Loc: Callee->getEndLoc(), DiagId: diag::note_constexpr_no_return);
7017 }
7018 return ESR == ESR_Returned;
7019}
7020
/// Evaluate a constructor call.
///
/// \param E The expression requiring construction, used for diagnostics and
///        source ranges.
/// \param This The object being constructed.
/// \param Call The already-evaluated constructor arguments.
/// \param Definition The constructor definition to execute.
/// \param Result Receives the value of the constructed object.
static bool HandleConstructorCall(const Expr *E, const LValue &This,
                                  CallRef Call,
                                  const CXXConstructorDecl *Definition,
                                  EvalInfo &Info, APValue &Result) {
  SourceLocation CallLoc = E->getExprLoc();
  if (!Info.CheckCallLimit(Loc: CallLoc))
    return false;

  // Classes with virtual bases are not supported in constant evaluation.
  const CXXRecordDecl *RD = Definition->getParent();
  if (RD->getNumVBases()) {
    Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  // Track that this object is under construction, so accesses to it are
  // handled appropriately while bases and members are initialized.
  EvalInfo::EvaluatingConstructorRAII EvalObj(
      Info,
      ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries},
      RD->getNumBases());
  CallStackFrame Frame(Info, E->getSourceRange(), Definition, &This, E, Call);

  // FIXME: Creating an APValue just to hold a nonexistent return value is
  // wasteful.
  APValue RetVal;
  StmtResult Ret = {.Value: RetVal, .Slot: nullptr};

  // If it's a delegating constructor, delegate.
  if (Definition->isDelegatingConstructor()) {
    CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
    if ((*I)->getInit()->isValueDependent()) {
      if (!EvaluateDependentExpr(E: (*I)->getInit(), Info))
        return false;
    } else {
      FullExpressionRAII InitScope(Info);
      if (!EvaluateInPlace(Result, Info, This, E: (*I)->getInit()) ||
          !InitScope.destroy())
        return false;
    }
    return EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) != ESR_Failed;
  }

  // For a trivial copy or move constructor, perform an APValue copy. This is
  // essential for unions (or classes with anonymous union members), where the
  // operations performed by the constructor cannot be represented by
  // ctor-initializers.
  //
  // Skip this for empty non-union classes; we should not perform an
  // lvalue-to-rvalue conversion on them because their copy constructor does not
  // actually read them.
  if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() &&
      (Definition->getParent()->isUnion() ||
       (Definition->isTrivial() &&
        isReadByLvalueToRvalueConversion(RD: Definition->getParent())))) {
    return handleTrivialCopy(Info, Param: Definition->getParamDecl(i: 0), E, Result,
                             CopyObjectRepresentation: Definition->getParent()->isUnion());
  }

  // Reserve space for the struct members.
  if (!Result.hasValue()) {
    if (!RD->isUnion())
      Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
                       RD->getNumFields());
    else
      // A union starts with no active member.
      Result = APValue((const FieldDecl*)nullptr);
  }

  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

  // A scope for temporaries lifetime-extended by reference members.
  BlockScopeRAII LifetimeExtendedScope(Info);

  bool Success = true;
  unsigned BasesSeen = 0;
#ifndef NDEBUG
  CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
#endif
  CXXRecordDecl::field_iterator FieldIt = RD->field_begin();
  // Advance FieldIt to FD, default-initializing any intervening fields that
  // have no explicit mem-initializer.
  auto SkipToField = [&](FieldDecl *FD, bool Indirect) {
    // We might be initializing the same field again if this is an indirect
    // field initialization.
    if (FieldIt == RD->field_end() ||
        FieldIt->getFieldIndex() > FD->getFieldIndex()) {
      assert(Indirect && "fields out of order?");
      return;
    }

    // Default-initialize any fields with no explicit initializer.
    for (; !declaresSameEntity(D1: *FieldIt, D2: FD); ++FieldIt) {
      assert(FieldIt != RD->field_end() && "missing field?");
      if (!FieldIt->isUnnamedBitField())
        Success &= handleDefaultInitValue(
            T: FieldIt->getType(),
            Result&: Result.getStructField(i: FieldIt->getFieldIndex()));
    }
    ++FieldIt;
  };
  for (const auto *I : Definition->inits()) {
    LValue Subobject = This;
    LValue SubobjectParent = This;
    APValue *Value = &Result;

    // Determine the subobject to initialize.
    FieldDecl *FD = nullptr;
    if (I->isBaseInitializer()) {
      QualType BaseType(I->getBaseClass(), 0);
#ifndef NDEBUG
      // Non-virtual base classes are initialized in the order in the class
      // definition. We have already checked for virtual base classes.
      assert(!BaseIt->isVirtual() && "virtual base for literal type");
      assert(Info.Ctx.hasSameUnqualifiedType(BaseIt->getType(), BaseType) &&
             "base class initializers not in expected order");
      ++BaseIt;
#endif
      if (!HandleLValueDirectBase(Info, E: I->getInit(), Obj&: Subobject, Derived: RD,
                                  Base: BaseType->getAsCXXRecordDecl(), RL: &Layout))
        return false;
      Value = &Result.getStructBase(i: BasesSeen++);
    } else if ((FD = I->getMember())) {
      if (!HandleLValueMember(Info, E: I->getInit(), LVal&: Subobject, FD, RL: &Layout))
        return false;
      if (RD->isUnion()) {
        // Initializing a union member makes it the active member.
        Result = APValue(FD);
        Value = &Result.getUnionValue();
      } else {
        SkipToField(FD, false);
        Value = &Result.getStructField(i: FD->getFieldIndex());
      }
    } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
      // Walk the indirect field decl's chain to find the object to initialize,
      // and make sure we've initialized every step along it.
      auto IndirectFieldChain = IFD->chain();
      for (auto *C : IndirectFieldChain) {
        FD = cast<FieldDecl>(Val: C);
        CXXRecordDecl *CD = cast<CXXRecordDecl>(Val: FD->getParent());
        // Switch the union field if it differs. This happens if we had
        // preceding zero-initialization, and we're now initializing a union
        // subobject other than the first.
        // FIXME: In this case, the values of the other subobjects are
        // specified, since zero-initialization sets all padding bits to zero.
        if (!Value->hasValue() ||
            (Value->isUnion() &&
             !declaresSameEntity(D1: Value->getUnionField(), D2: FD))) {
          if (CD->isUnion())
            *Value = APValue(FD);
          else
            // FIXME: This immediately starts the lifetime of all members of
            // an anonymous struct. It would be preferable to strictly start
            // member lifetime in initialization order.
            Success &= handleDefaultInitValue(T: Info.Ctx.getCanonicalTagType(TD: CD),
                                              Result&: *Value);
        }
        // Store Subobject as its parent before updating it for the last element
        // in the chain.
        if (C == IndirectFieldChain.back())
          SubobjectParent = Subobject;
        if (!HandleLValueMember(Info, E: I->getInit(), LVal&: Subobject, FD))
          return false;
        if (CD->isUnion())
          Value = &Value->getUnionValue();
        else {
          if (C == IndirectFieldChain.front() && !RD->isUnion())
            SkipToField(FD, true);
          Value = &Value->getStructField(i: FD->getFieldIndex());
        }
      }
    } else {
      llvm_unreachable("unknown base initializer kind");
    }

    // Need to override This for implicit field initializers as in this case
    // This refers to innermost anonymous struct/union containing initializer,
    // not to currently constructed class.
    const Expr *Init = I->getInit();
    if (Init->isValueDependent()) {
      if (!EvaluateDependentExpr(E: Init, Info))
        return false;
    } else {
      ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent,
                                    isa<CXXDefaultInitExpr>(Val: Init));
      FullExpressionRAII InitScope(Info);
      if (FD && FD->getType()->isReferenceType() &&
          !FD->getType()->isFunctionReferenceType()) {
        LValue Result;
        if (!EvaluateInitForDeclOfReferenceType(Info, D: FD, Init, Result,
                                                Val&: *Value)) {
          if (!Info.noteFailure())
            return false;
          Success = false;
        }
      } else if (!EvaluateInPlace(Result&: *Value, Info, This: Subobject, E: Init) ||
                 (FD && FD->isBitField() &&
                  !truncateBitfieldValue(Info, E: Init, Value&: *Value, FD))) {
        // If we're checking for a potential constant expression, evaluate all
        // initializers even if some of them fail.
        if (!Info.noteFailure())
          return false;
        Success = false;
      }
    }

    // This is the point at which the dynamic type of the object becomes this
    // class type.
    if (I->isBaseInitializer() && BasesSeen == RD->getNumBases())
      EvalObj.finishedConstructingBases();
  }

  // Default-initialize any remaining fields.
  if (!RD->isUnion()) {
    for (; FieldIt != RD->field_end(); ++FieldIt) {
      if (!FieldIt->isUnnamedBitField())
        Success &= handleDefaultInitValue(
            T: FieldIt->getType(),
            Result&: Result.getStructField(i: FieldIt->getFieldIndex()));
    }
  }

  EvalObj.finishedConstructingFields();

  return Success &&
         EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) != ESR_Failed &&
         LifetimeExtendedScope.destroy();
}
7245
7246static bool HandleConstructorCall(const Expr *E, const LValue &This,
7247 ArrayRef<const Expr*> Args,
7248 const CXXConstructorDecl *Definition,
7249 EvalInfo &Info, APValue &Result) {
7250 CallScopeRAII CallScope(Info);
7251 CallRef Call = Info.CurrentCall->createCall(Callee: Definition);
7252 if (!EvaluateArgs(Args, Call, Info, Callee: Definition))
7253 return false;
7254
7255 return HandleConstructorCall(E, This, Call, Definition, Info, Result) &&
7256 CallScope.destroy();
7257}
7258
/// Destroy an object of type \p T whose value is \p Value and whose location
/// is \p This, running its destructor if one is needed.
///
/// Array elements are destroyed right-to-left. For class types, the
/// destructor body runs first, then non-static data members are destroyed in
/// reverse declaration order, then direct bases in reverse order. On success
/// \p Value becomes an absent APValue, marking the end of the lifetime.
static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
                                  const LValue &This, APValue &Value,
                                  QualType T) {
  // Objects can only be destroyed while they're within their lifetimes.
  // FIXME: We have no representation for whether an object of type nullptr_t
  // is in its lifetime; it usually doesn't matter. Perhaps we should model it
  // as indeterminate instead?
  if (Value.isAbsent() && !T->isNullPtrType()) {
    APValue Printable;
    This.moveInto(V&: Printable);
    Info.FFDiag(Loc: CallRange.getBegin(),
                DiagId: diag::note_constexpr_destroy_out_of_lifetime)
        << Printable.getAsString(Ctx: Info.Ctx, Ty: Info.Ctx.getLValueReferenceType(T));
    return false;
  }

  // Invent an expression for location purposes.
  // FIXME: We shouldn't need to do this.
  OpaqueValueExpr LocE(CallRange.getBegin(), Info.Ctx.IntTy, VK_PRValue);

  // For arrays, destroy elements right-to-left.
  if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
    uint64_t Size = CAT->getZExtSize();
    QualType ElemT = CAT->getElementType();

    if (!CheckArraySize(Info, CAT, CallLoc: CallRange.getBegin()))
      return false;

    // Start with an lvalue one past the last element, stepping backwards.
    LValue ElemLV = This;
    ElemLV.addArray(Info, E: &LocE, CAT);
    if (!HandleLValueArrayAdjustment(Info, E: &LocE, LVal&: ElemLV, EltTy: ElemT, Adjustment: Size))
      return false;

    // Ensure that we have actual array elements available to destroy; the
    // destructors might mutate the value, so we can't run them on the array
    // filler.
    if (Size && Size > Value.getArrayInitializedElts())
      expandArray(Array&: Value, Index: Value.getArraySize() - 1);

    // The size of the array might have been reduced by
    // a placement new.
    for (Size = Value.getArraySize(); Size != 0; --Size) {
      APValue &Elem = Value.getArrayInitializedElt(I: Size - 1);
      if (!HandleLValueArrayAdjustment(Info, E: &LocE, LVal&: ElemLV, EltTy: ElemT, Adjustment: -1) ||
          !HandleDestructionImpl(Info, CallRange, This: ElemLV, Value&: Elem, T: ElemT))
        return false;
    }

    // End the lifetime of this array now.
    Value = APValue();
    return true;
  }

  const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
  if (!RD) {
    // Non-class, non-array types with a nontrivial destruction semantic
    // (e.g. ObjC lifetime-qualified types) are not supported.
    if (T.isDestructedType()) {
      Info.FFDiag(Loc: CallRange.getBegin(),
                  DiagId: diag::note_constexpr_unsupported_destruction)
          << T;
      return false;
    }

    Value = APValue();
    return true;
  }

  if (RD->getNumVBases()) {
    Info.FFDiag(Loc: CallRange.getBegin(), DiagId: diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  const CXXDestructorDecl *DD = RD->getDestructor();
  if (!DD && !RD->hasTrivialDestructor()) {
    Info.FFDiag(Loc: CallRange.getBegin());
    return false;
  }

  if (!DD || DD->isTrivial() ||
      (RD->isAnonymousStructOrUnion() && RD->isUnion())) {
    // A trivial destructor just ends the lifetime of the object. Check for
    // this case before checking for a body, because we might not bother
    // building a body for a trivial destructor. Note that it doesn't matter
    // whether the destructor is constexpr in this case; all trivial
    // destructors are constexpr.
    //
    // If an anonymous union would be destroyed, some enclosing destructor must
    // have been explicitly defined, and the anonymous union destruction should
    // have no effect.
    Value = APValue();
    return true;
  }

  if (!Info.CheckCallLimit(Loc: CallRange.getBegin()))
    return false;

  const FunctionDecl *Definition = nullptr;
  const Stmt *Body = DD->getBody(Definition);

  if (!CheckConstexprFunction(Info, CallLoc: CallRange.getBegin(), Declaration: DD, Definition, Body))
    return false;

  CallStackFrame Frame(Info, CallRange, Definition, &This, /*CallExpr=*/nullptr,
                       CallRef());

  // We're now in the period of destruction of this object.
  unsigned BasesLeft = RD->getNumBases();
  EvalInfo::EvaluatingDestructorRAII EvalObj(
      Info,
      ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries});
  if (!EvalObj.DidInsert) {
    // C++2a [class.dtor]p19:
    //   the behavior is undefined if the destructor is invoked for an object
    //   whose lifetime has ended
    // (Note that formally the lifetime ends when the period of destruction
    // begins, even though certain uses of the object remain valid until the
    // period of destruction ends.)
    Info.FFDiag(Loc: CallRange.getBegin(), DiagId: diag::note_constexpr_double_destroy);
    return false;
  }

  // FIXME: Creating an APValue just to hold a nonexistent return value is
  // wasteful.
  APValue RetVal;
  StmtResult Ret = {.Value: RetVal, .Slot: nullptr};
  if (EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) == ESR_Failed)
    return false;

  // A union destructor does not implicitly destroy its members.
  if (RD->isUnion())
    return true;

  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

  // We don't have a good way to iterate fields in reverse, so collect all the
  // fields first and then walk them backwards.
  SmallVector<FieldDecl*, 16> Fields(RD->fields());
  for (const FieldDecl *FD : llvm::reverse(C&: Fields)) {
    if (FD->isUnnamedBitField())
      continue;

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E: &LocE, LVal&: Subobject, FD, RL: &Layout))
      return false;

    APValue *SubobjectValue = &Value.getStructField(i: FD->getFieldIndex());
    if (!HandleDestructionImpl(Info, CallRange, This: Subobject, Value&: *SubobjectValue,
                               T: FD->getType()))
      return false;
  }

  if (BasesLeft != 0)
    EvalObj.startedDestroyingBases();

  // Destroy base classes in reverse order.
  for (const CXXBaseSpecifier &Base : llvm::reverse(C: RD->bases())) {
    --BasesLeft;

    QualType BaseType = Base.getType();
    LValue Subobject = This;
    if (!HandleLValueDirectBase(Info, E: &LocE, Obj&: Subobject, Derived: RD,
                                Base: BaseType->getAsCXXRecordDecl(), RL: &Layout))
      return false;

    APValue *SubobjectValue = &Value.getStructBase(i: BasesLeft);
    if (!HandleDestructionImpl(Info, CallRange, This: Subobject, Value&: *SubobjectValue,
                               T: BaseType))
      return false;
  }
  assert(BasesLeft == 0 && "NumBases was wrong?");

  // The period of destruction ends now. The object is gone.
  Value = APValue();
  return true;
}
7433
namespace {
/// Handler passed to findSubobject to run destruction on the designated
/// subobject.
struct DestroyObjectHandler {
  EvalInfo &Info;
  /// The destruction expression; used for diagnostic locations.
  const Expr *E;
  /// The location of the object being destroyed.
  const LValue &This;
  const AccessKinds AccessKind;

  typedef bool result_type;
  bool failed() { return false; }
  bool found(APValue &Subobj, QualType SubobjType) {
    return HandleDestructionImpl(Info, CallRange: E->getSourceRange(), This, Value&: Subobj,
                                 T: SubobjType);
  }
  // Destroying a single component of a _Complex value is not representable.
  bool found(APSInt &Value, QualType SubobjType) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_destroy_complex_elem);
    return false;
  }
  bool found(APFloat &Value, QualType SubobjType) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_destroy_complex_elem);
    return false;
  }
};
} // end anonymous namespace
7457
7458/// Perform a destructor or pseudo-destructor call on the given object, which
7459/// might in general not be a complete object.
7460static bool HandleDestruction(EvalInfo &Info, const Expr *E,
7461 const LValue &This, QualType ThisType) {
7462 CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Destroy, LVal: This, LValType: ThisType);
7463 DestroyObjectHandler Handler = {.Info: Info, .E: E, .This: This, .AccessKind: AK_Destroy};
7464 return Obj && findSubobject(Info, E, Obj, Sub: This.Designator, handler&: Handler);
7465}
7466
/// Destroy and end the lifetime of the given complete object.
///
/// \param LVBase The base of the object to destroy.
/// \param Value The object's current value; reset on successful destruction.
static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
                              APValue::LValueBase LVBase, APValue &Value,
                              QualType T) {
  // If we've had an unmodeled side-effect, we can't rely on mutable state
  // (such as the object we're about to destroy) being correct.
  if (Info.EvalStatus.HasSideEffects)
    return false;

  LValue LV;
  LV.set(B: {LVBase});
  return HandleDestructionImpl(Info, CallRange: Loc, This: LV, Value, T);
}
7480
7481/// Perform a call to 'operator new' or to `__builtin_operator_new'.
7482static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
7483 LValue &Result) {
7484 if (Info.checkingPotentialConstantExpression() ||
7485 Info.SpeculativeEvaluationDepth)
7486 return false;
7487
7488 // This is permitted only within a call to std::allocator<T>::allocate.
7489 auto Caller = Info.getStdAllocatorCaller(FnName: "allocate");
7490 if (!Caller) {
7491 Info.FFDiag(Loc: E->getExprLoc(), DiagId: Info.getLangOpts().CPlusPlus20
7492 ? diag::note_constexpr_new_untyped
7493 : diag::note_constexpr_new);
7494 return false;
7495 }
7496
7497 QualType ElemType = Caller.ElemType;
7498 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
7499 Info.FFDiag(Loc: E->getExprLoc(),
7500 DiagId: diag::note_constexpr_new_not_complete_object_type)
7501 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
7502 return false;
7503 }
7504
7505 APSInt ByteSize;
7506 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: ByteSize, Info))
7507 return false;
7508 bool IsNothrow = false;
7509 for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) {
7510 EvaluateIgnoredValue(Info, E: E->getArg(Arg: I));
7511 IsNothrow |= E->getType()->isNothrowT();
7512 }
7513
7514 CharUnits ElemSize;
7515 if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: ElemType, Size&: ElemSize))
7516 return false;
7517 APInt Size, Remainder;
7518 APInt ElemSizeAP(ByteSize.getBitWidth(), ElemSize.getQuantity());
7519 APInt::udivrem(LHS: ByteSize, RHS: ElemSizeAP, Quotient&: Size, Remainder);
7520 if (Remainder != 0) {
7521 // This likely indicates a bug in the implementation of 'std::allocator'.
7522 Info.FFDiag(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_operator_new_bad_size)
7523 << ByteSize << APSInt(ElemSizeAP, true) << ElemType;
7524 return false;
7525 }
7526
7527 if (!Info.CheckArraySize(Loc: E->getBeginLoc(), BitWidth: ByteSize.getActiveBits(),
7528 ElemCount: Size.getZExtValue(), /*Diag=*/!IsNothrow)) {
7529 if (IsNothrow) {
7530 Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
7531 return true;
7532 }
7533 return false;
7534 }
7535
7536 QualType AllocType = Info.Ctx.getConstantArrayType(
7537 EltTy: ElemType, ArySize: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
7538 APValue *Val = Info.createHeapAlloc(E: Caller.Call, T: AllocType, LV&: Result);
7539 *Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue());
7540 Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val&: AllocType));
7541 return true;
7542}
7543
7544static bool hasVirtualDestructor(QualType T) {
7545 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
7546 if (CXXDestructorDecl *DD = RD->getDestructor())
7547 return DD->isVirtual();
7548 return false;
7549}
7550
7551static const FunctionDecl *getVirtualOperatorDelete(QualType T) {
7552 if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
7553 if (CXXDestructorDecl *DD = RD->getDestructor())
7554 return DD->isVirtual() ? DD->getOperatorDelete() : nullptr;
7555 return nullptr;
7556}
7557
/// Check that the given object is a suitable pointer to a heap allocation that
/// still exists and is of the right kind for the purpose of a deletion.
///
/// On success, returns the heap allocation to deallocate. On failure, produces
/// a diagnostic and returns std::nullopt.
///
/// \param Pointer The operand of the deallocation.
/// \param DeallocKind The kind of deallocation being performed (scalar delete,
///        array delete, or std::allocator deallocation); must match the kind
///        of allocation that created the object.
static std::optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
                                                 const LValue &Pointer,
                                                 DynAlloc::Kind DeallocKind) {
  auto PointerAsString = [&] {
    return Pointer.toString(Ctx&: Info.Ctx, T: Info.Ctx.VoidPtrTy);
  };

  // The pointer must refer to a dynamic allocation made during this
  // evaluation.
  DynamicAllocLValue DA = Pointer.Base.dyn_cast<DynamicAllocLValue>();
  if (!DA) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_delete_not_heap_alloc)
        << PointerAsString();
    if (Pointer.Base)
      NoteLValueLocation(Info, Base: Pointer.Base);
    return std::nullopt;
  }

  // The allocation must not already have been deallocated.
  std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
  if (!Alloc) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_double_delete);
    return std::nullopt;
  }

  // The form of deallocation must match the form of allocation (e.g. no
  // 'delete[]' of a scalar 'new').
  if (DeallocKind != (*Alloc)->getKind()) {
    QualType AllocType = Pointer.Base.getDynamicAllocType();
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_delete_mismatch)
        << DeallocKind << (*Alloc)->getKind() << AllocType;
    NoteLValueLocation(Info, Base: Pointer.Base);
    return std::nullopt;
  }

  // The pointer must point to the complete allocated object, not a subobject
  // or past-the-end location.
  bool Subobject = false;
  if (DeallocKind == DynAlloc::New) {
    Subobject = Pointer.Designator.MostDerivedPathLength != 0 ||
                Pointer.Designator.isOnePastTheEnd();
  } else {
    Subobject = Pointer.Designator.Entries.size() != 1 ||
                Pointer.Designator.Entries[0].getAsArrayIndex() != 0;
  }
  if (Subobject) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_delete_subobject)
        << PointerAsString() << Pointer.Designator.isOnePastTheEnd();
    return std::nullopt;
  }

  return Alloc;
}
7609
// Perform a call to 'operator delete' or '__builtin_operator_delete'.
static bool HandleOperatorDeleteCall(EvalInfo &Info, const CallExpr *E) {
  // Deallocation cannot be modeled while merely checking for a potential
  // constant expression or during speculative evaluation.
  if (Info.checkingPotentialConstantExpression() ||
      Info.SpeculativeEvaluationDepth)
    return false;

  // This is permitted only within a call to std::allocator<T>::deallocate.
  if (!Info.getStdAllocatorCaller(FnName: "deallocate")) {
    Info.FFDiag(Loc: E->getExprLoc());
    return true;
  }

  LValue Pointer;
  if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: Pointer, Info))
    return false;
  // Remaining arguments are evaluated only for their side effects.
  for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I)
    EvaluateIgnoredValue(Info, E: E->getArg(Arg: I));

  if (Pointer.Designator.Invalid)
    return false;

  // Deleting a null pointer would have no effect, but it's not permitted by
  // std::allocator<T>::deallocate's contract.
  if (Pointer.isNullPointer()) {
    Info.CCEDiag(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_deallocate_null);
    return true;
  }

  if (!CheckDeleteKind(Info, E, Pointer, DeallocKind: DynAlloc::StdAllocator))
    return false;

  // Drop the allocation from the tracked set so a second deallocation of the
  // same pointer is diagnosed as a double delete.
  Info.HeapAllocs.erase(x: Pointer.Base.get<DynamicAllocLValue>());
  return true;
}
7644
7645//===----------------------------------------------------------------------===//
7646// Generic Evaluation
7647//===----------------------------------------------------------------------===//
7648namespace {
7649
7650class BitCastBuffer {
7651 // FIXME: We're going to need bit-level granularity when we support
7652 // bit-fields.
7653 // FIXME: Its possible under the C++ standard for 'char' to not be 8 bits, but
7654 // we don't support a host or target where that is the case. Still, we should
7655 // use a more generic type in case we ever do.
7656 SmallVector<std::optional<unsigned char>, 32> Bytes;
7657
7658 static_assert(std::numeric_limits<unsigned char>::digits >= 8,
7659 "Need at least 8 bit unsigned char");
7660
7661 bool TargetIsLittleEndian;
7662
7663public:
7664 BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
7665 : Bytes(Width.getQuantity()),
7666 TargetIsLittleEndian(TargetIsLittleEndian) {}
7667
7668 [[nodiscard]] bool readObject(CharUnits Offset, CharUnits Width,
7669 SmallVectorImpl<unsigned char> &Output) const {
7670 for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
7671 // If a byte of an integer is uninitialized, then the whole integer is
7672 // uninitialized.
7673 if (!Bytes[I.getQuantity()])
7674 return false;
7675 Output.push_back(Elt: *Bytes[I.getQuantity()]);
7676 }
7677 if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
7678 std::reverse(first: Output.begin(), last: Output.end());
7679 return true;
7680 }
7681
7682 void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
7683 if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
7684 std::reverse(first: Input.begin(), last: Input.end());
7685
7686 size_t Index = 0;
7687 for (unsigned char Byte : Input) {
7688 assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
7689 Bytes[Offset.getQuantity() + Index] = Byte;
7690 ++Index;
7691 }
7692 }
7693
7694 size_t size() { return Bytes.size(); }
7695};
7696
/// Traverse an APValue to produce a BitCastBuffer, emulating how the current
/// target would represent the value at runtime.
class APValueToBufferConverter {
  EvalInfo &Info;
  BitCastBuffer Buffer;
  const CastExpr *BCE;

  APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth,
                           const CastExpr *BCE)
      : Info(Info),
        Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
        BCE(BCE) {}

  // Write out Val with type Ty into Buffer starting at offset zero.
  bool visit(const APValue &Val, QualType Ty) {
    return visit(Val, Ty, Offset: CharUnits::fromQuantity(Quantity: 0));
  }

  // Write out Val with type Ty into Buffer starting at Offset.
  bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
    assert((size_t)Offset.getQuantity() <= Buffer.size());

    // As a special case, nullptr_t has an indeterminate value.
    if (Ty->isNullPtrType())
      return true;

    // Dig through Src to find the byte at SrcOffset.
    switch (Val.getKind()) {
    case APValue::Indeterminate:
    case APValue::None:
      // Nothing to write; the corresponding bytes stay uninitialized.
      return true;

    case APValue::Int:
      return visitInt(Val: Val.getInt(), Ty, Offset);
    case APValue::Float:
      return visitFloat(Val: Val.getFloat(), Ty, Offset);
    case APValue::Array:
      return visitArray(Val, Ty, Offset);
    case APValue::Struct:
      return visitRecord(Val, Ty, Offset);
    case APValue::Vector:
      return visitVector(Val, Ty, Offset);

    case APValue::ComplexInt:
    case APValue::ComplexFloat:
      return visitComplex(Val, Ty, Offset);
    case APValue::FixedPoint:
      // FIXME: We should support these.

    case APValue::Union:
    case APValue::MemberPointer:
    case APValue::AddrLabelDiff: {
      Info.FFDiag(Loc: BCE->getBeginLoc(),
                  DiagId: diag::note_constexpr_bit_cast_unsupported_type)
          << Ty;
      return false;
    }

    case APValue::LValue:
      llvm_unreachable("LValue subobject in bit_cast?");
    }
    llvm_unreachable("Unhandled APValue::ValueKind");
  }

  // Write out a struct/class value: base classes first, then fields, each at
  // the offset given by the target's record layout.
  bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
    const RecordDecl *RD = Ty->getAsRecordDecl();
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

    // Visit the base classes.
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
        const APValue &Base = Val.getStructBase(i: I);

        // Can happen in error cases.
        if (!Base.isStruct())
          return false;

        if (!visitRecord(Val: Base, Ty: BS.getType(),
                         Offset: Layout.getBaseClassOffset(Base: BaseDecl) + Offset))
          return false;
      }
    }

    // Visit the fields.
    unsigned FieldIdx = 0;
    for (FieldDecl *FD : RD->fields()) {
      // Bit-fields are not supported by the constexpr bit_cast evaluator.
      if (FD->isBitField()) {
        Info.FFDiag(Loc: BCE->getBeginLoc(),
                    DiagId: diag::note_constexpr_bit_cast_unsupported_bitfield);
        return false;
      }

      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldNo: FieldIdx);

      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 &&
             "only bit-fields can have sub-char alignment");
      CharUnits FieldOffset =
          Info.Ctx.toCharUnitsFromBits(BitSize: FieldOffsetBits) + Offset;
      QualType FieldTy = FD->getType();
      if (!visit(Val: Val.getStructField(i: FieldIdx), Ty: FieldTy, Offset: FieldOffset))
        return false;
      ++FieldIdx;
    }

    return true;
  }

  // Write out a constant array, expanding the array filler (if any) into the
  // trailing elements.
  bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
    const auto *CAT =
        dyn_cast_or_null<ConstantArrayType>(Val: Ty->getAsArrayTypeUnsafe());
    if (!CAT)
      return false;

    CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(T: CAT->getElementType());
    unsigned NumInitializedElts = Val.getArrayInitializedElts();
    unsigned ArraySize = Val.getArraySize();
    // First, initialize the initialized elements.
    for (unsigned I = 0; I != NumInitializedElts; ++I) {
      const APValue &SubObj = Val.getArrayInitializedElt(I);
      if (!visit(Val: SubObj, Ty: CAT->getElementType(), Offset: Offset + I * ElemWidth))
        return false;
    }

    // Next, initialize the rest of the array using the filler.
    if (Val.hasArrayFiller()) {
      const APValue &Filler = Val.getArrayFiller();
      for (unsigned I = NumInitializedElts; I != ArraySize; ++I) {
        if (!visit(Val: Filler, Ty: CAT->getElementType(), Offset: Offset + I * ElemWidth))
          return false;
      }
    }

    return true;
  }

  // Write out a complex value as two consecutive elements (real, then
  // imaginary).
  bool visitComplex(const APValue &Val, QualType Ty, CharUnits Offset) {
    const ComplexType *ComplexTy = Ty->castAs<ComplexType>();
    QualType EltTy = ComplexTy->getElementType();
    CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy);
    bool IsInt = Val.isComplexInt();

    if (IsInt) {
      if (!visitInt(Val: Val.getComplexIntReal(), Ty: EltTy,
                    Offset: Offset + (0 * EltSizeChars)))
        return false;
      if (!visitInt(Val: Val.getComplexIntImag(), Ty: EltTy,
                    Offset: Offset + (1 * EltSizeChars)))
        return false;
    } else {
      if (!visitFloat(Val: Val.getComplexFloatReal(), Ty: EltTy,
                      Offset: Offset + (0 * EltSizeChars)))
        return false;
      if (!visitFloat(Val: Val.getComplexFloatImag(), Ty: EltTy,
                      Offset: Offset + (1 * EltSizeChars)))
        return false;
    }

    return true;
  }

  // Write out a vector value, either as packed bits (for 1-bit bool vectors)
  // or element-by-element at each element's offset.
  bool visitVector(const APValue &Val, QualType Ty, CharUnits Offset) {
    const VectorType *VTy = Ty->castAs<VectorType>();
    QualType EltTy = VTy->getElementType();
    unsigned NElts = VTy->getNumElements();

    if (VTy->isPackedVectorBoolType(ctx: Info.Ctx)) {
      // Special handling for OpenCL bool vectors:
      // Since these vectors are stored as packed bits, but we can't write
      // individual bits to the BitCastBuffer, we'll buffer all of the elements
      // together into an appropriately sized APInt and write them all out at
      // once. Because we don't accept vectors where NElts * EltSize isn't a
      // multiple of the char size, there will be no padding space, so we don't
      // have to worry about writing data which should have been left
      // uninitialized.
      bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();

      llvm::APInt Res = llvm::APInt::getZero(numBits: NElts);
      for (unsigned I = 0; I < NElts; ++I) {
        const llvm::APSInt &EltAsInt = Val.getVectorElt(I).getInt();
        assert(EltAsInt.isUnsigned() && EltAsInt.getBitWidth() == 1 &&
               "bool vector element must be 1-bit unsigned integer!");

        Res.insertBits(SubBits: EltAsInt, bitPosition: BigEndian ? (NElts - I - 1) : I);
      }

      SmallVector<uint8_t, 8> Bytes(NElts / 8);
      llvm::StoreIntToMemory(IntVal: Res, Dst: &*Bytes.begin(), StoreBytes: NElts / 8);
      Buffer.writeObject(Offset, Input&: Bytes);
    } else {
      // Iterate over each of the elements and write them out to the buffer at
      // the appropriate offset.
      CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy);
      for (unsigned I = 0; I < NElts; ++I) {
        if (!visit(Val: Val.getVectorElt(I), Ty: EltTy, Offset: Offset + I * EltSizeChars))
          return false;
      }
    }

    return true;
  }

  // Write out an integer value. A bool is extended to the type's full
  // in-memory size before being stored.
  bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
    APSInt AdjustedVal = Val;
    unsigned Width = AdjustedVal.getBitWidth();
    if (Ty->isBooleanType()) {
      Width = Info.Ctx.getTypeSize(T: Ty);
      AdjustedVal = AdjustedVal.extend(width: Width);
    }

    SmallVector<uint8_t, 8> Bytes(Width / 8);
    llvm::StoreIntToMemory(IntVal: AdjustedVal, Dst: &*Bytes.begin(), StoreBytes: Width / 8);
    Buffer.writeObject(Offset, Input&: Bytes);
    return true;
  }

  // Write out a floating-point value via its bit pattern.
  bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
    APSInt AsInt(Val.bitcastToAPInt());
    return visitInt(Val: AsInt, Ty, Offset);
  }

public:
  // Serialize Src (the operand of BCE) into a buffer sized for the
  // destination type of the bit_cast.
  static std::optional<BitCastBuffer>
  convert(EvalInfo &Info, const APValue &Src, const CastExpr *BCE) {
    CharUnits DstSize = Info.Ctx.getTypeSizeInChars(T: BCE->getType());
    APValueToBufferConverter Converter(Info, DstSize, BCE);
    if (!Converter.visit(Val: Src, Ty: BCE->getSubExpr()->getType()))
      return std::nullopt;
    return Converter.Buffer;
  }
};
7928
/// Convert a BitCastBuffer back into an APValue of the destination type.
class BufferToAPValueConverter {
  EvalInfo &Info;
  const BitCastBuffer &Buffer;
  const CastExpr *BCE;

  BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer,
                           const CastExpr *BCE)
      : Info(Info), Buffer(Buffer), BCE(BCE) {}

  // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
  // with an invalid type, so anything left is a deficiency on our part (FIXME).
  // Ideally this will be unreachable.
  std::nullopt_t unsupportedType(QualType Ty) {
    Info.FFDiag(Loc: BCE->getBeginLoc(),
                DiagId: diag::note_constexpr_bit_cast_unsupported_type)
        << Ty;
    return std::nullopt;
  }

  // Diagnose a buffer value whose bits do not fit in the destination type.
  std::nullopt_t unrepresentableValue(QualType Ty, const APSInt &Val) {
    Info.FFDiag(Loc: BCE->getBeginLoc(),
                DiagId: diag::note_constexpr_bit_cast_unrepresentable_value)
        << Ty << toString(I: Val, /*Radix=*/10);
    return std::nullopt;
  }

  // Read a scalar of builtin type from the buffer. EnumSugar, if non-null, is
  // the enum type this builtin type is the underlying representation of.
  std::optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
                               const EnumType *EnumSugar = nullptr) {
    if (T->isNullPtrType()) {
      uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QT: QualType(T, 0));
      return APValue((Expr *)nullptr,
                     /*Offset=*/CharUnits::fromQuantity(Quantity: NullValue),
                     APValue::NoLValuePath{}, /*IsNullPtr=*/true);
    }

    CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);

    // Work around floating point types that contain unused padding bytes. This
    // is really just `long double` on x86, which is the only fundamental type
    // with padding bytes.
    if (T->isRealFloatingType()) {
      const llvm::fltSemantics &Semantics =
          Info.Ctx.getFloatTypeSemantics(T: QualType(T, 0));
      unsigned NumBits = llvm::APFloatBase::getSizeInBits(Sem: Semantics);
      assert(NumBits % 8 == 0);
      CharUnits NumBytes = CharUnits::fromQuantity(Quantity: NumBits / 8);
      if (NumBytes != SizeOf)
        SizeOf = NumBytes;
    }

    SmallVector<uint8_t, 8> Bytes;
    if (!Buffer.readObject(Offset, Width: SizeOf, Output&: Bytes)) {
      // If this is std::byte or unsigned char, then its okay to store an
      // indeterminate value.
      bool IsStdByte = EnumSugar && EnumSugar->isStdByteType();
      bool IsUChar =
          !EnumSugar && (T->isSpecificBuiltinType(K: BuiltinType::UChar) ||
                         T->isSpecificBuiltinType(K: BuiltinType::Char_U));
      if (!IsStdByte && !IsUChar) {
        QualType DisplayType(EnumSugar ? (const Type *)EnumSugar : T, 0);
        Info.FFDiag(Loc: BCE->getExprLoc(),
                    DiagId: diag::note_constexpr_bit_cast_indet_dest)
            << DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
        return std::nullopt;
      }

      return APValue::IndeterminateValue();
    }

    APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true);
    llvm::LoadIntFromMemory(IntVal&: Val, Src: &*Bytes.begin(), LoadBytes: Bytes.size());

    if (T->isIntegralOrEnumerationType()) {
      Val.setIsSigned(T->isSignedIntegerOrEnumerationType());

      unsigned IntWidth = Info.Ctx.getIntWidth(T: QualType(T, 0));
      if (IntWidth != Val.getBitWidth()) {
        // Truncating must not lose value bits (e.g. a bool read from a byte
        // whose value is not 0 or 1 is unrepresentable).
        APSInt Truncated = Val.trunc(width: IntWidth);
        if (Truncated.extend(width: Val.getBitWidth()) != Val)
          return unrepresentableValue(Ty: QualType(T, 0), Val);
        Val = Truncated;
      }

      return APValue(Val);
    }

    if (T->isRealFloatingType()) {
      const llvm::fltSemantics &Semantics =
          Info.Ctx.getFloatTypeSemantics(T: QualType(T, 0));
      return APValue(APFloat(Semantics, Val));
    }

    return unsupportedType(Ty: QualType(T, 0));
  }

  // Read a struct/class value: base classes first, then fields, each at the
  // offset given by the target's record layout.
  std::optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
    const RecordDecl *RD = RTy->getAsRecordDecl();
    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

    unsigned NumBases = 0;
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD))
      NumBases = CXXRD->getNumBases();

    APValue ResultVal(APValue::UninitStruct(), NumBases, RD->getNumFields());

    // Visit the base classes.
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();

        std::optional<APValue> SubObj = visitType(
            Ty: BS.getType(), Offset: Layout.getBaseClassOffset(Base: BaseDecl) + Offset);
        if (!SubObj)
          return std::nullopt;
        ResultVal.getStructBase(i: I) = *SubObj;
      }
    }

    // Visit the fields.
    unsigned FieldIdx = 0;
    for (FieldDecl *FD : RD->fields()) {
      // FIXME: We don't currently support bit-fields. A lot of the logic for
      // this is in CodeGen, so we need to factor it around.
      if (FD->isBitField()) {
        Info.FFDiag(Loc: BCE->getBeginLoc(),
                    DiagId: diag::note_constexpr_bit_cast_unsupported_bitfield);
        return std::nullopt;
      }

      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldNo: FieldIdx);
      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0);

      CharUnits FieldOffset =
          CharUnits::fromQuantity(Quantity: FieldOffsetBits / Info.Ctx.getCharWidth()) +
          Offset;
      QualType FieldTy = FD->getType();
      std::optional<APValue> SubObj = visitType(Ty: FieldTy, Offset: FieldOffset);
      if (!SubObj)
        return std::nullopt;
      ResultVal.getStructField(i: FieldIdx) = *SubObj;
      ++FieldIdx;
    }

    return ResultVal;
  }

  // Read an enum value via its underlying integer representation.
  std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
    QualType RepresentationType =
        Ty->getDecl()->getDefinitionOrSelf()->getIntegerType();
    assert(!RepresentationType.isNull() &&
           "enum forward decl should be caught by Sema");
    const auto *AsBuiltin =
        RepresentationType.getCanonicalType()->castAs<BuiltinType>();
    // Recurse into the underlying type. Treat std::byte transparently as
    // unsigned char.
    return visit(T: AsBuiltin, Offset, /*EnumTy=*/EnumSugar: Ty);
  }

  // Read a constant array element-by-element.
  std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
    size_t Size = Ty->getLimitedSize();
    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(T: Ty->getElementType());

    APValue ArrayValue(APValue::UninitArray(), Size, Size);
    for (size_t I = 0; I != Size; ++I) {
      std::optional<APValue> ElementValue =
          visitType(Ty: Ty->getElementType(), Offset: Offset + I * ElementWidth);
      if (!ElementValue)
        return std::nullopt;
      ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
    }

    return ArrayValue;
  }

  // Read a complex value as two consecutive elements (real, then imaginary).
  std::optional<APValue> visit(const ComplexType *Ty, CharUnits Offset) {
    QualType ElementType = Ty->getElementType();
    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(T: ElementType);
    bool IsInt = ElementType->isIntegerType();

    std::optional<APValue> Values[2];
    for (unsigned I = 0; I != 2; ++I) {
      Values[I] = visitType(Ty: Ty->getElementType(), Offset: Offset + I * ElementWidth);
      if (!Values[I])
        return std::nullopt;
    }

    if (IsInt)
      return APValue(Values[0]->getInt(), Values[1]->getInt());
    return APValue(Values[0]->getFloat(), Values[1]->getFloat());
  }

  // Read a vector value, either as packed bits (for 1-bit bool vectors) or
  // element-by-element at each element's offset.
  std::optional<APValue> visit(const VectorType *VTy, CharUnits Offset) {
    QualType EltTy = VTy->getElementType();
    unsigned NElts = VTy->getNumElements();
    unsigned EltSize =
        VTy->isPackedVectorBoolType(ctx: Info.Ctx) ? 1 : Info.Ctx.getTypeSize(T: EltTy);

    SmallVector<APValue, 4> Elts;
    Elts.reserve(N: NElts);
    if (VTy->isPackedVectorBoolType(ctx: Info.Ctx)) {
      // Special handling for OpenCL bool vectors:
      // Since these vectors are stored as packed bits, but we can't read
      // individual bits from the BitCastBuffer, we'll buffer all of the
      // elements together into an appropriately sized APInt and write them all
      // out at once. Because we don't accept vectors where NElts * EltSize
      // isn't a multiple of the char size, there will be no padding space, so
      // we don't have to worry about reading any padding data which didn't
      // actually need to be accessed.
      bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();

      SmallVector<uint8_t, 8> Bytes;
      Bytes.reserve(N: NElts / 8);
      if (!Buffer.readObject(Offset, Width: CharUnits::fromQuantity(Quantity: NElts / 8), Output&: Bytes))
        return std::nullopt;

      APSInt SValInt(NElts, true);
      llvm::LoadIntFromMemory(IntVal&: SValInt, Src: &*Bytes.begin(), LoadBytes: Bytes.size());

      for (unsigned I = 0; I < NElts; ++I) {
        llvm::APInt Elt =
            SValInt.extractBits(numBits: 1, bitPosition: (BigEndian ? NElts - I - 1 : I) * EltSize);
        Elts.emplace_back(
            Args: APSInt(std::move(Elt), !EltTy->isSignedIntegerType()));
      }
    } else {
      // Iterate over each of the elements and read them from the buffer at
      // the appropriate offset.
      CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy);
      for (unsigned I = 0; I < NElts; ++I) {
        std::optional<APValue> EltValue =
            visitType(Ty: EltTy, Offset: Offset + I * EltSizeChars);
        if (!EltValue)
          return std::nullopt;
        Elts.push_back(Elt: std::move(*EltValue));
      }
    }

    return APValue(Elts.data(), Elts.size());
  }

  // Fallback for any type class without a dedicated overload above.
  std::optional<APValue> visit(const Type *Ty, CharUnits Offset) {
    return unsupportedType(Ty: QualType(Ty, 0));
  }

  // Dispatch on the canonical type class to the matching visit overload.
  std::optional<APValue> visitType(QualType Ty, CharUnits Offset) {
    QualType Can = Ty.getCanonicalType();

    switch (Can->getTypeClass()) {
#define TYPE(Class, Base)                                                      \
  case Type::Class:                                                            \
    return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)                                        \
  case Type::Class:                                                            \
    llvm_unreachable("non-canonical type should be impossible!");
#define DEPENDENT_TYPE(Class, Base)                                            \
  case Type::Class:                                                            \
    llvm_unreachable(                                                          \
        "dependent types aren't supported in the constant evaluator!");
#define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base)                            \
  case Type::Class:                                                            \
    llvm_unreachable("either dependent or not canonical!");
#include "clang/AST/TypeNodes.inc"
    }
    llvm_unreachable("Unhandled Type::TypeClass");
  }

public:
  // Pull out a full value of type DstType.
  static std::optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
                                        const CastExpr *BCE) {
    BufferToAPValueConverter Converter(Info, Buffer, BCE);
    return Converter.visitType(Ty: BCE->getType(), Offset: CharUnits::fromQuantity(Quantity: 0));
  }
};
8206
/// Determine whether \p Ty may (recursively) appear as the source or
/// destination type of a constexpr bit_cast, diagnosing via \p Info when it
/// is non-null. \p CheckingDest selects the diagnostic wording for the
/// destination vs. the source type.
static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
                                                 QualType Ty, EvalInfo *Info,
                                                 const ASTContext &Ctx,
                                                 bool CheckingDest) {
  Ty = Ty.getCanonicalType();

  // Report that Ty itself is invalid for the given Reason code.
  auto diag = [&](int Reason) {
    if (Info)
      Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_invalid_type)
          << CheckingDest << (Reason == 4) << Reason;
    return false;
  };
  // Note the subobject (base class or field) that made Ty invalid.
  auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
    if (Info)
      Info->Note(Loc: NoteLoc, DiagId: diag::note_constexpr_bit_cast_invalid_subtype)
          << NoteTy << Construct << Ty;
    return false;
  };

  if (Ty->isUnionType())
    return diag(0);
  if (Ty->isPointerType())
    return diag(1);
  if (Ty->isMemberPointerType())
    return diag(2);
  if (Ty.isVolatileQualified())
    return diag(3);

  // Recursively check the base classes and fields of record types.
  if (RecordDecl *Record = Ty->getAsRecordDecl()) {
    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: Record)) {
      for (CXXBaseSpecifier &BS : CXXRD->bases())
        if (!checkBitCastConstexprEligibilityType(Loc, Ty: BS.getType(), Info, Ctx,
                                                  CheckingDest))
          return note(1, BS.getType(), BS.getBeginLoc());
    }
    for (FieldDecl *FD : Record->fields()) {
      if (FD->getType()->isReferenceType())
        return diag(4);
      if (!checkBitCastConstexprEligibilityType(Loc, Ty: FD->getType(), Info, Ctx,
                                                CheckingDest))
        return note(0, FD->getType(), FD->getBeginLoc());
    }
  }

  // An array is valid iff its element type is.
  if (Ty->isArrayType() &&
      !checkBitCastConstexprEligibilityType(Loc, Ty: Ctx.getBaseElementType(QT: Ty),
                                            Info, Ctx, CheckingDest))
    return false;

  if (const auto *VTy = Ty->getAs<VectorType>()) {
    QualType EltTy = VTy->getElementType();
    unsigned NElts = VTy->getNumElements();
    unsigned EltSize =
        VTy->isPackedVectorBoolType(ctx: Ctx) ? 1 : Ctx.getTypeSize(T: EltTy);

    if ((NElts * EltSize) % Ctx.getCharWidth() != 0) {
      // The vector's size in bits is not a multiple of the target's byte size,
      // so its layout is unspecified. For now, we'll simply treat these cases
      // as unsupported (this should only be possible with OpenCL bool vectors
      // whose element count isn't a multiple of the byte size).
      if (Info)
        Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_invalid_vector)
            << QualType(VTy, 0) << EltSize << NElts << Ctx.getCharWidth();
      return false;
    }

    if (EltTy->isRealFloatingType() &&
        &Ctx.getFloatTypeSemantics(T: EltTy) == &APFloat::x87DoubleExtended()) {
      // The layout for x86_fp80 vectors seems to be handled very inconsistently
      // by both clang and LLVM, so for now we won't allow bit_casts involving
      // it in a constexpr context.
      if (Info)
        Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_unsupported_type)
            << EltTy;
      return false;
    }
  }

  return true;
}
8287
8288static bool checkBitCastConstexprEligibility(EvalInfo *Info,
8289 const ASTContext &Ctx,
8290 const CastExpr *BCE) {
8291 bool DestOK = checkBitCastConstexprEligibilityType(
8292 Loc: BCE->getBeginLoc(), Ty: BCE->getType(), Info, Ctx, CheckingDest: true);
8293 bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
8294 Loc: BCE->getBeginLoc(),
8295 Ty: BCE->getSubExpr()->getType(), Info, Ctx, CheckingDest: false);
8296 return SourceOK;
8297}
8298
8299static bool handleRValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
8300 const APValue &SourceRValue,
8301 const CastExpr *BCE) {
8302 assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
8303 "no host or target supports non 8-bit chars");
8304
8305 if (!checkBitCastConstexprEligibility(Info: &Info, Ctx: Info.Ctx, BCE))
8306 return false;
8307
8308 // Read out SourceValue into a char buffer.
8309 std::optional<BitCastBuffer> Buffer =
8310 APValueToBufferConverter::convert(Info, Src: SourceRValue, BCE);
8311 if (!Buffer)
8312 return false;
8313
8314 // Write out the buffer into a new APValue.
8315 std::optional<APValue> MaybeDestValue =
8316 BufferToAPValueConverter::convert(Info, Buffer&: *Buffer, BCE);
8317 if (!MaybeDestValue)
8318 return false;
8319
8320 DestValue = std::move(*MaybeDestValue);
8321 return true;
8322}
8323
8324static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
8325 APValue &SourceValue,
8326 const CastExpr *BCE) {
8327 assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
8328 "no host or target supports non 8-bit chars");
8329 assert(SourceValue.isLValue() &&
8330 "LValueToRValueBitcast requires an lvalue operand!");
8331
8332 LValue SourceLValue;
8333 APValue SourceRValue;
8334 SourceLValue.setFrom(Ctx: Info.Ctx, V: SourceValue);
8335 if (!handleLValueToRValueConversion(
8336 Info, Conv: BCE, Type: BCE->getSubExpr()->getType().withConst(), LVal: SourceLValue,
8337 RVal&: SourceRValue, /*WantObjectRepresentation=*/true))
8338 return false;
8339
8340 return handleRValueToRValueBitCast(Info, DestValue, SourceRValue, BCE);
8341}
8342
8343template <class Derived>
8344class ExprEvaluatorBase
8345 : public ConstStmtVisitor<Derived, bool> {
8346private:
8347 Derived &getDerived() { return static_cast<Derived&>(*this); }
8348 bool DerivedSuccess(const APValue &V, const Expr *E) {
8349 return getDerived().Success(V, E);
8350 }
8351 bool DerivedZeroInitialization(const Expr *E) {
8352 return getDerived().ZeroInitialization(E);
8353 }
8354
8355 // Check whether a conditional operator with a non-constant condition is a
8356 // potential constant expression. If neither arm is a potential constant
8357 // expression, then the conditional operator is not either.
8358 template<typename ConditionalOperator>
8359 void CheckPotentialConstantConditional(const ConditionalOperator *E) {
8360 assert(Info.checkingPotentialConstantExpression());
8361
8362 // Speculatively evaluate both arms.
8363 SmallVector<PartialDiagnosticAt, 8> Diag;
8364 {
8365 SpeculativeEvaluationRAII Speculate(Info, &Diag);
8366 StmtVisitorTy::Visit(E->getFalseExpr());
8367 if (Diag.empty())
8368 return;
8369 }
8370
8371 {
8372 SpeculativeEvaluationRAII Speculate(Info, &Diag);
8373 Diag.clear();
8374 StmtVisitorTy::Visit(E->getTrueExpr());
8375 if (Diag.empty())
8376 return;
8377 }
8378
8379 Error(E, diag::note_constexpr_conditional_never_const);
8380 }
8381
8382
8383 template<typename ConditionalOperator>
8384 bool HandleConditionalOperator(const ConditionalOperator *E) {
8385 bool BoolResult;
8386 if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
8387 if (Info.checkingPotentialConstantExpression() && Info.noteFailure()) {
8388 CheckPotentialConstantConditional(E);
8389 return false;
8390 }
8391 if (Info.noteFailure()) {
8392 StmtVisitorTy::Visit(E->getTrueExpr());
8393 StmtVisitorTy::Visit(E->getFalseExpr());
8394 }
8395 return false;
8396 }
8397
8398 Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
8399 return StmtVisitorTy::Visit(EvalExpr);
8400 }
8401
8402protected:
8403 EvalInfo &Info;
8404 typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
8405 typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
8406
8407 OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
8408 return Info.CCEDiag(E, DiagId: D);
8409 }
8410
8411 bool ZeroInitialization(const Expr *E) { return Error(E); }
8412
8413 bool IsConstantEvaluatedBuiltinCall(const CallExpr *E) {
8414 unsigned BuiltinOp = E->getBuiltinCallee();
8415 return BuiltinOp != 0 &&
8416 Info.Ctx.BuiltinInfo.isConstantEvaluated(ID: BuiltinOp);
8417 }
8418
public:
  ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}

  /// Access the evaluation context shared by all evaluators in this run.
  EvalInfo &getEvalInfo() { return Info; }

  /// Report an evaluation error. This should only be called when an error is
  /// first discovered. When propagating an error, just return false.
  bool Error(const Expr *E, diag::kind D) {
    Info.FFDiag(E, DiagId: D) << E->getSourceRange();
    return false;
  }
  /// Report the generic "invalid subexpression" error at E.
  bool Error(const Expr *E) {
    return Error(E, diag::note_invalid_subexpr_in_const_expr);
  }
8433
  bool VisitStmt(const Stmt *) {
    llvm_unreachable("Expression evaluator should not be called on stmts");
  }
  /// Fallback for any expression kind without a dedicated Visit* overload:
  /// it is not a supported constant subexpression.
  bool VisitExpr(const Expr *E) {
    return Error(E);
  }

  /// Evaluate an EmbedExpr by visiting its first element.
  // NOTE(review): only *It is visited — presumably an EmbedExpr reaching this
  // path designates a single element; confirm against callers.
  bool VisitEmbedExpr(const EmbedExpr *E) {
    const auto It = E->begin();
    return StmtVisitorTy::Visit(*It);
  }

  /// __func__ and friends evaluate as their function-name string.
  bool VisitPredefinedExpr(const PredefinedExpr *E) {
    return StmtVisitorTy::Visit(E->getFunctionName());
  }
  /// Prefer a ConstantExpr's cached APValue result over re-evaluating the
  /// wrapped subexpression.
  bool VisitConstantExpr(const ConstantExpr *E) {
    if (E->hasAPValueResult())
      return DerivedSuccess(V: E->getAPValueResult(), E);

    return StmtVisitorTy::Visit(E->getSubExpr());
  }
8455
  // The following expression kinds are transparent wrappers around another
  // expression; evaluate the wrapped expression directly.
  bool VisitParenExpr(const ParenExpr *E)
    { return StmtVisitorTy::Visit(E->getSubExpr()); }
  bool VisitUnaryExtension(const UnaryOperator *E)
    { return StmtVisitorTy::Visit(E->getSubExpr()); }
  bool VisitUnaryPlus(const UnaryOperator *E)
    { return StmtVisitorTy::Visit(E->getSubExpr()); }
  bool VisitChooseExpr(const ChooseExpr *E)
    { return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
  bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
    { return StmtVisitorTy::Visit(E->getResultExpr()); }
  bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
    { return StmtVisitorTy::Visit(E->getReplacement()); }
  /// Default arguments get a fresh temporary version and a source-location
  /// scope so std::source_location reflects the use site.
  bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
    TempVersionRAII RAII(*Info.CurrentCall);
    SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
    return StmtVisitorTy::Visit(E->getExpr());
  }
  /// Same as default arguments, but for default member initializers.
  bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
    TempVersionRAII RAII(*Info.CurrentCall);
    // The initializer may not have been parsed yet, or might be erroneous.
    if (!E->getExpr())
      return Error(E);
    SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
    return StmtVisitorTy::Visit(E->getExpr());
  }
8481
  /// Evaluate the subexpression inside a full-expression scope, then run the
  /// scope's destructors; both must succeed.
  bool VisitExprWithCleanups(const ExprWithCleanups *E) {
    FullExpressionRAII Scope(Info);
    return StmtVisitorTy::Visit(E->getSubExpr()) && Scope.destroy();
  }

  // Temporaries are registered when created, so we don't care about
  // CXXBindTemporaryExpr.
  bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) {
    return StmtVisitorTy::Visit(E->getSubExpr());
  }

  /// reinterpret_cast is never a constant expression; diagnose, then fold the
  /// cast anyway if possible.
  bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
    CCEDiag(E, D: diag::note_constexpr_invalid_cast)
        << diag::ConstexprInvalidCastKind::Reinterpret;
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }
  /// dynamic_cast is only a constant expression in C++20 and later.
  bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
    if (!Info.Ctx.getLangOpts().CPlusPlus20)
      CCEDiag(E, D: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::Dynamic;
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }
  bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
    return static_cast<Derived*>(this)->VisitCastExpr(E);
  }
8507
8508 bool VisitBinaryOperator(const BinaryOperator *E) {
8509 switch (E->getOpcode()) {
8510 default:
8511 return Error(E);
8512
8513 case BO_Comma:
8514 VisitIgnoredValue(E: E->getLHS());
8515 return StmtVisitorTy::Visit(E->getRHS());
8516
8517 case BO_PtrMemD:
8518 case BO_PtrMemI: {
8519 LValue Obj;
8520 if (!HandleMemberPointerAccess(Info, BO: E, LV&: Obj))
8521 return false;
8522 APValue Result;
8523 if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: Obj, RVal&: Result))
8524 return false;
8525 return DerivedSuccess(V: Result, E);
8526 }
8527 }
8528 }
8529
  /// Rewritten operators (e.g. comparisons rewritten in terms of another
  /// operator) are evaluated through their semantic form.
  bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E) {
    return StmtVisitorTy::Visit(E->getSemanticForm());
  }

  /// GNU binary conditional 'a ?: b': evaluate the common expression once,
  /// bind it to the opaque value, then evaluate like a regular conditional.
  bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
    // Evaluate and cache the common expression. We treat it as a temporary,
    // even though it's not quite the same thing.
    LValue CommonLV;
    if (!Evaluate(Result&: Info.CurrentCall->createTemporary(
                      Key: E->getOpaqueValue(),
                      T: getStorageType(Ctx: Info.Ctx, E: E->getOpaqueValue()),
                      Scope: ScopeKind::FullExpression, LV&: CommonLV),
                  Info, E: E->getCommon()))
      return false;

    return HandleConditionalOperator(E);
  }
8547
  /// Ternary conditional, including the GNU __builtin_constant_p folding
  /// extension for the condition.
  bool VisitConditionalOperator(const ConditionalOperator *E) {
    bool IsBcpCall = false;
    // If the condition (ignoring parens) is a __builtin_constant_p call,
    // the result is a constant expression if it can be folded without
    // side-effects. This is an important GNU extension. See GCC PR38377
    // for discussion.
    if (const CallExpr *CallCE =
          dyn_cast<CallExpr>(Val: E->getCond()->IgnoreParenCasts()))
      if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
        IsBcpCall = true;

    // Always assume __builtin_constant_p(...) ? ... : ... is a potential
    // constant expression; we can't check whether it's potentially foldable.
    // FIXME: We should instead treat __builtin_constant_p as non-constant if
    // it would return 'false' in this mode.
    if (Info.checkingPotentialConstantExpression() && IsBcpCall)
      return false;

    // In the BCP case, FoldConstant suppresses diagnostics unless evaluation
    // actually fails (keepDiagnostics re-enables them on failure).
    FoldConstant Fold(Info, IsBcpCall);
    if (!HandleConditionalOperator(E)) {
      Fold.keepDiagnostics();
      return false;
    }

    return true;
  }
8574
8575 bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
8576 if (APValue *Value = Info.CurrentCall->getCurrentTemporary(Key: E);
8577 Value && !Value->isAbsent())
8578 return DerivedSuccess(V: *Value, E);
8579
8580 const Expr *Source = E->getSourceExpr();
8581 if (!Source)
8582 return Error(E);
8583 if (Source == E) {
8584 assert(0 && "OpaqueValueExpr recursively refers to itself");
8585 return Error(E);
8586 }
8587 return StmtVisitorTy::Visit(Source);
8588 }
8589
  /// Evaluate a pseudo-object expression by walking its semantic form:
  /// non-unique OVEs are evaluated and bound eagerly, the result expression
  /// is visited for its value, and other semantic expressions are evaluated
  /// only for side effects.
  bool VisitPseudoObjectExpr(const PseudoObjectExpr *E) {
    for (const Expr *SemE : E->semantics()) {
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: SemE)) {
        // FIXME: We can't handle the case where an OpaqueValueExpr is also the
        // result expression: there could be two different LValues that would
        // refer to the same object in that case, and we can't model that.
        if (SemE == E->getResultExpr())
          return Error(E);

        // Unique OVEs get evaluated if and when we encounter them when
        // emitting the rest of the semantic form, rather than eagerly.
        if (OVE->isUnique())
          continue;

        // Bind the OVE's value as a full-expression temporary so later
        // references to it resolve via getCurrentTemporary.
        LValue LV;
        if (!Evaluate(Result&: Info.CurrentCall->createTemporary(
                          Key: OVE, T: getStorageType(Ctx: Info.Ctx, E: OVE),
                          Scope: ScopeKind::FullExpression, LV),
                      Info, E: OVE->getSourceExpr()))
          return false;
      } else if (SemE == E->getResultExpr()) {
        if (!StmtVisitorTy::Visit(SemE))
          return false;
      } else {
        if (!EvaluateIgnoredValue(Info, E: SemE))
          return false;
      }
    }
    return true;
  }
8620
8621 bool VisitCallExpr(const CallExpr *E) {
8622 APValue Result;
8623 if (!handleCallExpr(E, Result, ResultSlot: nullptr))
8624 return false;
8625 return DerivedSuccess(V: Result, E);
8626 }
8627
  /// Evaluate a call expression: resolve the callee (bound member calls,
  /// function pointers, lambda static invokers, replaceable allocation
  /// functions), evaluate the arguments in the required order, dispatch
  /// virtually if needed, and run the function body. On success the returned
  /// value is placed in \p Result; \p ResultSlot, if non-null, designates
  /// storage for in-place construction of the result.
  bool handleCallExpr(const CallExpr *E, APValue &Result,
                     const LValue *ResultSlot) {
    CallScopeRAII CallScope(Info);

    const Expr *Callee = E->getCallee()->IgnoreParens();
    QualType CalleeType = Callee->getType();

    const FunctionDecl *FD = nullptr;
    LValue *This = nullptr, ObjectArg;
    auto Args = ArrayRef(E->getArgs(), E->getNumArgs());
    bool HasQualifier = false;

    CallRef Call;

    // Extract function decl and 'this' pointer from the callee.
    if (CalleeType->isSpecificBuiltinType(K: BuiltinType::BoundMember)) {
      const CXXMethodDecl *Member = nullptr;
      if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: Callee)) {
        // Explicit bound member calls, such as x.f() or p->g();
        if (!EvaluateObjectArgument(Info, Object: ME->getBase(), This&: ObjectArg))
          return false;
        Member = dyn_cast<CXXMethodDecl>(Val: ME->getMemberDecl());
        if (!Member)
          return Error(Callee);
        This = &ObjectArg;
        HasQualifier = ME->hasQualifier();
      } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Val: Callee)) {
        // Indirect bound member calls ('.*' or '->*').
        const ValueDecl *D =
            HandleMemberPointerAccess(Info, BO: BE, LV&: ObjectArg, IncludeMember: false);
        if (!D)
          return false;
        Member = dyn_cast<CXXMethodDecl>(Val: D);
        if (!Member)
          return Error(Callee);
        This = &ObjectArg;
      } else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Val: Callee)) {
        // Pseudo-destructor calls end the object's lifetime (C++20).
        if (!Info.getLangOpts().CPlusPlus20)
          Info.CCEDiag(E: PDE, DiagId: diag::note_constexpr_pseudo_destructor);
        return EvaluateObjectArgument(Info, Object: PDE->getBase(), This&: ObjectArg) &&
               HandleDestruction(Info, E: PDE, This: ObjectArg, ThisType: PDE->getDestroyedType());
      } else
        return Error(Callee);
      FD = Member;
    } else if (CalleeType->isFunctionPointerType()) {
      LValue CalleeLV;
      if (!EvaluatePointer(E: Callee, Result&: CalleeLV, Info))
        return false;

      if (!CalleeLV.getLValueOffset().isZero())
        return Error(Callee);
      if (CalleeLV.isNullPointer()) {
        Info.FFDiag(E: Callee, DiagId: diag::note_constexpr_null_callee)
            << const_cast<Expr *>(Callee);
        return false;
      }
      FD = dyn_cast_or_null<FunctionDecl>(
          Val: CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>());
      if (!FD)
        return Error(Callee);
      // Don't call function pointers which have been cast to some other type.
      // Per DR (no number yet), the caller and callee can differ in noexcept.
      if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec(
              T: CalleeType->getPointeeType(), U: FD->getType())) {
        return Error(E);
      }

      // For an (overloaded) assignment expression, evaluate the RHS before the
      // LHS.
      auto *OCE = dyn_cast<CXXOperatorCallExpr>(Val: E);
      if (OCE && OCE->isAssignmentOp()) {
        assert(Args.size() == 2 && "wrong number of arguments in assignment");
        Call = Info.CurrentCall->createCall(Callee: FD);
        bool HasThis = false;
        if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD))
          HasThis = MD->isImplicitObjectMemberFunction();
        if (!EvaluateArgs(Args: HasThis ? Args.slice(N: 1) : Args, Call, Info, Callee: FD,
                          /*RightToLeft=*/true, ObjectArg: &ObjectArg))
          return false;
      }

      // Overloaded operator calls to member functions are represented as normal
      // calls with '*this' as the first argument.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD);
      if (MD &&
          (MD->isImplicitObjectMemberFunction() || (OCE && MD->isStatic()))) {
        // FIXME: When selecting an implicit conversion for an overloaded
        // operator delete, we sometimes try to evaluate calls to conversion
        // operators without a 'this' parameter!
        if (Args.empty())
          return Error(E);

        if (!EvaluateObjectArgument(Info, Object: Args[0], This&: ObjectArg))
          return false;

        // If we are calling a static operator, the 'this' argument needs to be
        // ignored after being evaluated.
        if (MD->isInstance())
          This = &ObjectArg;

        // If this is syntactically a simple assignment using a trivial
        // assignment operator, start the lifetimes of union members as needed,
        // per C++20 [class.union]5.
        if (Info.getLangOpts().CPlusPlus20 && OCE &&
            OCE->getOperator() == OO_Equal && MD->isTrivial() &&
            !MaybeHandleUnionActiveMemberChange(Info, LHSExpr: Args[0], LHS: ObjectArg))
          return false;

        Args = Args.slice(N: 1);
      } else if (MD && MD->isLambdaStaticInvoker()) {
        // Map the static invoker for the lambda back to the call operator.
        // Conveniently, we don't have to slice out the 'this' argument (as is
        // being done for the non-static case), since a static member function
        // doesn't have an implicit argument passed in.
        const CXXRecordDecl *ClosureClass = MD->getParent();
        assert(
            ClosureClass->captures().empty() &&
            "Number of captures must be zero for conversion to function-ptr");

        const CXXMethodDecl *LambdaCallOp =
            ClosureClass->getLambdaCallOperator();

        // Set 'FD', the function that will be called below, to the call
        // operator.  If the closure object represents a generic lambda, find
        // the corresponding specialization of the call operator.

        if (ClosureClass->isGenericLambda()) {
          assert(MD->isFunctionTemplateSpecialization() &&
                 "A generic lambda's static-invoker function must be a "
                 "template specialization");
          const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
          FunctionTemplateDecl *CallOpTemplate =
              LambdaCallOp->getDescribedFunctionTemplate();
          void *InsertPos = nullptr;
          FunctionDecl *CorrespondingCallOpSpecialization =
              CallOpTemplate->findSpecialization(Args: TAL->asArray(), InsertPos);
          assert(CorrespondingCallOpSpecialization &&
                 "We must always have a function call operator specialization "
                 "that corresponds to our static invoker specialization");
          assert(isa<CXXMethodDecl>(CorrespondingCallOpSpecialization));
          FD = CorrespondingCallOpSpecialization;
        } else
          FD = LambdaCallOp;
      } else if (FD->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
        // Replaceable global operator new/delete in constant evaluation.
        if (FD->getDeclName().isAnyOperatorNew()) {
          LValue Ptr;
          if (!HandleOperatorNewCall(Info, E, Result&: Ptr))
            return false;
          Ptr.moveInto(V&: Result);
          return CallScope.destroy();
        } else {
          return HandleOperatorDeleteCall(Info, E) && CallScope.destroy();
        }
      }
    } else
      return Error(E);

    // Evaluate the arguments now if we've not already done so.
    if (!Call) {
      Call = Info.CurrentCall->createCall(Callee: FD);
      if (!EvaluateArgs(Args, Call, Info, Callee: FD, /*RightToLeft*/ false,
                        ObjectArg: &ObjectArg))
        return false;
    }

    SmallVector<QualType, 4> CovariantAdjustmentPath;
    if (This) {
      auto *NamedMember = dyn_cast<CXXMethodDecl>(Val: FD);
      if (NamedMember && NamedMember->isVirtual() && !HasQualifier) {
        // Perform virtual dispatch, if necessary.
        FD = HandleVirtualDispatch(Info, E, This&: *This, Found: NamedMember,
                                   CovariantAdjustmentPath);
        if (!FD)
          return false;
      } else if (NamedMember && NamedMember->isImplicitObjectMemberFunction()) {
        // Check that the 'this' pointer points to an object of the right type.
        // FIXME: If this is an assignment operator call, we may need to change
        // the active union member before we check this.
        if (!checkNonVirtualMemberCallThisPointer(Info, E, This: *This, NamedMember))
          return false;
      }
    }

    // Destructor calls are different enough that they have their own codepath.
    if (auto *DD = dyn_cast<CXXDestructorDecl>(Val: FD)) {
      assert(This && "no 'this' pointer for destructor call");
      return HandleDestruction(Info, E, This: *This,
                               ThisType: Info.Ctx.getCanonicalTagType(TD: DD->getParent())) &&
             CallScope.destroy();
    }

    const FunctionDecl *Definition = nullptr;
    Stmt *Body = FD->getBody(Definition);
    SourceLocation Loc = E->getExprLoc();

    // Treat the object argument as `this` when evaluating defaulted
    // special member functions
    if (FD->hasCXXExplicitFunctionObjectParameter())
      This = &ObjectArg;

    if (!CheckConstexprFunction(Info, CallLoc: Loc, Declaration: FD, Definition, Body) ||
        !HandleFunctionCall(CallLoc: Loc, Callee: Definition, ObjectArg: This, E, Args, Call, Body, Info,
                            Result, ResultSlot))
      return false;

    // A covariant virtual override may return a derived type; adjust the
    // returned pointer/reference back to the static return type.
    if (!CovariantAdjustmentPath.empty() &&
        !HandleCovariantReturnAdjustment(Info, E, Result,
                                         Path: CovariantAdjustmentPath))
      return false;

    return CallScope.destroy();
  }
8840
  /// A compound literal rvalue evaluates as its initializer.
  bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return StmtVisitorTy::Visit(E->getInitializer());
  }
  /// Init lists reaching this generic evaluator: an empty list
  /// zero-initializes, a single-element list evaluates that element, and
  /// anything longer must be handled by a type-specific evaluator.
  bool VisitInitListExpr(const InitListExpr *E) {
    if (E->getNumInits() == 0)
      return DerivedZeroInitialization(E);
    if (E->getNumInits() == 1)
      return StmtVisitorTy::Visit(E->getInit(Init: 0));
    return Error(E);
  }
  // The following all denote zero/value initialization of the result type.
  bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return DerivedZeroInitialization(E);
  }
  bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return DerivedZeroInitialization(E);
  }
  bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return DerivedZeroInitialization(E);
  }
8860
  /// A member expression where the object is a prvalue is itself a prvalue.
  /// Evaluate the whole base object, then extract the named field's
  /// subobject from the resulting APValue.
  bool VisitMemberExpr(const MemberExpr *E) {
    assert(!Info.Ctx.getLangOpts().CPlusPlus11 &&
           "missing temporary materialization conversion");
    assert(!E->isArrow() && "missing call to bound member function?");

    APValue Val;
    if (!Evaluate(Result&: Val, Info, E: E->getBase()))
      return false;

    QualType BaseTy = E->getBase()->getType();

    // Only field accesses are supported here; anything else is non-constant.
    const FieldDecl *FD = dyn_cast<FieldDecl>(Val: E->getMemberDecl());
    if (!FD) return Error(E);
    assert(!FD->getType()->isReferenceType() && "prvalue reference?");
    assert(BaseTy->castAsCanonical<RecordType>()->getDecl() ==
               FD->getParent()->getCanonicalDecl() &&
           "record / field mismatch");

    // Note: there is no lvalue base here. But this case should only ever
    // happen in C or in C++98, where we cannot be evaluating a constexpr
    // constructor, which is the only case the base matters.
    CompleteObject Obj(APValue::LValueBase(), &Val, BaseTy);
    SubobjectDesignator Designator(BaseTy);
    Designator.addDeclUnchecked(D: FD);

    APValue Result;
    return extractSubobject(Info, E, Obj, Sub: Designator, Result) &&
           DerivedSuccess(V: Result, E);
  }
8891
8892 bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E) {
8893 APValue Val;
8894 if (!Evaluate(Result&: Val, Info, E: E->getBase()))
8895 return false;
8896
8897 if (Val.isVector()) {
8898 SmallVector<uint32_t, 4> Indices;
8899 E->getEncodedElementAccess(Elts&: Indices);
8900 if (Indices.size() == 1) {
8901 // Return scalar.
8902 return DerivedSuccess(V: Val.getVectorElt(I: Indices[0]), E);
8903 } else {
8904 // Construct new APValue vector.
8905 SmallVector<APValue, 4> Elts;
8906 for (unsigned I = 0; I < Indices.size(); ++I) {
8907 Elts.push_back(Elt: Val.getVectorElt(I: Indices[I]));
8908 }
8909 APValue VecResult(Elts.data(), Indices.size());
8910 return DerivedSuccess(V: VecResult, E);
8911 }
8912 }
8913
8914 return false;
8915 }
8916
  /// Handle the cast kinds common to all evaluators; anything not listed is
  /// left to derived evaluators via the default Error path.
  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      break;

    case CK_AtomicToNonAtomic: {
      APValue AtomicVal;
      // This does not need to be done in place even for class/array types:
      // atomic-to-non-atomic conversion implies copying the object
      // representation.
      if (!Evaluate(Result&: AtomicVal, Info, E: E->getSubExpr()))
        return false;
      return DerivedSuccess(V: AtomicVal, E);
    }

    case CK_NoOp:
    case CK_UserDefinedConversion:
      return StmtVisitorTy::Visit(E->getSubExpr());

    case CK_HLSLArrayRValue: {
      // HLSL array rvalues: prvalue operands evaluate directly; glvalue
      // operands are loaded via lvalue-to-rvalue conversion.
      const Expr *SubExpr = E->getSubExpr();
      if (!SubExpr->isGLValue()) {
        APValue Val;
        if (!Evaluate(Result&: Val, Info, E: SubExpr))
          return false;
        return DerivedSuccess(V: Val, E);
      }

      LValue LVal;
      if (!EvaluateLValue(E: SubExpr, Result&: LVal, Info))
        return false;
      APValue RVal;
      // Note, we use the subexpression's type in order to retain cv-qualifiers.
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: SubExpr->getType(), LVal,
                                          RVal))
        return false;
      return DerivedSuccess(V: RVal, E);
    }
    case CK_LValueToRValue: {
      LValue LVal;
      if (!EvaluateLValue(E: E->getSubExpr(), Result&: LVal, Info))
        return false;
      APValue RVal;
      // Note, we use the subexpression's type in order to retain cv-qualifiers.
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getSubExpr()->getType(),
                                          LVal, RVal))
        return false;
      return DerivedSuccess(V: RVal, E);
    }
    case CK_LValueToRValueBitCast: {
      // std::bit_cast-style conversion through the object representation.
      APValue DestValue, SourceValue;
      if (!Evaluate(Result&: SourceValue, Info, E: E->getSubExpr()))
        return false;
      if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, BCE: E))
        return false;
      return DerivedSuccess(V: DestValue, E);
    }

    case CK_AddressSpaceConversion: {
      APValue Value;
      if (!Evaluate(Result&: Value, Info, E: E->getSubExpr()))
        return false;
      return DerivedSuccess(V: Value, E);
    }
    }

    return Error(E);
  }
8985
  bool VisitUnaryPostInc(const UnaryOperator *UO) {
    return VisitUnaryPostIncDec(UO);
  }
  bool VisitUnaryPostDec(const UnaryOperator *UO) {
    return VisitUnaryPostIncDec(UO);
  }
  /// Post-increment/decrement: mutate the operand in place and produce its
  /// old value. Mutation is only a constant expression from C++14 onwards.
  bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
    if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
      return Error(UO);

    LValue LVal;
    if (!EvaluateLValue(E: UO->getSubExpr(), Result&: LVal, Info))
      return false;
    APValue RVal;
    // &RVal receives the pre-modification value, which is the result of a
    // postfix operator.
    if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(),
                      UO->isIncrementOp(), &RVal))
      return false;
    return DerivedSuccess(V: RVal, E: UO);
  }
9005
  /// GNU statement expression ({ ...; expr; }): execute each statement, and
  /// produce the value of the final expression. Non-expression final
  /// statements and early control-flow exits are unsupported.
  bool VisitStmtExpr(const StmtExpr *E) {
    // We will have checked the full-expressions inside the statement expression
    // when they were completed, and don't need to check them again now.
    llvm::SaveAndRestore NotCheckingForUB(Info.CheckingForUndefinedBehavior,
                                          false);

    const CompoundStmt *CS = E->getSubStmt();
    if (CS->body_empty())
      return true;

    BlockScopeRAII Scope(Info);
    for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
                                           BE = CS->body_end();
         /**/; ++BI) {
      if (BI + 1 == BE) {
        // The last statement supplies the statement expression's value and
        // must therefore be an expression.
        const Expr *FinalExpr = dyn_cast<Expr>(Val: *BI);
        if (!FinalExpr) {
          Info.FFDiag(Loc: (*BI)->getBeginLoc(),
                      DiagId: diag::note_constexpr_stmt_expr_unsupported);
          return false;
        }
        return this->Visit(FinalExpr) && Scope.destroy();
      }

      APValue ReturnValue;
      StmtResult Result = { .Value: ReturnValue, .Slot: nullptr };
      EvalStmtResult ESR = EvaluateStmt(Result, Info, S: *BI);
      if (ESR != ESR_Succeeded) {
        // FIXME: If the statement-expression terminated due to 'return',
        // 'break', or 'continue', it would be nice to propagate that to
        // the outer statement evaluation rather than bailing out.
        if (ESR != ESR_Failed)
          Info.FFDiag(Loc: (*BI)->getBeginLoc(),
                      DiagId: diag::note_constexpr_stmt_expr_unsupported);
        return false;
      }
    }

    llvm_unreachable("Return from function from the loop above.");
  }
9046
  /// A pack-indexing expression evaluates as the pack element it selects.
  bool VisitPackIndexingExpr(const PackIndexingExpr *E) {
    return StmtVisitorTy::Visit(E->getSelectedExpr());
  }

  /// Visit a value which is evaluated, but whose value is ignored.
  void VisitIgnoredValue(const Expr *E) {
    EvaluateIgnoredValue(Info, E);
  }

  /// Potentially visit a MemberExpr's base expression.
  void VisitIgnoredBaseExpression(const Expr *E) {
    // While MSVC doesn't evaluate the base expression, it does diagnose the
    // presence of side-effecting behavior.
    if (Info.getLangOpts().MSVCCompat && !E->HasSideEffects(Ctx: Info.Ctx))
      return;
    VisitIgnoredValue(E);
  }
9064};
9065
9066} // namespace
9067
9068//===----------------------------------------------------------------------===//
9069// Common base class for lvalue and temporary evaluation.
9070//===----------------------------------------------------------------------===//
9071namespace {
/// Shared implementation for evaluators that produce an lvalue (glvalue
/// evaluation and temporary materialization). Derived classes supply the
/// expression-specific visitors; this base handles member access,
/// pointer-to-member operators, and derived-to-base casts.
template<class Derived>
class LValueExprEvaluatorBase
  : public ExprEvaluatorBase<Derived> {
protected:
  // The lvalue being built up by this evaluation.
  LValue &Result;
  // Whether a base of unknown/invalid provenance is acceptable (used when we
  // only need the designator, not the base).
  bool InvalidBaseOK;
  typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
  typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;

  /// Succeed with Result referring to base B at offset zero.
  bool Success(APValue::LValueBase B) {
    Result.set(B);
    return true;
  }

  /// Evaluate E as a pointer, propagating this evaluator's InvalidBaseOK.
  bool evaluatePointer(const Expr *E, LValue &Result) {
    return EvaluatePointer(E, Result, this->Info, InvalidBaseOK);
  }

public:
  LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result, bool InvalidBaseOK)
      : ExprEvaluatorBaseTy(Info), Result(Result),
        InvalidBaseOK(InvalidBaseOK) {}

  /// Succeed by reconstructing Result from a previously computed APValue.
  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(Ctx: this->Info.Ctx, V);
    return true;
  }

  /// Handle non-static data member access: evaluate the base as a pointer,
  /// temporary, or lvalue as appropriate, then adjust Result to designate
  /// the member; reference members are additionally loaded through.
  bool VisitMemberExpr(const MemberExpr *E) {
    // Handle non-static data members.
    QualType BaseTy;
    bool EvalOK;
    if (E->isArrow()) {
      EvalOK = evaluatePointer(E: E->getBase(), Result);
      BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
    } else if (E->getBase()->isPRValue()) {
      assert(E->getBase()->getType()->isRecordType());
      EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info);
      BaseTy = E->getBase()->getType();
    } else {
      EvalOK = this->Visit(E->getBase());
      BaseTy = E->getBase()->getType();
    }
    if (!EvalOK) {
      if (!InvalidBaseOK)
        return false;
      // Record an invalid base; the designator may still be useful.
      Result.setInvalid(B: E);
      return true;
    }

    const ValueDecl *MD = E->getMemberDecl();
    if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: E->getMemberDecl())) {
      assert(BaseTy->castAsCanonical<RecordType>()->getDecl() ==
                 FD->getParent()->getCanonicalDecl() &&
             "record / field mismatch");
      (void)BaseTy;
      if (!HandleLValueMember(this->Info, E, Result, FD))
        return false;
    } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(Val: MD)) {
      if (!HandleLValueIndirectMember(this->Info, E, Result, IFD))
        return false;
    } else
      return this->Error(E);

    if (MD->getType()->isReferenceType()) {
      // A reference member access designates the referenced object, so load
      // the reference's value and rebuild Result from it.
      APValue RefValue;
      if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
                                          RefValue))
        return false;
      return Success(RefValue, E);
    }
    return true;
  }

  /// Pointer-to-member operators produce an lvalue for the member.
  bool VisitBinaryOperator(const BinaryOperator *E) {
    switch (E->getOpcode()) {
    default:
      return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

    case BO_PtrMemD:
    case BO_PtrMemI:
      return HandleMemberPointerAccess(this->Info, E, Result);
    }
  }

  /// Derived-to-base casts adjust the designator along the base path.
  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
      if (!this->Visit(E->getSubExpr()))
        return false;

      // Now figure out the necessary offset to add to the base LV to get from
      // the derived class to the base class.
      return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(),
                                  Result);
    }
  }
};
9174}
9175
9176//===----------------------------------------------------------------------===//
9177// LValue Evaluation
9178//
9179// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
9180// function designators (in C), decl references to void objects (in C), and
9181// temporaries (if building with -Wno-address-of-temporary).
9182//
9183// LValue evaluation produces values comprising a base expression of one of the
9184// following types:
9185// - Declarations
9186// * VarDecl
9187// * FunctionDecl
9188// - Literals
9189// * CompoundLiteralExpr in C (and in global scope in C++)
9190// * StringLiteral
9191// * PredefinedExpr
9192// * ObjCStringLiteralExpr
9193// * ObjCEncodeExpr
9194// * AddrLabelExpr
9195// * BlockExpr
9196// * CallExpr for a MakeStringConstant builtin
9197// - typeid(T) expressions, as TypeInfoLValues
9198// - Locals and temporaries
9199// * MaterializeTemporaryExpr
9200// * Any Expr, with a CallIndex indicating the function in which the temporary
9201// was evaluated, for cases where the MaterializeTemporaryExpr is missing
9202// from the AST (FIXME).
9203// * A MaterializeTemporaryExpr that has static storage duration, with no
9204// CallIndex, for a lifetime-extended temporary.
9205// * The ConstantExpr that is currently being evaluated during evaluation of an
9206// immediate invocation.
9207// plus an offset in bytes.
9208//===----------------------------------------------------------------------===//
9209namespace {
/// The evaluator for glvalue expressions; see the banner comment above for
/// the base kinds an lvalue evaluation can produce.
class LValueExprEvaluator
  : public LValueExprEvaluatorBase<LValueExprEvaluator> {
public:
  LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) :
    LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {}

  bool VisitVarDecl(const Expr *E, const VarDecl *VD);
  bool VisitUnaryPreIncDec(const UnaryOperator *UO);

  bool VisitCallExpr(const CallExpr *E);
  bool VisitDeclRefExpr(const DeclRefExpr *E);
  // A PredefinedExpr (__func__ etc.) is itself an lvalue base.
  bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(B: E); }
  bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
  bool VisitMemberExpr(const MemberExpr *E);
  // String literals get a version number so distinct evaluations of the same
  // literal are distinguishable.
  bool VisitStringLiteral(const StringLiteral *E) {
    return Success(
        B: APValue::LValueBase(E, 0, Info.Ctx.getNextStringLiteralVersion()));
  }
  bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(B: E); }
  bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
  bool VisitCXXUuidofExpr(const CXXUuidofExpr *E);
  bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
  bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E);
  bool VisitUnaryDeref(const UnaryOperator *E);
  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);
  bool VisitUnaryPreInc(const UnaryOperator *UO) {
    return VisitUnaryPreIncDec(UO);
  }
  bool VisitUnaryPreDec(const UnaryOperator *UO) {
    return VisitUnaryPreIncDec(UO);
  }
  bool VisitBinAssign(const BinaryOperator *BO);
  bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);

  /// Casts with lvalue-specific semantics; everything else defers to the
  /// common base-class handling.
  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return LValueExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_LValueBitCast:
      // An lvalue bit-cast is not a constant expression; diagnose, keep the
      // lvalue, but mark its designator invalid.
      this->CCEDiag(E, D: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
          << Info.Ctx.getLangOpts().CPlusPlus;
      if (!Visit(S: E->getSubExpr()))
        return false;
      Result.Designator.setInvalid();
      return true;

    case CK_BaseToDerived:
      if (!Visit(S: E->getSubExpr()))
        return false;
      return HandleBaseToDerivedCast(Info, E, Result);

    case CK_Dynamic:
      if (!Visit(S: E->getSubExpr()))
        return false;
      return HandleDynamicCast(Info, E: cast<ExplicitCastExpr>(Val: E), Ptr&: Result);
    }
  }
};
9272} // end anonymous namespace
9273
9274/// Get an lvalue to a field of a lambda's closure type.
/// Get an lvalue to a field of a lambda's closure type.
/// \param MD the lambda's call operator (or other member being evaluated).
/// \param FD the closure-type field representing the capture.
/// \param LValueToRValueConversion if true, load through the field so Result
///        refers to what the (reference-typed) capture refers to.
static bool HandleLambdaCapture(EvalInfo &Info, const Expr *E, LValue &Result,
                                const CXXMethodDecl *MD, const FieldDecl *FD,
                                bool LValueToRValueConversion) {
  // Static lambda function call operators can't have captures. We already
  // diagnosed this, so bail out here.
  if (MD->isStatic()) {
    assert(Info.CurrentCall->This == nullptr &&
           "This should not be set for a static call operator");
    return false;
  }

  // Start with 'Result' referring to the complete closure object...
  if (MD->isExplicitObjectMemberFunction()) {
    // Self may be passed by reference or by value.
    const ParmVarDecl *Self = MD->getParamDecl(i: 0);
    if (Self->getType()->isReferenceType()) {
      APValue *RefValue = Info.getParamSlot(Call: Info.CurrentCall->Arguments, PVD: Self);
      if (!RefValue->allowConstexprUnknown() || RefValue->hasValue())
        Result.setFrom(Ctx: Info.Ctx, V: *RefValue);
    } else {
      // By-value self: refer to the parameter's storage in its call frame.
      const ParmVarDecl *VD = Info.CurrentCall->Arguments.getOrigParam(PVD: Self);
      CallStackFrame *Frame =
          Info.getCallFrameAndDepth(CallIndex: Info.CurrentCall->Arguments.CallIndex)
              .first;
      unsigned Version = Info.CurrentCall->Arguments.Version;
      Result.set(B: {VD, Frame->Index, Version});
    }
  } else
    Result = *Info.CurrentCall->This;

  // ... then update it to refer to the field of the closure object
  // that represents the capture.
  if (!HandleLValueMember(Info, E, LVal&: Result, FD))
    return false;

  // And if the field is of reference type (or if we captured '*this' by
  // reference), update 'Result' to refer to what
  // the field refers to.
  if (LValueToRValueConversion) {
    APValue RVal;
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: FD->getType(), LVal: Result, RVal))
      return false;
    Result.setFrom(Ctx: Info.Ctx, V: RVal);
  }
  return true;
}
9321
9322/// Evaluate an expression as an lvalue. This can be legitimately called on
9323/// expressions which are not glvalues, in three cases:
9324/// * function designators in C, and
9325/// * "extern void" objects
9326/// * @selector() expressions in Objective-C
9327static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
9328 bool InvalidBaseOK) {
9329 assert(!E->isValueDependent());
9330 assert(E->isGLValue() || E->getType()->isFunctionType() ||
9331 E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E->IgnoreParens()));
9332 return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(S: E);
9333}
9334
/// Produce an lvalue for a reference to a declaration: a function, variable,
/// structured-binding, or (inside a lambda) a capture mapped to a closure
/// field.
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
  const ValueDecl *D = E->getDecl();

  // If we are within a lambda's call operator, check whether the 'VD' referred
  // to within 'E' actually represents a lambda-capture that maps to a
  // data-member/field within the closure object, and if so, evaluate to the
  // field or what the field refers to.
  if (Info.CurrentCall && isLambdaCallOperator(DC: Info.CurrentCall->Callee) &&
      E->refersToEnclosingVariableOrCapture()) {
    // We don't always have a complete capture-map when checking or inferring if
    // the function call operator meets the requirements of a constexpr function
    // - but we don't need to evaluate the captures to determine constexprness
    // (dcl.constexpr C++17).
    if (Info.checkingPotentialConstantExpression())
      return false;

    if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(Val: D)) {
      const auto *MD = cast<CXXMethodDecl>(Val: Info.CurrentCall->Callee);
      // Reference captures are additionally loaded so 'Result' designates the
      // referenced object, not the closure field itself.
      return HandleLambdaCapture(Info, E, Result, MD, FD,
                                 LValueToRValueConversion: FD->getType()->isReferenceType());
    }
  }

  // Declarations that are themselves valid lvalue bases.
  if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl,
          UnnamedGlobalConstantDecl>(Val: D))
    return Success(B: cast<ValueDecl>(Val: D));
  if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
    return VisitVarDecl(E, VD);
  // A structured binding evaluates as the expression it is bound to.
  if (const BindingDecl *BD = dyn_cast<BindingDecl>(Val: D))
    return Visit(S: BD->getBinding());
  return Error(E);
}
9367
/// Produce an lvalue designating the variable 'VD' as referenced by 'E',
/// resolving locals/parameters to the correct call-stack frame and version,
/// and following references through to their referent.
bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
  CallStackFrame *Frame = nullptr;
  unsigned Version = 0;
  if (VD->hasLocalStorage()) {
    // Only if a local variable was declared in the function currently being
    // evaluated, do we expect to be able to find its value in the current
    // frame. (Otherwise it was likely declared in an enclosing context and
    // could either have a valid evaluatable value (for e.g. a constexpr
    // variable) or be ill-formed (and trigger an appropriate evaluation
    // diagnostic)).
    CallStackFrame *CurrFrame = Info.CurrentCall;
    if (CurrFrame->Callee && CurrFrame->Callee->Equals(DC: VD->getDeclContext())) {
      // Function parameters are stored in some caller's frame. (Usually the
      // immediate caller, but for an inherited constructor they may be more
      // distant.)
      if (auto *PVD = dyn_cast<ParmVarDecl>(Val: VD)) {
        if (CurrFrame->Arguments) {
          VD = CurrFrame->Arguments.getOrigParam(PVD);
          Frame =
              Info.getCallFrameAndDepth(CallIndex: CurrFrame->Arguments.CallIndex).first;
          Version = CurrFrame->Arguments.Version;
        }
      } else {
        // A non-parameter local: stored in the current frame, with a version
        // distinguishing re-entries of the same declaration (e.g. in loops).
        Frame = CurrFrame;
        Version = CurrFrame->getCurrentTemporaryVersion(Key: VD);
      }
    }
  }

  if (!VD->getType()->isReferenceType()) {
    // Non-reference variable: the lvalue designates the variable itself.
    if (Frame) {
      Result.set(B: {VD, Frame->Index, Version});
      return true;
    }
    return Success(B: VD);
  }

  // Reference variable: we must read its initializer to find the referent.
  // Pre-C++11 constant folding doesn't permit this; note why.
  if (!Info.getLangOpts().CPlusPlus11) {
    Info.CCEDiag(E, DiagId: diag::note_constexpr_ltor_non_integral, ExtraNotes: 1)
        << VD << VD->getType();
    Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at);
  }

  APValue *V;
  if (!evaluateVarDeclInit(Info, E, VD, Frame, Version, Result&: V))
    return false;

  // No stored value: treat the reference's referent as constexpr-unknown
  // rather than failing outright.
  if (!V) {
    Result.set(B: VD);
    Result.AllowConstexprUnknown = true;
    return true;
  }

  return Success(V: *V, E);
}
9423
/// Handle calls that can yield a constant lvalue. Only a few builtins
/// qualify; everything else goes through generic call evaluation.
bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) {
  if (!IsConstantEvaluatedBuiltinCall(E))
    return ExprEvaluatorBaseTy::VisitCallExpr(E);

  switch (E->getBuiltinCallee()) {
  default:
    return false;
  // std::as_const / std::forward / std::forward_like / std::move /
  // std::move_if_noexcept only change the value category or cv-qualification
  // of their argument, so evaluate directly to the argument.
  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    if (cast<FunctionDecl>(Val: E->getCalleeDecl())->isConstexpr())
      return Visit(S: E->getArg(Arg: 0));
    break;
  }

  return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
9443
/// Materialize a temporary object and produce an lvalue designating it (or a
/// subobject of it, after applying any recorded subobject adjustments).
bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *E) {
  // Walk through the expression to find the materialized temporary itself.
  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  const Expr *Inner =
      E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHS&: CommaLHSs, Adjustments);

  // If we passed any comma operators, evaluate their LHSs.
  for (const Expr *E : CommaLHSs)
    if (!EvaluateIgnoredValue(Info, E))
      return false;

  // A materialized temporary with static storage duration can appear within the
  // result of a constant expression evaluation, so we need to preserve its
  // value for use outside this evaluation.
  APValue *Value;
  if (E->getStorageDuration() == SD_Static) {
    // Mere folding is not allowed to create persistent static storage.
    if (Info.EvalMode == EvaluationMode::ConstantFold)
      return false;
    // FIXME: What about SD_Thread?
    Value = E->getOrCreateValue(MayCreate: true);
    // Reset any value left over from a previous evaluation attempt.
    *Value = APValue();
    Result.set(B: E);
  } else {
    // Automatic / full-expression temporaries live in the current frame,
    // scoped to either the full-expression or the enclosing block.
    Value = &Info.CurrentCall->createTemporary(
        Key: E, T: Inner->getType(),
        Scope: E->getStorageDuration() == SD_FullExpression ? ScopeKind::FullExpression
                                                      : ScopeKind::Block,
        LV&: Result);
  }

  QualType Type = Inner->getType();

  // Materialize the temporary itself.
  if (!EvaluateInPlace(Result&: *Value, Info, This: Result, E: Inner)) {
    // Clear the partial result so a stale value is never observed.
    *Value = APValue();
    return false;
  }

  // Adjust our lvalue to refer to the desired subobject, replaying the
  // adjustments innermost-first (hence the reverse iteration).
  for (unsigned I = Adjustments.size(); I != 0; /**/) {
    --I;
    switch (Adjustments[I].Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      if (!HandleLValueBasePath(Info, E: Adjustments[I].DerivedToBase.BasePath,
                                Type, Result))
        return false;
      Type = Adjustments[I].DerivedToBase.BasePath->getType();
      break;

    case SubobjectAdjustment::FieldAdjustment:
      if (!HandleLValueMember(Info, E, LVal&: Result, FD: Adjustments[I].Field))
        return false;
      Type = Adjustments[I].Field->getType();
      break;

    case SubobjectAdjustment::MemberPointerAdjustment:
      if (!HandleMemberPointerAccess(Info&: this->Info, LVType: Type, LV&: Result,
                                     RHS: Adjustments[I].Ptr.RHS))
        return false;
      Type = Adjustments[I].Ptr.MPT->getPointeeType();
      break;
    }
  }

  return true;
}
9512
/// Produce an lvalue for a compound literal, evaluating its initializer into
/// either persistent static storage or a frame-local temporary.
bool
LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
  assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) &&
         "lvalue compound literal in c++?");
  APValue *Lit;
  // If the CompoundLiteral has static storage, its value can be used outside
  // this expression. So evaluate it once and store it in ASTContext.
  if (E->hasStaticStorage()) {
    Lit = &E->getOrCreateStaticValue(Ctx&: Info.Ctx);
    Result.set(B: E);
    // Reset any previously evaluated state, otherwise evaluation below might
    // fail.
    // FIXME: Should we just re-use the previously evaluated value instead?
    *Lit = APValue();
  } else {
    // Non-static compound literals only occur in C; give them block scope in
    // the current frame.
    assert(!Info.getLangOpts().CPlusPlus);
    Lit = &Info.CurrentCall->createTemporary(Key: E, T: E->getInitializer()->getType(),
                                             Scope: ScopeKind::Block, LV&: Result);
  }
  // FIXME: Evaluating in place isn't always right. We should figure out how to
  // use appropriate evaluation context here, see
  // clang/test/AST/static-compound-literals-reeval.cpp for a failure.
  if (!EvaluateInPlace(Result&: *Lit, Info, This: Result, E: E->getInitializer())) {
    // Clear the partial result so a stale value is never observed.
    *Lit = APValue();
    return false;
  }
  return true;
}
9541
/// Evaluate a typeid expression to an lvalue designating a std::type_info
/// object. For polymorphic glvalue operands the dynamic type must be
/// computed (C++20 constexpr typeid).
bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
  TypeInfoLValue TypeInfo;

  if (!E->isPotentiallyEvaluated()) {
    // Unevaluated operand: the static type is all we need.
    if (E->isTypeOperand())
      TypeInfo = TypeInfoLValue(E->getTypeOperand(Context: Info.Ctx).getTypePtr());
    else
      TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr());
  } else {
    // Polymorphic operand: only C++20 permits this in constant expressions.
    if (!Info.Ctx.getLangOpts().CPlusPlus20) {
      Info.CCEDiag(E, DiagId: diag::note_constexpr_typeid_polymorphic)
          << E->getExprOperand()->getType()
          << E->getExprOperand()->getSourceRange();
    }

    if (!Visit(S: E->getExprOperand()))
      return false;

    // Determine the most-derived (dynamic) type of the designated object.
    std::optional<DynamicType> DynType =
        ComputeDynamicType(Info, E, This&: Result, AK: AK_TypeId);
    if (!DynType)
      return false;

    TypeInfo = TypeInfoLValue(
        Info.Ctx.getCanonicalTagType(TD: DynType->Type).getTypePtr());
  }

  return Success(B: APValue::LValueBase::getTypeInfo(LV: TypeInfo, TypeInfo: E->getType()));
}
9571
9572bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) {
9573 return Success(B: E->getGuidDecl());
9574}
9575
9576bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
9577 // Handle static data members.
9578 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: E->getMemberDecl())) {
9579 VisitIgnoredBaseExpression(E: E->getBase());
9580 return VisitVarDecl(E, VD);
9581 }
9582
9583 // Handle static member functions.
9584 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: E->getMemberDecl())) {
9585 if (MD->isStatic()) {
9586 VisitIgnoredBaseExpression(E: E->getBase());
9587 return Success(B: MD);
9588 }
9589 }
9590
9591 // Handle non-static data members.
9592 return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
9593}
9594
/// Produce an lvalue for a single-element ext-vector swizzle (e.g. v.x).
/// Multi-element accesses are not yet supported.
bool LValueExprEvaluator::VisitExtVectorElementExpr(
    const ExtVectorElementExpr *E) {
  bool Success = true;

  APValue Val;
  if (!Evaluate(Result&: Val, Info, E: E->getBase())) {
    // Keep evaluating for diagnostics if requested, but remember the failure.
    if (!Info.noteFailure())
      return false;
    Success = false;
  }

  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Elts&: Indices);
  // FIXME: support accessing more than one element
  if (Indices.size() > 1)
    return false;

  if (Success) {
    Result.setFrom(Ctx: Info.Ctx, V: Val);
    QualType BaseType = E->getBase()->getType();
    // For 'ptr->elt' the vector type is behind a pointer.
    if (E->isArrow())
      BaseType = BaseType->getPointeeType();
    const auto *VT = BaseType->castAs<VectorType>();
    HandleLValueVectorElement(Info, E, LVal&: Result, EltTy: VT->getElementType(),
                              Size: VT->getNumElements(), Idx: Indices[0]);
  }

  return Success;
}
9624
/// Produce an lvalue for an array (or vector) subscript, respecting C++17's
/// left-to-right evaluation-order requirement.
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // Scalable-vector subscripts have no compile-time-known layout.
  if (E->getBase()->getType()->isSveVLSBuiltinType())
    return Error(E);

  APSInt Index;
  bool Success = true;

  // Subscripting a (fixed-size) vector value: evaluate the whole vector and
  // then designate one element of it.
  if (const auto *VT = E->getBase()->getType()->getAs<VectorType>()) {
    APValue Val;
    if (!Evaluate(Result&: Val, Info, E: E->getBase())) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }

    if (!EvaluateInteger(E: E->getIdx(), Result&: Index, Info)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }

    if (Success) {
      Result.setFrom(Ctx: Info.Ctx, V: Val);
      HandleLValueVectorElement(Info, E, LVal&: Result, EltTy: VT->getElementType(),
                                Size: VT->getNumElements(), Idx: Index.getExtValue());
    }

    return Success;
  }

  // C++17's rules require us to evaluate the LHS first, regardless of which
  // side is the base.
  for (const Expr *SubExpr : {E->getLHS(), E->getRHS()}) {
    if (SubExpr == E->getBase() ? !evaluatePointer(E: SubExpr, Result)
                                : !EvaluateInteger(E: SubExpr, Result&: Index, Info)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }
  }

  // Adjust the pointer lvalue by the (signed) index.
  return Success &&
         HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: E->getType(), Adjustment: Index);
}
9669
/// Dereference: evaluate the pointer operand and, for object types, check
/// that it actually designates an object (diagnosing UB otherwise).
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
  bool Success = evaluatePointer(E: E->getSubExpr(), Result);
  // [C++26][expr.unary.op]
  // If the operand points to an object or function, the result
  // denotes that object or function; otherwise, the behavior is undefined.
  // Because &(*(type*)0) is a common pattern, we do not fail the evaluation
  // immediately.
  if (!Success || !E->getType().getNonReferenceType()->isObjectType())
    return Success;
  // Either the pointee is a real, live object, or we note UB and let the
  // caller decide whether to continue.
  return bool(findCompleteObject(Info, E, AK: AK_Dereference, LVal: Result,
                                 LValType: E->getType())) ||
         Info.noteUndefinedBehavior();
}
9683
9684bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
9685 if (!Visit(S: E->getSubExpr()))
9686 return false;
9687 // __real is a no-op on scalar lvalues.
9688 if (E->getSubExpr()->getType()->isAnyComplexType())
9689 HandleLValueComplexElement(Info, E, LVal&: Result, EltTy: E->getType(), Imag: false);
9690 return true;
9691}
9692
9693bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
9694 assert(E->getSubExpr()->getType()->isAnyComplexType() &&
9695 "lvalue __imag__ on scalar?");
9696 if (!Visit(S: E->getSubExpr()))
9697 return false;
9698 HandleLValueComplexElement(Info, E, LVal&: Result, EltTy: E->getType(), Imag: true);
9699 return true;
9700}
9701
/// Shared implementation for pre-increment and pre-decrement: update the
/// designated object in place and yield an lvalue to it.
bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
  // Mutation is only a constant expression from C++14 on.
  if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
    return Error(E: UO);

  if (!this->Visit(S: UO->getSubExpr()))
    return false;

  // 'Old' is null because the pre-inc/dec result is the new value.
  return handleIncDec(
      Info&: this->Info, E: UO, LVal: Result, LValType: UO->getSubExpr()->getType(),
      IsIncrement: UO->isIncrementOp(), Old: nullptr);
}
9713
/// Compound assignment (+=, -=, ...): evaluate RHS then LHS (C++17 order),
/// apply the underlying operation, and yield the LHS lvalue.
bool LValueExprEvaluator::VisitCompoundAssignOperator(
    const CompoundAssignOperator *CAO) {
  // Mutation is only a constant expression from C++14 on.
  if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
    return Error(E: CAO);

  bool Success = true;

  // C++17 onwards require that we evaluate the RHS first.
  APValue RHS;
  if (!Evaluate(Result&: RHS, Info&: this->Info, E: CAO->getRHS())) {
    if (!Info.noteFailure())
      return false;
    Success = false;
  }

  // The overall lvalue result is the result of evaluating the LHS.
  if (!this->Visit(S: CAO->getLHS()) || !Success)
    return false;

  // Perform the arithmetic in the (possibly promoted) computation type.
  return handleCompoundAssignment(
      Info&: this->Info, E: CAO,
      LVal: Result, LValType: CAO->getLHS()->getType(), PromotedLValType: CAO->getComputationLHSType(),
      Opcode: CAO->getOpForCompoundAssignment(Opc: CAO->getOpcode()), RVal: RHS);
}
9738
/// Simple assignment: evaluate RHS then LHS (C++17 order), store the value,
/// and yield the LHS lvalue.
bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
  // Mutation is only a constant expression from C++14 on.
  if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure())
    return Error(E);

  bool Success = true;

  // C++17 onwards require that we evaluate the RHS first.
  APValue NewVal;
  if (!Evaluate(Result&: NewVal, Info&: this->Info, E: E->getRHS())) {
    if (!Info.noteFailure())
      return false;
    Success = false;
  }

  if (!this->Visit(S: E->getLHS()) || !Success)
    return false;

  // C++20 allows assignment to change the active member of a union.
  if (Info.getLangOpts().CPlusPlus20 &&
      !MaybeHandleUnionActiveMemberChange(Info, LHSExpr: E->getLHS(), LHS: Result))
    return false;

  return handleAssignment(Info&: this->Info, E, LVal: Result, LValType: E->getLHS()->getType(),
                          Val&: NewVal);
}
9763
9764//===----------------------------------------------------------------------===//
9765// Pointer Evaluation
9766//===----------------------------------------------------------------------===//
9767
9768/// Convenience function. LVal's base must be a call to an alloc_size
9769/// function.
9770static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
9771 const LValue &LVal,
9772 llvm::APInt &Result) {
9773 assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
9774 "Can't get the size of a non alloc_size function");
9775 const auto *Base = LVal.getLValueBase().get<const Expr *>();
9776 const CallExpr *CE = tryUnwrapAllocSizeCall(E: Base);
9777 std::optional<llvm::APInt> Size =
9778 CE->evaluateBytesReturnedByAllocSizeCall(Ctx);
9779 if (!Size)
9780 return false;
9781
9782 Result = std::move(*Size);
9783 return true;
9784}
9785
/// Attempts to evaluate the given LValueBase as the result of a call to
/// a function with the alloc_size attribute. If it was possible to do so, this
/// function will return true, make Result's Base point to said function call,
/// and mark Result's Base as invalid.
static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
                                      LValue &Result) {
  if (Base.isNull())
    return false;

  // Because we do no form of static analysis, we only support const variables.
  //
  // Additionally, we can't support parameters, nor can we support static
  // variables (in the latter case, use-before-assign isn't UB; in the former,
  // we have no clue what they'll be assigned to).
  const auto *VD =
      dyn_cast_or_null<VarDecl>(Val: Base.dyn_cast<const ValueDecl *>());
  if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified())
    return false;

  const Expr *Init = VD->getAnyInitializer();
  if (!Init || Init->getType().isNull())
    return false;

  // The initializer must (modulo parens) be a call to an alloc_size function.
  const Expr *E = Init->IgnoreParens();
  if (!tryUnwrapAllocSizeCall(E))
    return false;

  // Store E instead of E unwrapped so that the type of the LValue's base is
  // what the user wanted.
  Result.setInvalid(B: E);

  // Treat the allocation as an unsized array of the pointee type so that
  // later designator arithmetic works.
  QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
  Result.addUnsizedArray(Info, E, ElemTy: Pointee);
  return true;
}
9821
namespace {
/// Evaluator for prvalue expressions of pointer type. The pointer value is
/// accumulated in 'Result' as an LValue (base + offset + designator).
class PointerExprEvaluator
  : public ExprEvaluatorBase<PointerExprEvaluator> {
  LValue &Result;
  // Whether a result with an invalid base is acceptable (used to support
  // alloc_size-attributed allocation functions).
  bool InvalidBaseOK;

  // Succeed with an lvalue whose base is the expression E itself.
  bool Success(const Expr *E) {
    Result.set(B: E);
    return true;
  }

  bool evaluateLValue(const Expr *E, LValue &Result) {
    return EvaluateLValue(E, Result, Info, InvalidBaseOK);
  }

  bool evaluatePointer(const Expr *E, LValue &Result) {
    return EvaluatePointer(E, Result, Info, InvalidBaseOK);
  }

  bool visitNonBuiltinCallExpr(const CallExpr *E);
public:

  PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK)
    : ExprEvaluatorBaseTy(info), Result(Result),
      InvalidBaseOK(InvalidBaseOK) {}

  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(Ctx: Info.Ctx, V);
    return true;
  }
  // Zero-initializing a pointer yields a null pointer of the target type.
  bool ZeroInitialization(const Expr *E) {
    Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
    return true;
  }

  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitCastExpr(const CastExpr* E);
  bool VisitUnaryAddrOf(const UnaryOperator *E);
  // An ObjC string literal is itself a valid constant pointer base.
  bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
      { return Success(E); }
  bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
    if (E->isExpressibleAsConstantInitializer())
      return Success(E);
    // Not foldable; still evaluate the operand for diagnostics/side-effect
    // notes if the caller wants them.
    if (Info.noteFailure())
      EvaluateIgnoredValue(Info, E: E->getSubExpr());
    return Error(E);
  }
  // GNU address-of-label: the label itself is the base.
  bool VisitAddrLabelExpr(const AddrLabelExpr *E)
      { return Success(E); }
  bool VisitCallExpr(const CallExpr *E);
  bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
  // A block without captures has a constant address; one with captures does
  // not.
  bool VisitBlockExpr(const BlockExpr *E) {
    if (!E->getBlockDecl()->hasCaptures())
      return Success(E);
    return Error(E);
  }
  // 'this': resolve against the current frame, including the lambda cases
  // where 'this' is really a capture field of the closure object.
  bool VisitCXXThisExpr(const CXXThisExpr *E) {
    auto DiagnoseInvalidUseOfThis = [&] {
      if (Info.getLangOpts().CPlusPlus11)
        Info.FFDiag(E, DiagId: diag::note_constexpr_this) << E->isImplicit();
      else
        Info.FFDiag(E);
    };

    // Can't look at 'this' when checking a potential constant expression.
    if (Info.checkingPotentialConstantExpression())
      return false;

    bool IsExplicitLambda =
        isLambdaCallWithExplicitObjectParameter(DC: Info.CurrentCall->Callee);
    if (!IsExplicitLambda) {
      if (!Info.CurrentCall->This) {
        DiagnoseInvalidUseOfThis();
        return false;
      }

      Result = *Info.CurrentCall->This;
    }

    if (isLambdaCallOperator(DC: Info.CurrentCall->Callee)) {
      // Ensure we actually have captured 'this'. If something was wrong with
      // 'this' capture, the error would have been previously reported.
      // Otherwise we can be inside of a default initialization of an object
      // declared by lambda's body, so no need to return false.
      if (!Info.CurrentCall->LambdaThisCaptureField) {
        if (IsExplicitLambda && !Info.CurrentCall->This) {
          DiagnoseInvalidUseOfThis();
          return false;
        }

        return true;
      }

      // 'this' is stored in a capture field; a pointer-typed field must also
      // be loaded so we yield what it points at.
      const auto *MD = cast<CXXMethodDecl>(Val: Info.CurrentCall->Callee);
      return HandleLambdaCapture(
          Info, E, Result, MD, FD: Info.CurrentCall->LambdaThisCaptureField,
          LValueToRValueConversion: Info.CurrentCall->LambdaThisCaptureField->getType()->isPointerType());
    }
    return true;
  }

  bool VisitCXXNewExpr(const CXXNewExpr *E);

  // std::source_location-style expressions evaluate in the context where they
  // were (lexically) defaulted, not where they appear.
  bool VisitSourceLocExpr(const SourceLocExpr *E) {
    assert(!E->isIntType() && "SourceLocExpr isn't a pointer type?");
    APValue LValResult = E->EvaluateInContext(
        Ctx: Info.Ctx, DefaultExpr: Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
    Result.setFrom(Ctx: Info.Ctx, V: LValResult);
    return true;
  }

  // FIXME: #embed as a pointer is not supported by this evaluator yet.
  bool VisitEmbedExpr(const EmbedExpr *E) {
    llvm::report_fatal_error(reason: "Not yet implemented for ExprConstant.cpp");
    return true;
  }

  // __builtin_sycl_unique_stable_name: synthesize a string literal holding
  // the computed name and point at its first character.
  bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) {
    std::string ResultStr = E->ComputeName(Context&: Info.Ctx);

    QualType CharTy = Info.Ctx.CharTy.withConst();
    APInt Size(Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType()),
               ResultStr.size() + 1);
    QualType ArrayTy = Info.Ctx.getConstantArrayType(
        EltTy: CharTy, ArySize: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);

    StringLiteral *SL =
        StringLiteral::Create(Ctx: Info.Ctx, Str: ResultStr, Kind: StringLiteralKind::Ordinary,
                              /*Pascal*/ false, Ty: ArrayTy, Locs: E->getLocation());

    evaluateLValue(E: SL, Result);
    Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val&: ArrayTy));
    return true;
  }

  // FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
9959
9960static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
9961 bool InvalidBaseOK) {
9962 assert(!E->isValueDependent());
9963 assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
9964 return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(S: E);
9965}
9966
/// Pointer arithmetic: ptr + int, int + ptr, and ptr - int. Everything else
/// is delegated to the shared base implementation.
bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() != BO_Add &&
      E->getOpcode() != BO_Sub)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  // Normalize so PExp is the pointer operand and IExp the integer one.
  const Expr *PExp = E->getLHS();
  const Expr *IExp = E->getRHS();
  if (IExp->getType()->isPointerType())
    std::swap(a&: PExp, b&: IExp);

  bool EvalPtrOK = evaluatePointer(E: PExp, Result);
  if (!EvalPtrOK && !Info.noteFailure())
    return false;

  // Evaluate the integer even on failure so further diagnostics are noted.
  llvm::APSInt Offset;
  if (!EvaluateInteger(E: IExp, Result&: Offset, Info) || !EvalPtrOK)
    return false;

  // Subtraction adjusts by the negated offset.
  if (E->getOpcode() == BO_Sub)
    negateAsSigned(Int&: Offset);

  QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
  return HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: Pointee, Adjustment: Offset);
}
9991
9992bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
9993 return evaluateLValue(E: E->getSubExpr(), Result);
9994}
9995
9996// Is the provided decl 'std::source_location::current'?
9997static bool IsDeclSourceLocationCurrent(const FunctionDecl *FD) {
9998 if (!FD)
9999 return false;
10000 const IdentifierInfo *FnII = FD->getIdentifier();
10001 if (!FnII || !FnII->isStr(Str: "current"))
10002 return false;
10003
10004 const auto *RD = dyn_cast<RecordDecl>(Val: FD->getParent());
10005 if (!RD)
10006 return false;
10007
10008 const IdentifierInfo *ClassII = RD->getIdentifier();
10009 return RD->isInStdNamespace() && ClassII && ClassII->isStr(Str: "source_location");
10010}
10011
/// Handle the cast kinds that can produce a pointer value. Each case either
/// produces a result directly or falls through to the shared base
/// implementation.
bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const Expr *SubExpr = E->getSubExpr();

  switch (E->getCastKind()) {
  default:
    break;
  case CK_BitCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_AddressSpaceConversion:
    if (!Visit(S: SubExpr))
      return false;
    if (E->getType()->isFunctionPointerType() ||
        SubExpr->getType()->isFunctionPointerType()) {
      // Casting between two function pointer types, or between a function
      // pointer and an object pointer, is always a reinterpret_cast.
      CCEDiag(E, D: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
          << Info.Ctx.getLangOpts().CPlusPlus;
      Result.Designator.setInvalid();
    } else if (!E->getType()->isVoidPointerType()) {
      // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
      // permitted in constant expressions in C++11. Bitcasts from cv void* are
      // also static_casts, but we disallow them as a resolution to DR1312.
      //
      // In some circumstances, we permit casting from void* to cv1 T*, when the
      // actual pointee object is actually a cv2 T.
      bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
                            !Result.IsNullPtr;
      bool VoidPtrCastMaybeOK =
          Result.IsNullPtr ||
          (HasValidResult &&
           Info.Ctx.hasSimilarType(T1: Result.Designator.getType(Ctx&: Info.Ctx),
                                   T2: E->getType()->getPointeeType()));
      // 1. We'll allow it in std::allocator::allocate, and anything which that
      //    calls.
      // 2. HACK 2022-03-28: Work around an issue with libstdc++'s
      //    <source_location> header. Fixed in GCC 12 and later (2022-04-??).
      //    We'll allow it in the body of std::source_location::current.  GCC's
      //    implementation had a parameter of type `void*`, and casts from
      //    that back to `const __impl*` in its body.
      if (VoidPtrCastMaybeOK &&
          (Info.getStdAllocatorCaller(FnName: "allocate") ||
           IsDeclSourceLocationCurrent(FD: Info.CurrentCall->Callee) ||
           Info.getLangOpts().CPlusPlus26)) {
        // Permitted.
      } else {
        // Not permitted: emit the most specific note we can, then poison the
        // designator so subobject accesses through this pointer fail.
        if (SubExpr->getType()->isVoidPointerType() &&
            Info.getLangOpts().CPlusPlus) {
          if (HasValidResult)
            CCEDiag(E, D: diag::note_constexpr_invalid_void_star_cast)
                << SubExpr->getType() << Info.getLangOpts().CPlusPlus26
                << Result.Designator.getType(Ctx&: Info.Ctx).getCanonicalType()
                << E->getType()->getPointeeType();
          else
            CCEDiag(E, D: diag::note_constexpr_invalid_cast)
                << diag::ConstexprInvalidCastKind::CastFrom
                << SubExpr->getType();
        } else
          CCEDiag(E, D: diag::note_constexpr_invalid_cast)
              << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
              << Info.Ctx.getLangOpts().CPlusPlus;
        Result.Designator.setInvalid();
      }
    }
    // Null pointers in a different address space must be re-normalized to the
    // target type's null representation.
    if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr)
      ZeroInitialization(E);
    return true;

  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
    if (!evaluatePointer(E: E->getSubExpr(), Result))
      return false;
    // A null pointer stays null across derived-to-base conversion.
    if (!Result.Base && Result.Offset.isZero())
      return true;

    // Now figure out the necessary offset to add to the base LV to get from
    // the derived class to the base class.
    return HandleLValueBasePath(Info, E, Type: E->getSubExpr()->getType()->
                                      castAs<PointerType>()->getPointeeType(),
                                Result);

  case CK_BaseToDerived:
    if (!Visit(S: E->getSubExpr()))
      return false;
    // A null pointer stays null across base-to-derived conversion.
    if (!Result.Base && Result.Offset.isZero())
      return true;
    return HandleBaseToDerivedCast(Info, E, Result);

  case CK_Dynamic:
    if (!Visit(S: E->getSubExpr()))
      return false;
    return HandleDynamicCast(Info, E: cast<ExplicitCastExpr>(Val: E), Ptr&: Result);

  case CK_NullToPointer:
    // Evaluate the operand only for side effects; the result is null.
    VisitIgnoredValue(E: E->getSubExpr());
    return ZeroInitialization(E);

  case CK_IntegralToPointer: {
    // This is a reinterpret_cast; diagnose but keep folding.
    CCEDiag(E, D: diag::note_constexpr_invalid_cast)
        << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
        << Info.Ctx.getLangOpts().CPlusPlus;

    APValue Value;
    if (!EvaluateIntegerOrLValue(E: SubExpr, Result&: Value, Info))
      break;

    if (Value.isInt()) {
      // A known integer: produce either a null pointer (if it matches the
      // target's null value) or a baseless pointer at that raw offset.
      unsigned Size = Info.Ctx.getTypeSize(T: E->getType());
      uint64_t N = Value.getInt().extOrTrunc(width: Size).getZExtValue();
      if (N == Info.Ctx.getTargetNullPointerValue(QT: E->getType())) {
        Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType());
      } else {
        Result.Base = (Expr *)nullptr;
        Result.InvalidBase = false;
        Result.Offset = CharUnits::fromQuantity(Quantity: N);
        Result.Designator.setInvalid();
        Result.IsNullPtr = false;
      }
      return true;
    } else {
      // In rare instances, the value isn't an lvalue.
      // For example, when the value is the difference between the addresses of
      // two labels. We reject that as a constant expression because we can't
      // compute a valid offset to convert into a pointer.
      if (!Value.isLValue())
        return false;

      // Cast is of an lvalue, no need to change value.
      Result.setFrom(Ctx: Info.Ctx, V: Value);
      return true;
    }
  }

  case CK_ArrayToPointerDecay: {
    if (SubExpr->isGLValue()) {
      if (!evaluateLValue(E: SubExpr, Result))
        return false;
    } else {
      // A prvalue array (e.g. a string-literal initializer) must first be
      // materialized into a full-expression temporary.
      APValue &Value = Info.CurrentCall->createTemporary(
          Key: SubExpr, T: SubExpr->getType(), Scope: ScopeKind::FullExpression, LV&: Result);
      if (!EvaluateInPlace(Result&: Value, Info, This: Result, E: SubExpr))
        return false;
    }
    // The result is a pointer to the first element of the array.
    auto *AT = Info.Ctx.getAsArrayType(T: SubExpr->getType());
    if (auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
      Result.addArray(Info, E, CAT);
    else
      Result.addUnsizedArray(Info, E, ElemTy: AT->getElementType());
    return true;
  }

  case CK_FunctionToPointerDecay:
    return evaluateLValue(E: SubExpr, Result);

  case CK_LValueToRValue: {
    LValue LVal;
    if (!evaluateLValue(E: E->getSubExpr(), Result&: LVal))
      return false;

    APValue RVal;
    // Note, we use the subexpression's type in order to retain cv-qualifiers.
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getSubExpr()->getType(),
                                        LVal, RVal))
      // The load failed; as a last resort, see if the base was a call to an
      // alloc_size function (only when invalid bases are acceptable).
      return InvalidBaseOK &&
             evaluateLValueAsAllocSize(Info, Base: LVal.Base, Result);
    return Success(V: RVal, E);
  }
  }

  return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
10186
10187static CharUnits GetAlignOfType(const ASTContext &Ctx, QualType T,
10188 UnaryExprOrTypeTrait ExprKind) {
10189 // C++ [expr.alignof]p3:
10190 // When alignof is applied to a reference type, the result is the
10191 // alignment of the referenced type.
10192 T = T.getNonReferenceType();
10193
10194 if (T.getQualifiers().hasUnaligned())
10195 return CharUnits::One();
10196
10197 const bool AlignOfReturnsPreferred =
10198 Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
10199
10200 // __alignof is defined to return the preferred alignment.
10201 // Before 8, clang returned the preferred alignment for alignof and _Alignof
10202 // as well.
10203 if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
10204 return Ctx.toCharUnitsFromBits(BitSize: Ctx.getPreferredTypeAlign(T: T.getTypePtr()));
10205 // alignof and _Alignof are defined to return the ABI alignment.
10206 else if (ExprKind == UETT_AlignOf)
10207 return Ctx.getTypeAlignInChars(T: T.getTypePtr());
10208 else
10209 llvm_unreachable("GetAlignOfType on a non-alignment ExprKind");
10210}
10211
10212CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E,
10213 UnaryExprOrTypeTrait ExprKind) {
10214 E = E->IgnoreParens();
10215
10216 // The kinds of expressions that we have special-case logic here for
10217 // should be kept up to date with the special checks for those
10218 // expressions in Sema.
10219
10220 // alignof decl is always accepted, even if it doesn't make sense: we default
10221 // to 1 in those cases.
10222 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E))
10223 return Ctx.getDeclAlign(D: DRE->getDecl(),
10224 /*RefAsPointee*/ ForAlignof: true);
10225
10226 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: E))
10227 return Ctx.getDeclAlign(D: ME->getMemberDecl(),
10228 /*RefAsPointee*/ ForAlignof: true);
10229
10230 return GetAlignOfType(Ctx, T: E->getType(), ExprKind);
10231}
10232
10233static CharUnits getBaseAlignment(EvalInfo &Info, const LValue &Value) {
10234 if (const auto *VD = Value.Base.dyn_cast<const ValueDecl *>())
10235 return Info.Ctx.getDeclAlign(D: VD);
10236 if (const auto *E = Value.Base.dyn_cast<const Expr *>())
10237 return GetAlignOfExpr(Ctx: Info.Ctx, E, ExprKind: UETT_AlignOf);
10238 return GetAlignOfType(Ctx: Info.Ctx, T: Value.Base.getTypeInfoType(), ExprKind: UETT_AlignOf);
10239}
10240
10241/// Evaluate the value of the alignment argument to __builtin_align_{up,down},
10242/// __builtin_is_aligned and __builtin_assume_aligned.
10243static bool getAlignmentArgument(const Expr *E, QualType ForType,
10244 EvalInfo &Info, APSInt &Alignment) {
10245 if (!EvaluateInteger(E, Result&: Alignment, Info))
10246 return false;
10247 if (Alignment < 0 || !Alignment.isPowerOf2()) {
10248 Info.FFDiag(E, DiagId: diag::note_constexpr_invalid_alignment) << Alignment;
10249 return false;
10250 }
10251 unsigned SrcWidth = Info.Ctx.getIntWidth(T: ForType);
10252 APSInt MaxValue(APInt::getOneBitSet(numBits: SrcWidth, BitNo: SrcWidth - 1));
10253 if (APSInt::compareValues(I1: Alignment, I2: MaxValue) > 0) {
10254 Info.FFDiag(E, DiagId: diag::note_constexpr_alignment_too_big)
10255 << MaxValue << ForType << Alignment;
10256 return false;
10257 }
10258 // Ensure both alignment and source value have the same bit width so that we
10259 // don't assert when computing the resulting value.
10260 APSInt ExtAlignment =
10261 APSInt(Alignment.zextOrTrunc(width: SrcWidth), /*isUnsigned=*/true);
10262 assert(APSInt::compareValues(Alignment, ExtAlignment) == 0 &&
10263 "Alignment should not be changed by ext/trunc");
10264 Alignment = ExtAlignment;
10265 assert(Alignment.getBitWidth() == SrcWidth);
10266 return true;
10267}
10268
10269// To be clear: this happily visits unsupported builtins. Better name welcomed.
10270bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
10271 if (ExprEvaluatorBaseTy::VisitCallExpr(E))
10272 return true;
10273
10274 if (!(InvalidBaseOK && E->getCalleeAllocSizeAttr()))
10275 return false;
10276
10277 Result.setInvalid(B: E);
10278 QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType();
10279 Result.addUnsizedArray(Info, E, ElemTy: PointeeTy);
10280 return true;
10281}
10282
10283bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
10284 if (!IsConstantEvaluatedBuiltinCall(E))
10285 return visitNonBuiltinCallExpr(E);
10286 return VisitBuiltinCallExpr(E, BuiltinOp: E->getBuiltinCallee());
10287}
10288
10289// Determine if T is a character type for which we guarantee that
10290// sizeof(T) == 1.
10291static bool isOneByteCharacterType(QualType T) {
10292 return T->isCharType() || T->isChar8Type();
10293}
10294
/// Evaluate a call to a builtin whose result is a pointer.
/// \p BuiltinOp is the builtin ID of the callee. Returns false if the call
/// cannot be constant-evaluated; a diagnostic has been produced where
/// appropriate.
bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
                                                unsigned BuiltinOp) {
  // Calls treated as opaque constants evaluate to themselves.
  if (IsOpaqueConstantCall(E))
    return Success(E);

  switch (BuiltinOp) {
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    // addressof(x) is simply the lvalue of its operand.
    return evaluateLValue(E: E->getArg(Arg: 0), Result);
  case Builtin::BI__builtin_assume_aligned: {
    // We need to be very careful here because: if the pointer does not have the
    // asserted alignment, then the behavior is undefined, and undefined
    // behavior is non-constant.
    if (!evaluatePointer(E: E->getArg(Arg: 0), Result))
      return false;

    LValue OffsetResult(Result);
    APSInt Alignment;
    if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: E->getArg(Arg: 0)->getType(), Info,
                              Alignment))
      return false;
    CharUnits Align = CharUnits::fromQuantity(Quantity: Alignment.getZExtValue());

    // The optional third argument is an offset subtracted from the pointer
    // before the alignment is checked.
    if (E->getNumArgs() > 2) {
      APSInt Offset;
      if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Offset, Info))
        return false;

      int64_t AdditionalOffset = -Offset.getZExtValue();
      OffsetResult.Offset += CharUnits::fromQuantity(Quantity: AdditionalOffset);
    }

    // If there is a base object, then it must have the correct alignment.
    if (OffsetResult.Base) {
      CharUnits BaseAlignment = getBaseAlignment(Info, Value: OffsetResult);

      if (BaseAlignment < Align) {
        Result.Designator.setInvalid();
        CCEDiag(E: E->getArg(Arg: 0), D: diag::note_constexpr_baa_insufficient_alignment)
            << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
        return false;
      }
    }

    // The offset must also have the correct alignment.
    if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) {
      Result.Designator.setInvalid();

      // Pick the diagnostic based on whether a base object is known.
      (OffsetResult.Base
           ? CCEDiag(E: E->getArg(Arg: 0),
                     D: diag::note_constexpr_baa_insufficient_alignment)
                 << 1
           : CCEDiag(E: E->getArg(Arg: 0),
                     D: diag::note_constexpr_baa_value_insufficient_alignment))
          << OffsetResult.Offset.getQuantity() << Align.getQuantity();
      return false;
    }

    return true;
  }
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down: {
    if (!evaluatePointer(E: E->getArg(Arg: 0), Result))
      return false;
    APSInt Alignment;
    if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: E->getArg(Arg: 0)->getType(), Info,
                              Alignment))
      return false;
    CharUnits BaseAlignment = getBaseAlignment(Info, Value: Result);
    CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(offset: Result.Offset);
    // For align_up/align_down, we can return the same value if the alignment
    // is known to be greater or equal to the requested value.
    if (PtrAlign.getQuantity() >= Alignment)
      return true;

    // The alignment could be greater than the minimum at run-time, so we cannot
    // infer much about the resulting pointer value. One case is possible:
    // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
    // can infer the correct index if the requested alignment is smaller than
    // the base alignment so we can perform the computation on the offset.
    if (BaseAlignment.getQuantity() >= Alignment) {
      assert(Alignment.getBitWidth() <= 64 &&
             "Cannot handle > 64-bit address-space");
      uint64_t Alignment64 = Alignment.getZExtValue();
      CharUnits NewOffset = CharUnits::fromQuantity(
          Quantity: BuiltinOp == Builtin::BI__builtin_align_down
              ? llvm::alignDown(Value: Result.Offset.getQuantity(), Align: Alignment64)
              : llvm::alignTo(Value: Result.Offset.getQuantity(), Align: Alignment64));
      Result.adjustOffset(N: NewOffset - Result.Offset);
      // TODO: diagnose out-of-bounds values/only allow for arrays?
      return true;
    }
    // Otherwise, we cannot constant-evaluate the result.
    Info.FFDiag(E: E->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_adjust)
        << Alignment;
    return false;
  }
  case Builtin::BI__builtin_operator_new:
    return HandleOperatorNewCall(Info, E, Result);
  case Builtin::BI__builtin_launder:
    // launder is a no-op for constant evaluation: just evaluate the operand.
    return evaluatePointer(E: E->getArg(Arg: 0), Result);
  case Builtin::BIstrchr:
  case Builtin::BIwcschr:
  case Builtin::BImemchr:
  case Builtin::BIwmemchr:
    // Calling the library forms (not the __builtin_ spellings) is not
    // a core constant expression; note that, then fold them anyway.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_strchr:
  case Builtin::BI__builtin_wcschr:
  case Builtin::BI__builtin_memchr:
  case Builtin::BI__builtin_char_memchr:
  case Builtin::BI__builtin_wmemchr: {
    if (!Visit(S: E->getArg(Arg: 0)))
      return false;
    APSInt Desired;
    if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: Desired, Info))
      return false;
    // The str* forms have no length bound; use "unlimited" for them.
    uint64_t MaxLength = uint64_t(-1);
    if (BuiltinOp != Builtin::BIstrchr &&
        BuiltinOp != Builtin::BIwcschr &&
        BuiltinOp != Builtin::BI__builtin_strchr &&
        BuiltinOp != Builtin::BI__builtin_wcschr) {
      APSInt N;
      if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info))
        return false;
      MaxLength = N.getZExtValue();
    }
    // We cannot find the value if there are no candidates to match against.
    if (MaxLength == 0u)
      return ZeroInitialization(E);
    if (!Result.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) ||
        Result.Designator.Invalid)
      return false;
    QualType CharTy = Result.Designator.getType(Ctx&: Info.Ctx);
    bool IsRawByte = BuiltinOp == Builtin::BImemchr ||
                     BuiltinOp == Builtin::BI__builtin_memchr;
    assert(IsRawByte ||
           Info.Ctx.hasSameUnqualifiedType(
               CharTy, E->getArg(0)->getType()->getPointeeType()));
    // Pointers to const void may point to objects of incomplete type.
    if (IsRawByte && CharTy->isIncompleteType()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_ltor_incomplete_type) << CharTy;
      return false;
    }
    // Give up on byte-oriented matching against multibyte elements.
    // FIXME: We can compare the bytes in the correct order.
    if (IsRawByte && !isOneByteCharacterType(T: CharTy)) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memchr_unsupported)
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp) << CharTy;
      return false;
    }
    // Figure out what value we're actually looking for (after converting to
    // the corresponding unsigned type if necessary).
    uint64_t DesiredVal;
    bool StopAtNull = false;
    switch (BuiltinOp) {
    case Builtin::BIstrchr:
    case Builtin::BI__builtin_strchr:
      // strchr compares directly to the passed integer, and therefore
      // always fails if given an int that is not a char.
      if (!APSInt::isSameValue(I1: HandleIntToIntCast(Info, E, DestType: CharTy,
                                                 SrcType: E->getArg(Arg: 1)->getType(),
                                                 Value: Desired),
                               I2: Desired))
        return ZeroInitialization(E);
      StopAtNull = true;
      [[fallthrough]];
    case Builtin::BImemchr:
    case Builtin::BI__builtin_memchr:
    case Builtin::BI__builtin_char_memchr:
      // memchr compares by converting both sides to unsigned char. That's also
      // correct for strchr if we get this far (to cope with plain char being
      // unsigned in the strchr case).
      DesiredVal = Desired.trunc(width: Info.Ctx.getCharWidth()).getZExtValue();
      break;

    case Builtin::BIwcschr:
    case Builtin::BI__builtin_wcschr:
      StopAtNull = true;
      [[fallthrough]];
    case Builtin::BIwmemchr:
    case Builtin::BI__builtin_wmemchr:
      // wcschr and wmemchr are given a wchar_t to look for. Just use it.
      DesiredVal = Desired.getZExtValue();
      break;
    }

    // Walk the elements one at a time, looking for a match; Result is
    // advanced in place so a hit leaves it pointing at the found element.
    for (; MaxLength; --MaxLength) {
      APValue Char;
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: CharTy, LVal: Result, RVal&: Char) ||
          !Char.isInt())
        return false;
      if (Char.getInt().getZExtValue() == DesiredVal)
        return true;
      if (StopAtNull && !Char.getInt())
        break;
      if (!HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: CharTy, Adjustment: 1))
        return false;
    }
    // Not found: return nullptr.
    return ZeroInitialization(E);
  }

  case Builtin::BImemcpy:
  case Builtin::BImemmove:
  case Builtin::BIwmemcpy:
  case Builtin::BIwmemmove:
    // As above: the library forms are not core constant expressions, but we
    // fold them anyway after noting that.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_memcpy:
  case Builtin::BI__builtin_memmove:
  case Builtin::BI__builtin_wmemcpy:
  case Builtin::BI__builtin_wmemmove: {
    bool WChar = BuiltinOp == Builtin::BIwmemcpy ||
                 BuiltinOp == Builtin::BIwmemmove ||
                 BuiltinOp == Builtin::BI__builtin_wmemcpy ||
                 BuiltinOp == Builtin::BI__builtin_wmemmove;
    bool Move = BuiltinOp == Builtin::BImemmove ||
                BuiltinOp == Builtin::BIwmemmove ||
                BuiltinOp == Builtin::BI__builtin_memmove ||
                BuiltinOp == Builtin::BI__builtin_wmemmove;

    // The result of mem* is the first argument.
    if (!Visit(S: E->getArg(Arg: 0)))
      return false;
    LValue Dest = Result;

    LValue Src;
    if (!EvaluatePointer(E: E->getArg(Arg: 1), Result&: Src, Info))
      return false;

    APSInt N;
    if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info))
      return false;
    assert(!N.isSigned() && "memcpy and friends take an unsigned size");

    // If the size is zero, we treat this as always being a valid no-op.
    // (Even if one of the src and dest pointers is null.)
    if (!N)
      return true;

    // Otherwise, if either of the operands is null, we can't proceed. Don't
    // try to determine the type of the copied objects, because there aren't
    // any.
    if (!Src.Base || !Dest.Base) {
      APValue Val;
      (!Src.Base ? Src : Dest).moveInto(V&: Val);
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_null)
          << Move << WChar << !!Src.Base
          << Val.getAsString(Ctx: Info.Ctx, Ty: E->getArg(Arg: 0)->getType());
      return false;
    }
    if (Src.Designator.Invalid || Dest.Designator.Invalid)
      return false;

    // We require that Src and Dest are both pointers to arrays of
    // trivially-copyable type. (For the wide version, the designator will be
    // invalid if the designated object is not a wchar_t.)
    QualType T = Dest.Designator.getType(Ctx&: Info.Ctx);
    QualType SrcT = Src.Designator.getType(Ctx&: Info.Ctx);
    if (!Info.Ctx.hasSameUnqualifiedType(T1: T, T2: SrcT)) {
      // FIXME: Consider using our bit_cast implementation to support this.
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
      return false;
    }
    if (T->isIncompleteType()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_incomplete_type) << Move << T;
      return false;
    }
    if (!T.isTriviallyCopyableType(Context: Info.Ctx)) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_nontrivial) << Move << T;
      return false;
    }

    // Figure out how many T's we're copying.
    uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity();
    if (TSize == 0)
      return false;
    if (!WChar) {
      // The byte-oriented forms take a size in bytes; it must be an exact
      // multiple of the element size.
      uint64_t Remainder;
      llvm::APInt OrigN = N;
      llvm::APInt::udivrem(LHS: OrigN, RHS: TSize, Quotient&: N, Remainder);
      if (Remainder) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_unsupported)
            << Move << WChar << 0 << T << toString(I: OrigN, Radix: 10, /*Signed*/false)
            << (unsigned)TSize;
        return false;
      }
    }

    // Check that the copying will remain within the arrays, just so that we
    // can give a more meaningful diagnostic. This implicitly also checks that
    // N fits into 64 bits.
    uint64_t RemainingSrcSize = Src.Designator.validIndexAdjustments().second;
    uint64_t RemainingDestSize = Dest.Designator.validIndexAdjustments().second;
    if (N.ugt(RHS: RemainingSrcSize) || N.ugt(RHS: RemainingDestSize)) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_unsupported)
          << Move << WChar << (N.ugt(RHS: RemainingSrcSize) ? 1 : 2) << T
          << toString(I: N, Radix: 10, /*Signed*/false);
      return false;
    }
    uint64_t NElems = N.getZExtValue();
    uint64_t NBytes = NElems * TSize;

    // Check for overlap.
    int Direction = 1;
    if (HasSameBase(A: Src, B: Dest)) {
      uint64_t SrcOffset = Src.getLValueOffset().getQuantity();
      uint64_t DestOffset = Dest.getLValueOffset().getQuantity();
      if (DestOffset >= SrcOffset && DestOffset - SrcOffset < NBytes) {
        // Dest is inside the source region.
        if (!Move) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_overlap) << WChar;
          return false;
        }
        // For memmove and friends, copy backwards.
        if (!HandleLValueArrayAdjustment(Info, E, LVal&: Src, EltTy: T, Adjustment: NElems - 1) ||
            !HandleLValueArrayAdjustment(Info, E, LVal&: Dest, EltTy: T, Adjustment: NElems - 1))
          return false;
        Direction = -1;
      } else if (!Move && SrcOffset >= DestOffset &&
                 SrcOffset - DestOffset < NBytes) {
        // Src is inside the destination region for memcpy: invalid.
        Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_overlap) << WChar;
        return false;
      }
    }

    // Element-by-element copy loop, walking forwards or backwards depending
    // on the overlap handling above.
    while (true) {
      APValue Val;
      // FIXME: Set WantObjectRepresentation to true if we're copying a
      // char-like type?
      if (!handleLValueToRValueConversion(Info, Conv: E, Type: T, LVal: Src, RVal&: Val) ||
          !handleAssignment(Info, E, LVal: Dest, LValType: T, Val))
        return false;
      // Do not iterate past the last element; if we're copying backwards, that
      // might take us off the start of the array.
      if (--NElems == 0)
        return true;
      if (!HandleLValueArrayAdjustment(Info, E, LVal&: Src, EltTy: T, Adjustment: Direction) ||
          !HandleLValueArrayAdjustment(Info, E, LVal&: Dest, EltTy: T, Adjustment: Direction))
        return false;
    }
  }

  default:
    return false;
  }
}
10655
10656static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
10657 APValue &Result, const InitListExpr *ILE,
10658 QualType AllocType);
10659static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
10660 APValue &Result,
10661 const CXXConstructExpr *CCE,
10662 QualType AllocType);
10663
/// Evaluate a new-expression: perform (or, for placement new, locate) the
/// allocation, run the initializer, and produce a pointer to the resulting
/// object (or to its first element, for array new).
bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
  // Constant-evaluated 'new' is a C++20 feature; note that in older modes.
  if (!Info.getLangOpts().CPlusPlus20)
    Info.CCEDiag(E, DiagId: diag::note_constexpr_new);

  // We cannot speculatively evaluate a new expression.
  if (Info.SpeculativeEvaluationDepth)
    return false;

  FunctionDecl *OperatorNew = E->getOperatorNew();
  QualType AllocType = E->getAllocatedType();
  // For placement new, TargetType becomes the placement argument's type.
  QualType TargetType = AllocType;

  bool IsNothrow = false;
  bool IsPlacement = false;

  if (E->getNumPlacementArgs() == 1 &&
      E->getPlacementArg(I: 0)->getType()->isNothrowT()) {
    // The only new-placement list we support is of the form (std::nothrow).
    //
    // FIXME: There is no restriction on this, but it's not clear that any
    // other form makes any sense. We get here for cases such as:
    //
    //   new (std::align_val_t{N}) X(int)
    //
    // (which should presumably be valid only if N is a multiple of
    // alignof(int), and in any case can't be deallocated unless N is
    // alignof(X) and X has new-extended alignment).
    LValue Nothrow;
    if (!EvaluateLValue(E: E->getPlacementArg(I: 0), Result&: Nothrow, Info))
      return false;
    IsNothrow = true;
  } else if (OperatorNew->isReservedGlobalPlacementOperator()) {
    // Placement new into an existing object: allowed in C++26, inside
    // std:: functions, or under the MS constexpr extension.
    if (Info.CurrentCall->isStdFunction() || Info.getLangOpts().CPlusPlus26 ||
        (Info.CurrentCall->CanEvalMSConstexpr &&
         OperatorNew->hasAttr<MSConstexprAttr>())) {
      if (!EvaluatePointer(E: E->getPlacementArg(I: 0), Result, Info))
        return false;
      if (Result.Designator.Invalid)
        return false;
      TargetType = E->getPlacementArg(I: 0)->getType();
      IsPlacement = true;
    } else {
      Info.FFDiag(E, DiagId: diag::note_constexpr_new_placement)
          << /*C++26 feature*/ 1 << E->getSourceRange();
      return false;
    }
  } else if (E->getNumPlacementArgs()) {
    // Any other placement-argument list is unsupported.
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_placement)
        << /*Unsupported*/ 0 << E->getSourceRange();
    return false;
  } else if (!OperatorNew
                  ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable)
        << isa<CXXMethodDecl>(Val: OperatorNew) << OperatorNew;
    return false;
  }

  const Expr *Init = E->getInitializer();
  // Set when the initializer covers fewer elements than are allocated and
  // needs special resizing treatment during initialization.
  const InitListExpr *ResizedArrayILE = nullptr;
  const CXXConstructExpr *ResizedArrayCCE = nullptr;
  bool ValueInit = false;

  if (std::optional<const Expr *> ArraySize = E->getArraySize()) {
    // Strip no-op / integral-cast wrappers to reach the bound expression.
    const Expr *Stripped = *ArraySize;
    for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Val: Stripped);
         Stripped = ICE->getSubExpr())
      if (ICE->getCastKind() != CK_NoOp &&
          ICE->getCastKind() != CK_IntegralCast)
        break;

    llvm::APSInt ArrayBound;
    if (!EvaluateInteger(E: Stripped, Result&: ArrayBound, Info))
      return false;

    // C++ [expr.new]p9:
    //   The expression is erroneous if:
    //   -- [...] its value before converting to size_t [or] applying the
    //      second standard conversion sequence is less than zero
    if (ArrayBound.isSigned() && ArrayBound.isNegative()) {
      // (std::nothrow) new returns null on failure instead of diagnosing.
      if (IsNothrow)
        return ZeroInitialization(E);

      Info.FFDiag(E: *ArraySize, DiagId: diag::note_constexpr_new_negative)
          << ArrayBound << (*ArraySize)->getSourceRange();
      return false;
    }

    //   -- its value is such that the size of the allocated object would
    //      exceed the implementation-defined limit
    if (!Info.CheckArraySize(Loc: ArraySize.value()->getExprLoc(),
                             BitWidth: ConstantArrayType::getNumAddressingBits(
                                 Context: Info.Ctx, ElementType: AllocType, NumElements: ArrayBound),
                             ElemCount: ArrayBound.getZExtValue(), /*Diag=*/!IsNothrow)) {
      if (IsNothrow)
        return ZeroInitialization(E);
      return false;
    }

    //   -- the new-initializer is a braced-init-list and the number of
    //      array elements for which initializers are provided [...]
    //      exceeds the number of elements to initialize
    if (!Init) {
      // No initialization is performed.
    } else if (isa<CXXScalarValueInitExpr>(Val: Init) ||
               isa<ImplicitValueInitExpr>(Val: Init)) {
      ValueInit = true;
    } else if (auto *CCE = dyn_cast<CXXConstructExpr>(Val: Init)) {
      ResizedArrayCCE = CCE;
    } else {
      auto *CAT = Info.Ctx.getAsConstantArrayType(T: Init->getType());
      assert(CAT && "unexpected type for array initializer");

      // Compare initializer length against the allocated bound at a common
      // bit width to avoid overflow.
      unsigned Bits =
          std::max(a: CAT->getSizeBitWidth(), b: ArrayBound.getBitWidth());
      llvm::APInt InitBound = CAT->getSize().zext(width: Bits);
      llvm::APInt AllocBound = ArrayBound.zext(width: Bits);
      if (InitBound.ugt(RHS: AllocBound)) {
        if (IsNothrow)
          return ZeroInitialization(E);

        Info.FFDiag(E: *ArraySize, DiagId: diag::note_constexpr_new_too_small)
            << toString(I: AllocBound, Radix: 10, /*Signed=*/false)
            << toString(I: InitBound, Radix: 10, /*Signed=*/false)
            << (*ArraySize)->getSourceRange();
        return false;
      }

      // If the sizes differ, we must have an initializer list, and we need
      // special handling for this case when we initialize.
      if (InitBound != AllocBound)
        ResizedArrayILE = cast<InitListExpr>(Val: Init);
    }

    // Rewrite the allocated type as a constant array of the evaluated bound.
    AllocType = Info.Ctx.getConstantArrayType(EltTy: AllocType, ArySize: ArrayBound, SizeExpr: nullptr,
                                              ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
  } else {
    assert(!AllocType->isArrayType() &&
           "array allocation with non-array new");
  }

  APValue *Val;
  if (IsPlacement) {
    // Placement new: find the existing object whose storage we are reusing,
    // checking that it is writable and of a compatible type/size.
    AccessKinds AK = AK_Construct;
    struct FindObjectHandler {
      EvalInfo &Info;
      const Expr *E;
      QualType AllocType;
      const AccessKinds AccessKind;
      APValue *Value;

      typedef bool result_type;
      bool failed() { return false; }
      // Constructing into const storage is not permitted.
      bool checkConst(QualType QT) {
        if (QT.isConstQualified()) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT;
          return false;
        }
        return true;
      }
      bool found(APValue &Subobj, QualType SubobjType) {
        if (!checkConst(QT: SubobjType))
          return false;
        // FIXME: Reject the cases where [basic.life]p8 would not permit the
        // old name of the object to be used to name the new object.
        // The reused storage must be at least as large as the allocation and
        // have a similar element type.
        unsigned SubobjectSize = 1;
        unsigned AllocSize = 1;
        if (auto *CAT = dyn_cast<ConstantArrayType>(Val&: AllocType))
          AllocSize = CAT->getZExtSize();
        if (auto *CAT = dyn_cast<ConstantArrayType>(Val&: SubobjType))
          SubobjectSize = CAT->getZExtSize();
        if (SubobjectSize < AllocSize ||
            !Info.Ctx.hasSimilarType(T1: Info.Ctx.getBaseElementType(QT: SubobjType),
                                     T2: Info.Ctx.getBaseElementType(QT: AllocType))) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_placement_new_wrong_type)
              << SubobjType << AllocType;
          return false;
        }
        Value = &Subobj;
        return true;
      }
      // Scalar subobjects of complex values cannot be reused this way.
      bool found(APSInt &Value, QualType SubobjType) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_construct_complex_elem);
        return false;
      }
      bool found(APFloat &Value, QualType SubobjType) {
        Info.FFDiag(E, DiagId: diag::note_constexpr_construct_complex_elem);
        return false;
      }
    } Handler = {.Info: Info, .E: E, .AllocType: AllocType, .AccessKind: AK, .Value: nullptr};

    CompleteObject Obj = findCompleteObject(Info, E, AK, LVal: Result, LValType: AllocType);
    if (!Obj || !findSubobject(Info, E, Obj, Sub: Result.Designator, handler&: Handler))
      return false;

    Val = Handler.Value;

    // [basic.life]p1:
    //   The lifetime of an object o of type T ends when [...] the storage
    //   which the object occupies is [...] reused by an object that is not
    //   nested within o (6.6.2).
    *Val = APValue();
  } else {
    // Perform the allocation and obtain a pointer to the resulting object.
    Val = Info.createHeapAlloc(E, T: AllocType, LV&: Result);
    if (!Val)
      return false;
  }

  // Run the appropriate form of initialization on the new object.
  if (ValueInit) {
    ImplicitValueInitExpr VIE(AllocType);
    if (!EvaluateInPlace(Result&: *Val, Info, This: Result, E: &VIE))
      return false;
  } else if (ResizedArrayILE) {
    if (!EvaluateArrayNewInitList(Info, This&: Result, Result&: *Val, ILE: ResizedArrayILE,
                                  AllocType))
      return false;
  } else if (ResizedArrayCCE) {
    if (!EvaluateArrayNewConstructExpr(Info, This&: Result, Result&: *Val, CCE: ResizedArrayCCE,
                                       AllocType))
      return false;
  } else if (Init) {
    if (!EvaluateInPlace(Result&: *Val, Info, This: Result, E: Init))
      return false;
  } else if (!handleDefaultInitValue(T: AllocType, Result&: *Val)) {
    return false;
  }

  // Array new returns a pointer to the first element, not a pointer to the
  // array.
  if (auto *AT = AllocType->getAsArrayTypeUnsafe())
    Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val: AT));

  return true;
}
10898//===----------------------------------------------------------------------===//
10899// Member Pointer Evaluation
10900//===----------------------------------------------------------------------===//
10901
namespace {
/// Evaluator for expressions of member-pointer type. The computed value is
/// written into the MemberPtr supplied at construction time.
class MemberPointerExprEvaluator
  : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
  MemberPtr &Result; // Destination for the evaluated member pointer.

  // Record a member pointer designating D (null D yields a null member
  // pointer).
  bool Success(const ValueDecl *D) {
    Result = MemberPtr(D);
    return true;
  }
public:

  MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
    : ExprEvaluatorBaseTy(Info), Result(Result) {}

  // Adopt an already-computed APValue as the result.
  bool Success(const APValue &V, const Expr *E) {
    Result.setFrom(V);
    return true;
  }
  // Zero-initializing a member pointer produces the null member pointer.
  bool ZeroInitialization(const Expr *E) {
    return Success(D: (const ValueDecl*)nullptr);
  }

  bool VisitCastExpr(const CastExpr *E);
  bool VisitUnaryAddrOf(const UnaryOperator *E);
};
} // end anonymous namespace
10928
10929static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
10930 EvalInfo &Info) {
10931 assert(!E->isValueDependent());
10932 assert(E->isPRValue() && E->getType()->isMemberPointerType());
10933 return MemberPointerExprEvaluator(Info, Result).Visit(S: E);
10934}
10935
/// Evaluate a cast producing a member pointer, adjusting the stored
/// inheritance path for base<->derived member-pointer conversions.
bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_NullToMemberPointer:
    // Evaluate the operand for side-effect tracking, then produce null.
    VisitIgnoredValue(E: E->getSubExpr());
    return ZeroInitialization(E);

  case CK_BaseToDerivedMemberPointer: {
    if (!Visit(S: E->getSubExpr()))
      return false;
    if (E->path_empty())
      return true;
    // Base-to-derived member pointer casts store the path in derived-to-base
    // order, so iterate backwards. The CXXBaseSpecifier also provides us with
    // the wrong end of the derived->base arc, so stagger the path by one class.
    typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
    for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
         PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
      const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
      if (!Result.castToDerived(Derived))
        return Error(E);
    }
    // Final step: cast to the class named by the destination type itself.
    if (!Result.castToDerived(Derived: E->getType()
                                  ->castAs<MemberPointerType>()
                                  ->getMostRecentCXXRecordDecl()))
      return Error(E);
    return true;
  }

  case CK_DerivedToBaseMemberPointer:
    if (!Visit(S: E->getSubExpr()))
      return false;
    // Walk the derived-to-base path forwards, adjusting at each step.
    for (CastExpr::path_const_iterator PathI = E->path_begin(),
         PathE = E->path_end(); PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
      const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
      if (!Result.castToBase(Base))
        return Error(E);
    }
    return true;
  }
}
10981
10982bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
10983 // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
10984 // member can be formed.
10985 return Success(D: cast<DeclRefExpr>(Val: E->getSubExpr())->getDecl());
10986}
10987
10988//===----------------------------------------------------------------------===//
10989// Record Evaluation
10990//===----------------------------------------------------------------------===//
10991
namespace {
  /// Evaluates prvalues of record (class/struct/union) type, constructing
  /// the object designated by 'This' and storing its value into 'Result'.
  class RecordExprEvaluator
  : public ExprEvaluatorBase<RecordExprEvaluator> {
    const LValue &This;
    APValue &Result;
  public:

    RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
      : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}

    /// Adopt an already-computed value as the result.
    bool Success(const APValue &V, const Expr *E) {
      Result = V;
      return true;
    }
    bool ZeroInitialization(const Expr *E) {
      return ZeroInitialization(E, T: E->getType());
    }
    bool ZeroInitialization(const Expr *E, QualType T);

    bool VisitCallExpr(const CallExpr *E) {
      // Construct directly into 'This' so the callee can see the result slot.
      return handleCallExpr(E, Result, ResultSlot: &This);
    }
    bool VisitCastExpr(const CastExpr *E);
    bool VisitInitListExpr(const InitListExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
      return VisitCXXConstructExpr(E, T: E->getType());
    }
    bool VisitLambdaExpr(const LambdaExpr *E);
    bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
    bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
    bool VisitBinCmp(const BinaryOperator *E);
    bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
    bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
                                         ArrayRef<Expr *> Args);
  };
}
11029
11030/// Perform zero-initialization on an object of non-union class type.
11031/// C++11 [dcl.init]p5:
11032/// To zero-initialize an object or reference of type T means:
11033/// [...]
11034/// -- if T is a (possibly cv-qualified) non-union class type,
11035/// each non-static data member and each base-class subobject is
11036/// zero-initialized
static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
                                          const RecordDecl *RD,
                                          const LValue &This, APValue &Result) {
  assert(!RD->isUnion() && "Expected non-union class type");
  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD);
  // Start from an uninitialized struct value with one slot per base class and
  // per field.
  Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
                   RD->getNumFields());

  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);

  // Recursively zero-initialize each base-class subobject.
  if (CD) {
    unsigned Index = 0;
    for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
           End = CD->bases_end(); I != End; ++I, ++Index) {
      const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
      LValue Subobject = This;
      if (!HandleLValueDirectBase(Info, E, Obj&: Subobject, Derived: CD, Base, RL: &Layout))
        return false;
      if (!HandleClassZeroInitialization(Info, E, RD: Base, This: Subobject,
                                         Result&: Result.getStructBase(i: Index)))
        return false;
    }
  }

  // Zero-initialize each non-static data member by evaluating an implicit
  // value-initialization of its type in place.
  for (const auto *I : RD->fields()) {
    // -- if T is a reference type, no initialization is performed.
    if (I->isUnnamedBitField() || I->getType()->isReferenceType())
      continue;

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: I, RL: &Layout))
      return false;

    ImplicitValueInitExpr VIE(I->getType());
    if (!EvaluateInPlace(
            Result&: Result.getStructField(i: I->getFieldIndex()), Info, This: Subobject, E: &VIE))
      return false;
  }

  return true;
}
11079
/// Zero-initialize an object of record type T. For a union, only the first
/// non-static named data member is zero-initialized (C++11 [dcl.init]p5);
/// classes with virtual bases are rejected with a diagnostic.
bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
  const auto *RD = T->castAsRecordDecl();
  if (RD->isInvalidDecl()) return false;
  if (RD->isUnion()) {
    // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
    // object's first non-static named data member is zero-initialized
    RecordDecl::field_iterator I = RD->field_begin();
    while (I != RD->field_end() && (*I)->isUnnamedBitField())
      ++I;
    if (I == RD->field_end()) {
      // The union has no named members; the result carries no active member.
      Result = APValue((const FieldDecl*)nullptr);
      return true;
    }

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: *I))
      return false;
    Result = APValue(*I);
    ImplicitValueInitExpr VIE(I->getType());
    return EvaluateInPlace(Result&: Result.getUnionValue(), Info, This: Subobject, E: &VIE);
  }

  if (isa<CXXRecordDecl>(Val: RD) && cast<CXXRecordDecl>(Val: RD)->getNumVBases()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_virtual_base) << RD;
    return false;
  }

  return HandleClassZeroInitialization(Info, E, RD, This, Result);
}
11109
/// Evaluate a cast whose result is a record value.
bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_ConstructorConversion:
    // The conversion is performed by the constructor call in the operand.
    return Visit(S: E->getSubExpr());

  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase: {
    APValue DerivedObject;
    if (!Evaluate(Result&: DerivedObject, Info, E: E->getSubExpr()))
      return false;
    if (!DerivedObject.isStruct())
      return Error(E: E->getSubExpr());

    // Derived-to-base rvalue conversion: just slice off the derived part.
    APValue *Value = &DerivedObject;
    const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
    for (CastExpr::path_const_iterator PathI = E->path_begin(),
           PathE = E->path_end(); PathI != PathE; ++PathI) {
      assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
      const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
      Value = &Value->getStructBase(i: getBaseIndex(Derived: RD, Base));
      RD = Base;
    }
    Result = *Value;
    return true;
  }
  case CK_HLSLAggregateSplatCast: {
    APValue Val;
    QualType ValTy;

    if (!hlslAggSplatHelper(Info, E: E->getSubExpr(), SrcVal&: Val, SrcTy&: ValTy))
      return false;

    // Replicate the single source value into every element slot of the
    // destination aggregate.
    unsigned NEls = elementwiseSize(Info, BaseTy: E->getType());
    // splat our Val
    SmallVector<APValue> SplatEls(NEls, Val);
    SmallVector<QualType> SplatType(NEls, ValTy);

    // cast the elements and construct our struct result
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SplatEls,
                            ElTypes&: SplatType))
      return false;

    return true;
  }
  case CK_HLSLElementwiseCast: {
    SmallVector<APValue> SrcEls;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: E->getSubExpr(), DestTy: E->getType(), SrcVals&: SrcEls,
                                   SrcTypes))
      return false;

    // cast the elements and construct our struct result
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SrcEls,
                            ElTypes&: SrcTypes))
      return false;

    return true;
  }
  }
}
11177
11178bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
11179 if (E->isTransparent())
11180 return Visit(S: E->getInit(Init: 0));
11181 return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->inits());
11182}
11183
/// Evaluate aggregate initialization of a record from either a braced init
/// list or a C++20 parenthesized init list: bases are initialized in
/// declaration order first, then fields; fields without a corresponding
/// initializer are implicitly value-initialized.
bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
    const Expr *ExprToVisit, ArrayRef<Expr *> Args) {
  const auto *RD = ExprToVisit->getType()->castAsRecordDecl();
  if (RD->isInvalidDecl()) return false;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD);
  auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD);

  // Register this object as being under construction for the duration of
  // the evaluation.
  EvalInfo::EvaluatingConstructorRAII EvalObj(
      Info,
      ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries},
      CXXRD && CXXRD->getNumBases());

  if (RD->isUnion()) {
    // For a union, at most one member (the "initialized field") is set.
    const FieldDecl *Field;
    if (auto *ILE = dyn_cast<InitListExpr>(Val: ExprToVisit)) {
      Field = ILE->getInitializedFieldInUnion();
    } else if (auto *PLIE = dyn_cast<CXXParenListInitExpr>(Val: ExprToVisit)) {
      Field = PLIE->getInitializedFieldInUnion();
    } else {
      llvm_unreachable(
          "Expression is neither an init list nor a C++ paren list");
    }

    Result = APValue(Field);
    if (!Field)
      return true;

    // If the initializer list for a union does not contain any elements, the
    // first element of the union is value-initialized.
    // FIXME: The element should be initialized from an initializer list.
    //        Is this difference ever observable for initializer lists which
    //        we don't build?
    ImplicitValueInitExpr VIE(Field->getType());
    const Expr *InitExpr = Args.empty() ? &VIE : Args[0];

    LValue Subobject = This;
    if (!HandleLValueMember(Info, E: InitExpr, LVal&: Subobject, FD: Field, RL: &Layout))
      return false;

    // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
    ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
                                  isa<CXXDefaultInitExpr>(Val: InitExpr));

    if (EvaluateInPlace(Result&: Result.getUnionValue(), Info, This: Subobject, E: InitExpr)) {
      if (Field->isBitField())
        return truncateBitfieldValue(Info, E: InitExpr, Value&: Result.getUnionValue(),
                                     FD: Field);
      return true;
    }

    return false;
  }

  if (!Result.hasValue())
    Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0,
                     RD->getNumFields());
  unsigned ElementNo = 0;
  bool Success = true;

  // Initialize base classes.
  if (CXXRD && CXXRD->getNumBases()) {
    for (const auto &Base : CXXRD->bases()) {
      assert(ElementNo < Args.size() && "missing init for base class");
      const Expr *Init = Args[ElementNo];

      LValue Subobject = This;
      if (!HandleLValueBase(Info, E: Init, Obj&: Subobject, DerivedDecl: CXXRD, Base: &Base))
        return false;

      APValue &FieldVal = Result.getStructBase(i: ElementNo);
      if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: Init)) {
        // Keep evaluating the remaining initializers if requested, so that
        // we can emit additional diagnostics.
        if (!Info.noteFailure())
          return false;
        Success = false;
      }
      ++ElementNo;
    }

    EvalObj.finishedConstructingBases();
  }

  // Initialize members.
  for (const auto *Field : RD->fields()) {
    // Anonymous bit-fields are not considered members of the class for
    // purposes of aggregate initialization.
    if (Field->isUnnamedBitField())
      continue;

    LValue Subobject = This;

    bool HaveInit = ElementNo < Args.size();

    // FIXME: Diagnostics here should point to the end of the initializer
    // list, not the start.
    if (!HandleLValueMember(Info, E: HaveInit ? Args[ElementNo] : ExprToVisit,
                            LVal&: Subobject, FD: Field, RL: &Layout))
      return false;

    // Perform an implicit value-initialization for members beyond the end of
    // the initializer list. (When an explicit initializer is present, VIE is
    // an unused placeholder.)
    ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
    const Expr *Init = HaveInit ? Args[ElementNo++] : &VIE;

    if (Field->getType()->isIncompleteArrayType()) {
      if (auto *CAT = Info.Ctx.getAsConstantArrayType(T: Init->getType())) {
        if (!CAT->isZeroSize()) {
          // Bail out for now. This might sort of "work", but the rest of the
          // code isn't really prepared to handle it.
          Info.FFDiag(E: Init, DiagId: diag::note_constexpr_unsupported_flexible_array);
          return false;
        }
      }
    }

    // Temporarily override This, in case there's a CXXDefaultInitExpr in here.
    ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
                                  isa<CXXDefaultInitExpr>(Val: Init));

    APValue &FieldVal = Result.getStructField(i: Field->getFieldIndex());
    if (Field->getType()->isReferenceType()) {
      LValue Result;
      if (!EvaluateInitForDeclOfReferenceType(Info, D: Field, Init, Result,
                                              Val&: FieldVal)) {
        if (!Info.noteFailure())
          return false;
        Success = false;
      }
    } else if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: Init) ||
               (Field->isBitField() &&
                !truncateBitfieldValue(Info, E: Init, Value&: FieldVal, FD: Field))) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }
  }

  EvalObj.finishedConstructingFields();

  return Success;
}
11324
/// Evaluate a constructor call producing an object of type \p T into 'This'.
/// Trivial default constructors, zero-initialization, and elidable copy/move
/// constructions are handled without invoking the constructor body.
bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
                                                QualType T) {
  // Note that E's type is not necessarily the type of our class here; we might
  // be initializing an array element instead.
  const CXXConstructorDecl *FD = E->getConstructor();
  if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;

  bool ZeroInit = E->requiresZeroInitialization();
  if (CheckTrivialDefaultConstructor(Info, Loc: E->getExprLoc(), CD: FD, IsValueInitialization: ZeroInit)) {
    // A trivial default constructor performs no actual initialization.
    if (ZeroInit)
      return ZeroInitialization(E, T);

    return handleDefaultInitValue(T, Result);
  }

  const FunctionDecl *Definition = nullptr;
  auto Body = FD->getBody(Definition);

  if (!CheckConstexprFunction(Info, CallLoc: E->getExprLoc(), Declaration: FD, Definition, Body))
    return false;

  // Avoid materializing a temporary for an elidable copy/move constructor.
  if (E->isElidable() && !ZeroInit) {
    // FIXME: This only handles the simplest case, where the source object
    //        is passed directly as the first argument to the constructor.
    //        This should also handle stepping though implicit casts and
    //        and conversion sequences which involve two steps, with a
    //        conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(Arg: 0);
    assert(SrcObj->isTemporaryObject(Info.Ctx, FD->getParent()));
    assert(Info.Ctx.hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    if (const MaterializeTemporaryExpr *ME =
          dyn_cast<MaterializeTemporaryExpr>(Val: SrcObj))
      return Visit(S: ME->getSubExpr());
  }

  // Zero-initialize first, then run the constructor over the result.
  if (ZeroInit && !ZeroInitialization(E, T))
    return false;

  auto Args = ArrayRef(E->getArgs(), E->getNumArgs());
  return HandleConstructorCall(E, This, Args,
                               Definition: cast<CXXConstructorDecl>(Val: Definition), Info,
                               Result);
}
11369
/// Evaluate construction via an inheriting constructor: the current call's
/// arguments are forwarded unchanged to the inherited constructor's
/// definition.
bool RecordExprEvaluator::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  if (!Info.CurrentCall) {
    // Without a call frame there are no arguments to forward; this only
    // happens while checking a potential constant expression.
    assert(Info.checkingPotentialConstantExpression());
    return false;
  }

  const CXXConstructorDecl *FD = E->getConstructor();
  if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl())
    return false;

  const FunctionDecl *Definition = nullptr;
  auto Body = FD->getBody(Definition);

  if (!CheckConstexprFunction(Info, CallLoc: E->getExprLoc(), Declaration: FD, Definition, Body))
    return false;

  return HandleConstructorCall(E, This, Call: Info.CurrentCall->Arguments,
                               Definition: cast<CXXConstructorDecl>(Val: Definition), Info,
                               Result);
}
11391
/// Construct a std::initializer_list<E> value from its backing array. The
/// first field receives a pointer to the first array element; the second
/// receives either the length or an end pointer, depending on how the
/// library declares the class.
bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
    const CXXStdInitializerListExpr *E) {
  const ConstantArrayType *ArrayType =
      Info.Ctx.getAsConstantArrayType(T: E->getSubExpr()->getType());

  LValue Array;
  if (!EvaluateLValue(E: E->getSubExpr(), Result&: Array, Info))
    return false;

  assert(ArrayType && "unexpected type for array initializer");

  // Get a pointer to the first element of the array.
  Array.addArray(Info, E, CAT: ArrayType);

  // FIXME: What if the initializer_list type has base classes, etc?
  Result = APValue(APValue::UninitStruct(), 0, 2);
  Array.moveInto(V&: Result.getStructField(i: 0));

  auto *Record = E->getType()->castAsRecordDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
                              ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");
  ++Field;
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  if (Info.Ctx.hasSameType(T1: Field->getType(), T2: Info.Ctx.getSizeType())) {
    // Length.
    Result.getStructField(i: 1) = APValue(APSInt(ArrayType->getSize()));
  } else {
    // End pointer.
    assert(Info.Ctx.hasSameType(Field->getType()->getPointeeType(),
                                ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    if (!HandleLValueArrayAdjustment(Info, E, LVal&: Array,
                                     EltTy: ArrayType->getElementType(),
                                     Adjustment: ArrayType->getZExtSize()))
      return false;
    Array.moveInto(V&: Result.getStructField(i: 1));
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");

  return true;
}
11440
/// Evaluate a lambda expression by initializing each field of the closure
/// object from the corresponding capture initializer, in order.
bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
  const CXXRecordDecl *ClosureClass = E->getLambdaClass();
  if (ClosureClass->isInvalidDecl())
    return false;

  const size_t NumFields = ClosureClass->getNumFields();

  assert(NumFields == (size_t)std::distance(E->capture_init_begin(),
                                            E->capture_init_end()) &&
         "The number of lambda capture initializers should equal the number of "
         "fields within the closure type");

  Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields);
  // Iterate through all the lambda's closure object's fields and initialize
  // them.
  auto *CaptureInitIt = E->capture_init_begin();
  bool Success = true;
  const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: ClosureClass);
  for (const auto *Field : ClosureClass->fields()) {
    assert(CaptureInitIt != E->capture_init_end());
    // Get the initializer for this field
    Expr *const CurFieldInit = *CaptureInitIt++;

    // If there is no initializer, either this is a VLA or an error has
    // occurred.
    if (!CurFieldInit || CurFieldInit->containsErrors())
      return Error(E);

    LValue Subobject = This;

    if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: Field, RL: &Layout))
      return false;

    APValue &FieldVal = Result.getStructField(i: Field->getFieldIndex());
    if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: CurFieldInit)) {
      // Keep evaluating remaining captures if requested, for diagnostics.
      if (!Info.keepEvaluatingAfterFailure())
        return false;
      Success = false;
    }
  }
  return Success;
}
11483
11484static bool EvaluateRecord(const Expr *E, const LValue &This,
11485 APValue &Result, EvalInfo &Info) {
11486 assert(!E->isValueDependent());
11487 assert(E->isPRValue() && E->getType()->isRecordType() &&
11488 "can't evaluate expression as a record rvalue");
11489 return RecordExprEvaluator(Info, This, Result).Visit(S: E);
11490}
11491
11492//===----------------------------------------------------------------------===//
11493// Temporary Evaluation
11494//
11495// Temporaries are represented in the AST as rvalues, but generally behave like
11496// lvalues. The full-object of which the temporary is a subobject is implicitly
11497// materialized so that a reference can bind to it.
11498//===----------------------------------------------------------------------===//
namespace {
/// Evaluates class-type prvalues that behave like lvalues: the value is
/// materialized into a temporary in the current call frame, and the result
/// is an LValue designating that temporary.
class TemporaryExprEvaluator
  : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
public:
  TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
    LValueExprEvaluatorBaseTy(Info, Result, false) {}

  /// Visit an expression which constructs the value of this temporary.
  bool VisitConstructExpr(const Expr *E) {
    // Allocate a full-expression-scoped temporary and evaluate into it.
    APValue &Value = Info.CurrentCall->createTemporary(
        Key: E, T: E->getType(), Scope: ScopeKind::FullExpression, LV&: Result);
    return EvaluateInPlace(Result&: Value, Info, This: Result, E);
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return LValueExprEvaluatorBaseTy::VisitCastExpr(E);

    case CK_ConstructorConversion:
      return VisitConstructExpr(E: E->getSubExpr());
    }
  }
  bool VisitInitListExpr(const InitListExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCallExpr(const CallExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
    return VisitConstructExpr(E);
  }
  bool VisitLambdaExpr(const LambdaExpr *E) {
    return VisitConstructExpr(E);
  }
};
} // end anonymous namespace
11539
11540/// Evaluate an expression of record type as a temporary.
11541static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
11542 assert(!E->isValueDependent());
11543 assert(E->isPRValue() && E->getType()->isRecordType());
11544 return TemporaryExprEvaluator(Info, Result).Visit(S: E);
11545}
11546
11547//===----------------------------------------------------------------------===//
11548// Vector Evaluation
11549//===----------------------------------------------------------------------===//
11550
namespace {
  /// Evaluates vector-typed prvalues into an APValue of vector kind.
  class VectorExprEvaluator
  : public ExprEvaluatorBase<VectorExprEvaluator> {
    APValue &Result;
  public:

    VectorExprEvaluator(EvalInfo &info, APValue &Result)
      : ExprEvaluatorBaseTy(info), Result(Result) {}

    /// Build the result vector from a list of per-element values.
    bool Success(ArrayRef<APValue> V, const Expr *E) {
      assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
      // FIXME: remove this APValue copy.
      Result = APValue(V.data(), V.size());
      return true;
    }
    /// Adopt an already-computed vector value as the result.
    bool Success(const APValue &V, const Expr *E) {
      assert(V.isVector());
      Result = V;
      return true;
    }
    bool ZeroInitialization(const Expr *E);

    // __real v is just v for vectors.
    bool VisitUnaryReal(const UnaryOperator *E)
      { return Visit(S: E->getSubExpr()); }
    bool VisitCastExpr(const CastExpr* E);
    bool VisitInitListExpr(const InitListExpr *E);
    bool VisitUnaryImag(const UnaryOperator *E);
    bool VisitBinaryOperator(const BinaryOperator *E);
    bool VisitUnaryOperator(const UnaryOperator *E);
    bool VisitCallExpr(const CallExpr *E);
    bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
    bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);

    // FIXME: Missing: conditional operator (for GNU
    //                 conditional select), ExtVectorElementExpr
  };
} // end anonymous namespace
11588
11589static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
11590 assert(E->isPRValue() && E->getType()->isVectorType() &&
11591 "not a vector prvalue");
11592 return VectorExprEvaluator(Info, Result).Visit(S: E);
11593}
11594
11595static llvm::APInt ConvertBoolVectorToInt(const APValue &Val) {
11596 assert(Val.isVector() && "expected vector APValue");
11597 unsigned NumElts = Val.getVectorLength();
11598
11599 // Each element is one bit, so create an integer with NumElts bits.
11600 llvm::APInt Result(NumElts, 0);
11601
11602 for (unsigned I = 0; I < NumElts; ++I) {
11603 const APValue &Elt = Val.getVectorElt(I);
11604 assert(Elt.isInt() && "expected integer element in bool vector");
11605
11606 if (Elt.getInt().getBoolValue())
11607 Result.setBit(I);
11608 }
11609
11610 return Result;
11611}
11612
/// Evaluate a cast whose result is a vector value.
bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const VectorType *VTy = E->getType()->castAs<VectorType>();
  unsigned NElts = VTy->getNumElements();

  const Expr *SE = E->getSubExpr();
  QualType SETy = SE->getType();

  switch (E->getCastKind()) {
  case CK_VectorSplat: {
    // Evaluate the scalar operand once, then replicate it into each element.
    APValue Val = APValue();
    if (SETy->isIntegerType()) {
      APSInt IntResult;
      if (!EvaluateInteger(E: SE, Result&: IntResult, Info))
        return false;
      Val = APValue(std::move(IntResult));
    } else if (SETy->isRealFloatingType()) {
      APFloat FloatResult(0.0);
      if (!EvaluateFloat(E: SE, Result&: FloatResult, Info))
        return false;
      Val = APValue(std::move(FloatResult));
    } else {
      return Error(E);
    }

    // Splat and create vector APValue.
    SmallVector<APValue, 4> Elts(NElts, Val);
    return Success(V: Elts, E);
  }
  case CK_BitCast: {
    APValue SVal;
    if (!Evaluate(Result&: SVal, Info, E: SE))
      return false;

    if (!SVal.isInt() && !SVal.isFloat() && !SVal.isVector()) {
      // Give up if the input isn't an int, float, or vector. For example, we
      // reject "(v4i16)(intptr_t)&a".
      Info.FFDiag(E, DiagId: diag::note_constexpr_invalid_cast)
          << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
          << Info.Ctx.getLangOpts().CPlusPlus;
      return false;
    }

    // Reinterpret the source's object representation as the vector type.
    if (!handleRValueToRValueBitCast(Info, DestValue&: Result, SourceRValue: SVal, BCE: E))
      return false;

    return true;
  }
  case CK_HLSLVectorTruncation: {
    // Keep only the first NElts elements of the source vector.
    APValue Val;
    SmallVector<APValue, 4> Elements;
    if (!EvaluateVector(E: SE, Result&: Val, Info))
      return Error(E);
    for (unsigned I = 0; I < NElts; I++)
      Elements.push_back(Elt: Val.getVectorElt(I));
    return Success(V: Elements, E);
  }
  case CK_HLSLMatrixTruncation: {
    // TODO: See #168935. Add matrix truncation support to expr constant.
    return Error(E);
  }
  case CK_HLSLAggregateSplatCast: {
    APValue Val;
    QualType ValTy;

    if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy))
      return false;

    // cast our Val once.
    APValue Result;
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    if (!handleScalarCast(Info, FPO, E, SourceTy: ValTy, DestTy: VTy->getElementType(), Original: Val,
                          Result))
      return false;

    // Then replicate the converted value across all elements.
    SmallVector<APValue, 4> SplatEls(NElts, Result);
    return Success(V: SplatEls, E);
  }
  case CK_HLSLElementwiseCast: {
    SmallVector<APValue> SrcVals;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals, SrcTypes))
      return false;

    // Convert each flattened source element to the vector's element type.
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    SmallVector<QualType, 4> DestTypes(NElts, VTy->getElementType());
    SmallVector<APValue, 4> ResultEls(NElts);
    if (!handleElementwiseCast(Info, E, FPO, Elements&: SrcVals, SrcTypes, DestTypes,
                               Results&: ResultEls))
      return false;
    return Success(V: ResultEls, E);
  }
  default:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);
  }
}
11709
/// Evaluate a vector initializer list, flattening any nested vector
/// initializers and zero-filling missing trailing elements for GCC
/// compatibility.
bool
VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
  const VectorType *VT = E->getType()->castAs<VectorType>();
  unsigned NumInits = E->getNumInits();
  unsigned NumElements = VT->getNumElements();

  QualType EltTy = VT->getElementType();
  SmallVector<APValue, 4> Elements;

  // MFloat8 type doesn't have constants and thus constant folding
  // is impossible.
  if (EltTy->isMFloat8Type())
    return false;

  // The number of initializers can be less than the number of
  // vector elements. For OpenCL, this can be due to nested vector
  // initialization. For GCC compatibility, missing trailing elements
  // should be initialized with zeroes.
  unsigned CountInits = 0, CountElts = 0;
  while (CountElts < NumElements) {
    // Handle nested vector initialization.
    if (CountInits < NumInits
        && E->getInit(Init: CountInits)->getType()->isVectorType()) {
      // A vector initializer contributes all of its elements at once.
      APValue v;
      if (!EvaluateVector(E: E->getInit(Init: CountInits), Result&: v, Info))
        return Error(E);
      unsigned vlen = v.getVectorLength();
      for (unsigned j = 0; j < vlen; j++)
        Elements.push_back(Elt: v.getVectorElt(I: j));
      CountElts += vlen;
    } else if (EltTy->isIntegerType()) {
      llvm::APSInt sInt(32);
      if (CountInits < NumInits) {
        if (!EvaluateInteger(E: E->getInit(Init: CountInits), Result&: sInt, Info))
          return false;
      } else // trailing integer zero.
        sInt = Info.Ctx.MakeIntValue(Value: 0, Type: EltTy);
      Elements.push_back(Elt: APValue(sInt));
      CountElts++;
    } else {
      llvm::APFloat f(0.0);
      if (CountInits < NumInits) {
        if (!EvaluateFloat(E: E->getInit(Init: CountInits), Result&: f, Info))
          return false;
      } else // trailing float zero.
        f = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: EltTy));
      Elements.push_back(Elt: APValue(f));
      CountElts++;
    }
    CountInits++;
  }
  return Success(V: Elements, E);
}
11763
11764bool
11765VectorExprEvaluator::ZeroInitialization(const Expr *E) {
11766 const auto *VT = E->getType()->castAs<VectorType>();
11767 QualType EltTy = VT->getElementType();
11768 APValue ZeroElement;
11769 if (EltTy->isIntegerType())
11770 ZeroElement = APValue(Info.Ctx.MakeIntValue(Value: 0, Type: EltTy));
11771 else
11772 ZeroElement =
11773 APValue(APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: EltTy)));
11774
11775 SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
11776 return Success(V: Elements, E);
11777}
11778
/// __imag on a vector: evaluate the operand for its side effects, then
/// produce an all-zero vector.
bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
  VisitIgnoredValue(E: E->getSubExpr());
  return ZeroInitialization(E);
}
11783
/// Evaluate an elementwise binary operator on vectors: evaluate both
/// operands, then combine them with handleVectorVectorBinOp. The comma
/// operator is delegated to the base class.
bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  BinaryOperatorKind Op = E->getOpcode();
  assert(Op != BO_PtrMemD && Op != BO_PtrMemI && Op != BO_Cmp &&
         "Operation not supported on vector types");

  if (Op == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  assert(LHS->getType()->isVectorType() && RHS->getType()->isVectorType() &&
         "Must both be vector types");
  // Checking JUST the types are the same would be fine, except shifts don't
  // need to have their types be the same (since you always shift by an int).
  assert(LHS->getType()->castAs<VectorType>()->getNumElements() ==
             E->getType()->castAs<VectorType>()->getNumElements() &&
         RHS->getType()->castAs<VectorType>()->getNumElements() ==
             E->getType()->castAs<VectorType>()->getNumElements() &&
         "All operands must be the same size.");

  APValue LHSValue;
  APValue RHSValue;
  // Evaluate the RHS even if the LHS failed, so its diagnostics are emitted.
  bool LHSOK = Evaluate(Result&: LHSValue, Info, E: LHS);
  if (!LHSOK && !Info.noteFailure())
    return false;
  if (!Evaluate(Result&: RHSValue, Info, E: RHS) || !LHSOK)
    return false;

  // The combined result is accumulated into LHSValue.
  if (!handleVectorVectorBinOp(Info, E, Opcode: Op, LHSValue, RHSValue))
    return false;

  return Success(V: LHSValue, E);
}
11818
11819static std::optional<APValue> handleVectorUnaryOperator(ASTContext &Ctx,
11820 QualType ResultTy,
11821 UnaryOperatorKind Op,
11822 APValue Elt) {
11823 switch (Op) {
11824 case UO_Plus:
11825 // Nothing to do here.
11826 return Elt;
11827 case UO_Minus:
11828 if (Elt.getKind() == APValue::Int) {
11829 Elt.getInt().negate();
11830 } else {
11831 assert(Elt.getKind() == APValue::Float &&
11832 "Vector can only be int or float type");
11833 Elt.getFloat().changeSign();
11834 }
11835 return Elt;
11836 case UO_Not:
11837 // This is only valid for integral types anyway, so we don't have to handle
11838 // float here.
11839 assert(Elt.getKind() == APValue::Int &&
11840 "Vector operator ~ can only be int");
11841 Elt.getInt().flipAllBits();
11842 return Elt;
11843 case UO_LNot: {
11844 if (Elt.getKind() == APValue::Int) {
11845 Elt.getInt() = !Elt.getInt();
11846 // operator ! on vectors returns -1 for 'truth', so negate it.
11847 Elt.getInt().negate();
11848 return Elt;
11849 }
11850 assert(Elt.getKind() == APValue::Float &&
11851 "Vector can only be int or float type");
11852 // Float types result in an int of the same size, but -1 for true, or 0 for
11853 // false.
11854 APSInt EltResult{Ctx.getIntWidth(T: ResultTy),
11855 ResultTy->isUnsignedIntegerType()};
11856 if (Elt.getFloat().isZero())
11857 EltResult.setAllBits();
11858 else
11859 EltResult.clearAllBits();
11860
11861 return APValue{EltResult};
11862 }
11863 default:
11864 // FIXME: Implement the rest of the unary operators.
11865 return std::nullopt;
11866 }
11867}
11868
bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
  Expr *SubExpr = E->getSubExpr();
  const auto *VD = SubExpr->getType()->castAs<VectorType>();
  // The result element type differs in the case of logically negating a
  // floating-point vector, since the result type is a vector of the
  // equivalent sized integer.
  const QualType ResultEltTy = VD->getElementType();
  UnaryOperatorKind Op = E->getOpcode();

  APValue SubExprValue;
  if (!Evaluate(Result&: SubExprValue, Info, E: SubExpr))
    return false;

  // FIXME: This vector evaluator someday needs to be changed to be LValue
  // aware/keep LValue information around, rather than dealing with just vector
  // types directly. Until then, we cannot handle cases where the operand to
  // these unary operators is an LValue. The only case I've been able to see
  // cause this is operator++ assigning to a member expression (only valid in
  // altivec compilations) in C mode, so this shouldn't limit us too much.
  if (SubExprValue.isLValue())
    return false;

  assert(SubExprValue.getVectorLength() == VD->getNumElements() &&
         "Vector length doesn't match type?");

  // Fold the operator one element at a time; any unsupported element kind or
  // operator aborts the whole fold.
  SmallVector<APValue, 4> ResultElements;
  for (unsigned EltNum = 0; EltNum < VD->getNumElements(); ++EltNum) {
    std::optional<APValue> Elt = handleVectorUnaryOperator(
        Ctx&: Info.Ctx, ResultTy: ResultEltTy, Op, Elt: SubExprValue.getVectorElt(I: EltNum));
    if (!Elt)
      return false;
    ResultElements.push_back(Elt: *Elt);
  }
  return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
}
11904
/// Convert one vector element from \p SourceTy to \p DestTy as part of an
/// elementwise vector conversion (e.g. __builtin_convertvector).
/// On an unsupported type combination a diagnostic is emitted and false is
/// returned.
static bool handleVectorElementCast(EvalInfo &Info, const FPOptions FPO,
                                    const Expr *E, QualType SourceTy,
                                    QualType DestTy, APValue const &Original,
                                    APValue &Result) {
  if (SourceTy->isIntegerType()) {
    if (DestTy->isRealFloatingType()) {
      // Seed Result with a float so the cast helper can write into
      // Result.getFloat() in place.
      Result = APValue(APFloat(0.0));
      return HandleIntToFloatCast(Info, E, FPO, SrcType: SourceTy, Value: Original.getInt(),
                                  DestType: DestTy, Result&: Result.getFloat());
    }
    if (DestTy->isIntegerType()) {
      Result = APValue(
          HandleIntToIntCast(Info, E, DestType: DestTy, SrcType: SourceTy, Value: Original.getInt()));
      return true;
    }
  } else if (SourceTy->isRealFloatingType()) {
    if (DestTy->isRealFloatingType()) {
      // Start from the original so Result.getFloat() holds the source value.
      Result = Original;
      return HandleFloatToFloatCast(Info, E, SrcType: SourceTy, DestType: DestTy,
                                    Result&: Result.getFloat());
    }
    if (DestTy->isIntegerType()) {
      Result = APValue(APSInt());
      return HandleFloatToIntCast(Info, E, SrcType: SourceTy, Value: Original.getFloat(),
                                  DestType: DestTy, Result&: Result.getInt());
    }
  }

  // Neither side is an int/float combination we can fold.
  Info.FFDiag(E, DiagId: diag::err_convertvector_constexpr_unsupported_vector_cast)
      << SourceTy << DestTy;
  return false;
}
11937
/// Evaluate an x86 pack builtin (packss/packus family): each source element
/// is narrowed via \p PackFn, and the results are interleaved per 128-bit
/// lane — all of the LHS lane's elements first, then all of the RHS lane's.
static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result,
                            llvm::function_ref<APInt(const APSInt &)> PackFn) {
  APValue LHS, RHS;
  if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: LHS) ||
      !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: RHS))
    return false;

  unsigned LHSVecLen = LHS.getVectorLength();
  unsigned RHSVecLen = RHS.getVectorLength();

  assert(LHSVecLen != 0 && LHSVecLen == RHSVecLen &&
         "pack builtin LHSVecLen must equal to RHSVecLen");

  const VectorType *VT0 = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
  const unsigned SrcBits = Info.Ctx.getIntWidth(T: VT0->getElementType());

  const VectorType *DstVT = E->getType()->castAs<VectorType>();
  QualType DstElemTy = DstVT->getElementType();
  const bool DstIsUnsigned = DstElemTy->isUnsignedIntegerType();

  // Packing happens independently within each 128-bit lane.
  const unsigned SrcPerLane = 128 / SrcBits;
  const unsigned Lanes = LHSVecLen * SrcBits / 128;

  SmallVector<APValue, 64> Out;
  Out.reserve(N: LHSVecLen + RHSVecLen);

  for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
    unsigned base = Lane * SrcPerLane;
    // LHS elements of this lane, narrowed...
    for (unsigned I = 0; I != SrcPerLane; ++I)
      Out.emplace_back(Args: APValue(
          APSInt(PackFn(LHS.getVectorElt(I: base + I).getInt()), DstIsUnsigned)));
    // ...followed by the RHS elements of the same lane.
    for (unsigned I = 0; I != SrcPerLane; ++I)
      Out.emplace_back(Args: APValue(
          APSInt(PackFn(RHS.getVectorElt(I: base + I).getInt()), DstIsUnsigned)));
  }

  Result = APValue(Out.data(), Out.size());
  return true;
}
11977
/// Shared driver for shuffle-style vector builtins.
///
/// \p GetSourceIndex maps (destination element index, mask) to a pair of
/// (source vector: 0 selects operand A, otherwise operand B; source element
/// index, where a negative index means "zero-fill this destination element").
/// Handles both the immediate-mask and vector-mask forms, with either one
/// data operand (the second operand is duplicated from the first) or two.
static bool evalShuffleGeneric(
    EvalInfo &Info, const CallExpr *Call, APValue &Out,
    llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
        GetSourceIndex) {

  const auto *VT = Call->getType()->getAs<VectorType>();
  if (!VT)
    return false;

  unsigned ShuffleMask = 0;
  APValue A, MaskVector, B;
  bool IsVectorMask = false;
  bool IsSingleOperand = (Call->getNumArgs() == 2);

  if (IsSingleOperand) {
    // (vector, mask) form: both shuffle sources are the same operand.
    QualType MaskType = Call->getArg(Arg: 1)->getType();
    if (MaskType->isVectorType()) {
      IsVectorMask = true;
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: MaskVector))
        return false;
      B = A;
    } else if (MaskType->isIntegerType()) {
      APSInt MaskImm;
      if (!EvaluateInteger(E: Call->getArg(Arg: 1), Result&: MaskImm, Info))
        return false;
      ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A))
        return false;
      B = A;
    } else {
      return false;
    }
  } else {
    // (vector, vector, mask) or (vector, mask-vector, vector) form.
    QualType Arg2Type = Call->getArg(Arg: 2)->getType();
    if (Arg2Type->isVectorType()) {
      IsVectorMask = true;
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: MaskVector) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 2), Result&: B))
        return false;
    } else if (Arg2Type->isIntegerType()) {
      APSInt MaskImm;
      if (!EvaluateInteger(E: Call->getArg(Arg: 2), Result&: MaskImm, Info))
        return false;
      ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue());
      if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) ||
          !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: B))
        return false;
    } else {
      return false;
    }
  }

  unsigned NumElts = VT->getNumElements();
  SmallVector<APValue, 64> ResultElements;
  ResultElements.reserve(N: NumElts);

  for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) {
    // With a vector mask, each destination element carries its own mask.
    if (IsVectorMask) {
      ShuffleMask = static_cast<unsigned>(
          MaskVector.getVectorElt(I: DstIdx).getInt().getZExtValue());
    }
    auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);

    if (SrcIdx < 0) {
      // Zero out this element
      QualType ElemTy = VT->getElementType();
      if (ElemTy->isRealFloatingType()) {
        ResultElements.push_back(
            Elt: APValue(APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: ElemTy))));
      } else if (ElemTy->isIntegerType()) {
        APValue Zero(Info.Ctx.MakeIntValue(Value: 0, Type: ElemTy));
        ResultElements.push_back(Elt: APValue(Zero));
      } else {
        // Other types of fallback logic
        ResultElements.push_back(Elt: APValue());
      }
    } else {
      const APValue &Src = (SrcVecIdx == 0) ? A : B;
      ResultElements.push_back(Elt: Src.getVectorElt(I: SrcIdx));
    }
  }

  Out = APValue(ResultElements.data(), ResultElements.size());
  return true;
}
12065static bool ConvertDoubleToFloatStrict(EvalInfo &Info, const Expr *E,
12066 APFloat OrigVal, APValue &Result) {
12067
12068 if (OrigVal.isInfinity()) {
12069 Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << 0;
12070 return false;
12071 }
12072 if (OrigVal.isNaN()) {
12073 Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << 1;
12074 return false;
12075 }
12076
12077 APFloat Val = OrigVal;
12078 bool LosesInfo = false;
12079 APFloat::opStatus Status = Val.convert(
12080 ToSemantics: APFloat::IEEEsingle(), RM: APFloat::rmNearestTiesToEven, losesInfo: &LosesInfo);
12081
12082 if (LosesInfo || Val.isDenormal()) {
12083 Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict);
12084 return false;
12085 }
12086
12087 if (Status != APFloat::opOK) {
12088 Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
12089 return false;
12090 }
12091
12092 Result = APValue(Val);
12093 return true;
12094}
/// Evaluate an x86-style vector shift whose shift amount is taken from the
/// low 64 bits (low quadword) of a second vector operand.
///
/// \param ShiftOp    performs the shift when the count is in range
///                   (count < element width).
/// \param OverflowOp produces the result when the count is >= the element
///                   width (e.g. zero for logical shifts, sign-fill for
///                   arithmetic shifts).
static bool evalShiftWithCount(
    EvalInfo &Info, const CallExpr *Call, APValue &Out,
    llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
    llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {

  APValue Source, Count;
  if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: Source) ||
      !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: Count))
    return false;

  assert(Call->getNumArgs() == 2);

  QualType SourceTy = Call->getArg(Arg: 0)->getType();
  assert(SourceTy->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());

  QualType DestEltTy = SourceTy->castAs<VectorType>()->getElementType();
  unsigned DestEltWidth = Source.getVectorElt(I: 0).getInt().getBitWidth();
  unsigned DestLen = Source.getVectorLength();
  bool IsDestUnsigned = DestEltTy->isUnsignedIntegerType();
  unsigned CountEltWidth = Count.getVectorElt(I: 0).getInt().getBitWidth();
  unsigned NumBitsInQWord = 64;
  unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
  SmallVector<APValue, 64> Result;
  Result.reserve(N: DestLen);

  // Reassemble the count vector's low quadword into a single 64-bit shift
  // amount, least-significant element first.
  uint64_t CountLQWord = 0;
  for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
    uint64_t Elt = Count.getVectorElt(I: EltIdx).getInt().getZExtValue();
    CountLQWord |= (Elt << (EltIdx * CountEltWidth));
  }

  // Every destination element is shifted by the same amount; out-of-range
  // counts take the overflow path instead.
  for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
    APInt Elt = Source.getVectorElt(I: EltIdx).getInt();
    if (CountLQWord < DestEltWidth) {
      Result.push_back(
          Elt: APValue(APSInt(ShiftOp(Elt, CountLQWord), IsDestUnsigned)));
    } else {
      Result.push_back(
          Elt: APValue(APSInt(OverflowOp(Elt, DestEltWidth), IsDestUnsigned)));
    }
  }
  Out = APValue(Result.data(), Result.size());
  return true;
}
12140
12141std::optional<APFloat> EvalScalarMinMaxFp(const APFloat &A, const APFloat &B,
12142 std::optional<APSInt> RoundingMode,
12143 bool IsMin) {
12144 APSInt DefaultMode(APInt(32, 4), /*isUnsigned=*/true);
12145 if (RoundingMode.value_or(u&: DefaultMode) != 4)
12146 return std::nullopt;
12147 if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
12148 B.isInfinity() || B.isDenormal())
12149 return std::nullopt;
12150 if (A.isZero() && B.isZero())
12151 return B;
12152 return IsMin ? llvm::minimum(A, B) : llvm::maximum(A, B);
12153}
12154
12155bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
12156 if (!IsConstantEvaluatedBuiltinCall(E))
12157 return ExprEvaluatorBaseTy::VisitCallExpr(E);
12158
12159 auto EvaluateBinOpExpr =
12160 [&](llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
12161 APValue SourceLHS, SourceRHS;
12162 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12163 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12164 return false;
12165
12166 auto *DestTy = E->getType()->castAs<VectorType>();
12167 QualType DestEltTy = DestTy->getElementType();
12168 bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
12169 unsigned SourceLen = SourceLHS.getVectorLength();
12170 SmallVector<APValue, 4> ResultElements;
12171 ResultElements.reserve(N: SourceLen);
12172
12173 if (SourceRHS.isInt()) {
12174 const APSInt &RHS = SourceRHS.getInt();
12175 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12176 const APSInt &LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12177 ResultElements.push_back(
12178 Elt: APValue(APSInt(Fn(LHS, RHS), DestUnsigned)));
12179 }
12180 } else {
12181 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12182 const APSInt &LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12183 const APSInt &RHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12184 ResultElements.push_back(
12185 Elt: APValue(APSInt(Fn(LHS, RHS), DestUnsigned)));
12186 }
12187 }
12188 return Success(V: APValue(ResultElements.data(), SourceLen), E);
12189 };
12190
12191 auto EvaluateFpBinOpExpr =
12192 [&](llvm::function_ref<std::optional<APFloat>(
12193 const APFloat &, const APFloat &, std::optional<APSInt>)>
12194 Fn,
12195 bool IsScalar = false) {
12196 assert(E->getNumArgs() == 2 || E->getNumArgs() == 3);
12197 APValue A, B;
12198 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
12199 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B))
12200 return false;
12201
12202 assert(A.isVector() && B.isVector());
12203 assert(A.getVectorLength() == B.getVectorLength());
12204
12205 std::optional<APSInt> RoundingMode;
12206 if (E->getNumArgs() == 3) {
12207 APSInt Imm;
12208 if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
12209 return false;
12210 RoundingMode = Imm;
12211 }
12212
12213 unsigned NumElems = A.getVectorLength();
12214 SmallVector<APValue, 4> ResultElements;
12215 ResultElements.reserve(N: NumElems);
12216
12217 for (unsigned EltNum = 0; EltNum < NumElems; ++EltNum) {
12218 if (IsScalar && EltNum > 0) {
12219 ResultElements.push_back(Elt: A.getVectorElt(I: EltNum));
12220 continue;
12221 }
12222 const APFloat &EltA = A.getVectorElt(I: EltNum).getFloat();
12223 const APFloat &EltB = B.getVectorElt(I: EltNum).getFloat();
12224 std::optional<APFloat> Result = Fn(EltA, EltB, RoundingMode);
12225 if (!Result)
12226 return false;
12227 ResultElements.push_back(Elt: APValue(*Result));
12228 }
12229 return Success(V: APValue(ResultElements.data(), NumElems), E);
12230 };
12231
12232 auto EvaluateScalarFpRoundMaskBinOp =
12233 [&](llvm::function_ref<std::optional<APFloat>(
12234 const APFloat &, const APFloat &, std::optional<APSInt>)>
12235 Fn) {
12236 assert(E->getNumArgs() == 5);
12237 APValue VecA, VecB, VecSrc;
12238 APSInt MaskVal, Rounding;
12239
12240 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) ||
12241 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB) ||
12242 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: VecSrc) ||
12243 !EvaluateInteger(E: E->getArg(Arg: 3), Result&: MaskVal, Info) ||
12244 !EvaluateInteger(E: E->getArg(Arg: 4), Result&: Rounding, Info))
12245 return false;
12246
12247 unsigned NumElems = VecA.getVectorLength();
12248 SmallVector<APValue, 8> ResultElements;
12249 ResultElements.reserve(N: NumElems);
12250
12251 if (MaskVal.getZExtValue() & 1) {
12252 const APFloat &EltA = VecA.getVectorElt(I: 0).getFloat();
12253 const APFloat &EltB = VecB.getVectorElt(I: 0).getFloat();
12254 std::optional<APFloat> Result = Fn(EltA, EltB, Rounding);
12255 if (!Result)
12256 return false;
12257 ResultElements.push_back(Elt: APValue(*Result));
12258 } else {
12259 ResultElements.push_back(Elt: VecSrc.getVectorElt(I: 0));
12260 }
12261
12262 for (unsigned I = 1; I < NumElems; ++I)
12263 ResultElements.push_back(Elt: VecA.getVectorElt(I));
12264
12265 return Success(V: APValue(ResultElements.data(), NumElems), E);
12266 };
12267
12268 auto EvalSelectScalar = [&](unsigned Len) -> bool {
12269 APSInt Mask;
12270 APValue AVal, WVal;
12271 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Mask, Info) ||
12272 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: AVal) ||
12273 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: WVal))
12274 return false;
12275
12276 bool TakeA0 = (Mask.getZExtValue() & 1u) != 0;
12277 SmallVector<APValue, 4> Res;
12278 Res.reserve(N: Len);
12279 Res.push_back(Elt: TakeA0 ? AVal.getVectorElt(I: 0) : WVal.getVectorElt(I: 0));
12280 for (unsigned I = 1; I < Len; ++I)
12281 Res.push_back(Elt: WVal.getVectorElt(I));
12282 APValue V(Res.data(), Res.size());
12283 return Success(V, E);
12284 };
12285
12286 switch (E->getBuiltinCallee()) {
12287 default:
12288 return false;
12289 case Builtin::BI__builtin_elementwise_popcount:
12290 case Builtin::BI__builtin_elementwise_bitreverse: {
12291 APValue Source;
12292 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
12293 return false;
12294
12295 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12296 unsigned SourceLen = Source.getVectorLength();
12297 SmallVector<APValue, 4> ResultElements;
12298 ResultElements.reserve(N: SourceLen);
12299
12300 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12301 APSInt Elt = Source.getVectorElt(I: EltNum).getInt();
12302 switch (E->getBuiltinCallee()) {
12303 case Builtin::BI__builtin_elementwise_popcount:
12304 ResultElements.push_back(Elt: APValue(
12305 APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), Elt.popcount()),
12306 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12307 break;
12308 case Builtin::BI__builtin_elementwise_bitreverse:
12309 ResultElements.push_back(
12310 Elt: APValue(APSInt(Elt.reverseBits(),
12311 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12312 break;
12313 }
12314 }
12315
12316 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12317 }
12318 case Builtin::BI__builtin_elementwise_abs: {
12319 APValue Source;
12320 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
12321 return false;
12322
12323 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12324 unsigned SourceLen = Source.getVectorLength();
12325 SmallVector<APValue, 4> ResultElements;
12326 ResultElements.reserve(N: SourceLen);
12327
12328 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12329 APValue CurrentEle = Source.getVectorElt(I: EltNum);
12330 APValue Val = DestEltTy->isFloatingType()
12331 ? APValue(llvm::abs(X: CurrentEle.getFloat()))
12332 : APValue(APSInt(
12333 CurrentEle.getInt().abs(),
12334 DestEltTy->isUnsignedIntegerOrEnumerationType()));
12335 ResultElements.push_back(Elt: Val);
12336 }
12337
12338 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12339 }
12340
12341 case Builtin::BI__builtin_elementwise_add_sat:
12342 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12343 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
12344 });
12345
12346 case Builtin::BI__builtin_elementwise_sub_sat:
12347 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12348 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
12349 });
12350
12351 case X86::BI__builtin_ia32_extract128i256:
12352 case X86::BI__builtin_ia32_vextractf128_pd256:
12353 case X86::BI__builtin_ia32_vextractf128_ps256:
12354 case X86::BI__builtin_ia32_vextractf128_si256: {
12355 APValue SourceVec, SourceImm;
12356 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceVec) ||
12357 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceImm))
12358 return false;
12359
12360 if (!SourceVec.isVector())
12361 return false;
12362
12363 const auto *RetVT = E->getType()->castAs<VectorType>();
12364 unsigned RetLen = RetVT->getNumElements();
12365 unsigned Idx = SourceImm.getInt().getZExtValue() & 1;
12366
12367 SmallVector<APValue, 32> ResultElements;
12368 ResultElements.reserve(N: RetLen);
12369
12370 for (unsigned I = 0; I < RetLen; I++)
12371 ResultElements.push_back(Elt: SourceVec.getVectorElt(I: Idx * RetLen + I));
12372
12373 return Success(V: APValue(ResultElements.data(), RetLen), E);
12374 }
12375
12376 case clang::X86::BI__builtin_ia32_cvtmask2b128:
12377 case clang::X86::BI__builtin_ia32_cvtmask2b256:
12378 case clang::X86::BI__builtin_ia32_cvtmask2b512:
12379 case clang::X86::BI__builtin_ia32_cvtmask2w128:
12380 case clang::X86::BI__builtin_ia32_cvtmask2w256:
12381 case clang::X86::BI__builtin_ia32_cvtmask2w512:
12382 case clang::X86::BI__builtin_ia32_cvtmask2d128:
12383 case clang::X86::BI__builtin_ia32_cvtmask2d256:
12384 case clang::X86::BI__builtin_ia32_cvtmask2d512:
12385 case clang::X86::BI__builtin_ia32_cvtmask2q128:
12386 case clang::X86::BI__builtin_ia32_cvtmask2q256:
12387 case clang::X86::BI__builtin_ia32_cvtmask2q512: {
12388 assert(E->getNumArgs() == 1);
12389 APSInt Mask;
12390 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Mask, Info))
12391 return false;
12392
12393 QualType VecTy = E->getType();
12394 const VectorType *VT = VecTy->castAs<VectorType>();
12395 unsigned VectorLen = VT->getNumElements();
12396 QualType ElemTy = VT->getElementType();
12397 unsigned ElemWidth = Info.Ctx.getTypeSize(T: ElemTy);
12398
12399 SmallVector<APValue, 16> Elems;
12400 for (unsigned I = 0; I != VectorLen; ++I) {
12401 bool BitSet = Mask[I];
12402 APSInt ElemVal(ElemWidth, /*isUnsigned=*/false);
12403 if (BitSet) {
12404 ElemVal.setAllBits();
12405 }
12406 Elems.push_back(Elt: APValue(ElemVal));
12407 }
12408 return Success(V: APValue(Elems.data(), VectorLen), E);
12409 }
12410
12411 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12412 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12413 case X86::BI__builtin_ia32_extracti32x4_mask:
12414 case X86::BI__builtin_ia32_extractf32x4_mask:
12415 case X86::BI__builtin_ia32_extracti32x8_mask:
12416 case X86::BI__builtin_ia32_extractf32x8_mask:
12417 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12418 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12419 case X86::BI__builtin_ia32_extracti64x2_512_mask:
12420 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12421 case X86::BI__builtin_ia32_extracti64x4_mask:
12422 case X86::BI__builtin_ia32_extractf64x4_mask: {
12423 APValue SourceVec, MergeVec;
12424 APSInt Imm, MaskImm;
12425
12426 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceVec) ||
12427 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Imm, Info) ||
12428 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: MergeVec) ||
12429 !EvaluateInteger(E: E->getArg(Arg: 3), Result&: MaskImm, Info))
12430 return false;
12431
12432 const auto *RetVT = E->getType()->castAs<VectorType>();
12433 unsigned RetLen = RetVT->getNumElements();
12434
12435 if (!SourceVec.isVector() || !MergeVec.isVector())
12436 return false;
12437 unsigned SrcLen = SourceVec.getVectorLength();
12438 unsigned Lanes = SrcLen / RetLen;
12439 unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes);
12440 unsigned Base = Lane * RetLen;
12441
12442 SmallVector<APValue, 32> ResultElements;
12443 ResultElements.reserve(N: RetLen);
12444 for (unsigned I = 0; I < RetLen; ++I) {
12445 if (MaskImm[I])
12446 ResultElements.push_back(Elt: SourceVec.getVectorElt(I: Base + I));
12447 else
12448 ResultElements.push_back(Elt: MergeVec.getVectorElt(I));
12449 }
12450 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12451 }
12452
12453 case clang::X86::BI__builtin_ia32_pavgb128:
12454 case clang::X86::BI__builtin_ia32_pavgw128:
12455 case clang::X86::BI__builtin_ia32_pavgb256:
12456 case clang::X86::BI__builtin_ia32_pavgw256:
12457 case clang::X86::BI__builtin_ia32_pavgb512:
12458 case clang::X86::BI__builtin_ia32_pavgw512:
12459 return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
12460
12461 case clang::X86::BI__builtin_ia32_pmulhrsw128:
12462 case clang::X86::BI__builtin_ia32_pmulhrsw256:
12463 case clang::X86::BI__builtin_ia32_pmulhrsw512:
12464 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12465 return (llvm::APIntOps::mulsExtended(C1: LHS, C2: RHS).ashr(ShiftAmt: 14) + 1)
12466 .extractBits(numBits: 16, bitPosition: 1);
12467 });
12468
12469 case clang::X86::BI__builtin_ia32_pmaddubsw128:
12470 case clang::X86::BI__builtin_ia32_pmaddubsw256:
12471 case clang::X86::BI__builtin_ia32_pmaddubsw512:
12472 case clang::X86::BI__builtin_ia32_pmaddwd128:
12473 case clang::X86::BI__builtin_ia32_pmaddwd256:
12474 case clang::X86::BI__builtin_ia32_pmaddwd512: {
12475 APValue SourceLHS, SourceRHS;
12476 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12477 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12478 return false;
12479
12480 auto *DestTy = E->getType()->castAs<VectorType>();
12481 QualType DestEltTy = DestTy->getElementType();
12482 unsigned SourceLen = SourceLHS.getVectorLength();
12483 bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
12484 SmallVector<APValue, 4> ResultElements;
12485 ResultElements.reserve(N: SourceLen / 2);
12486
12487 for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) {
12488 const APSInt &LoLHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12489 const APSInt &HiLHS = SourceLHS.getVectorElt(I: EltNum + 1).getInt();
12490 const APSInt &LoRHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12491 const APSInt &HiRHS = SourceRHS.getVectorElt(I: EltNum + 1).getInt();
12492 unsigned BitWidth = 2 * LoLHS.getBitWidth();
12493
12494 switch (E->getBuiltinCallee()) {
12495 case clang::X86::BI__builtin_ia32_pmaddubsw128:
12496 case clang::X86::BI__builtin_ia32_pmaddubsw256:
12497 case clang::X86::BI__builtin_ia32_pmaddubsw512:
12498 ResultElements.push_back(Elt: APValue(
12499 APSInt((LoLHS.zext(width: BitWidth) * LoRHS.sext(width: BitWidth))
12500 .sadd_sat(RHS: (HiLHS.zext(width: BitWidth) * HiRHS.sext(width: BitWidth))),
12501 DestUnsigned)));
12502 break;
12503 case clang::X86::BI__builtin_ia32_pmaddwd128:
12504 case clang::X86::BI__builtin_ia32_pmaddwd256:
12505 case clang::X86::BI__builtin_ia32_pmaddwd512:
12506 ResultElements.push_back(
12507 Elt: APValue(APSInt((LoLHS.sext(width: BitWidth) * LoRHS.sext(width: BitWidth)) +
12508 (HiLHS.sext(width: BitWidth) * HiRHS.sext(width: BitWidth)),
12509 DestUnsigned)));
12510 break;
12511 }
12512 }
12513
12514 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12515 }
12516
12517 case clang::X86::BI__builtin_ia32_pmulhuw128:
12518 case clang::X86::BI__builtin_ia32_pmulhuw256:
12519 case clang::X86::BI__builtin_ia32_pmulhuw512:
12520 return EvaluateBinOpExpr(llvm::APIntOps::mulhu);
12521
12522 case clang::X86::BI__builtin_ia32_pmulhw128:
12523 case clang::X86::BI__builtin_ia32_pmulhw256:
12524 case clang::X86::BI__builtin_ia32_pmulhw512:
12525 return EvaluateBinOpExpr(llvm::APIntOps::mulhs);
12526
12527 case clang::X86::BI__builtin_ia32_psllv2di:
12528 case clang::X86::BI__builtin_ia32_psllv4di:
12529 case clang::X86::BI__builtin_ia32_psllv4si:
12530 case clang::X86::BI__builtin_ia32_psllv8di:
12531 case clang::X86::BI__builtin_ia32_psllv8hi:
12532 case clang::X86::BI__builtin_ia32_psllv8si:
12533 case clang::X86::BI__builtin_ia32_psllv16hi:
12534 case clang::X86::BI__builtin_ia32_psllv16si:
12535 case clang::X86::BI__builtin_ia32_psllv32hi:
12536 case clang::X86::BI__builtin_ia32_psllwi128:
12537 case clang::X86::BI__builtin_ia32_pslldi128:
12538 case clang::X86::BI__builtin_ia32_psllqi128:
// Vector shift-left-by-immediate (psll*i). Per element: shift counts >= the
// element bit width produce zero, matching x86 out-of-range shift semantics.
12539 case clang::X86::BI__builtin_ia32_psllwi256:
12540 case clang::X86::BI__builtin_ia32_pslldi256:
12541 case clang::X86::BI__builtin_ia32_psllqi256:
12542 case clang::X86::BI__builtin_ia32_psllwi512:
12543 case clang::X86::BI__builtin_ia32_pslldi512:
12544 case clang::X86::BI__builtin_ia32_psllqi512:
12545 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12546 if (RHS.uge(RHS: LHS.getBitWidth())) {
12547 return APInt::getZero(numBits: LHS.getBitWidth());
12548 }
12549 return LHS.shl(shiftAmt: RHS.getZExtValue());
12550 });
12551
// Arithmetic shift right (psrav* variable / psra*i immediate). Out-of-range
// counts fill the element with its sign bit (ashr by width - 1).
12552 case clang::X86::BI__builtin_ia32_psrav4si:
12553 case clang::X86::BI__builtin_ia32_psrav8di:
12554 case clang::X86::BI__builtin_ia32_psrav8hi:
12555 case clang::X86::BI__builtin_ia32_psrav8si:
12556 case clang::X86::BI__builtin_ia32_psrav16hi:
12557 case clang::X86::BI__builtin_ia32_psrav16si:
12558 case clang::X86::BI__builtin_ia32_psrav32hi:
12559 case clang::X86::BI__builtin_ia32_psravq128:
12560 case clang::X86::BI__builtin_ia32_psravq256:
12561 case clang::X86::BI__builtin_ia32_psrawi128:
12562 case clang::X86::BI__builtin_ia32_psradi128:
12563 case clang::X86::BI__builtin_ia32_psraqi128:
12564 case clang::X86::BI__builtin_ia32_psrawi256:
12565 case clang::X86::BI__builtin_ia32_psradi256:
12566 case clang::X86::BI__builtin_ia32_psraqi256:
12567 case clang::X86::BI__builtin_ia32_psrawi512:
12568 case clang::X86::BI__builtin_ia32_psradi512:
12569 case clang::X86::BI__builtin_ia32_psraqi512:
12570 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12571 if (RHS.uge(RHS: LHS.getBitWidth())) {
12572 return LHS.ashr(ShiftAmt: LHS.getBitWidth() - 1);
12573 }
12574 return LHS.ashr(ShiftAmt: RHS.getZExtValue());
12575 });
12576
// Logical shift right (psrlv* variable / psrl*i immediate). Out-of-range
// counts produce zero.
12577 case clang::X86::BI__builtin_ia32_psrlv2di:
12578 case clang::X86::BI__builtin_ia32_psrlv4di:
12579 case clang::X86::BI__builtin_ia32_psrlv4si:
12580 case clang::X86::BI__builtin_ia32_psrlv8di:
12581 case clang::X86::BI__builtin_ia32_psrlv8hi:
12582 case clang::X86::BI__builtin_ia32_psrlv8si:
12583 case clang::X86::BI__builtin_ia32_psrlv16hi:
12584 case clang::X86::BI__builtin_ia32_psrlv16si:
12585 case clang::X86::BI__builtin_ia32_psrlv32hi:
12586 case clang::X86::BI__builtin_ia32_psrlwi128:
12587 case clang::X86::BI__builtin_ia32_psrldi128:
12588 case clang::X86::BI__builtin_ia32_psrlqi128:
12589 case clang::X86::BI__builtin_ia32_psrlwi256:
12590 case clang::X86::BI__builtin_ia32_psrldi256:
12591 case clang::X86::BI__builtin_ia32_psrlqi256:
12592 case clang::X86::BI__builtin_ia32_psrlwi512:
12593 case clang::X86::BI__builtin_ia32_psrldi512:
12594 case clang::X86::BI__builtin_ia32_psrlqi512:
12595 return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
12596 if (RHS.uge(RHS: LHS.getBitWidth())) {
12597 return APInt::getZero(numBits: LHS.getBitWidth());
12598 }
12599 return LHS.lshr(shiftAmt: RHS.getZExtValue());
12600 });
// packss*: narrow each source element to half its width with *signed*
// saturation (truncSSat).
12601 case X86::BI__builtin_ia32_packsswb128:
12602 case X86::BI__builtin_ia32_packsswb256:
12603 case X86::BI__builtin_ia32_packsswb512:
12604 case X86::BI__builtin_ia32_packssdw128:
12605 case X86::BI__builtin_ia32_packssdw256:
12606 case X86::BI__builtin_ia32_packssdw512:
12607 return evalPackBuiltin(E, Info, Result, PackFn: [](const APSInt &Src) {
12608 return APSInt(Src).truncSSat(width: Src.getBitWidth() / 2);
12609 });
// packus*: narrow signed input to half width with *unsigned* saturation
// (truncSSatU) — negative inputs clamp to zero.
12610 case X86::BI__builtin_ia32_packusdw128:
12611 case X86::BI__builtin_ia32_packusdw256:
12612 case X86::BI__builtin_ia32_packusdw512:
12613 case X86::BI__builtin_ia32_packuswb128:
12614 case X86::BI__builtin_ia32_packuswb256:
12615 case X86::BI__builtin_ia32_packuswb512:
12616 return evalPackBuiltin(E, Info, Result, PackFn: [](const APSInt &Src) {
12617 return APSInt(Src).truncSSatU(width: Src.getBitWidth() / 2);
12618 });
// Scalar masked selects. The argument passed to EvalSelectScalar (4/2/8)
// matches the element count of the corresponding 128-bit vector type
// (ps=4, pd=2, ph/bf16=8) — presumably the vector length; confirm against
// EvalSelectScalar's definition (outside this excerpt).
12619 case clang::X86::BI__builtin_ia32_selectss_128:
12620 return EvalSelectScalar(4);
12621 case clang::X86::BI__builtin_ia32_selectsd_128:
12622 return EvalSelectScalar(2);
12623 case clang::X86::BI__builtin_ia32_selectsh_128:
12624 case clang::X86::BI__builtin_ia32_selectsbf_128:
12625 return EvalSelectScalar(8);
// pmuldq/pmuludq: multiply every *even-indexed* pair of source elements
// (EltNum += 2), producing a double-width product per pair — signed
// (mulsExtended) or unsigned (muluExtended) depending on the builtin.
12626 case clang::X86::BI__builtin_ia32_pmuldq128:
12627 case clang::X86::BI__builtin_ia32_pmuldq256:
12628 case clang::X86::BI__builtin_ia32_pmuldq512:
12629 case clang::X86::BI__builtin_ia32_pmuludq128:
12630 case clang::X86::BI__builtin_ia32_pmuludq256:
12631 case clang::X86::BI__builtin_ia32_pmuludq512: {
12632 APValue SourceLHS, SourceRHS;
12633 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12634 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12635 return false;
12636
12637 unsigned SourceLen = SourceLHS.getVectorLength();
12638 SmallVector<APValue, 4> ResultElements;
12639 ResultElements.reserve(N: SourceLen / 2);
12640
12641 for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) {
12642 APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12643 APSInt RHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12644
12645 switch (E->getBuiltinCallee()) {
12646 case clang::X86::BI__builtin_ia32_pmuludq128:
12647 case clang::X86::BI__builtin_ia32_pmuludq256:
12648 case clang::X86::BI__builtin_ia32_pmuludq512:
12649 ResultElements.push_back(
12650 Elt: APValue(APSInt(llvm::APIntOps::muluExtended(C1: LHS, C2: RHS), true)));
12651 break;
12652 case clang::X86::BI__builtin_ia32_pmuldq128:
12653 case clang::X86::BI__builtin_ia32_pmuldq256:
12654 case clang::X86::BI__builtin_ia32_pmuldq512:
12655 ResultElements.push_back(
12656 Elt: APValue(APSInt(llvm::APIntOps::mulsExtended(C1: LHS, C2: RHS), false)));
12657 break;
12658 }
12659 }
12660
12661 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12662 }
12663
// vpmadd52luq: per element, accumulate A + low 52 bits of the 104-bit
// product of the low 52 bits of B and C. The 52-bit-wide APInt multiply
// wraps, yielding exactly the low 52 product bits; zext(64) widens for the
// 64-bit add.
12664 case X86::BI__builtin_ia32_vpmadd52luq128:
12665 case X86::BI__builtin_ia32_vpmadd52luq256:
12666 case X86::BI__builtin_ia32_vpmadd52luq512: {
12667 APValue A, B, C;
12668 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
12669 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B) ||
12670 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: C))
12671 return false;
12672
12673 unsigned ALen = A.getVectorLength();
12674 SmallVector<APValue, 4> ResultElements;
12675 ResultElements.reserve(N: ALen);
12676
12677 for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
12678 APInt AElt = A.getVectorElt(I: EltNum).getInt();
12679 APInt BElt = B.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12680 APInt CElt = C.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12681 APSInt ResElt(AElt + (BElt * CElt).zext(width: 64), false);
12682 ResultElements.push_back(Elt: APValue(ResElt));
12683 }
12684
12685 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12686 }
// vpmadd52huq: same shape as the luq variant above, but accumulates the
// *high* 52 bits of the 52x52 product (APIntOps::mulhu at 52-bit width).
12687 case X86::BI__builtin_ia32_vpmadd52huq128:
12688 case X86::BI__builtin_ia32_vpmadd52huq256:
12689 case X86::BI__builtin_ia32_vpmadd52huq512: {
12690 APValue A, B, C;
12691 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
12692 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B) ||
12693 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: C))
12694 return false;
12695
12696 unsigned ALen = A.getVectorLength();
12697 SmallVector<APValue, 4> ResultElements;
12698 ResultElements.reserve(N: ALen);
12699
12700 for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
12701 APInt AElt = A.getVectorElt(I: EltNum).getInt();
12702 APInt BElt = B.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12703 APInt CElt = C.getVectorElt(I: EltNum).getInt().trunc(width: 52);
12704 APSInt ResElt(AElt + llvm::APIntOps::mulhu(C1: BElt, C2: CElt).zext(width: 64), false);
12705 ResultElements.push_back(Elt: APValue(ResElt));
12706 }
12707
12708 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12709 }
12710
// Element rotates: vprot*/prol* rotate left by the (per-element or
// immediate) amount; APInt::rotl handles amounts modulo the bit width.
12711 case clang::X86::BI__builtin_ia32_vprotbi:
12712 case clang::X86::BI__builtin_ia32_vprotdi:
12713 case clang::X86::BI__builtin_ia32_vprotqi:
12714 case clang::X86::BI__builtin_ia32_vprotwi:
12715 case clang::X86::BI__builtin_ia32_prold128:
12716 case clang::X86::BI__builtin_ia32_prold256:
12717 case clang::X86::BI__builtin_ia32_prold512:
12718 case clang::X86::BI__builtin_ia32_prolq128:
12719 case clang::X86::BI__builtin_ia32_prolq256:
12720 case clang::X86::BI__builtin_ia32_prolq512:
12721 return EvaluateBinOpExpr(
12722 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(rotateAmt: RHS); });
12723
// pror*: rotate right counterpart.
12724 case clang::X86::BI__builtin_ia32_prord128:
12725 case clang::X86::BI__builtin_ia32_prord256:
12726 case clang::X86::BI__builtin_ia32_prord512:
12727 case clang::X86::BI__builtin_ia32_prorq128:
12728 case clang::X86::BI__builtin_ia32_prorq256:
12729 case clang::X86::BI__builtin_ia32_prorq512:
12730 return EvaluateBinOpExpr(
12731 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(rotateAmt: RHS); });
12732
// __builtin_elementwise_max/min: per-element min/max. Only the integer
// vector case is folded here — non-integer element types bail out (the
// float path, if any, is handled elsewhere). Signedness of the result
// follows the destination element type.
12733 case Builtin::BI__builtin_elementwise_max:
12734 case Builtin::BI__builtin_elementwise_min: {
12735 APValue SourceLHS, SourceRHS;
12736 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
12737 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
12738 return false;
12739
12740 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12741
12742 if (!DestEltTy->isIntegerType())
12743 return false;
12744
12745 unsigned SourceLen = SourceLHS.getVectorLength();
12746 SmallVector<APValue, 4> ResultElements;
12747 ResultElements.reserve(N: SourceLen);
12748
12749 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12750 APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
12751 APSInt RHS = SourceRHS.getVectorElt(I: EltNum).getInt();
12752 switch (E->getBuiltinCallee()) {
12753 case Builtin::BI__builtin_elementwise_max:
12754 ResultElements.push_back(
12755 Elt: APValue(APSInt(std::max(a: LHS, b: RHS),
12756 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12757 break;
12758 case Builtin::BI__builtin_elementwise_min:
12759 ResultElements.push_back(
12760 Elt: APValue(APSInt(std::min(a: LHS, b: RHS),
12761 DestEltTy->isUnsignedIntegerOrEnumerationType())));
12762 break;
12763 }
12764 }
12765
12766 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12767 }
// vpshld*: per-element funnel shift left — each result element takes the
// high bits of fshl(Hi, Lo, Amt), where the scalar Amt comes from arg 2.
12768 case X86::BI__builtin_ia32_vpshldd128:
12769 case X86::BI__builtin_ia32_vpshldd256:
12770 case X86::BI__builtin_ia32_vpshldd512:
12771 case X86::BI__builtin_ia32_vpshldq128:
12772 case X86::BI__builtin_ia32_vpshldq256:
12773 case X86::BI__builtin_ia32_vpshldq512:
12774 case X86::BI__builtin_ia32_vpshldw128:
12775 case X86::BI__builtin_ia32_vpshldw256:
12776 case X86::BI__builtin_ia32_vpshldw512: {
12777 APValue SourceHi, SourceLo, SourceAmt;
12778 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceHi) ||
12779 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLo) ||
12780 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceAmt))
12781 return false;
12782
12783 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12784 unsigned SourceLen = SourceHi.getVectorLength();
12785 SmallVector<APValue, 32> ResultElements;
12786 ResultElements.reserve(N: SourceLen);
12787
12788 APInt Amt = SourceAmt.getInt();
12789 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12790 APInt Hi = SourceHi.getVectorElt(I: EltNum).getInt();
12791 APInt Lo = SourceLo.getVectorElt(I: EltNum).getInt();
12792 APInt R = llvm::APIntOps::fshl(Hi, Lo, Shift: Amt);
12793 ResultElements.push_back(
12794 Elt: APValue(APSInt(R, DestEltTy->isUnsignedIntegerOrEnumerationType())));
12795 }
12796
12797 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12798 }
// vpshrd*: funnel shift right. Note the argument roles are swapped relative
// to vpshld — arg 0 supplies Lo and arg 1 supplies Hi (see NOTE below).
12799 case X86::BI__builtin_ia32_vpshrdd128:
12800 case X86::BI__builtin_ia32_vpshrdd256:
12801 case X86::BI__builtin_ia32_vpshrdd512:
12802 case X86::BI__builtin_ia32_vpshrdq128:
12803 case X86::BI__builtin_ia32_vpshrdq256:
12804 case X86::BI__builtin_ia32_vpshrdq512:
12805 case X86::BI__builtin_ia32_vpshrdw128:
12806 case X86::BI__builtin_ia32_vpshrdw256:
12807 case X86::BI__builtin_ia32_vpshrdw512: {
12808 // NOTE: Reversed Hi/Lo operands.
12809 APValue SourceHi, SourceLo, SourceAmt;
12810 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLo) ||
12811 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceHi) ||
12812 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceAmt))
12813 return false;
12814
12815 QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
12816 unsigned SourceLen = SourceHi.getVectorLength();
12817 SmallVector<APValue, 32> ResultElements;
12818 ResultElements.reserve(N: SourceLen);
12819
12820 APInt Amt = SourceAmt.getInt();
12821 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12822 APInt Hi = SourceHi.getVectorElt(I: EltNum).getInt();
12823 APInt Lo = SourceLo.getVectorElt(I: EltNum).getInt();
12824 APInt R = llvm::APIntOps::fshr(Hi, Lo, Shift: Amt);
12825 ResultElements.push_back(
12826 Elt: APValue(APSInt(R, DestEltTy->isUnsignedIntegerOrEnumerationType())));
12827 }
12828
12829 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12830 }
// vpconflict*: for each element I, build a bitmask whose bit J (J < I) is
// set when element I equals element J — i.e. a mask of earlier duplicates.
12831 case X86::BI__builtin_ia32_vpconflictsi_128:
12832 case X86::BI__builtin_ia32_vpconflictsi_256:
12833 case X86::BI__builtin_ia32_vpconflictsi_512:
12834 case X86::BI__builtin_ia32_vpconflictdi_128:
12835 case X86::BI__builtin_ia32_vpconflictdi_256:
12836 case X86::BI__builtin_ia32_vpconflictdi_512: {
12837 APValue Source;
12838
12839 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
12840 return false;
12841
12842 unsigned SourceLen = Source.getVectorLength();
12843 SmallVector<APValue, 32> ResultElements;
12844 ResultElements.reserve(N: SourceLen);
12845
12846 const auto *VecT = E->getType()->castAs<VectorType>();
12847 bool DestUnsigned =
12848 VecT->getElementType()->isUnsignedIntegerOrEnumerationType();
12849
12850 for (unsigned I = 0; I != SourceLen; ++I) {
12851 const APValue &EltI = Source.getVectorElt(I);
12852
12853 APInt ConflictMask(EltI.getInt().getBitWidth(), 0);
12854 for (unsigned J = 0; J != I; ++J) {
12855 const APValue &EltJ = Source.getVectorElt(I: J);
12856 ConflictMask.setBitVal(BitPosition: J, BitValue: EltI.getInt() == EltJ.getInt());
12857 }
12858 ResultElements.push_back(Elt: APValue(APSInt(ConflictMask, DestUnsigned)));
12859 }
12860 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12861 }
// Immediate blends: control bit (EltNum % 8) of the imm8 selects the
// second source (T) over the first (F); the modulo mirrors hardware reuse
// of the 8 control bits across wider vectors.
12862 case X86::BI__builtin_ia32_blendpd:
12863 case X86::BI__builtin_ia32_blendpd256:
12864 case X86::BI__builtin_ia32_blendps:
12865 case X86::BI__builtin_ia32_blendps256:
12866 case X86::BI__builtin_ia32_pblendw128:
12867 case X86::BI__builtin_ia32_pblendw256:
12868 case X86::BI__builtin_ia32_pblendd128:
12869 case X86::BI__builtin_ia32_pblendd256: {
12870 APValue SourceF, SourceT, SourceC;
12871 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceF) ||
12872 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceT) ||
12873 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceC))
12874 return false;
12875
12876 const APInt &C = SourceC.getInt();
12877 unsigned SourceLen = SourceF.getVectorLength();
12878 SmallVector<APValue, 32> ResultElements;
12879 ResultElements.reserve(N: SourceLen);
12880 for (unsigned EltNum = 0; EltNum != SourceLen; ++EltNum) {
12881 const APValue &F = SourceF.getVectorElt(I: EltNum);
12882 const APValue &T = SourceT.getVectorElt(I: EltNum);
12883 ResultElements.push_back(Elt: C[EltNum % 8] ? T : F);
12884 }
12885
12886 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12887 }
12888
// psign*: per element — zero when B is zero, -A when B is negative,
// A otherwise.
12889 case X86::BI__builtin_ia32_psignb128:
12890 case X86::BI__builtin_ia32_psignb256:
12891 case X86::BI__builtin_ia32_psignw128:
12892 case X86::BI__builtin_ia32_psignw256:
12893 case X86::BI__builtin_ia32_psignd128:
12894 case X86::BI__builtin_ia32_psignd256:
12895 return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) {
12896 if (BElem.isZero())
12897 return APInt::getZero(numBits: AElem.getBitWidth());
12898 if (BElem.isNegative())
12899 return -AElem;
12900 return AElem;
12901 });
12902
// blendv*: variable blend keyed on the *sign bit* of each mask element;
// float masks are reinterpreted via bitcastToAPInt so the sign-bit test is
// uniform for integer and FP element types.
12903 case X86::BI__builtin_ia32_blendvpd:
12904 case X86::BI__builtin_ia32_blendvpd256:
12905 case X86::BI__builtin_ia32_blendvps:
12906 case X86::BI__builtin_ia32_blendvps256:
12907 case X86::BI__builtin_ia32_pblendvb128:
12908 case X86::BI__builtin_ia32_pblendvb256: {
12909 // SSE blendv by mask signbit: "Result = C[] < 0 ? T[] : F[]".
12910 APValue SourceF, SourceT, SourceC;
12911 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceF) ||
12912 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceT) ||
12913 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceC))
12914 return false;
12915
12916 unsigned SourceLen = SourceF.getVectorLength();
12917 SmallVector<APValue, 32> ResultElements;
12918 ResultElements.reserve(N: SourceLen);
12919
12920 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12921 const APValue &F = SourceF.getVectorElt(I: EltNum);
12922 const APValue &T = SourceT.getVectorElt(I: EltNum);
12923 const APValue &C = SourceC.getVectorElt(I: EltNum);
12924 APInt M = C.isInt() ? (APInt)C.getInt() : C.getFloat().bitcastToAPInt();
12925 ResultElements.push_back(Elt: M.isNegative() ? T : F);
12926 }
12927
12928 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12929 }
// AVX512 select*: mask-register blend — bit EltNum of the scalar mask
// (arg 0) picks LHS (arg 1) over RHS (arg 2).
12930 case X86::BI__builtin_ia32_selectb_128:
12931 case X86::BI__builtin_ia32_selectb_256:
12932 case X86::BI__builtin_ia32_selectb_512:
12933 case X86::BI__builtin_ia32_selectw_128:
12934 case X86::BI__builtin_ia32_selectw_256:
12935 case X86::BI__builtin_ia32_selectw_512:
12936 case X86::BI__builtin_ia32_selectd_128:
12937 case X86::BI__builtin_ia32_selectd_256:
12938 case X86::BI__builtin_ia32_selectd_512:
12939 case X86::BI__builtin_ia32_selectq_128:
12940 case X86::BI__builtin_ia32_selectq_256:
12941 case X86::BI__builtin_ia32_selectq_512:
12942 case X86::BI__builtin_ia32_selectph_128:
12943 case X86::BI__builtin_ia32_selectph_256:
12944 case X86::BI__builtin_ia32_selectph_512:
12945 case X86::BI__builtin_ia32_selectpbf_128:
12946 case X86::BI__builtin_ia32_selectpbf_256:
12947 case X86::BI__builtin_ia32_selectpbf_512:
12948 case X86::BI__builtin_ia32_selectps_128:
12949 case X86::BI__builtin_ia32_selectps_256:
12950 case X86::BI__builtin_ia32_selectps_512:
12951 case X86::BI__builtin_ia32_selectpd_128:
12952 case X86::BI__builtin_ia32_selectpd_256:
12953 case X86::BI__builtin_ia32_selectpd_512: {
12954 // AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
12955 APValue SourceMask, SourceLHS, SourceRHS;
12956 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceMask) ||
12957 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLHS) ||
12958 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceRHS))
12959 return false;
12960
12961 APSInt Mask = SourceMask.getInt();
12962 unsigned SourceLen = SourceLHS.getVectorLength();
12963 SmallVector<APValue, 4> ResultElements;
12964 ResultElements.reserve(N: SourceLen);
12965
12966 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
12967 const APValue &LHS = SourceLHS.getVectorElt(I: EltNum);
12968 const APValue &RHS = SourceRHS.getVectorElt(I: EltNum);
12969 ResultElements.push_back(Elt: Mask[EltNum] ? LHS : RHS);
12970 }
12971
12972 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
12973 }
12974
// cvtsd2ss: convert the low double of B to float (strictly — failure of
// ConvertDoubleToFloatStrict aborts folding); upper lanes pass through
// from A.
12975 case X86::BI__builtin_ia32_cvtsd2ss: {
12976 APValue VecA, VecB;
12977 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) ||
12978 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB))
12979 return false;
12980
12981 SmallVector<APValue, 4> Elements;
12982
12983 APValue ResultVal;
12984 if (!ConvertDoubleToFloatStrict(Info, E, OrigVal: VecB.getVectorElt(I: 0).getFloat(),
12985 Result&: ResultVal))
12986 return false;
12987
12988 Elements.push_back(Elt: ResultVal);
12989
12990 unsigned NumEltsA = VecA.getVectorLength();
12991 for (unsigned I = 1; I < NumEltsA; ++I) {
12992 Elements.push_back(Elt: VecA.getVectorElt(I));
12993 }
12994
12995 return Success(V: Elements, E);
12996 }
// cvtsd2ss_round_mask: as above, but mask bit 0 selects between the
// converted value and the passthrough element from arg 2 (VecSrc).
12997 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: {
12998 APValue VecA, VecB, VecSrc, MaskValue;
12999
13000 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) ||
13001 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB) ||
13002 !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: VecSrc) ||
13003 !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: MaskValue))
13004 return false;
13005
13006 unsigned Mask = MaskValue.getInt().getZExtValue();
13007 SmallVector<APValue, 4> Elements;
13008
13009 if (Mask & 1) {
13010 APValue ResultVal;
13011 if (!ConvertDoubleToFloatStrict(Info, E, OrigVal: VecB.getVectorElt(I: 0).getFloat(),
13012 Result&: ResultVal))
13013 return false;
13014 Elements.push_back(Elt: ResultVal);
13015 } else {
13016 Elements.push_back(Elt: VecSrc.getVectorElt(I: 0));
13017 }
13018
13019 unsigned NumEltsA = VecA.getVectorLength();
13020 for (unsigned I = 1; I < NumEltsA; ++I) {
13021 Elements.push_back(Elt: VecA.getVectorElt(I));
13022 }
13023
13024 return Success(V: Elements, E);
13025 }
// cvtpd2ps family: packed double -> float. Masked variants merge
// unselected lanes from arg 1; result lanes past the input length are
// zero-filled (the 128-bit cvtpd2ps writes zeros to its upper half).
13026 case X86::BI__builtin_ia32_cvtpd2ps:
13027 case X86::BI__builtin_ia32_cvtpd2ps256:
13028 case X86::BI__builtin_ia32_cvtpd2ps_mask:
13029 case X86::BI__builtin_ia32_cvtpd2ps512_mask: {
13030
13031 const auto BuiltinID = E->getBuiltinCallee();
13032 bool IsMasked = (BuiltinID == X86::BI__builtin_ia32_cvtpd2ps_mask ||
13033 BuiltinID == X86::BI__builtin_ia32_cvtpd2ps512_mask);
13034
13035 APValue InputValue;
13036 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: InputValue))
13037 return false;
13038
13039 APValue MergeValue;
13040 unsigned Mask = 0xFFFFFFFF;
13041 bool NeedsMerge = false;
13042 if (IsMasked) {
13043 APValue MaskValue;
13044 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: MaskValue))
13045 return false;
13046 Mask = MaskValue.getInt().getZExtValue();
13047 auto NumEltsResult = E->getType()->getAs<VectorType>()->getNumElements();
// Only evaluate the merge source if some mask bit is actually clear.
13048 for (unsigned I = 0; I < NumEltsResult; ++I) {
13049 if (!((Mask >> I) & 1)) {
13050 NeedsMerge = true;
13051 break;
13052 }
13053 }
13054 if (NeedsMerge) {
13055 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: MergeValue))
13056 return false;
13057 }
13058 }
13059
13060 unsigned NumEltsResult =
13061 E->getType()->getAs<VectorType>()->getNumElements();
13062 unsigned NumEltsInput = InputValue.getVectorLength();
13063 SmallVector<APValue, 8> Elements;
13064 for (unsigned I = 0; I < NumEltsResult; ++I) {
13065 if (IsMasked && !((Mask >> I) & 1)) {
// Defensive: NeedsMerge must already be true whenever a mask bit is
// clear (computed over the same range above).
13066 if (!NeedsMerge) {
13067 return false;
13068 }
13069 Elements.push_back(Elt: MergeValue.getVectorElt(I));
13070 continue;
13071 }
13072
13073 if (I >= NumEltsInput) {
13074 Elements.push_back(Elt: APValue(APFloat::getZero(Sem: APFloat::IEEEsingle())));
13075 continue;
13076 }
13077
13078 APValue ResultVal;
13079 if (!ConvertDoubleToFloatStrict(
13080 Info, E, OrigVal: InputValue.getVectorElt(I).getFloat(), Result&: ResultVal))
13081 return false;
13082
13083 Elements.push_back(Elt: ResultVal);
13084 }
13085 return Success(V: Elements, E);
13086 }
13087
// shufps: per 128-bit lane, the low half of each lane selects from source 0
// and the high half from source 1, using 2-bit indices from the imm8
// (reused modulo 8 bits across lanes).
13088 case X86::BI__builtin_ia32_shufps:
13089 case X86::BI__builtin_ia32_shufps256:
13090 case X86::BI__builtin_ia32_shufps512: {
13091 APValue R;
13092 if (!evalShuffleGeneric(
13093 Info, Call: E, Out&: R,
13094 GetSourceIndex: [](unsigned DstIdx,
13095 unsigned ShuffleMask) -> std::pair<unsigned, int> {
13096 constexpr unsigned LaneBits = 128u;
13097 unsigned NumElemPerLane = LaneBits / 32;
13098 unsigned NumSelectableElems = NumElemPerLane / 2;
13099 unsigned BitsPerElem = 2;
13100 unsigned IndexMask = (1u << BitsPerElem) - 1;
13101 unsigned MaskBits = 8;
13102 unsigned Lane = DstIdx / NumElemPerLane;
13103 unsigned ElemInLane = DstIdx % NumElemPerLane;
13104 unsigned LaneOffset = Lane * NumElemPerLane;
13105 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
13106 unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
13107 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
13108 return {SrcIdx, static_cast<int>(LaneOffset + Index)};
13109 }))
13110 return false;
13111 return Success(V: R, E);
13112 }
// shufpd: same scheme with 64-bit elements — one control bit per element.
13113 case X86::BI__builtin_ia32_shufpd:
13114 case X86::BI__builtin_ia32_shufpd256:
13115 case X86::BI__builtin_ia32_shufpd512: {
13116 APValue R;
13117 if (!evalShuffleGeneric(
13118 Info, Call: E, Out&: R,
13119 GetSourceIndex: [](unsigned DstIdx,
13120 unsigned ShuffleMask) -> std::pair<unsigned, int> {
13121 constexpr unsigned LaneBits = 128u;
13122 unsigned NumElemPerLane = LaneBits / 64;
13123 unsigned NumSelectableElems = NumElemPerLane / 2;
13124 unsigned BitsPerElem = 1;
13125 unsigned IndexMask = (1u << BitsPerElem) - 1;
13126 unsigned MaskBits = 8;
13127 unsigned Lane = DstIdx / NumElemPerLane;
13128 unsigned ElemInLane = DstIdx % NumElemPerLane;
13129 unsigned LaneOffset = Lane * NumElemPerLane;
13130 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
13131 unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1;
13132 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
13133 return {SrcIdx, static_cast<int>(LaneOffset + Index)};
13134 }))
13135 return false;
13136 return Success(V: R, E);
13137 }
// insertps: imm8 fields — [3:0] zero mask (source -1 means zero the
// element), [7:6] element from source B, [5:4] destination slot.
13138 case X86::BI__builtin_ia32_insertps128: {
13139 APValue R;
13140 if (!evalShuffleGeneric(
13141 Info, Call: E, Out&: R,
13142 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13143 // Bits [3:0]: zero mask - if bit is set, zero this element
13144 if ((Mask & (1 << DstIdx)) != 0) {
13145 return {0, -1};
13146 }
13147 // Bits [7:6]: select element from source vector Y (0-3)
13148 // Bits [5:4]: select destination position (0-3)
13149 unsigned SrcElem = (Mask >> 6) & 0x3;
13150 unsigned DstElem = (Mask >> 4) & 0x3;
13151 if (DstIdx == DstElem) {
13152 // Insert element from source vector (B) at this position
13153 return {1, static_cast<int>(SrcElem)};
13154 } else {
13155 // Copy from destination vector (A)
13156 return {0, static_cast<int>(DstIdx)};
13157 }
13158 }))
13159 return false;
13160 return Success(V: R, E);
13161 }
// pshufb: byte shuffle within each 16-byte lane. A set high bit in the
// control byte zeroes the destination byte (index -1); otherwise the low
// four control bits index within the lane.
13162 case X86::BI__builtin_ia32_pshufb128:
13163 case X86::BI__builtin_ia32_pshufb256:
13164 case X86::BI__builtin_ia32_pshufb512: {
13165 APValue R;
13166 if (!evalShuffleGeneric(
13167 Info, Call: E, Out&: R,
13168 GetSourceIndex: [](unsigned DstIdx,
13169 unsigned ShuffleMask) -> std::pair<unsigned, int> {
13170 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
13171 if (Ctlb & 0x80)
13172 return std::make_pair(x: 0, y: -1);
13173
13174 unsigned LaneBase = (DstIdx / 16) * 16;
13175 unsigned SrcOffset = Ctlb & 0x0F;
13176 unsigned SrcIdx = LaneBase + SrcOffset;
13177 return std::make_pair(x: 0, y: static_cast<int>(SrcIdx));
13178 }))
13179 return false;
13180 return Success(V: R, E);
13181 }
13182
// pshuflw: shuffle the low four 16-bit words of each 128-bit lane using
// 2-bit imm8 selectors; the high four words pass through unchanged.
13183 case X86::BI__builtin_ia32_pshuflw:
13184 case X86::BI__builtin_ia32_pshuflw256:
13185 case X86::BI__builtin_ia32_pshuflw512: {
13186 APValue R;
13187 if (!evalShuffleGeneric(
13188 Info, Call: E, Out&: R,
13189 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13190 constexpr unsigned LaneBits = 128u;
13191 constexpr unsigned ElemBits = 16u;
13192 constexpr unsigned LaneElts = LaneBits / ElemBits;
13193 constexpr unsigned HalfSize = 4;
13194 unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
13195 unsigned LaneIdx = DstIdx % LaneElts;
13196 if (LaneIdx < HalfSize) {
13197 unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
13198 return std::make_pair(x: 0, y: static_cast<int>(LaneBase + Sel));
13199 }
13200 return std::make_pair(x: 0, y: static_cast<int>(DstIdx));
13201 }))
13202 return false;
13203 return Success(V: R, E);
13204 }
13205
// pshufhw: mirror of pshuflw — shuffles the high four words per lane,
// passes the low four through.
13206 case X86::BI__builtin_ia32_pshufhw:
13207 case X86::BI__builtin_ia32_pshufhw256:
13208 case X86::BI__builtin_ia32_pshufhw512: {
13209 APValue R;
13210 if (!evalShuffleGeneric(
13211 Info, Call: E, Out&: R,
13212 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13213 constexpr unsigned LaneBits = 128u;
13214 constexpr unsigned ElemBits = 16u;
13215 constexpr unsigned LaneElts = LaneBits / ElemBits;
13216 constexpr unsigned HalfSize = 4;
13217 unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
13218 unsigned LaneIdx = DstIdx % LaneElts;
13219 if (LaneIdx >= HalfSize) {
13220 unsigned Rel = LaneIdx - HalfSize;
13221 unsigned Sel = (Mask >> (2 * Rel)) & 0x3;
13222 return std::make_pair(
13223 x: 0, y: static_cast<int>(LaneBase + HalfSize + Sel));
13224 }
13225 return std::make_pair(x: 0, y: static_cast<int>(DstIdx));
13226 }))
13227 return false;
13228 return Success(V: R, E);
13229 }
13230
// pshufd / vpermilps (immediate): 2-bit selector per 32-bit element,
// applied independently within each 128-bit lane.
13231 case X86::BI__builtin_ia32_pshufd:
13232 case X86::BI__builtin_ia32_pshufd256:
13233 case X86::BI__builtin_ia32_pshufd512:
13234 case X86::BI__builtin_ia32_vpermilps:
13235 case X86::BI__builtin_ia32_vpermilps256:
13236 case X86::BI__builtin_ia32_vpermilps512: {
13237 APValue R;
13238 if (!evalShuffleGeneric(
13239 Info, Call: E, Out&: R,
13240 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13241 constexpr unsigned LaneBits = 128u;
13242 constexpr unsigned ElemBits = 32u;
13243 constexpr unsigned LaneElts = LaneBits / ElemBits;
13244 unsigned LaneBase = (DstIdx / LaneElts) * LaneElts;
13245 unsigned LaneIdx = DstIdx % LaneElts;
13246 unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3;
13247 return std::make_pair(x: 0, y: static_cast<int>(LaneBase + Sel));
13248 }))
13249 return false;
13250 return Success(V: R, E);
13251 }
13252
13253 case X86::BI__builtin_ia32_vpermilvarpd:
13254 case X86::BI__builtin_ia32_vpermilvarpd256:
13255 case X86::BI__builtin_ia32_vpermilvarpd512: {
13256 APValue R;
13257 if (!evalShuffleGeneric(
13258 Info, Call: E, Out&: R,
13259 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13260 unsigned NumElemPerLane = 2;
13261 unsigned Lane = DstIdx / NumElemPerLane;
13262 unsigned Offset = Mask & 0b10 ? 1 : 0;
13263 return std::make_pair(
13264 x: 0, y: static_cast<int>(Lane * NumElemPerLane + Offset));
13265 }))
13266 return false;
13267 return Success(V: R, E);
13268 }
13269
13270 case X86::BI__builtin_ia32_vpermilpd:
13271 case X86::BI__builtin_ia32_vpermilpd256:
13272 case X86::BI__builtin_ia32_vpermilpd512: {
13273 APValue R;
13274 if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Control) {
13275 unsigned NumElemPerLane = 2;
13276 unsigned BitsPerElem = 1;
13277 unsigned MaskBits = 8;
13278 unsigned IndexMask = 0x1;
13279 unsigned Lane = DstIdx / NumElemPerLane;
13280 unsigned LaneOffset = Lane * NumElemPerLane;
13281 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
13282 unsigned Index = (Control >> BitIndex) & IndexMask;
13283 return std::make_pair(x: 0, y: static_cast<int>(LaneOffset + Index));
13284 }))
13285 return false;
13286 return Success(V: R, E);
13287 }
13288
13289 case X86::BI__builtin_ia32_permdf256:
13290 case X86::BI__builtin_ia32_permdi256: {
13291 APValue R;
13292 if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Control) {
13293 // permute4x64 operates on 4 64-bit elements
13294 // For element i (0-3), extract bits [2*i+1:2*i] from Control
13295 unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
13296 return std::make_pair(x: 0, y: static_cast<int>(Index));
13297 }))
13298 return false;
13299 return Success(V: R, E);
13300 }
13301
13302 case X86::BI__builtin_ia32_vpermilvarps:
13303 case X86::BI__builtin_ia32_vpermilvarps256:
13304 case X86::BI__builtin_ia32_vpermilvarps512: {
13305 APValue R;
13306 if (!evalShuffleGeneric(
13307 Info, Call: E, Out&: R,
13308 GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> {
13309 unsigned NumElemPerLane = 4;
13310 unsigned Lane = DstIdx / NumElemPerLane;
13311 unsigned Offset = Mask & 0b11;
13312 return std::make_pair(
13313 x: 0, y: static_cast<int>(Lane * NumElemPerLane + Offset));
13314 }))
13315 return false;
13316 return Success(V: R, E);
13317 }
13318
// vpmultishiftqb: for each byte of the destination, extract 8 bits from
// the corresponding 64-bit qword of B, starting at the bit offset given by
// the low 6 bits of the matching control byte in A, wrapping around the
// qword (circular extraction via "& 0x3F").
13319 case X86::BI__builtin_ia32_vpmultishiftqb128:
13320 case X86::BI__builtin_ia32_vpmultishiftqb256:
13321 case X86::BI__builtin_ia32_vpmultishiftqb512: {
13322 assert(E->getNumArgs() == 2);
13323
13324 APValue A, B;
13325 if (!Evaluate(Result&: A, Info, E: E->getArg(Arg: 0)) || !Evaluate(Result&: B, Info, E: E->getArg(Arg: 1)))
13326 return false;
13327
13328 assert(A.getVectorLength() == B.getVectorLength());
13329 unsigned NumBytesInQWord = 8;
13330 unsigned NumBitsInByte = 8;
13331 unsigned NumBytes = A.getVectorLength();
13332 unsigned NumQWords = NumBytes / NumBytesInQWord;
13333 SmallVector<APValue, 64> Result;
13334 Result.reserve(N: NumBytes);
13335
13336 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
// Reassemble the qword of B from its eight byte elements.
13337 APInt BQWord(64, 0);
13338 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
13339 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
13340 uint64_t Byte = B.getVectorElt(I: Idx).getInt().getZExtValue();
13341 BQWord.insertBits(SubBits: APInt(8, Byte & 0xFF), bitPosition: ByteIdx * NumBitsInByte);
13342 }
13343
13344 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
13345 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
13346 uint64_t Ctrl = A.getVectorElt(I: Idx).getInt().getZExtValue() & 0x3F;
13347
13348 APInt Byte(8, 0);
13349 for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
13350 Byte.setBitVal(BitPosition: BitIdx, BitValue: BQWord[(Ctrl + BitIdx) & 0x3F]);
13351 }
13352 Result.push_back(Elt: APValue(APSInt(Byte, /*isUnsigned*/ true)));
13353 }
13354 }
13355 return Success(V: APValue(Result.data(), Result.size()), E);
13356 }
13357
// phminposuw: horizontal minimum of unsigned words. Result element 0 holds
// the minimum value, element 1 its index (first occurrence wins, since
// only strictly-smaller values update), and all remaining elements are
// zero.
13358 case X86::BI__builtin_ia32_phminposuw128: {
13359 APValue Source;
13360 if (!Evaluate(Result&: Source, Info, E: E->getArg(Arg: 0)))
13361 return false;
13362 unsigned SourceLen = Source.getVectorLength();
13363 const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
13364 QualType ElemQT = VT->getElementType();
13365 unsigned ElemBitWidth = Info.Ctx.getTypeSize(T: ElemQT);
13366
13367 APInt MinIndex(ElemBitWidth, 0);
13368 APInt MinVal = Source.getVectorElt(I: 0).getInt();
13369 for (unsigned I = 1; I != SourceLen; ++I) {
13370 APInt Val = Source.getVectorElt(I).getInt();
13371 if (MinVal.ugt(RHS: Val)) {
13372 MinVal = Val;
13373 MinIndex = I;
13374 }
13375 }
13376
13377 bool ResultUnsigned = E->getCallReturnType(Ctx: Info.Ctx)
13378 ->castAs<VectorType>()
13379 ->getElementType()
13380 ->isUnsignedIntegerOrEnumerationType();
13381
13382 SmallVector<APValue, 8> Result;
13383 Result.reserve(N: SourceLen);
13384 Result.emplace_back(Args: APSInt(MinVal, ResultUnsigned));
13385 Result.emplace_back(Args: APSInt(MinIndex, ResultUnsigned));
13386 for (unsigned I = 0; I != SourceLen - 2; ++I) {
13387 Result.emplace_back(Args: APSInt(APInt(ElemBitWidth, 0), ResultUnsigned));
13388 }
13389 return Success(V: APValue(Result.data(), Result.size()), E);
13390 }
13391
  case X86::BI__builtin_ia32_psraq128:
  case X86::BI__builtin_ia32_psraq256:
  case X86::BI__builtin_ia32_psraq512:
  case X86::BI__builtin_ia32_psrad128:
  case X86::BI__builtin_ia32_psrad256:
  case X86::BI__builtin_ia32_psrad512:
  case X86::BI__builtin_ia32_psraw128:
  case X86::BI__builtin_ia32_psraw256:
  case X86::BI__builtin_ia32_psraw512: {
    // PSRA[W/D/Q]: per-element arithmetic shift right by a shared count
    // (helper evalShiftWithCount extracts the count; not visible here).
    // When the count is >= the element width, the hardware shifts in sign
    // bits, so the overflow callback replicates the sign bit across the lane.
    APValue R;
    if (!evalShiftWithCount(
            Info, Call: E, Out&: R,
            ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.ashr(ShiftAmt: Count); },
            OverflowOp: [](const APInt &Elt, unsigned Width) {
              return Elt.ashr(ShiftAmt: Width - 1);
            }))
      return false;
    return Success(V: R, E);
  }
13411
  case X86::BI__builtin_ia32_psllq128:
  case X86::BI__builtin_ia32_psllq256:
  case X86::BI__builtin_ia32_psllq512:
  case X86::BI__builtin_ia32_pslld128:
  case X86::BI__builtin_ia32_pslld256:
  case X86::BI__builtin_ia32_pslld512:
  case X86::BI__builtin_ia32_psllw128:
  case X86::BI__builtin_ia32_psllw256:
  case X86::BI__builtin_ia32_psllw512: {
    // PSLL[W/D/Q]: per-element logical shift left by a shared count.
    // An out-of-range count (>= element width) yields zero, matching the
    // hardware behavior of shifting all bits out.
    APValue R;
    if (!evalShiftWithCount(
            Info, Call: E, Out&: R,
            ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.shl(shiftAmt: Count); },
            OverflowOp: [](const APInt &Elt, unsigned Width) {
              return APInt::getZero(numBits: Width);
            }))
      return false;
    return Success(V: R, E);
  }
13431
  case X86::BI__builtin_ia32_psrlq128:
  case X86::BI__builtin_ia32_psrlq256:
  case X86::BI__builtin_ia32_psrlq512:
  case X86::BI__builtin_ia32_psrld128:
  case X86::BI__builtin_ia32_psrld256:
  case X86::BI__builtin_ia32_psrld512:
  case X86::BI__builtin_ia32_psrlw128:
  case X86::BI__builtin_ia32_psrlw256:
  case X86::BI__builtin_ia32_psrlw512: {
    // PSRL[W/D/Q]: per-element logical shift right by a shared count.
    // An out-of-range count (>= element width) yields zero.
    APValue R;
    if (!evalShiftWithCount(
            Info, Call: E, Out&: R,
            ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.lshr(shiftAmt: Count); },
            OverflowOp: [](const APInt &Elt, unsigned Width) {
              return APInt::getZero(numBits: Width);
            }))
      return false;
    return Success(V: R, E);
  }
13451
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq512_mask: {
    // VPTERNLOG (merge-masking form): per result bit, Imm acts as an 8-entry
    // truth table indexed by the corresponding bits of (A, B, C). Mask bit
    // U[i] selects between the computed lane and the unmodified A lane.
    // Args: 0 = A, 1 = B, 2 = C, 3 = imm8 truth table, 4 = write mask.
    APValue AValue, BValue, CValue, ImmValue, UValue;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: AValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: BValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: CValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: ImmValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 4), Result&: UValue))
      return false;

    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
    APInt Imm = ImmValue.getInt();
    APInt U = UValue.getInt();
    unsigned ResultLen = AValue.getVectorLength();
    SmallVector<APValue, 16> ResultElements;
    ResultElements.reserve(N: ResultLen);

    for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) {
      APInt ALane = AValue.getVectorElt(I: EltNum).getInt();
      APInt BLane = BValue.getVectorElt(I: EltNum).getInt();
      APInt CLane = CValue.getVectorElt(I: EltNum).getInt();

      if (U[EltNum]) {
        unsigned BitWidth = ALane.getBitWidth();
        APInt ResLane(BitWidth, 0);

        for (unsigned Bit = 0; Bit < BitWidth; ++Bit) {
          unsigned ABit = ALane[Bit];
          unsigned BBit = BLane[Bit];
          unsigned CBit = CLane[Bit];

          // Idx in [0,7] selects one bit of the imm8 truth table.
          unsigned Idx = (ABit << 2) | (BBit << 1) | CBit;
          ResLane.setBitVal(BitPosition: Bit, BitValue: Imm[Idx]);
        }
        ResultElements.push_back(Elt: APValue(APSInt(ResLane, DestUnsigned)));
      } else {
        // Mask bit clear: merge-masking passes through the A lane unchanged.
        ResultElements.push_back(Elt: APValue(APSInt(ALane, DestUnsigned)));
      }
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
  case X86::BI__builtin_ia32_pternlogq512_maskz: {
    // VPTERNLOG (zero-masking form): same truth-table evaluation as the
    // merge-masking case above, but lanes whose mask bit U[i] is clear are
    // zeroed instead of taking the A lane (ResLane stays 0 when the inner
    // loop is skipped).
    APValue AValue, BValue, CValue, ImmValue, UValue;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: AValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: BValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: CValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: ImmValue) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 4), Result&: UValue))
      return false;

    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
    APInt Imm = ImmValue.getInt();
    APInt U = UValue.getInt();
    unsigned ResultLen = AValue.getVectorLength();
    SmallVector<APValue, 16> ResultElements;
    ResultElements.reserve(N: ResultLen);

    for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) {
      APInt ALane = AValue.getVectorElt(I: EltNum).getInt();
      APInt BLane = BValue.getVectorElt(I: EltNum).getInt();
      APInt CLane = CValue.getVectorElt(I: EltNum).getInt();

      unsigned BitWidth = ALane.getBitWidth();
      APInt ResLane(BitWidth, 0);

      if (U[EltNum]) {
        for (unsigned Bit = 0; Bit < BitWidth; ++Bit) {
          unsigned ABit = ALane[Bit];
          unsigned BBit = BLane[Bit];
          unsigned CBit = CLane[Bit];

          // Idx in [0,7] selects one bit of the imm8 truth table.
          unsigned Idx = (ABit << 2) | (BBit << 1) | CBit;
          ResLane.setBitVal(BitPosition: Bit, BitValue: Imm[Idx]);
        }
      }
      ResultElements.push_back(Elt: APValue(APSInt(ResLane, DestUnsigned)));
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13542
  case Builtin::BI__builtin_elementwise_clzg:
  case Builtin::BI__builtin_elementwise_ctzg: {
    // Elementwise count-leading/trailing-zeros. The optional second argument
    // is a per-element fallback vector used when an element is zero; with no
    // fallback, a zero element is not a constant expression and we diagnose.
    APValue SourceLHS;
    std::optional<APValue> Fallback;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS))
      return false;
    if (E->getNumArgs() > 1) {
      APValue FallbackTmp;
      if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: FallbackTmp))
        return false;
      Fallback = FallbackTmp;
    }

    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    unsigned SourceLen = SourceLHS.getVectorLength();
    SmallVector<APValue, 4> ResultElements;
    ResultElements.reserve(N: SourceLen);

    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
      APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt();
      if (!LHS) {
        // Without a fallback, a zero element is undefined
        if (!Fallback) {
          Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero)
              << /*IsTrailing=*/(E->getBuiltinCallee() ==
                                 Builtin::BI__builtin_elementwise_ctzg);
          return false;
        }
        // Zero element with a fallback: take the matching fallback element.
        ResultElements.push_back(Elt: Fallback->getVectorElt(I: EltNum));
        continue;
      }
      switch (E->getBuiltinCallee()) {
      case Builtin::BI__builtin_elementwise_clzg:
        ResultElements.push_back(Elt: APValue(
            APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), LHS.countl_zero()),
                   DestEltTy->isUnsignedIntegerOrEnumerationType())));
        break;
      case Builtin::BI__builtin_elementwise_ctzg:
        ResultElements.push_back(Elt: APValue(
            APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), LHS.countr_zero()),
                   DestEltTy->isUnsignedIntegerOrEnumerationType())));
        break;
      }
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13590
  case Builtin::BI__builtin_elementwise_fma: {
    // Elementwise fused multiply-add: Result[i] = X[i] * Y[i] + Z[i] with a
    // single rounding, honoring the active FP rounding mode.
    APValue SourceX, SourceY, SourceZ;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceX) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceY) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceZ))
      return false;

    unsigned SourceLen = SourceX.getVectorLength();
    SmallVector<APValue> ResultElements;
    ResultElements.reserve(N: SourceLen);
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);
    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
      const APFloat &X = SourceX.getVectorElt(I: EltNum).getFloat();
      const APFloat &Y = SourceY.getVectorElt(I: EltNum).getFloat();
      const APFloat &Z = SourceZ.getVectorElt(I: EltNum).getFloat();
      APFloat Result(X);
      // FP status flags are intentionally ignored in constant evaluation.
      (void)Result.fusedMultiplyAdd(Multiplicand: Y, Addend: Z, RM);
      ResultElements.push_back(Elt: APValue(Result));
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13612
  case clang::X86::BI__builtin_ia32_phaddw128:
  case clang::X86::BI__builtin_ia32_phaddw256:
  case clang::X86::BI__builtin_ia32_phaddd128:
  case clang::X86::BI__builtin_ia32_phaddd256:
  case clang::X86::BI__builtin_ia32_phaddsw128:
  case clang::X86::BI__builtin_ia32_phaddsw256:

  case clang::X86::BI__builtin_ia32_phsubw128:
  case clang::X86::BI__builtin_ia32_phsubw256:
  case clang::X86::BI__builtin_ia32_phsubd128:
  case clang::X86::BI__builtin_ia32_phsubd256:
  case clang::X86::BI__builtin_ia32_phsubsw128:
  case clang::X86::BI__builtin_ia32_phsubsw256: {
    // PHADD/PHSUB: horizontal integer add/subtract of adjacent element pairs,
    // processed per 128-bit lane. Within each lane, results from LHS pairs
    // come first, then results from RHS pairs. The *sw variants saturate
    // (sadd_sat / ssub_sat); the plain variants wrap.
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();

    unsigned NumElts = SourceLHS.getVectorLength();
    unsigned EltBits = Info.Ctx.getIntWidth(T: DestEltTy);
    unsigned EltsPerLane = 128 / EltBits;
    SmallVector<APValue, 4> ResultElements;
    ResultElements.reserve(N: NumElts);

    for (unsigned LaneStart = 0; LaneStart != NumElts;
         LaneStart += EltsPerLane) {
      // First half of the lane: combine adjacent pairs of the LHS vector.
      for (unsigned I = 0; I != EltsPerLane; I += 2) {
        APSInt LHSA = SourceLHS.getVectorElt(I: LaneStart + I).getInt();
        APSInt LHSB = SourceLHS.getVectorElt(I: LaneStart + I + 1).getInt();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_phaddw128:
        case clang::X86::BI__builtin_ia32_phaddw256:
        case clang::X86::BI__builtin_ia32_phaddd128:
        case clang::X86::BI__builtin_ia32_phaddd256: {
          APSInt Res(LHSA + LHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phaddsw128:
        case clang::X86::BI__builtin_ia32_phaddsw256: {
          APSInt Res(LHSA.sadd_sat(RHS: LHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubw128:
        case clang::X86::BI__builtin_ia32_phsubw256:
        case clang::X86::BI__builtin_ia32_phsubd128:
        case clang::X86::BI__builtin_ia32_phsubd256: {
          APSInt Res(LHSA - LHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubsw128:
        case clang::X86::BI__builtin_ia32_phsubsw256: {
          APSInt Res(LHSA.ssub_sat(RHS: LHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        }
      }
      // Second half of the lane: same operation on the RHS vector's pairs.
      for (unsigned I = 0; I != EltsPerLane; I += 2) {
        APSInt RHSA = SourceRHS.getVectorElt(I: LaneStart + I).getInt();
        APSInt RHSB = SourceRHS.getVectorElt(I: LaneStart + I + 1).getInt();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_phaddw128:
        case clang::X86::BI__builtin_ia32_phaddw256:
        case clang::X86::BI__builtin_ia32_phaddd128:
        case clang::X86::BI__builtin_ia32_phaddd256: {
          APSInt Res(RHSA + RHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phaddsw128:
        case clang::X86::BI__builtin_ia32_phaddsw256: {
          APSInt Res(RHSA.sadd_sat(RHS: RHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubw128:
        case clang::X86::BI__builtin_ia32_phsubw256:
        case clang::X86::BI__builtin_ia32_phsubd128:
        case clang::X86::BI__builtin_ia32_phsubd256: {
          APSInt Res(RHSA - RHSB, DestUnsigned);
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        case clang::X86::BI__builtin_ia32_phsubsw128:
        case clang::X86::BI__builtin_ia32_phsubsw256: {
          APSInt Res(RHSA.ssub_sat(RHS: RHSB));
          ResultElements.push_back(Elt: APValue(Res));
          break;
        }
        }
      }
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case clang::X86::BI__builtin_ia32_haddpd:
  case clang::X86::BI__builtin_ia32_haddps:
  case clang::X86::BI__builtin_ia32_haddps256:
  case clang::X86::BI__builtin_ia32_haddpd256:
  case clang::X86::BI__builtin_ia32_hsubpd:
  case clang::X86::BI__builtin_ia32_hsubps:
  case clang::X86::BI__builtin_ia32_hsubps256:
  case clang::X86::BI__builtin_ia32_hsubpd256: {
    // HADDP/HSUBP: horizontal floating-point add/subtract of adjacent pairs,
    // per 128-bit lane: LHS pair results fill the lane's first half, RHS pair
    // results the second half. Uses the active rounding mode.
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;
    unsigned NumElts = SourceLHS.getVectorLength();
    SmallVector<APValue, 4> ResultElements;
    ResultElements.reserve(N: NumElts);
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    unsigned EltBits = Info.Ctx.getTypeSize(T: DestEltTy);
    unsigned NumLanes = NumElts * EltBits / 128;
    unsigned NumElemsPerLane = NumElts / NumLanes;
    unsigned HalfElemsPerLane = NumElemsPerLane / 2;

    for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
      // First half of the lane from adjacent LHS pairs.
      for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
        APFloat LHSA = SourceLHS.getVectorElt(I: L + (2 * I) + 0).getFloat();
        APFloat LHSB = SourceLHS.getVectorElt(I: L + (2 * I) + 1).getFloat();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_haddpd:
        case clang::X86::BI__builtin_ia32_haddps:
        case clang::X86::BI__builtin_ia32_haddps256:
        case clang::X86::BI__builtin_ia32_haddpd256:
          LHSA.add(RHS: LHSB, RM);
          break;
        case clang::X86::BI__builtin_ia32_hsubpd:
        case clang::X86::BI__builtin_ia32_hsubps:
        case clang::X86::BI__builtin_ia32_hsubps256:
        case clang::X86::BI__builtin_ia32_hsubpd256:
          LHSA.subtract(RHS: LHSB, RM);
          break;
        }
        ResultElements.push_back(Elt: APValue(LHSA));
      }
      // Second half of the lane from adjacent RHS pairs.
      for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
        APFloat RHSA = SourceRHS.getVectorElt(I: L + (2 * I) + 0).getFloat();
        APFloat RHSB = SourceRHS.getVectorElt(I: L + (2 * I) + 1).getFloat();
        switch (E->getBuiltinCallee()) {
        case clang::X86::BI__builtin_ia32_haddpd:
        case clang::X86::BI__builtin_ia32_haddps:
        case clang::X86::BI__builtin_ia32_haddps256:
        case clang::X86::BI__builtin_ia32_haddpd256:
          RHSA.add(RHS: RHSB, RM);
          break;
        case clang::X86::BI__builtin_ia32_hsubpd:
        case clang::X86::BI__builtin_ia32_hsubps:
        case clang::X86::BI__builtin_ia32_hsubps256:
        case clang::X86::BI__builtin_ia32_hsubpd256:
          RHSA.subtract(RHS: RHSB, RM);
          break;
        }
        ResultElements.push_back(Elt: APValue(RHSA));
      }
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case clang::X86::BI__builtin_ia32_addsubpd:
  case clang::X86::BI__builtin_ia32_addsubps:
  case clang::X86::BI__builtin_ia32_addsubpd256:
  case clang::X86::BI__builtin_ia32_addsubps256: {
    // Addsub: alternates between subtraction and addition
    // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
    // Both operations honor the active FP rounding mode.
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;
    unsigned NumElems = SourceLHS.getVectorLength();
    SmallVector<APValue, 8> ResultElements;
    ResultElements.reserve(N: NumElems);
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);

    for (unsigned I = 0; I != NumElems; ++I) {
      APFloat LHS = SourceLHS.getVectorElt(I).getFloat();
      APFloat RHS = SourceRHS.getVectorElt(I).getFloat();
      if (I % 2 == 0) {
        // Even indices: subtract
        LHS.subtract(RHS, RM);
      } else {
        // Odd indices: add
        LHS.add(RHS, RM);
      }
      ResultElements.push_back(Elt: APValue(LHS));
    }
    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case clang::X86::BI__builtin_ia32_pclmulqdq128:
  case clang::X86::BI__builtin_ia32_pclmulqdq256:
  case clang::X86::BI__builtin_ia32_pclmulqdq512: {
    // PCLMULQDQ: carry-less multiplication of selected 64-bit halves
    // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand
    // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand
    // The same imm8 selection applies to every 128-bit lane.
    APValue SourceLHS, SourceRHS;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
      return false;

    APSInt Imm8;
    if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm8, Info))
      return false;

    // Extract bits 0 and 4 from imm8
    bool SelectUpperA = (Imm8 & 0x01) != 0;
    bool SelectUpperB = (Imm8 & 0x10) != 0;

    unsigned NumElems = SourceLHS.getVectorLength();
    SmallVector<APValue, 8> ResultElements;
    ResultElements.reserve(N: NumElems);
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();

    // Process each 128-bit lane (two 64-bit elements per lane).
    for (unsigned Lane = 0; Lane < NumElems; Lane += 2) {
      // Get the two 64-bit halves of the first operand
      APSInt A0 = SourceLHS.getVectorElt(I: Lane + 0).getInt();
      APSInt A1 = SourceLHS.getVectorElt(I: Lane + 1).getInt();
      // Get the two 64-bit halves of the second operand
      APSInt B0 = SourceRHS.getVectorElt(I: Lane + 0).getInt();
      APSInt B1 = SourceRHS.getVectorElt(I: Lane + 1).getInt();

      // Select the appropriate 64-bit values based on imm8
      APInt A = SelectUpperA ? A1 : A0;
      APInt B = SelectUpperB ? B1 : B0;

      // Extend both operands to 128 bits for carry-less multiplication
      APInt A128 = A.zext(width: 128);
      APInt B128 = B.zext(width: 128);

      // Use APIntOps::clmul for carry-less multiplication
      APInt Result = llvm::APIntOps::clmul(LHS: A128, RHS: B128);

      // Split the 128-bit result into two 64-bit halves
      APSInt ResultLow(Result.extractBits(numBits: 64, bitPosition: 0), DestUnsigned);
      APSInt ResultHigh(Result.extractBits(numBits: 64, bitPosition: 64), DestUnsigned);

      ResultElements.push_back(Elt: APValue(ResultLow));
      ResultElements.push_back(Elt: APValue(ResultHigh));
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
  case Builtin::BI__builtin_elementwise_fshl:
  case Builtin::BI__builtin_elementwise_fshr: {
    // Elementwise funnel shift: concatenate Hi:Lo per element and shift left
    // (fshl) or right (fshr) by Shift, keeping the element-width window.
    // Delegates to APIntOps::fshl/fshr for the per-element computation.
    APValue SourceHi, SourceLo, SourceShift;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceHi) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLo) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceShift))
      return false;

    // Funnel shifts are only defined here for integer element types.
    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
    if (!DestEltTy->isIntegerType())
      return false;

    unsigned SourceLen = SourceHi.getVectorLength();
    SmallVector<APValue> ResultElements;
    ResultElements.reserve(N: SourceLen);
    for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
      const APSInt &Hi = SourceHi.getVectorElt(I: EltNum).getInt();
      const APSInt &Lo = SourceLo.getVectorElt(I: EltNum).getInt();
      const APSInt &Shift = SourceShift.getVectorElt(I: EltNum).getInt();
      switch (E->getBuiltinCallee()) {
      case Builtin::BI__builtin_elementwise_fshl:
        ResultElements.push_back(Elt: APValue(
            APSInt(llvm::APIntOps::fshl(Hi, Lo, Shift), Hi.isUnsigned())));
        break;
      case Builtin::BI__builtin_elementwise_fshr:
        ResultElements.push_back(Elt: APValue(
            APSInt(llvm::APIntOps::fshr(Hi, Lo, Shift), Hi.isUnsigned())));
        break;
      }
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
13893
13894 case X86::BI__builtin_ia32_shuf_f32x4_256:
13895 case X86::BI__builtin_ia32_shuf_i32x4_256:
13896 case X86::BI__builtin_ia32_shuf_f64x2_256:
13897 case X86::BI__builtin_ia32_shuf_i64x2_256:
13898 case X86::BI__builtin_ia32_shuf_f32x4:
13899 case X86::BI__builtin_ia32_shuf_i32x4:
13900 case X86::BI__builtin_ia32_shuf_f64x2:
13901 case X86::BI__builtin_ia32_shuf_i64x2: {
13902 APValue SourceA, SourceB;
13903 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceA) ||
13904 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceB))
13905 return false;
13906
13907 APSInt Imm;
13908 if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
13909 return false;
13910
13911 // Destination and sources A, B all have the same type.
13912 unsigned NumElems = SourceA.getVectorLength();
13913 const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
13914 QualType ElemQT = VT->getElementType();
13915 unsigned ElemBits = Info.Ctx.getTypeSize(T: ElemQT);
13916 unsigned LaneBits = 128u;
13917 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
13918 unsigned NumElemsPerLane = LaneBits / ElemBits;
13919
13920 unsigned DstLen = SourceA.getVectorLength();
13921 SmallVector<APValue, 16> ResultElements;
13922 ResultElements.reserve(N: DstLen);
13923
13924 APValue R;
13925 if (!evalShuffleGeneric(
13926 Info, Call: E, Out&: R,
13927 GetSourceIndex: [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask)
13928 -> std::pair<unsigned, int> {
13929 // DstIdx determines source. ShuffleMask selects lane in source.
13930 unsigned BitsPerElem = NumLanes / 2;
13931 unsigned IndexMask = (1u << BitsPerElem) - 1;
13932 unsigned Lane = DstIdx / NumElemsPerLane;
13933 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
13934 unsigned BitIdx = BitsPerElem * Lane;
13935 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
13936 unsigned ElemInLane = DstIdx % NumElemsPerLane;
13937 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
13938 return {SrcIdx, IdxToPick};
13939 }))
13940 return false;
13941 return Success(V: R, E);
13942 }
13943
  case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
  case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
  case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
  case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
  case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
  case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi: {
    // GF2P8AFFINE(INV)QB: per-byte affine transform in GF(2^8). Each result
    // byte is A (an 8x8 bit matrix held in the matching qword) times the
    // source byte of X (optionally GF-inverted first), XORed with Imm.
    // The heavy lifting is done by the GFNIAffine helper (not visible here).

    APValue X, A;
    APSInt Imm;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: X) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: A) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
      return false;

    assert(X.isVector() && A.isVector());
    assert(X.getVectorLength() == A.getVectorLength());

    // The "inv" builtins apply the GF(2^8) inverse to X's byte before the
    // matrix multiply; the switch only needs to recognize those three IDs.
    bool IsInverse = false;
    switch (E->getBuiltinCallee()) {
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi: {
      IsInverse = true;
    }
    }

    unsigned NumBitsInByte = 8;
    unsigned NumBytesInQWord = 8;
    unsigned NumBitsInQWord = 64;
    unsigned NumBytes = A.getVectorLength();
    unsigned NumQWords = NumBytes / NumBytesInQWord;
    SmallVector<APValue, 64> Result;
    Result.reserve(N: NumBytes);

    // computing A*X + Imm
    for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
      // Extract the QWords from X, A
      APInt XQWord(NumBitsInQWord, 0);
      APInt AQWord(NumBitsInQWord, 0);
      for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
        unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
        APInt XByte = X.getVectorElt(I: Idx).getInt();
        APInt AByte = A.getVectorElt(I: Idx).getInt();
        XQWord.insertBits(SubBits: XByte, bitPosition: ByteIdx * NumBitsInByte);
        AQWord.insertBits(SubBits: AByte, bitPosition: ByteIdx * NumBitsInByte);
      }

      // Transform each byte of the qword independently against the same
      // 8x8 matrix AQWord.
      for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
        uint8_t XByte =
            XQWord.lshr(shiftAmt: ByteIdx * NumBitsInByte).getLoBits(numBits: 8).getZExtValue();
        Result.push_back(Elt: APValue(APSInt(
            APInt(8, GFNIAffine(XByte, AQword: AQWord, Imm, Inverse: IsInverse)), false)));
      }
    }

    return Success(V: APValue(Result.data(), Result.size()), E);
  }
14001
  case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
  case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
  case X86::BI__builtin_ia32_vgf2p8mulb_v64qi: {
    // GF2P8MULB: per-byte multiplication in GF(2^8), delegated to the
    // GFNIMul helper (not visible here).
    APValue A, B;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B))
      return false;

    assert(A.isVector() && B.isVector());
    assert(A.getVectorLength() == B.getVectorLength());

    unsigned NumBytes = A.getVectorLength();
    SmallVector<APValue, 64> Result;
    Result.reserve(N: NumBytes);

    for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
      uint8_t AByte = A.getVectorElt(I: ByteIdx).getInt().getZExtValue();
      uint8_t BByte = B.getVectorElt(I: ByteIdx).getInt().getZExtValue();
      Result.push_back(Elt: APValue(
          APSInt(APInt(8, GFNIMul(AByte, BByte)), /*IsUnsigned=*/false)));
    }

    return Success(V: APValue(Result.data(), Result.size()), E);
  }
14026
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256: {
    // VINSERT*: copy the destination vector, then overwrite one sub-vector
    // slot (selected by the immediate, reduced modulo the number of slots)
    // with the smaller source vector.
    APValue SourceDst, SourceSub;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceDst) ||
        !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceSub))
      return false;

    APSInt Imm;
    if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info))
      return false;

    assert(SourceDst.isVector() && SourceSub.isVector());
    unsigned DstLen = SourceDst.getVectorLength();
    unsigned SubLen = SourceSub.getVectorLength();
    assert(SubLen != 0 && DstLen != 0 && (DstLen % SubLen) == 0);
    unsigned NumLanes = DstLen / SubLen;
    // LaneIdx is the element offset of the slot being replaced; the modulo
    // mirrors the hardware's use of only the low immediate bits.
    unsigned LaneIdx = (Imm.getZExtValue() % NumLanes) * SubLen;

    SmallVector<APValue, 16> ResultElements;
    ResultElements.reserve(N: DstLen);

    for (unsigned EltNum = 0; EltNum < DstLen; ++EltNum) {
      if (EltNum >= LaneIdx && EltNum < LaneIdx + SubLen)
        ResultElements.push_back(Elt: SourceSub.getVectorElt(I: EltNum - LaneIdx));
      else
        ResultElements.push_back(Elt: SourceDst.getVectorElt(I: EltNum));
    }

    return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
  }
14071
  case clang::X86::BI__builtin_ia32_vec_set_v4hi:
  case clang::X86::BI__builtin_ia32_vec_set_v16qi:
  case clang::X86::BI__builtin_ia32_vec_set_v8hi:
  case clang::X86::BI__builtin_ia32_vec_set_v4si:
  case clang::X86::BI__builtin_ia32_vec_set_v2di:
  case clang::X86::BI__builtin_ia32_vec_set_v32qi:
  case clang::X86::BI__builtin_ia32_vec_set_v16hi:
  case clang::X86::BI__builtin_ia32_vec_set_v8si:
  case clang::X86::BI__builtin_ia32_vec_set_v4di: {
    // vec_set: return a copy of the vector with one element replaced by the
    // scalar (truncated/extended to the element type).
    APValue VecVal;
    APSInt Scalar, IndexAPS;
    if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: VecVal, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Scalar, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: IndexAPS, Info))
      return false;

    QualType ElemTy = E->getType()->castAs<VectorType>()->getElementType();
    unsigned ElemWidth = Info.Ctx.getIntWidth(T: ElemTy);
    bool ElemUnsigned = ElemTy->isUnsignedIntegerOrEnumerationType();
    Scalar.setIsUnsigned(ElemUnsigned);
    APSInt ElemAPS = Scalar.extOrTrunc(width: ElemWidth);
    APValue ElemAV(ElemAPS);

    unsigned NumElems = VecVal.getVectorLength();
    // Masking with (NumElems - 1) wraps out-of-range indices; this relies on
    // NumElems being a power of two, which holds for all these builtins.
    unsigned Index =
        static_cast<unsigned>(IndexAPS.getZExtValue() & (NumElems - 1));

    SmallVector<APValue, 4> Elems;
    Elems.reserve(N: NumElems);
    for (unsigned ElemNum = 0; ElemNum != NumElems; ++ElemNum)
      Elems.push_back(Elt: ElemNum == Index ? ElemAV : VecVal.getVectorElt(I: ElemNum));

    return Success(V: APValue(Elems.data(), NumElems), E);
  }
14106
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift: {
    // PSLLDQ: byte-wise shift left within each 16-byte (128-bit) lane.
    // Destination byte DstIdx takes the byte Shift positions below it in the
    // same lane; the first Shift bytes of each lane are zero-filled
    // (signalled to evalShuffleGeneric by a -1 source index).
    APValue R;
    if (!evalShuffleGeneric(
            Info, Call: E, Out&: R,
            GetSourceIndex: [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
              unsigned LaneBase = (DstIdx / 16) * 16;
              unsigned LaneIdx = DstIdx % 16;
              if (LaneIdx < Shift)
                return std::make_pair(x: 0, y: -1);

              return std::make_pair(
                  x: 0, y: static_cast<int>(LaneBase + LaneIdx - Shift));
            }))
      return false;
    return Success(V: R, E);
  }
14125
14126 case X86::BI__builtin_ia32_psrldqi128_byteshift:
14127 case X86::BI__builtin_ia32_psrldqi256_byteshift:
14128 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
14129 APValue R;
14130 if (!evalShuffleGeneric(
14131 Info, Call: E, Out&: R,
14132 GetSourceIndex: [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
14133 unsigned LaneBase = (DstIdx / 16) * 16;
14134 unsigned LaneIdx = DstIdx % 16;
14135 if (LaneIdx + Shift < 16)
14136 return std::make_pair(
14137 x: 0, y: static_cast<int>(LaneBase + LaneIdx + Shift));
14138
14139 return std::make_pair(x: 0, y: -1);
14140 }))
14141 return false;
14142 return Success(V: R, E);
14143 }
14144
14145 case X86::BI__builtin_ia32_palignr128:
14146 case X86::BI__builtin_ia32_palignr256:
14147 case X86::BI__builtin_ia32_palignr512: {
14148 APValue R;
14149 if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Shift) {
14150 // Default to -1 → zero-fill this destination element
14151 unsigned VecIdx = 1;
14152 int ElemIdx = -1;
14153
14154 int Lane = DstIdx / 16;
14155 int Offset = DstIdx % 16;
14156
14157 // Elements come from VecB first, then VecA after the shift boundary
14158 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
14159 if (ShiftedIdx < 16) { // from VecB
14160 ElemIdx = ShiftedIdx + (Lane * 16);
14161 } else if (ShiftedIdx < 32) { // from VecA
14162 VecIdx = 0;
14163 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
14164 }
14165
14166 return std::pair<unsigned, int>{VecIdx, ElemIdx};
14167 }))
14168 return false;
14169 return Success(V: R, E);
14170 }
14171 case X86::BI__builtin_ia32_alignd128:
14172 case X86::BI__builtin_ia32_alignd256:
14173 case X86::BI__builtin_ia32_alignd512:
14174 case X86::BI__builtin_ia32_alignq128:
14175 case X86::BI__builtin_ia32_alignq256:
14176 case X86::BI__builtin_ia32_alignq512: {
14177 APValue R;
14178 unsigned NumElems = E->getType()->castAs<VectorType>()->getNumElements();
14179 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14180 GetSourceIndex: [NumElems](unsigned DstIdx, unsigned Shift) {
14181 unsigned Imm = Shift & 0xFF;
14182 unsigned EffectiveShift = Imm & (NumElems - 1);
14183 unsigned SourcePos = DstIdx + EffectiveShift;
14184 unsigned VecIdx = SourcePos < NumElems ? 1 : 0;
14185 unsigned ElemIdx = SourcePos & (NumElems - 1);
14186
14187 return std::pair<unsigned, int>{
14188 VecIdx, static_cast<int>(ElemIdx)};
14189 }))
14190 return false;
14191 return Success(V: R, E);
14192 }
14193 case X86::BI__builtin_ia32_permvarsi256:
14194 case X86::BI__builtin_ia32_permvarsf256:
14195 case X86::BI__builtin_ia32_permvardf512:
14196 case X86::BI__builtin_ia32_permvardi512:
14197 case X86::BI__builtin_ia32_permvarhi128: {
14198 APValue R;
14199 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14200 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14201 int Offset = ShuffleMask & 0x7;
14202 return std::pair<unsigned, int>{0, Offset};
14203 }))
14204 return false;
14205 return Success(V: R, E);
14206 }
14207 case X86::BI__builtin_ia32_permvarqi128:
14208 case X86::BI__builtin_ia32_permvarhi256:
14209 case X86::BI__builtin_ia32_permvarsi512:
14210 case X86::BI__builtin_ia32_permvarsf512: {
14211 APValue R;
14212 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14213 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14214 int Offset = ShuffleMask & 0xF;
14215 return std::pair<unsigned, int>{0, Offset};
14216 }))
14217 return false;
14218 return Success(V: R, E);
14219 }
14220 case X86::BI__builtin_ia32_permvardi256:
14221 case X86::BI__builtin_ia32_permvardf256: {
14222 APValue R;
14223 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14224 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14225 int Offset = ShuffleMask & 0x3;
14226 return std::pair<unsigned, int>{0, Offset};
14227 }))
14228 return false;
14229 return Success(V: R, E);
14230 }
14231 case X86::BI__builtin_ia32_permvarqi256:
14232 case X86::BI__builtin_ia32_permvarhi512: {
14233 APValue R;
14234 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14235 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14236 int Offset = ShuffleMask & 0x1F;
14237 return std::pair<unsigned, int>{0, Offset};
14238 }))
14239 return false;
14240 return Success(V: R, E);
14241 }
14242 case X86::BI__builtin_ia32_permvarqi512: {
14243 APValue R;
14244 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14245 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14246 int Offset = ShuffleMask & 0x3F;
14247 return std::pair<unsigned, int>{0, Offset};
14248 }))
14249 return false;
14250 return Success(V: R, E);
14251 }
14252 case X86::BI__builtin_ia32_vpermi2varq128:
14253 case X86::BI__builtin_ia32_vpermi2varpd128: {
14254 APValue R;
14255 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14256 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14257 int Offset = ShuffleMask & 0x1;
14258 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
14259 return std::pair<unsigned, int>{SrcIdx, Offset};
14260 }))
14261 return false;
14262 return Success(V: R, E);
14263 }
14264 case X86::BI__builtin_ia32_vpermi2vard128:
14265 case X86::BI__builtin_ia32_vpermi2varps128:
14266 case X86::BI__builtin_ia32_vpermi2varq256:
14267 case X86::BI__builtin_ia32_vpermi2varpd256: {
14268 APValue R;
14269 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14270 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14271 int Offset = ShuffleMask & 0x3;
14272 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
14273 return std::pair<unsigned, int>{SrcIdx, Offset};
14274 }))
14275 return false;
14276 return Success(V: R, E);
14277 }
14278 case X86::BI__builtin_ia32_vpermi2varhi128:
14279 case X86::BI__builtin_ia32_vpermi2vard256:
14280 case X86::BI__builtin_ia32_vpermi2varps256:
14281 case X86::BI__builtin_ia32_vpermi2varq512:
14282 case X86::BI__builtin_ia32_vpermi2varpd512: {
14283 APValue R;
14284 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14285 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14286 int Offset = ShuffleMask & 0x7;
14287 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
14288 return std::pair<unsigned, int>{SrcIdx, Offset};
14289 }))
14290 return false;
14291 return Success(V: R, E);
14292 }
14293 case X86::BI__builtin_ia32_vpermi2varqi128:
14294 case X86::BI__builtin_ia32_vpermi2varhi256:
14295 case X86::BI__builtin_ia32_vpermi2vard512:
14296 case X86::BI__builtin_ia32_vpermi2varps512: {
14297 APValue R;
14298 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14299 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14300 int Offset = ShuffleMask & 0xF;
14301 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
14302 return std::pair<unsigned, int>{SrcIdx, Offset};
14303 }))
14304 return false;
14305 return Success(V: R, E);
14306 }
14307 case X86::BI__builtin_ia32_vpermi2varqi256:
14308 case X86::BI__builtin_ia32_vpermi2varhi512: {
14309 APValue R;
14310 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14311 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14312 int Offset = ShuffleMask & 0x1F;
14313 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
14314 return std::pair<unsigned, int>{SrcIdx, Offset};
14315 }))
14316 return false;
14317 return Success(V: R, E);
14318 }
14319 case X86::BI__builtin_ia32_vpermi2varqi512: {
14320 APValue R;
14321 if (!evalShuffleGeneric(Info, Call: E, Out&: R,
14322 GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) {
14323 int Offset = ShuffleMask & 0x3F;
14324 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
14325 return std::pair<unsigned, int>{SrcIdx, Offset};
14326 }))
14327 return false;
14328 return Success(V: R, E);
14329 }
14330
14331 case clang::X86::BI__builtin_ia32_minps:
14332 case clang::X86::BI__builtin_ia32_minpd:
14333 case clang::X86::BI__builtin_ia32_minps256:
14334 case clang::X86::BI__builtin_ia32_minpd256:
14335 case clang::X86::BI__builtin_ia32_minps512:
14336 case clang::X86::BI__builtin_ia32_minpd512:
14337 case clang::X86::BI__builtin_ia32_minph128:
14338 case clang::X86::BI__builtin_ia32_minph256:
14339 case clang::X86::BI__builtin_ia32_minph512:
14340 return EvaluateFpBinOpExpr(
14341 [](const APFloat &A, const APFloat &B,
14342 std::optional<APSInt>) -> std::optional<APFloat> {
14343 if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
14344 B.isInfinity() || B.isDenormal())
14345 return std::nullopt;
14346 if (A.isZero() && B.isZero())
14347 return B;
14348 return llvm::minimum(A, B);
14349 });
14350
14351 case clang::X86::BI__builtin_ia32_minss:
14352 case clang::X86::BI__builtin_ia32_minsd:
14353 return EvaluateFpBinOpExpr(
14354 [](const APFloat &A, const APFloat &B,
14355 std::optional<APSInt> RoundingMode) -> std::optional<APFloat> {
14356 return EvalScalarMinMaxFp(A, B, RoundingMode, /*IsMin=*/true);
14357 },
14358 /*IsScalar=*/true);
14359
14360 case clang::X86::BI__builtin_ia32_minsd_round_mask:
14361 case clang::X86::BI__builtin_ia32_minss_round_mask:
14362 case clang::X86::BI__builtin_ia32_minsh_round_mask:
14363 case clang::X86::BI__builtin_ia32_maxsd_round_mask:
14364 case clang::X86::BI__builtin_ia32_maxss_round_mask:
14365 case clang::X86::BI__builtin_ia32_maxsh_round_mask: {
14366 bool IsMin =
14367 E->getBuiltinCallee() ==
14368 clang::X86::BI__builtin_ia32_minsd_round_mask ||
14369 E->getBuiltinCallee() ==
14370 clang::X86::BI__builtin_ia32_minss_round_mask ||
14371 E->getBuiltinCallee() == clang::X86::BI__builtin_ia32_minsh_round_mask;
14372 return EvaluateScalarFpRoundMaskBinOp(
14373 [IsMin](const APFloat &A, const APFloat &B,
14374 std::optional<APSInt> RoundingMode) -> std::optional<APFloat> {
14375 return EvalScalarMinMaxFp(A, B, RoundingMode, IsMin);
14376 });
14377 }
14378
14379 case clang::X86::BI__builtin_ia32_maxps:
14380 case clang::X86::BI__builtin_ia32_maxpd:
14381 case clang::X86::BI__builtin_ia32_maxps256:
14382 case clang::X86::BI__builtin_ia32_maxpd256:
14383 case clang::X86::BI__builtin_ia32_maxps512:
14384 case clang::X86::BI__builtin_ia32_maxpd512:
14385 case clang::X86::BI__builtin_ia32_maxph128:
14386 case clang::X86::BI__builtin_ia32_maxph256:
14387 case clang::X86::BI__builtin_ia32_maxph512:
14388 return EvaluateFpBinOpExpr(
14389 [](const APFloat &A, const APFloat &B,
14390 std::optional<APSInt>) -> std::optional<APFloat> {
14391 if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() ||
14392 B.isInfinity() || B.isDenormal())
14393 return std::nullopt;
14394 if (A.isZero() && B.isZero())
14395 return B;
14396 return llvm::maximum(A, B);
14397 });
14398
14399 case clang::X86::BI__builtin_ia32_maxss:
14400 case clang::X86::BI__builtin_ia32_maxsd:
14401 return EvaluateFpBinOpExpr(
14402 [](const APFloat &A, const APFloat &B,
14403 std::optional<APSInt> RoundingMode) -> std::optional<APFloat> {
14404 return EvalScalarMinMaxFp(A, B, RoundingMode, /*IsMin=*/false);
14405 },
14406 /*IsScalar=*/true);
14407
14408 case clang::X86::BI__builtin_ia32_vcvtps2ph:
14409 case clang::X86::BI__builtin_ia32_vcvtps2ph256: {
14410 APValue SrcVec;
14411 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SrcVec))
14412 return false;
14413
14414 APSInt Imm;
14415 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: Imm, Info))
14416 return false;
14417
14418 const auto *SrcVTy = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
14419 unsigned SrcNumElems = SrcVTy->getNumElements();
14420 const auto *DstVTy = E->getType()->castAs<VectorType>();
14421 unsigned DstNumElems = DstVTy->getNumElements();
14422 QualType DstElemTy = DstVTy->getElementType();
14423
14424 const llvm::fltSemantics &HalfSem =
14425 Info.Ctx.getFloatTypeSemantics(T: Info.Ctx.HalfTy);
14426
14427 int ImmVal = Imm.getZExtValue();
14428 bool UseMXCSR = (ImmVal & 4) != 0;
14429 bool IsFPConstrained =
14430 E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()).isFPConstrained();
14431
14432 llvm::RoundingMode RM;
14433 if (!UseMXCSR) {
14434 switch (ImmVal & 3) {
14435 case 0:
14436 RM = llvm::RoundingMode::NearestTiesToEven;
14437 break;
14438 case 1:
14439 RM = llvm::RoundingMode::TowardNegative;
14440 break;
14441 case 2:
14442 RM = llvm::RoundingMode::TowardPositive;
14443 break;
14444 case 3:
14445 RM = llvm::RoundingMode::TowardZero;
14446 break;
14447 default:
14448 llvm_unreachable("Invalid immediate rounding mode");
14449 }
14450 } else {
14451 RM = llvm::RoundingMode::NearestTiesToEven;
14452 }
14453
14454 SmallVector<APValue, 8> ResultElements;
14455 ResultElements.reserve(N: DstNumElems);
14456
14457 for (unsigned I = 0; I < SrcNumElems; ++I) {
14458 APFloat SrcVal = SrcVec.getVectorElt(I).getFloat();
14459
14460 bool LostInfo;
14461 APFloat::opStatus St = SrcVal.convert(ToSemantics: HalfSem, RM, losesInfo: &LostInfo);
14462
14463 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
14464 Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_rounding);
14465 return false;
14466 }
14467
14468 APSInt DstInt(SrcVal.bitcastToAPInt(),
14469 DstElemTy->isUnsignedIntegerOrEnumerationType());
14470 ResultElements.push_back(Elt: APValue(DstInt));
14471 }
14472
14473 if (DstNumElems > SrcNumElems) {
14474 APSInt Zero = Info.Ctx.MakeIntValue(Value: 0, Type: DstElemTy);
14475 for (unsigned I = SrcNumElems; I < DstNumElems; ++I) {
14476 ResultElements.push_back(Elt: APValue(Zero));
14477 }
14478 }
14479
14480 return Success(V: ResultElements, E);
14481 }
14482 case X86::BI__builtin_ia32_vperm2f128_pd256:
14483 case X86::BI__builtin_ia32_vperm2f128_ps256:
14484 case X86::BI__builtin_ia32_vperm2f128_si256:
14485 case X86::BI__builtin_ia32_permti256: {
14486 unsigned NumElements =
14487 E->getArg(Arg: 0)->getType()->getAs<VectorType>()->getNumElements();
14488 unsigned PreservedBitsCnt = NumElements >> 2;
14489 APValue R;
14490 if (!evalShuffleGeneric(
14491 Info, Call: E, Out&: R,
14492 GetSourceIndex: [PreservedBitsCnt](unsigned DstIdx, unsigned ShuffleMask) {
14493 unsigned ControlBitsCnt = DstIdx >> PreservedBitsCnt << 2;
14494 unsigned ControlBits = ShuffleMask >> ControlBitsCnt;
14495
14496 if (ControlBits & 0b1000)
14497 return std::make_pair(x: 0u, y: -1);
14498
14499 unsigned SrcVecIdx = (ControlBits & 0b10) >> 1;
14500 unsigned PreservedBitsMask = (1 << PreservedBitsCnt) - 1;
14501 int SrcIdx = ((ControlBits & 0b1) << PreservedBitsCnt) |
14502 (DstIdx & PreservedBitsMask);
14503 return std::make_pair(x&: SrcVecIdx, y&: SrcIdx);
14504 }))
14505 return false;
14506 return Success(V: R, E);
14507 }
14508 }
14509}
14510
14511bool VectorExprEvaluator::VisitConvertVectorExpr(const ConvertVectorExpr *E) {
14512 APValue Source;
14513 QualType SourceVecType = E->getSrcExpr()->getType();
14514 if (!EvaluateAsRValue(Info, E: E->getSrcExpr(), Result&: Source))
14515 return false;
14516
14517 QualType DestTy = E->getType()->castAs<VectorType>()->getElementType();
14518 QualType SourceTy = SourceVecType->castAs<VectorType>()->getElementType();
14519
14520 const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
14521
14522 auto SourceLen = Source.getVectorLength();
14523 SmallVector<APValue, 4> ResultElements;
14524 ResultElements.reserve(N: SourceLen);
14525 for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) {
14526 APValue Elt;
14527 if (!handleVectorElementCast(Info, FPO, E, SourceTy, DestTy,
14528 Original: Source.getVectorElt(I: EltNum), Result&: Elt))
14529 return false;
14530 ResultElements.push_back(Elt: std::move(Elt));
14531 }
14532
14533 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
14534}
14535
14536static bool handleVectorShuffle(EvalInfo &Info, const ShuffleVectorExpr *E,
14537 QualType ElemType, APValue const &VecVal1,
14538 APValue const &VecVal2, unsigned EltNum,
14539 APValue &Result) {
14540 unsigned const TotalElementsInInputVector1 = VecVal1.getVectorLength();
14541 unsigned const TotalElementsInInputVector2 = VecVal2.getVectorLength();
14542
14543 APSInt IndexVal = E->getShuffleMaskIdx(N: EltNum);
14544 int64_t index = IndexVal.getExtValue();
14545 // The spec says that -1 should be treated as undef for optimizations,
14546 // but in constexpr we'd have to produce an APValue::Indeterminate,
14547 // which is prohibited from being a top-level constant value. Emit a
14548 // diagnostic instead.
14549 if (index == -1) {
14550 Info.FFDiag(
14551 E, DiagId: diag::err_shufflevector_minus_one_is_undefined_behavior_constexpr)
14552 << EltNum;
14553 return false;
14554 }
14555
14556 if (index < 0 ||
14557 index >= TotalElementsInInputVector1 + TotalElementsInInputVector2)
14558 llvm_unreachable("Out of bounds shuffle index");
14559
14560 if (index >= TotalElementsInInputVector1)
14561 Result = VecVal2.getVectorElt(I: index - TotalElementsInInputVector1);
14562 else
14563 Result = VecVal1.getVectorElt(I: index);
14564 return true;
14565}
14566
14567bool VectorExprEvaluator::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
14568 // FIXME: Unary shuffle with mask not currently supported.
14569 if (E->getNumSubExprs() == 2)
14570 return Error(E);
14571 APValue VecVal1;
14572 const Expr *Vec1 = E->getExpr(Index: 0);
14573 if (!EvaluateAsRValue(Info, E: Vec1, Result&: VecVal1))
14574 return false;
14575 APValue VecVal2;
14576 const Expr *Vec2 = E->getExpr(Index: 1);
14577 if (!EvaluateAsRValue(Info, E: Vec2, Result&: VecVal2))
14578 return false;
14579
14580 VectorType const *DestVecTy = E->getType()->castAs<VectorType>();
14581 QualType DestElTy = DestVecTy->getElementType();
14582
14583 auto TotalElementsInOutputVector = DestVecTy->getNumElements();
14584
14585 SmallVector<APValue, 4> ResultElements;
14586 ResultElements.reserve(N: TotalElementsInOutputVector);
14587 for (unsigned EltNum = 0; EltNum < TotalElementsInOutputVector; ++EltNum) {
14588 APValue Elt;
14589 if (!handleVectorShuffle(Info, E, ElemType: DestElTy, VecVal1, VecVal2, EltNum, Result&: Elt))
14590 return false;
14591 ResultElements.push_back(Elt: std::move(Elt));
14592 }
14593
14594 return Success(V: APValue(ResultElements.data(), ResultElements.size()), E);
14595}
14596
14597//===----------------------------------------------------------------------===//
14598// Array Evaluation
14599//===----------------------------------------------------------------------===//
14600
namespace {
  /// Evaluator for prvalue expressions of array type.
  ///
  /// 'This' denotes the array object being initialized; 'Result' receives
  /// the evaluated array APValue.
  class ArrayExprEvaluator
    : public ExprEvaluatorBase<ArrayExprEvaluator> {
    const LValue &This;   // The array object whose initializer is evaluated.
    APValue &Result;      // Destination for the evaluated array value.
  public:

    ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
      : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}

    /// Record an already-computed array value as the evaluation result.
    bool Success(const APValue &V, const Expr *E) {
      assert(V.isArray() && "expected array");
      Result = V;
      return true;
    }

    /// Zero-initialize the array: create an uninitialized array APValue and
    /// evaluate an ImplicitValueInitExpr into its filler slot so every
    /// element reads as zero-initialized.
    bool ZeroInitialization(const Expr *E) {
      const ConstantArrayType *CAT =
          Info.Ctx.getAsConstantArrayType(T: E->getType());
      if (!CAT) {
        if (E->getType()->isIncompleteArrayType()) {
          // We can be asked to zero-initialize a flexible array member; this
          // is represented as an ImplicitValueInitExpr of incomplete array
          // type. In this case, the array has zero elements.
          Result = APValue(APValue::UninitArray(), 0, 0);
          return true;
        }
        // FIXME: We could handle VLAs here.
        return Error(E);
      }

      Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize());
      // If there is no filler slot (zero-length array), nothing to do.
      if (!Result.hasArrayFiller())
        return true;

      // Zero-initialize all elements.
      LValue Subobject = This;
      Subobject.addArray(Info, E, CAT);
      ImplicitValueInitExpr VIE(CAT->getElementType());
      return EvaluateInPlace(Result&: Result.getArrayFiller(), Info, This: Subobject, E: &VIE);
    }

    // Forward calls with 'This' as the result slot so the callee can
    // construct the returned array in place.
    bool VisitCallExpr(const CallExpr *E) {
      return handleCallExpr(E, Result, ResultSlot: &This);
    }
    bool VisitCastExpr(const CastExpr *E);
    bool VisitInitListExpr(const InitListExpr *E,
                           QualType AllocType = QualType());
    bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E);
    bool VisitCXXConstructExpr(const CXXConstructExpr *E,
                               const LValue &Subobject,
                               APValue *Value, QualType Type);
    // A string literal initializer expands directly into the array's
    // elements (optionally using AllocType for array new-expressions).
    bool VisitStringLiteral(const StringLiteral *E,
                            QualType AllocType = QualType()) {
      expandStringLiteral(Info, S: E, Result, AllocType);
      return true;
    }
    bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
    bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
                                         ArrayRef<Expr *> Args,
                                         const Expr *ArrayFiller,
                                         QualType AllocType = QualType());
  };
} // end anonymous namespace
14666
14667static bool EvaluateArray(const Expr *E, const LValue &This,
14668 APValue &Result, EvalInfo &Info) {
14669 assert(!E->isValueDependent());
14670 assert(E->isPRValue() && E->getType()->isArrayType() &&
14671 "not an array prvalue");
14672 return ArrayExprEvaluator(Info, This, Result).Visit(S: E);
14673}
14674
14675static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
14676 APValue &Result, const InitListExpr *ILE,
14677 QualType AllocType) {
14678 assert(!ILE->isValueDependent());
14679 assert(ILE->isPRValue() && ILE->getType()->isArrayType() &&
14680 "not an array prvalue");
14681 return ArrayExprEvaluator(Info, This, Result)
14682 .VisitInitListExpr(E: ILE, AllocType);
14683}
14684
14685static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
14686 APValue &Result,
14687 const CXXConstructExpr *CCE,
14688 QualType AllocType) {
14689 assert(!CCE->isValueDependent());
14690 assert(CCE->isPRValue() && CCE->getType()->isArrayType() &&
14691 "not an array prvalue");
14692 return ArrayExprEvaluator(Info, This, Result)
14693 .VisitCXXConstructExpr(E: CCE, Subobject: This, Value: &Result, Type: AllocType);
14694}
14695
14696// Return true iff the given array filler may depend on the element index.
14697static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
14698 // For now, just allow non-class value-initialization and initialization
14699 // lists comprised of them.
14700 if (isa<ImplicitValueInitExpr>(Val: FillerExpr))
14701 return false;
14702 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: FillerExpr)) {
14703 for (unsigned I = 0, E = ILE->getNumInits(); I != E; ++I) {
14704 if (MaybeElementDependentArrayFiller(FillerExpr: ILE->getInit(Init: I)))
14705 return true;
14706 }
14707
14708 if (ILE->hasArrayFiller() &&
14709 MaybeElementDependentArrayFiller(FillerExpr: ILE->getArrayFiller()))
14710 return true;
14711
14712 return false;
14713 }
14714 return true;
14715}
14716
14717bool ArrayExprEvaluator::VisitCastExpr(const CastExpr *E) {
14718 const Expr *SE = E->getSubExpr();
14719
14720 switch (E->getCastKind()) {
14721 default:
14722 return ExprEvaluatorBaseTy::VisitCastExpr(E);
14723 case CK_HLSLAggregateSplatCast: {
14724 APValue Val;
14725 QualType ValTy;
14726
14727 if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy))
14728 return false;
14729
14730 unsigned NEls = elementwiseSize(Info, BaseTy: E->getType());
14731
14732 SmallVector<APValue> SplatEls(NEls, Val);
14733 SmallVector<QualType> SplatType(NEls, ValTy);
14734
14735 // cast the elements
14736 const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
14737 if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SplatEls,
14738 ElTypes&: SplatType))
14739 return false;
14740
14741 return true;
14742 }
14743 case CK_HLSLElementwiseCast: {
14744 SmallVector<APValue> SrcEls;
14745 SmallVector<QualType> SrcTypes;
14746
14747 if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals&: SrcEls, SrcTypes))
14748 return false;
14749
14750 // cast the elements
14751 const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
14752 if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SrcEls,
14753 ElTypes&: SrcTypes))
14754 return false;
14755 return true;
14756 }
14757 }
14758}
14759
14760bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
14761 QualType AllocType) {
14762 const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
14763 T: AllocType.isNull() ? E->getType() : AllocType);
14764 if (!CAT)
14765 return Error(E);
14766
14767 // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
14768 // an appropriately-typed string literal enclosed in braces.
14769 if (E->isStringLiteralInit()) {
14770 auto *SL = dyn_cast<StringLiteral>(Val: E->getInit(Init: 0)->IgnoreParenImpCasts());
14771 // FIXME: Support ObjCEncodeExpr here once we support it in
14772 // ArrayExprEvaluator generally.
14773 if (!SL)
14774 return Error(E);
14775 return VisitStringLiteral(E: SL, AllocType);
14776 }
14777 // Any other transparent list init will need proper handling of the
14778 // AllocType; we can't just recurse to the inner initializer.
14779 assert(!E->isTransparent() &&
14780 "transparent array list initialization is not string literal init?");
14781
14782 return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->inits(), ArrayFiller: E->getArrayFiller(),
14783 AllocType);
14784}
14785
/// Evaluate a braced or parenthesized list initializer for an array.
///
/// \param ExprToVisit  The InitListExpr / CXXParenListInitExpr being
///                     evaluated (used for designators and diagnostics).
/// \param Args         The explicit initializer expressions.
/// \param ArrayFiller  Initializer for trailing elements with no explicit
///                     initializer; may need per-element evaluation if it
///                     can depend on the element index.
/// \param AllocType    Overriding array type for array new-expressions, or
///                     null to use ExprToVisit's own type.
bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr(
    const Expr *ExprToVisit, ArrayRef<Expr *> Args, const Expr *ArrayFiller,
    QualType AllocType) {
  const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
      T: AllocType.isNull() ? ExprToVisit->getType() : AllocType);

  bool Success = true;

  assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) &&
         "zero-initialized array shouldn't have any initialized elts");
  // If Result was previously zero-initialized, save its filler so the zero
  // values can be reinstated after Result is replaced below.
  APValue Filler;
  if (Result.isArray() && Result.hasArrayFiller())
    Filler = Result.getArrayFiller();

  unsigned NumEltsToInit = Args.size();
  unsigned NumElts = CAT->getZExtSize();

  // If the initializer might depend on the array index, run it for each
  // array element.
  if (NumEltsToInit != NumElts &&
      MaybeElementDependentArrayFiller(FillerExpr: ArrayFiller)) {
    NumEltsToInit = NumElts;
  } else {
    // A #embed initializer counts as one Arg but expands to several data
    // elements; widen the explicit count accordingly (capped at NumElts).
    for (auto *Init : Args) {
      if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts()))
        NumEltsToInit += EmbedS->getDataElementCount() - 1;
    }
    if (NumEltsToInit > NumElts)
      NumEltsToInit = NumElts;
  }

  LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: "
                          << NumEltsToInit << ".\n");

  Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts);

  // If the array was previously zero-initialized, preserve the
  // zero-initialized values.
  if (Filler.hasValue()) {
    for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I)
      Result.getArrayInitializedElt(I) = Filler;
    if (Result.hasArrayFiller())
      Result.getArrayFiller() = Filler;
  }

  LValue Subobject = This;
  Subobject.addArray(Info, E: ExprToVisit, CAT);
  // Evaluate one initializer into the element at ArrayIndex, then advance the
  // subobject designator to the next element. On failure, keep going (with
  // Success = false) if diagnostics are still being collected.
  auto Eval = [&](const Expr *Init, unsigned ArrayIndex) {
    if (Init->isValueDependent())
      return EvaluateDependentExpr(E: Init, Info);

    if (!EvaluateInPlace(Result&: Result.getArrayInitializedElt(I: ArrayIndex), Info,
                         This: Subobject, E: Init) ||
        !HandleLValueArrayAdjustment(Info, E: Init, LVal&: Subobject,
                                     EltTy: CAT->getElementType(), Adjustment: 1)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }
    return true;
  };
  unsigned ArrayIndex = 0;
  QualType DestTy = CAT->getElementType();
  APSInt Value(Info.Ctx.getTypeSize(T: DestTy), DestTy->isUnsignedIntegerType());
  for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
    // Past the explicit initializers, the (index-dependent) filler is
    // evaluated once per remaining element.
    const Expr *Init = Index < Args.size() ? Args[Index] : ArrayFiller;
    // A previous #embed expansion may already have filled the array.
    if (ArrayIndex >= NumEltsToInit)
      break;
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) {
      // Expand the embedded data directly: each code unit becomes one array
      // element, converted to the destination integer or floating type.
      StringLiteral *SL = EmbedS->getDataStringLiteral();
      for (unsigned I = EmbedS->getStartingElementPos(),
                    N = EmbedS->getDataElementCount();
           I != EmbedS->getStartingElementPos() + N; ++I) {
        Value = SL->getCodeUnit(i: I);
        if (DestTy->isIntegerType()) {
          Result.getArrayInitializedElt(I: ArrayIndex) = APValue(Value);
        } else {
          assert(DestTy->isFloatingType() && "unexpected type");
          const FPOptions FPO =
              Init->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
          APFloat FValue(0.0);
          if (!HandleIntToFloatCast(Info, E: Init, FPO, SrcType: EmbedS->getType(), Value,
                                    DestType: DestTy, Result&: FValue))
            return false;
          Result.getArrayInitializedElt(I: ArrayIndex) = APValue(FValue);
        }
        ArrayIndex++;
      }
    } else {
      if (!Eval(Init, ArrayIndex))
        return false;
      ++ArrayIndex;
    }
  }

  if (!Result.hasArrayFiller())
    return Success;

  // If we get here, we have a trivial filler, which we can just evaluate
  // once and splat over the rest of the array elements.
  assert(ArrayFiller && "no array filler for incomplete init list");
  return EvaluateInPlace(Result&: Result.getArrayFiller(), Info, This: Subobject,
                         E: ArrayFiller) &&
         Success;
}
14891
/// Evaluate an ArrayInitLoopExpr: initialize each element of the destination
/// array by evaluating the sub-expression once per element index.
bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
  LValue CommonLV;
  // Evaluate the shared (common) sub-expression once up front into a
  // full-expression-scoped temporary; per-element evaluation refers back
  // to it rather than re-evaluating it.
  if (E->getCommonExpr() &&
      !Evaluate(Result&: Info.CurrentCall->createTemporary(
                    Key: E->getCommonExpr(),
                    T: getStorageType(Ctx: Info.Ctx, E: E->getCommonExpr()),
                    Scope: ScopeKind::FullExpression, LV&: CommonLV),
                Info, E: E->getCommonExpr()->getSourceExpr()))
    return false;

  auto *CAT = cast<ConstantArrayType>(Val: E->getType()->castAsArrayTypeUnsafe());

  uint64_t Elements = CAT->getZExtSize();
  Result = APValue(APValue::UninitArray(), Elements, Elements);

  LValue Subobject = This;
  Subobject.addArray(Info, E, CAT);

  bool Success = true;
  for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) {
    // C++ [class.temporary]/5
    // There are four contexts in which temporaries are destroyed at a different
    // point than the end of the full-expression. [...] The second context is
    // when a copy constructor is called to copy an element of an array while
    // the entire array is copied [...]. In either case, if the constructor has
    // one or more default arguments, the destruction of every temporary created
    // in a default argument is sequenced before the construction of the next
    // array element, if any.
    FullExpressionRAII Scope(Info);

    // Evaluate this element, then step the designator to the next one. On
    // failure, continue only if diagnostics are still being collected.
    if (!EvaluateInPlace(Result&: Result.getArrayInitializedElt(I: Index),
                         Info, This: Subobject, E: E->getSubExpr()) ||
        !HandleLValueArrayAdjustment(Info, E, LVal&: Subobject,
                                     EltTy: CAT->getElementType(), Adjustment: 1)) {
      if (!Info.noteFailure())
        return false;
      Success = false;
    }

    // Make sure we run the destructors too.
    Scope.destroy();
  }

  return Success;
}
14937
/// Evaluate a constructor call producing an array: delegate to the
/// subobject-aware overload, targeting the full array object 'This'.
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  return VisitCXXConstructExpr(E, Subobject: This, Value: &Result, Type: E->getType());
}
14941
/// Evaluate a constructor call that initializes an array (or, recursively, a
/// subarray). Each array dimension is peeled off per recursion level until a
/// record type is reached, which is handed to RecordExprEvaluator.
///
/// \param Subobject lvalue designating the (sub)object being constructed.
/// \param Value the APValue to construct into; it may already hold a value if
///        zero-initialization ran before this call.
/// \param Type the type of the subobject at this recursion level.
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
                                               const LValue &Subobject,
                                               APValue *Value,
                                               QualType Type) {
  // A pre-existing value indicates prior zero-initialization.
  bool HadZeroInit = Value->hasValue();

  if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T: Type)) {
    unsigned FinalSize = CAT->getZExtSize();

    // Preserve the array filler if we had prior zero-initialization.
    APValue Filler =
        HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
                                               : APValue();

    *Value = APValue(APValue::UninitArray(), 0, FinalSize);
    if (FinalSize == 0)
      return true;

    bool HasTrivialConstructor = CheckTrivialDefaultConstructor(
        Info, Loc: E->getExprLoc(), CD: E->getConstructor(),
        IsValueInitialization: E->requiresZeroInitialization());
    LValue ArrayElt = Subobject;
    ArrayElt.addArray(Info, E, CAT);
    // We do the whole initialization in two passes, first for just one element,
    // then for the whole array. It's possible we may find out we can't do const
    // init in the first pass, in which case we avoid allocating a potentially
    // large array. We don't do more passes because expanding array requires
    // copying the data, which is wasteful.
    for (const unsigned N : {1u, FinalSize}) {
      unsigned OldElts = Value->getArrayInitializedElts();
      if (OldElts == N)
        break;

      // Expand the array to appropriate size.
      APValue NewValue(APValue::UninitArray(), N, FinalSize);
      for (unsigned I = 0; I < OldElts; ++I)
        NewValue.getArrayInitializedElt(I).swap(
            RHS&: Value->getArrayInitializedElt(I));
      Value->swap(RHS&: NewValue);

      // Seed the newly exposed elements with the saved zero-init filler.
      if (HadZeroInit)
        for (unsigned I = OldElts; I < N; ++I)
          Value->getArrayInitializedElt(I) = Filler;

      if (HasTrivialConstructor && N == FinalSize && FinalSize != 1) {
        // If we have a trivial constructor, only evaluate it once and copy
        // the result into all the array elements.
        APValue &FirstResult = Value->getArrayInitializedElt(I: 0);
        for (unsigned I = OldElts; I < FinalSize; ++I)
          Value->getArrayInitializedElt(I) = FirstResult;
      } else {
        for (unsigned I = OldElts; I < N; ++I) {
          if (!VisitCXXConstructExpr(E, Subobject: ArrayElt,
                                     Value: &Value->getArrayInitializedElt(I),
                                     Type: CAT->getElementType()) ||
              !HandleLValueArrayAdjustment(Info, E, LVal&: ArrayElt,
                                           EltTy: CAT->getElementType(), Adjustment: 1))
            return false;
          // When checking for const initialization any diagnostic is
          // considered an error.
          if (Info.EvalStatus.Diag && !Info.EvalStatus.Diag->empty() &&
              !Info.keepEvaluatingAfterFailure())
            return false;
        }
      }
    }

    return true;
  }

  // Not an array: this must be the element type, which has to be a record.
  if (!Type->isRecordType())
    return Error(E);

  return RecordExprEvaluator(Info, Subobject, *Value)
      .VisitCXXConstructExpr(E, T: Type);
}
15018
15019bool ArrayExprEvaluator::VisitCXXParenListInitExpr(
15020 const CXXParenListInitExpr *E) {
15021 assert(E->getType()->isConstantArrayType() &&
15022 "Expression result is not a constant array type");
15023
15024 return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs(),
15025 ArrayFiller: E->getArrayFiller());
15026}
15027
15028//===----------------------------------------------------------------------===//
15029// Integer Evaluation
15030//
15031// As a GNU extension, we support casting pointers to sufficiently-wide integer
15032// types and back in constant folding. Integer values are thus represented
15033// either as an integer-valued APValue, or as an lvalue-valued APValue.
15034//===----------------------------------------------------------------------===//
15035
namespace {
/// Evaluator for rvalue expressions of integral or enumeration type.
///
/// As a GNU extension the result may also be a pointer represented as an
/// lvalue-kind APValue (see EvaluateIntegerOrLValue); strict integer clients
/// reject that form in EvaluateInteger.
class IntExprEvaluator
  : public ExprEvaluatorBase<IntExprEvaluator> {
  // Destination for the evaluated value.
  APValue &Result;
public:
  IntExprEvaluator(EvalInfo &info, APValue &result)
    : ExprEvaluatorBaseTy(info), Result(result) {}

  // Record a signed/unsigned integer result; asserts that the value's
  // signedness and bit-width agree with E's type.
  bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
    assert(E->getType()->isIntegralOrEnumerationType() &&
           "Invalid evaluation result.");
    assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
           "Invalid evaluation result.");
    assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
           "Invalid evaluation result.");
    Result = APValue(SI);
    return true;
  }
  bool Success(const llvm::APSInt &SI, const Expr *E) {
    return Success(SI, E, Result);
  }

  // Record a signless APInt result, adopting the signedness of E's type.
  bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
    assert(E->getType()->isIntegralOrEnumerationType() &&
           "Invalid evaluation result.");
    assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
           "Invalid evaluation result.");
    Result = APValue(APSInt(I));
    Result.getInt().setIsUnsigned(
        E->getType()->isUnsignedIntegerOrEnumerationType());
    return true;
  }
  bool Success(const llvm::APInt &I, const Expr *E) {
    return Success(I, E, Result);
  }

  // Record a result given as a raw 64-bit value, converted to E's type.
  bool Success(uint64_t Value, const Expr *E, APValue &Result) {
    assert(E->getType()->isIntegralOrEnumerationType() &&
           "Invalid evaluation result.");
    Result = APValue(Info.Ctx.MakeIntValue(Value, Type: E->getType()));
    return true;
  }
  bool Success(uint64_t Value, const Expr *E) {
    return Success(Value, E, Result);
  }

  // Convenience overload for byte-size results.
  bool Success(CharUnits Size, const Expr *E) {
    return Success(Value: Size.getQuantity(), E);
  }

  bool Success(const APValue &V, const Expr *E) {
    // C++23 [expr.const]p8 If we have a variable that is unknown reference or
    // pointer allow further evaluation of the value.
    if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate() ||
        V.allowConstexprUnknown()) {
      Result = V;
      return true;
    }
    return Success(SI: V.getInt(), E);
  }

  // Integral zero-initialization is just the value 0.
  bool ZeroInitialization(const Expr *E) { return Success(Value: 0, E); }

  friend std::optional<bool> EvaluateBuiltinIsWithinLifetime(IntExprEvaluator &,
                                                             const CallExpr *);

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitIntegerLiteral(const IntegerLiteral *E) {
    return Success(I: E->getValue(), E);
  }
  bool VisitCharacterLiteral(const CharacterLiteral *E) {
    return Success(Value: E->getValue(), E);
  }

  bool CheckReferencedDecl(const Expr *E, const Decl *D);
  bool VisitDeclRefExpr(const DeclRefExpr *E) {
    // Fast path for enumerators; fall back to the generic handling otherwise.
    if (CheckReferencedDecl(E, D: E->getDecl()))
      return true;

    return ExprEvaluatorBaseTy::VisitDeclRefExpr(S: E);
  }
  bool VisitMemberExpr(const MemberExpr *E) {
    if (CheckReferencedDecl(E, D: E->getMemberDecl())) {
      // The base is not needed for the value, but is still visited so that
      // its side effects and diagnostics are accounted for.
      VisitIgnoredBaseExpression(E: E->getBase());
      return true;
    }

    return ExprEvaluatorBaseTy::VisitMemberExpr(E);
  }

  bool VisitCallExpr(const CallExpr *E);
  bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitOffsetOfExpr(const OffsetOfExpr *E);
  bool VisitUnaryOperator(const UnaryOperator *E);

  bool VisitCastExpr(const CastExpr* E);
  bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);

  bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
    if (Info.ArrayInitIndex == uint64_t(-1)) {
      // We were asked to evaluate this subexpression independent of the
      // enclosing ArrayInitLoopExpr. We can't do that.
      Info.FFDiag(E);
      return false;
    }
    return Success(Value: Info.ArrayInitIndex, E);
  }

  // Note, GNU defines __null as an integer, not a pointer.
  bool VisitGNUNullExpr(const GNUNullExpr *E) {
    return ZeroInitialization(E);
  }

  bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
    // A trait result is stored either as a packed boolean or as a full
    // APValue; an absent APValue means the trait could not be evaluated.
    if (E->isStoredAsBoolean())
      return Success(Value: E->getBoolValue(), E);
    if (E->getAPValue().isAbsent())
      return false;
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return Success(SI: E->getAPValue().getInt(), E);
  }

  bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return Success(Value: E->getValue(), E);
  }

  bool VisitOpenACCAsteriskSizeExpr(const OpenACCAsteriskSizeExpr *E) {
    // This should not be evaluated during constant expr evaluation, as it
    // should always be in an unevaluated context (the args list of a 'gang' or
    // 'tile' clause).
    return Error(E);
  }

  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);

  bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
  bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
  bool VisitSourceLocExpr(const SourceLocExpr *E);
  bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E);
  bool VisitRequiresExpr(const RequiresExpr *E);
  // FIXME: Missing: array subscript of vector, member of vector
};

/// Evaluator for rvalue expressions of fixed-point type.
class FixedPointExprEvaluator
    : public ExprEvaluatorBase<FixedPointExprEvaluator> {
  // Destination for the evaluated value.
  APValue &Result;

  public:
  FixedPointExprEvaluator(EvalInfo &info, APValue &result)
      : ExprEvaluatorBaseTy(info), Result(result) {}

  // Wrap a raw bit pattern in E's fixed-point semantics.
  bool Success(const llvm::APInt &I, const Expr *E) {
    return Success(
        V: APFixedPoint(I, Info.Ctx.getFixedPointSemantics(Ty: E->getType())), E);
  }

  // Wrap a raw 64-bit value in E's fixed-point semantics.
  bool Success(uint64_t Value, const Expr *E) {
    return Success(
        V: APFixedPoint(Value, Info.Ctx.getFixedPointSemantics(Ty: E->getType())), E);
  }

  bool Success(const APValue &V, const Expr *E) {
    return Success(V: V.getFixedPoint(), E);
  }

  // All Success overloads funnel through here; asserts type and width
  // consistency before storing the result.
  bool Success(const APFixedPoint &V, const Expr *E) {
    assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
    assert(V.getWidth() == Info.Ctx.getIntWidth(E->getType()) &&
           "Invalid evaluation result.");
    Result = APValue(V);
    return true;
  }

  bool ZeroInitialization(const Expr *E) {
    return Success(Value: 0, E);
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Success(I: E->getValue(), E);
  }

  bool VisitCastExpr(const CastExpr *E);
  bool VisitUnaryOperator(const UnaryOperator *E);
  bool VisitBinaryOperator(const BinaryOperator *E);
};
} // end anonymous namespace
15243
15244/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
15245/// produce either the integer value or a pointer.
15246///
15247/// GCC has a heinous extension which folds casts between pointer types and
15248/// pointer-sized integral types. We support this by allowing the evaluation of
15249/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
15250/// Some simple arithmetic on such values is supported (they are treated much
15251/// like char*).
15252static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
15253 EvalInfo &Info) {
15254 assert(!E->isValueDependent());
15255 assert(E->isPRValue() && E->getType()->isIntegralOrEnumerationType());
15256 return IntExprEvaluator(Info, Result).Visit(S: E);
15257}
15258
15259static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
15260 assert(!E->isValueDependent());
15261 APValue Val;
15262 if (!EvaluateIntegerOrLValue(E, Result&: Val, Info))
15263 return false;
15264 if (!Val.isInt()) {
15265 // FIXME: It would be better to produce the diagnostic for casting
15266 // a pointer to an integer.
15267 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
15268 return false;
15269 }
15270 Result = Val.getInt();
15271 return true;
15272}
15273
15274bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) {
15275 APValue Evaluated = E->EvaluateInContext(
15276 Ctx: Info.Ctx, DefaultExpr: Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
15277 return Success(V: Evaluated, E);
15278}
15279
15280static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result,
15281 EvalInfo &Info) {
15282 assert(!E->isValueDependent());
15283 if (E->getType()->isFixedPointType()) {
15284 APValue Val;
15285 if (!FixedPointExprEvaluator(Info, Val).Visit(S: E))
15286 return false;
15287 if (!Val.isFixedPoint())
15288 return false;
15289
15290 Result = Val.getFixedPoint();
15291 return true;
15292 }
15293 return false;
15294}
15295
15296static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
15297 EvalInfo &Info) {
15298 assert(!E->isValueDependent());
15299 if (E->getType()->isIntegerType()) {
15300 auto FXSema = Info.Ctx.getFixedPointSemantics(Ty: E->getType());
15301 APSInt Val;
15302 if (!EvaluateInteger(E, Result&: Val, Info))
15303 return false;
15304 Result = APFixedPoint(Val, FXSema);
15305 return true;
15306 } else if (E->getType()->isFixedPointType()) {
15307 return EvaluateFixedPoint(E, Result, Info);
15308 }
15309 return false;
15310}
15311
15312/// Check whether the given declaration can be directly converted to an integral
15313/// rvalue. If not, no diagnostic is produced; there are other things we can
15314/// try.
15315bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
15316 // Enums are integer constant exprs.
15317 if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(Val: D)) {
15318 // Check for signedness/width mismatches between E type and ECD value.
15319 bool SameSign = (ECD->getInitVal().isSigned()
15320 == E->getType()->isSignedIntegerOrEnumerationType());
15321 bool SameWidth = (ECD->getInitVal().getBitWidth()
15322 == Info.Ctx.getIntWidth(T: E->getType()));
15323 if (SameSign && SameWidth)
15324 return Success(SI: ECD->getInitVal(), E);
15325 else {
15326 // Get rid of mismatch (otherwise Success assertions will fail)
15327 // by computing a new value matching the type of E.
15328 llvm::APSInt Val = ECD->getInitVal();
15329 if (!SameSign)
15330 Val.setIsSigned(!ECD->getInitVal().isSigned());
15331 if (!SameWidth)
15332 Val = Val.extOrTrunc(width: Info.Ctx.getIntWidth(T: E->getType()));
15333 return Success(SI: Val, E);
15334 }
15335 }
15336 return false;
15337}
15338
/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// as GCC.
///
/// Classifies the canonical form of \p T into one of GCC's type-class codes.
/// The .inc/.def includes expand to case labels covering every dependent,
/// non-canonical, and builtin type, so the switches below are exhaustive.
GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
                                         const LangOptions &LangOpts) {
  assert(!T->isDependentType() && "unexpected dependent type");

  QualType CanTy = T.getCanonicalType();

  switch (CanTy->getTypeClass()) {
#define TYPE(ID, BASE)
#define DEPENDENT_TYPE(ID, BASE) case Type::ID:
#define NON_CANONICAL_TYPE(ID, BASE) case Type::ID:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(ID, BASE) case Type::ID:
#include "clang/AST/TypeNodes.inc"
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("unexpected non-canonical or dependent type");

  case Type::Builtin:
    // Builtin types get a nested classification of their own; the .def
    // expansions below handle all signed, floating, and placeholder kinds.
    switch (cast<BuiltinType>(Val&: CanTy)->getKind()) {
#define BUILTIN_TYPE(ID, SINGLETON_ID)
#define SIGNED_TYPE(ID, SINGLETON_ID) \
    case BuiltinType::ID: return GCCTypeClass::Integer;
#define FLOATING_TYPE(ID, SINGLETON_ID) \
    case BuiltinType::ID: return GCCTypeClass::RealFloat;
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \
    case BuiltinType::ID: break;
#include "clang/AST/BuiltinTypes.def"
    case BuiltinType::Void:
      return GCCTypeClass::Void;

    case BuiltinType::Bool:
      return GCCTypeClass::Bool;

    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::UShort:
    case BuiltinType::UInt:
    case BuiltinType::ULong:
    case BuiltinType::ULongLong:
    case BuiltinType::UInt128:
      return GCCTypeClass::Integer;

    // Fixed-point types have no GCC classification.
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      return GCCTypeClass::None;

    // nullptr_t, ObjC object kinds, and the target-specific opaque builtin
    // types (OpenCL, SVE, PPC, RISC-V, WebAssembly, AMDGPU, HLSL) all fall
    // outside GCC's classification.
    case BuiltinType::NullPtr:

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
#include "clang/Basic/AMDGPUTypes.def"
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/HLSLIntangibleTypes.def"
      return GCCTypeClass::None;

    case BuiltinType::Dependent:
      llvm_unreachable("unexpected dependent type");
    };
    llvm_unreachable("unexpected placeholder type");

  case Type::Enum:
    // In C, GCC classifies enums as plain integers.
    return LangOpts.CPlusPlus ? GCCTypeClass::Enum : GCCTypeClass::Integer;

  case Type::Pointer:
  case Type::ConstantArray:
  case Type::VariableArray:
  case Type::IncompleteArray:
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::ArrayParameter:
    return GCCTypeClass::Pointer;

  case Type::MemberPointer:
    return CanTy->isMemberDataPointerType()
               ? GCCTypeClass::PointerToDataMember
               : GCCTypeClass::PointerToMemberFunction;

  case Type::Complex:
    return GCCTypeClass::Complex;

  case Type::Record:
    return CanTy->isUnionType() ? GCCTypeClass::Union
                                : GCCTypeClass::ClassOrStruct;

  case Type::Atomic:
    // GCC classifies _Atomic T the same as T.
    return EvaluateBuiltinClassifyType(
        T: CanTy->castAs<AtomicType>()->getValueType(), LangOpts);

  case Type::Vector:
  case Type::ExtVector:
    return GCCTypeClass::Vector;

  case Type::BlockPointer:
  case Type::ConstantMatrix:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Pipe:
  case Type::HLSLAttributedResource:
  case Type::HLSLInlineSpirv:
  case Type::OverflowBehavior:
    // Classify all other types that don't fit into the regular
    // classification the same way.
    return GCCTypeClass::None;

  case Type::BitInt:
    return GCCTypeClass::BitInt;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("invalid type for expression");
  }

  llvm_unreachable("unexpected type class");
}
15493
15494/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
15495/// as GCC.
15496static GCCTypeClass
15497EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) {
15498 // If no argument was supplied, default to None. This isn't
15499 // ideal, however it is what gcc does.
15500 if (E->getNumArgs() == 0)
15501 return GCCTypeClass::None;
15502
15503 // FIXME: Bizarrely, GCC treats a call with more than one argument as not
15504 // being an ICE, but still folds it to a constant using the type of the first
15505 // argument.
15506 return EvaluateBuiltinClassifyType(T: E->getArg(Arg: 0)->getType(), LangOpts);
15507}
15508
15509/// EvaluateBuiltinConstantPForLValue - Determine the result of
15510/// __builtin_constant_p when applied to the given pointer.
15511///
15512/// A pointer is only "constant" if it is null (or a pointer cast to integer)
15513/// or it points to the first character of a string literal.
15514static bool EvaluateBuiltinConstantPForLValue(const APValue &LV) {
15515 APValue::LValueBase Base = LV.getLValueBase();
15516 if (Base.isNull()) {
15517 // A null base is acceptable.
15518 return true;
15519 } else if (const Expr *E = Base.dyn_cast<const Expr *>()) {
15520 if (!isa<StringLiteral>(Val: E))
15521 return false;
15522 return LV.getLValueOffset().isZero();
15523 } else if (Base.is<TypeInfoLValue>()) {
15524 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
15525 // evaluate to true.
15526 return true;
15527 } else {
15528 // Any other base is not constant enough for GCC.
15529 return false;
15530 }
15531}
15532
15533/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
15534/// GCC as we can manage.
15535static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
15536 // This evaluation is not permitted to have side-effects, so evaluate it in
15537 // a speculative evaluation context.
15538 SpeculativeEvaluationRAII SpeculativeEval(Info);
15539
15540 // Constant-folding is always enabled for the operand of __builtin_constant_p
15541 // (even when the enclosing evaluation context otherwise requires a strict
15542 // language-specific constant expression).
15543 FoldConstant Fold(Info, true);
15544
15545 QualType ArgType = Arg->getType();
15546
15547 // __builtin_constant_p always has one operand. The rules which gcc follows
15548 // are not precisely documented, but are as follows:
15549 //
15550 // - If the operand is of integral, floating, complex or enumeration type,
15551 // and can be folded to a known value of that type, it returns 1.
15552 // - If the operand can be folded to a pointer to the first character
15553 // of a string literal (or such a pointer cast to an integral type)
15554 // or to a null pointer or an integer cast to a pointer, it returns 1.
15555 //
15556 // Otherwise, it returns 0.
15557 //
15558 // FIXME: GCC also intends to return 1 for literals of aggregate types, but
15559 // its support for this did not work prior to GCC 9 and is not yet well
15560 // understood.
15561 if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
15562 ArgType->isAnyComplexType() || ArgType->isPointerType() ||
15563 ArgType->isNullPtrType()) {
15564 APValue V;
15565 if (!::EvaluateAsRValue(Info, E: Arg, Result&: V) || Info.EvalStatus.HasSideEffects) {
15566 Fold.keepDiagnostics();
15567 return false;
15568 }
15569
15570 // For a pointer (possibly cast to integer), there are special rules.
15571 if (V.getKind() == APValue::LValue)
15572 return EvaluateBuiltinConstantPForLValue(LV: V);
15573
15574 // Otherwise, any constant value is good enough.
15575 return V.hasValue();
15576 }
15577
15578 // Anything else isn't considered to be sufficiently constant.
15579 return false;
15580}
15581
15582/// Retrieves the "underlying object type" of the given expression,
15583/// as used by __builtin_object_size.
15584static QualType getObjectType(APValue::LValueBase B) {
15585 if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
15586 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D))
15587 return VD->getType();
15588 } else if (const Expr *E = B.dyn_cast<const Expr*>()) {
15589 if (isa<CompoundLiteralExpr>(Val: E))
15590 return E->getType();
15591 } else if (B.is<TypeInfoLValue>()) {
15592 return B.getTypeInfoType();
15593 } else if (B.is<DynamicAllocLValue>()) {
15594 return B.getDynamicAllocType();
15595 }
15596
15597 return QualType();
15598}
15599
15600/// A more selective version of E->IgnoreParenCasts for
15601/// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
15602/// to change the type of E.
15603/// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
15604///
15605/// Always returns an RValue with a pointer representation.
15606static const Expr *ignorePointerCastsAndParens(const Expr *E) {
15607 assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
15608
15609 const Expr *NoParens = E->IgnoreParens();
15610 const auto *Cast = dyn_cast<CastExpr>(Val: NoParens);
15611 if (Cast == nullptr)
15612 return NoParens;
15613
15614 // We only conservatively allow a few kinds of casts, because this code is
15615 // inherently a simple solution that seeks to support the common case.
15616 auto CastKind = Cast->getCastKind();
15617 if (CastKind != CK_NoOp && CastKind != CK_BitCast &&
15618 CastKind != CK_AddressSpaceConversion)
15619 return NoParens;
15620
15621 const auto *SubExpr = Cast->getSubExpr();
15622 if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isPRValue())
15623 return NoParens;
15624 return ignorePointerCastsAndParens(E: SubExpr);
15625}
15626
/// Checks to see if the given LValue's Designator is at the end of the LValue's
/// record layout. e.g.
///    struct { struct { int a, b; } fst, snd; } obj;
///    obj.fst   // no
///    obj.snd   // yes
///    obj.fst.a // no
///    obj.fst.b // no
///    obj.snd.a // no
///    obj.snd.b // yes
///
/// Please note: this function is specialized for how __builtin_object_size
/// views "objects".
///
/// If this encounters an invalid RecordDecl or otherwise cannot determine the
/// correct result, it will always return true.
static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
  assert(!LVal.Designator.Invalid);

  // A field is "last" if it's the final field of its parent record; invalid
  // records and unions are conservatively treated as always-last.
  auto IsLastOrInvalidFieldDecl = [&Ctx](const FieldDecl *FD) {
    const RecordDecl *Parent = FD->getParent();
    if (Parent->isInvalidDecl() || Parent->isUnion())
      return true;
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: Parent);
    return FD->getFieldIndex() + 1 == Layout.getFieldCount();
  };

  // If the base itself is a MemberExpr, part of the access path is hidden in
  // the base rather than the designator entries; check those fields too.
  auto &Base = LVal.getLValueBase();
  if (auto *ME = dyn_cast_or_null<MemberExpr>(Val: Base.dyn_cast<const Expr *>())) {
    if (auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl())) {
      if (!IsLastOrInvalidFieldDecl(FD))
        return false;
    } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(Val: ME->getMemberDecl())) {
      // An indirect field (through anonymous structs/unions) must be last at
      // every level of its chain.
      for (auto *FD : IFD->chain()) {
        if (!IsLastOrInvalidFieldDecl(cast<FieldDecl>(Val: FD)))
          return false;
      }
    }
  }

  unsigned I = 0;
  QualType BaseType = getType(B: Base);
  if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
    // If we don't know the array bound, conservatively assume we're looking at
    // the final array element.
    ++I;
    if (BaseType->isIncompleteArrayType())
      BaseType = Ctx.getAsArrayType(T: BaseType)->getElementType();
    else
      BaseType = BaseType->castAs<PointerType>()->getPointeeType();
  }

  // Walk the remaining designator entries; every step must land on the last
  // element/field of its enclosing object.
  for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
    const auto &Entry = LVal.Designator.Entries[I];
    if (BaseType->isArrayType()) {
      // Because __builtin_object_size treats arrays as objects, we can ignore
      // the index iff this is the last array in the Designator.
      if (I + 1 == E)
        return true;
      const auto *CAT = cast<ConstantArrayType>(Val: Ctx.getAsArrayType(T: BaseType));
      uint64_t Index = Entry.getAsArrayIndex();
      if (Index + 1 != CAT->getZExtSize())
        return false;
      BaseType = CAT->getElementType();
    } else if (BaseType->isAnyComplexType()) {
      // A complex value is laid out as [real, imag]; only the imaginary part
      // (index 1) is at the end.
      const auto *CT = BaseType->castAs<ComplexType>();
      uint64_t Index = Entry.getAsArrayIndex();
      if (Index != 1)
        return false;
      BaseType = CT->getElementType();
    } else if (auto *FD = getAsField(E: Entry)) {
      if (!IsLastOrInvalidFieldDecl(FD))
        return false;
      BaseType = FD->getType();
    } else {
      // A cast to a base class never designates the end of the most-derived
      // object.
      assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
      return false;
    }
  }
  return true;
}
15707
15708/// Tests to see if the LValue has a user-specified designator (that isn't
15709/// necessarily valid). Note that this always returns 'true' if the LValue has
15710/// an unsized array as its first designator entry, because there's currently no
15711/// way to tell if the user typed *foo or foo[0].
15712static bool refersToCompleteObject(const LValue &LVal) {
15713 if (LVal.Designator.Invalid)
15714 return false;
15715
15716 if (!LVal.Designator.Entries.empty())
15717 return LVal.Designator.isMostDerivedAnUnsizedArray();
15718
15719 if (!LVal.InvalidBase)
15720 return true;
15721
15722 // If `E` is a MemberExpr, then the first part of the designator is hiding in
15723 // the LValueBase.
15724 const auto *E = LVal.Base.dyn_cast<const Expr *>();
15725 return !E || !isa<MemberExpr>(Val: E);
15726}
15727
15728/// Attempts to detect a user writing into a piece of memory that's impossible
15729/// to figure out the size of by just using types.
15730static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
15731 const SubobjectDesignator &Designator = LVal.Designator;
15732 // Notes:
15733 // - Users can only write off of the end when we have an invalid base. Invalid
15734 // bases imply we don't know where the memory came from.
15735 // - We used to be a bit more aggressive here; we'd only be conservative if
15736 // the array at the end was flexible, or if it had 0 or 1 elements. This
15737 // broke some common standard library extensions (PR30346), but was
15738 // otherwise seemingly fine. It may be useful to reintroduce this behavior
15739 // with some sort of list. OTOH, it seems that GCC is always
15740 // conservative with the last element in structs (if it's an array), so our
15741 // current behavior is more compatible than an explicit list approach would
15742 // be.
15743 auto isFlexibleArrayMember = [&] {
15744 using FAMKind = LangOptions::StrictFlexArraysLevelKind;
15745 FAMKind StrictFlexArraysLevel =
15746 Ctx.getLangOpts().getStrictFlexArraysLevel();
15747
15748 if (Designator.isMostDerivedAnUnsizedArray())
15749 return true;
15750
15751 if (StrictFlexArraysLevel == FAMKind::Default)
15752 return true;
15753
15754 if (Designator.getMostDerivedArraySize() == 0 &&
15755 StrictFlexArraysLevel != FAMKind::IncompleteOnly)
15756 return true;
15757
15758 if (Designator.getMostDerivedArraySize() == 1 &&
15759 StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
15760 return true;
15761
15762 return false;
15763 };
15764
15765 return LVal.InvalidBase &&
15766 Designator.Entries.size() == Designator.MostDerivedPathLength &&
15767 Designator.MostDerivedIsArrayElement && isFlexibleArrayMember() &&
15768 isDesignatorAtObjectEnd(Ctx, LVal);
15769}
15770
15771/// Converts the given APInt to CharUnits, assuming the APInt is unsigned.
15772/// Fails if the conversion would cause loss of precision.
15773static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
15774 CharUnits &Result) {
15775 auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max();
15776 if (Int.ugt(RHS: CharUnitsMax))
15777 return false;
15778 Result = CharUnits::fromQuantity(Quantity: Int.getZExtValue());
15779 return true;
15780}
15781
15782/// If we're evaluating the object size of an instance of a struct that
15783/// contains a flexible array member, add the size of the initializer.
15784static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T,
15785 const LValue &LV, CharUnits &Size) {
15786 if (!T.isNull() && T->isStructureType() &&
15787 T->castAsRecordDecl()->hasFlexibleArrayMember())
15788 if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>())
15789 if (const auto *VD = dyn_cast<VarDecl>(Val: V))
15790 if (VD->hasInit())
15791 Size += VD->getFlexibleArrayInitChars(Ctx: Info.Ctx);
15792}
15793
/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
/// determine how many bytes exist from the beginning of the object to either
/// the end of the current subobject, or the end of the object itself, depending
/// on what the LValue looks like + the value of Type.
///
/// \p Type is the __builtin_object_size mode argument; the code below reads
/// bit 0 as "size of the closest surrounding subobject" and treats Type == 3
/// as a lower-bound request, Type == 1 as an upper-bound request.
///
/// If this returns false, the value of EndOffset is undefined.
static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
                               unsigned Type, const LValue &LVal,
                               CharUnits &EndOffset) {
  bool DetermineForCompleteObject = refersToCompleteObject(LVal);

  // Sizeof wrapper that refuses null, incomplete, and function types, for
  // which no meaningful byte size can be computed.
  auto CheckedHandleSizeof = [&](QualType Ty, CharUnits &Result) {
    if (Ty.isNull())
      return false;

    Ty = Ty.getNonReferenceType();

    if (Ty->isIncompleteType() || Ty->isFunctionType())
      return false;

    return HandleSizeof(Info, Loc: ExprLoc, Type: Ty, Size&: Result);
  };

  // We want to evaluate the size of the entire object. This is a valid fallback
  // for when Type=1 and the designator is invalid, because we're asked for an
  // upper-bound.
  if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) {
    // Type=3 wants a lower bound, so we can't fall back to this.
    if (Type == 3 && !DetermineForCompleteObject)
      return false;

    // If the base is a call annotated with alloc_size, the allocation size is
    // known exactly; prefer it over sizeof of the base type.
    llvm::APInt APEndOffset;
    if (isBaseAnAllocSizeCall(Base: LVal.getLValueBase()) &&
        getBytesReturnedByAllocSizeCall(Ctx: Info.Ctx, LVal, Result&: APEndOffset))
      return convertUnsignedAPIntToCharUnits(Int: APEndOffset, Result&: EndOffset);

    if (LVal.InvalidBase)
      return false;

    QualType BaseTy = getObjectType(B: LVal.getLValueBase());
    const bool Ret = CheckedHandleSizeof(BaseTy, EndOffset);
    // A struct with a flexible array member may have extra initialized
    // storage beyond sizeof(BaseTy); account for it.
    addFlexibleArrayMemberInitSize(Info, T: BaseTy, LV: LVal, Size&: EndOffset);
    return Ret;
  }

  // We want to evaluate the size of a subobject.
  const SubobjectDesignator &Designator = LVal.Designator;

  // The following is a moderately common idiom in C:
  //
  // struct Foo { int a; char c[1]; };
  // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
  // strcpy(&F->c[0], Bar);
  //
  // In order to not break too much legacy code, we need to support it.
  if (isUserWritingOffTheEnd(Ctx: Info.Ctx, LVal)) {
    // If we can resolve this to an alloc_size call, we can hand that back,
    // because we know for certain how many bytes there are to write to.
    llvm::APInt APEndOffset;
    if (isBaseAnAllocSizeCall(Base: LVal.getLValueBase()) &&
        getBytesReturnedByAllocSizeCall(Ctx: Info.Ctx, LVal, Result&: APEndOffset))
      return convertUnsignedAPIntToCharUnits(Int: APEndOffset, Result&: EndOffset);

    // If we cannot determine the size of the initial allocation, then we can't
    // given an accurate upper-bound. However, we are still able to give
    // conservative lower-bounds for Type=3.
    if (Type == 1)
      return false;
  }

  CharUnits BytesPerElem;
  if (!CheckedHandleSizeof(Designator.MostDerivedType, BytesPerElem))
    return false;

  // According to the GCC documentation, we want the size of the subobject
  // denoted by the pointer. But that's not quite right -- what we actually
  // want is the size of the immediately-enclosing array, if there is one.
  int64_t ElemsRemaining;
  if (Designator.MostDerivedIsArrayElement &&
      Designator.Entries.size() == Designator.MostDerivedPathLength) {
    // Pointing at element ArrayIndex of an array of ArraySize elements:
    // everything from here to the end of that array is writable. A
    // one-past-the-end (or beyond) index leaves zero elements.
    uint64_t ArraySize = Designator.getMostDerivedArraySize();
    uint64_t ArrayIndex = Designator.Entries.back().getAsArrayIndex();
    ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
  } else {
    ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
  }

  EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining;
  return true;
}
15884
/// Tries to evaluate __builtin_object_size for @p E with the given @p Type
/// (the builtin's second argument, 0-3).
///
/// On success, returns the number of accessible bytes; on failure, returns
/// std::nullopt and any relevant diagnostics have been produced via @p Info.
static std::optional<uint64_t>
tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type, EvalInfo &Info) {
  // Determine the denoted object.
  LValue LVal;
  {
    // The operand of __builtin_object_size is never evaluated for side-effects.
    // If there are any, but we can determine the pointed-to object anyway, then
    // ignore the side-effects.
    SpeculativeEvaluationRAII SpeculativeEval(Info);
    IgnoreSideEffectsRAII Fold(Info);

    if (E->isGLValue()) {
      // It's possible for us to be given GLValues if we're called via
      // Expr::tryEvaluateObjectSize.
      APValue RVal;
      if (!EvaluateAsRValue(Info, E, Result&: RVal))
        return std::nullopt;
      LVal.setFrom(Ctx: Info.Ctx, V: RVal);
    } else if (!EvaluatePointer(E: ignorePointerCastsAndParens(E), Result&: LVal, Info,
                                /*InvalidBaseOK=*/true))
      return std::nullopt;
  }

  // If we point to before the start of the object, there are no accessible
  // bytes.
  if (LVal.getLValueOffset().isNegative())
    return 0;

  CharUnits EndOffset;
  if (!determineEndOffset(Info, ExprLoc: E->getExprLoc(), Type, LVal, EndOffset))
    return std::nullopt;

  // If we've fallen outside of the end offset, just pretend there's nothing to
  // write to/read from.
  if (EndOffset <= LVal.getLValueOffset())
    return 0;
  return (EndOffset - LVal.getLValueOffset()).getQuantity();
}
15928
15929bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
15930 if (!IsConstantEvaluatedBuiltinCall(E))
15931 return ExprEvaluatorBaseTy::VisitCallExpr(E);
15932 return VisitBuiltinCallExpr(E, BuiltinOp: E->getBuiltinCallee());
15933}
15934
15935static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
15936 APValue &Val, APSInt &Alignment) {
15937 QualType SrcTy = E->getArg(Arg: 0)->getType();
15938 if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: SrcTy, Info, Alignment))
15939 return false;
15940 // Even though we are evaluating integer expressions we could get a pointer
15941 // argument for the __builtin_is_aligned() case.
15942 if (SrcTy->isPointerType()) {
15943 LValue Ptr;
15944 if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: Ptr, Info))
15945 return false;
15946 Ptr.moveInto(V&: Val);
15947 } else if (!SrcTy->isIntegralOrEnumerationType()) {
15948 Info.FFDiag(E: E->getArg(Arg: 0));
15949 return false;
15950 } else {
15951 APSInt SrcInt;
15952 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: SrcInt, Info))
15953 return false;
15954 assert(SrcInt.getBitWidth() >= Alignment.getBitWidth() &&
15955 "Bit widths must be the same");
15956 Val = APValue(SrcInt);
15957 }
15958 assert(Val.hasValue());
15959 return true;
15960}
15961
15962bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
15963 unsigned BuiltinOp) {
15964 auto EvalTestOp = [&](llvm::function_ref<bool(const APInt &, const APInt &)>
15965 Fn) {
15966 APValue SourceLHS, SourceRHS;
15967 if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) ||
15968 !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS))
15969 return false;
15970
15971 unsigned SourceLen = SourceLHS.getVectorLength();
15972 const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
15973 QualType ElemQT = VT->getElementType();
15974 unsigned LaneWidth = Info.Ctx.getTypeSize(T: ElemQT);
15975
15976 APInt AWide(LaneWidth * SourceLen, 0);
15977 APInt BWide(LaneWidth * SourceLen, 0);
15978
15979 for (unsigned I = 0; I != SourceLen; ++I) {
15980 APInt ALane;
15981 APInt BLane;
15982 if (ElemQT->isIntegerType()) { // Get value.
15983 ALane = SourceLHS.getVectorElt(I).getInt();
15984 BLane = SourceRHS.getVectorElt(I).getInt();
15985 } else if (ElemQT->isFloatingType()) { // Get only sign bit.
15986 ALane =
15987 SourceLHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
15988 BLane =
15989 SourceRHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
15990 } else { // Must be integer or floating type.
15991 return false;
15992 }
15993 AWide.insertBits(SubBits: ALane, bitPosition: I * LaneWidth);
15994 BWide.insertBits(SubBits: BLane, bitPosition: I * LaneWidth);
15995 }
15996 return Success(Value: Fn(AWide, BWide), E);
15997 };
15998
15999 auto HandleMaskBinOp =
16000 [&](llvm::function_ref<APSInt(const APSInt &, const APSInt &)> Fn)
16001 -> bool {
16002 APValue LHS, RHS;
16003 if (!Evaluate(Result&: LHS, Info, E: E->getArg(Arg: 0)) ||
16004 !Evaluate(Result&: RHS, Info, E: E->getArg(Arg: 1)))
16005 return false;
16006
16007 APSInt ResultInt = Fn(LHS.getInt(), RHS.getInt());
16008
16009 return Success(V: APValue(ResultInt), E);
16010 };
16011
16012 auto HandleCRC32 = [&](unsigned DataBytes) -> bool {
16013 APSInt CRC, Data;
16014 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: CRC, Info) ||
16015 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Data, Info))
16016 return false;
16017
16018 uint64_t CRCVal = CRC.getZExtValue();
16019 uint64_t DataVal = Data.getZExtValue();
16020
16021 // CRC32C polynomial (iSCSI polynomial, bit-reversed)
16022 static const uint32_t CRC32C_POLY = 0x82F63B78;
16023
16024 // Process each byte
16025 uint32_t Result = static_cast<uint32_t>(CRCVal);
16026 for (unsigned I = 0; I != DataBytes; ++I) {
16027 uint8_t Byte = static_cast<uint8_t>((DataVal >> (I * 8)) & 0xFF);
16028 Result ^= Byte;
16029 for (int J = 0; J != 8; ++J) {
16030 Result = (Result >> 1) ^ ((Result & 1) ? CRC32C_POLY : 0);
16031 }
16032 }
16033
16034 return Success(Value: Result, E);
16035 };
16036
16037 switch (BuiltinOp) {
16038 default:
16039 return false;
16040
16041 case X86::BI__builtin_ia32_crc32qi:
16042 return HandleCRC32(1);
16043 case X86::BI__builtin_ia32_crc32hi:
16044 return HandleCRC32(2);
16045 case X86::BI__builtin_ia32_crc32si:
16046 return HandleCRC32(4);
16047 case X86::BI__builtin_ia32_crc32di:
16048 return HandleCRC32(8);
16049
16050 case Builtin::BI__builtin_dynamic_object_size:
16051 case Builtin::BI__builtin_object_size: {
16052 // The type was checked when we built the expression.
16053 unsigned Type =
16054 E->getArg(Arg: 1)->EvaluateKnownConstInt(Ctx: Info.Ctx).getZExtValue();
16055 assert(Type <= 3 && "unexpected type");
16056
16057 if (std::optional<uint64_t> Size =
16058 tryEvaluateBuiltinObjectSize(E: E->getArg(Arg: 0), Type, Info))
16059 return Success(Value: *Size, E);
16060
16061 if (E->getArg(Arg: 0)->HasSideEffects(Ctx: Info.Ctx))
16062 return Success(Value: (Type & 2) ? 0 : -1, E);
16063
16064 // Expression had no side effects, but we couldn't statically determine the
16065 // size of the referenced object.
16066 switch (Info.EvalMode) {
16067 case EvaluationMode::ConstantExpression:
16068 case EvaluationMode::ConstantFold:
16069 case EvaluationMode::IgnoreSideEffects:
16070 // Leave it to IR generation.
16071 return Error(E);
16072 case EvaluationMode::ConstantExpressionUnevaluated:
16073 // Reduce it to a constant now.
16074 return Success(Value: (Type & 2) ? 0 : -1, E);
16075 }
16076
16077 llvm_unreachable("unexpected EvalMode");
16078 }
16079
16080 case Builtin::BI__builtin_os_log_format_buffer_size: {
16081 analyze_os_log::OSLogBufferLayout Layout;
16082 analyze_os_log::computeOSLogBufferLayout(Ctx&: Info.Ctx, E, layout&: Layout);
16083 return Success(Value: Layout.size().getQuantity(), E);
16084 }
16085
16086 case Builtin::BI__builtin_is_aligned: {
16087 APValue Src;
16088 APSInt Alignment;
16089 if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment))
16090 return false;
16091 if (Src.isLValue()) {
16092 // If we evaluated a pointer, check the minimum known alignment.
16093 LValue Ptr;
16094 Ptr.setFrom(Ctx: Info.Ctx, V: Src);
16095 CharUnits BaseAlignment = getBaseAlignment(Info, Value: Ptr);
16096 CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(offset: Ptr.Offset);
16097 // We can return true if the known alignment at the computed offset is
16098 // greater than the requested alignment.
16099 assert(PtrAlign.isPowerOfTwo());
16100 assert(Alignment.isPowerOf2());
16101 if (PtrAlign.getQuantity() >= Alignment)
16102 return Success(Value: 1, E);
16103 // If the alignment is not known to be sufficient, some cases could still
16104 // be aligned at run time. However, if the requested alignment is less or
16105 // equal to the base alignment and the offset is not aligned, we know that
16106 // the run-time value can never be aligned.
16107 if (BaseAlignment.getQuantity() >= Alignment &&
16108 PtrAlign.getQuantity() < Alignment)
16109 return Success(Value: 0, E);
16110 // Otherwise we can't infer whether the value is sufficiently aligned.
16111 // TODO: __builtin_is_aligned(__builtin_align_{down,up{(expr, N), N)
16112 // in cases where we can't fully evaluate the pointer.
16113 Info.FFDiag(E: E->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_compute)
16114 << Alignment;
16115 return false;
16116 }
16117 assert(Src.isInt());
16118 return Success(Value: (Src.getInt() & (Alignment - 1)) == 0 ? 1 : 0, E);
16119 }
16120 case Builtin::BI__builtin_align_up: {
16121 APValue Src;
16122 APSInt Alignment;
16123 if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment))
16124 return false;
16125 if (!Src.isInt())
16126 return Error(E);
16127 APSInt AlignedVal =
16128 APSInt((Src.getInt() + (Alignment - 1)) & ~(Alignment - 1),
16129 Src.getInt().isUnsigned());
16130 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth());
16131 return Success(SI: AlignedVal, E);
16132 }
16133 case Builtin::BI__builtin_align_down: {
16134 APValue Src;
16135 APSInt Alignment;
16136 if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment))
16137 return false;
16138 if (!Src.isInt())
16139 return Error(E);
16140 APSInt AlignedVal =
16141 APSInt(Src.getInt() & ~(Alignment - 1), Src.getInt().isUnsigned());
16142 assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth());
16143 return Success(SI: AlignedVal, E);
16144 }
16145
16146 case Builtin::BI__builtin_bitreverseg:
16147 case Builtin::BI__builtin_bitreverse8:
16148 case Builtin::BI__builtin_bitreverse16:
16149 case Builtin::BI__builtin_bitreverse32:
16150 case Builtin::BI__builtin_bitreverse64:
16151 case Builtin::BI__builtin_elementwise_bitreverse: {
16152 APSInt Val;
16153 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16154 return false;
16155
16156 return Success(I: Val.reverseBits(), E);
16157 }
16158 case Builtin::BI__builtin_bswapg:
16159 case Builtin::BI__builtin_bswap16:
16160 case Builtin::BI__builtin_bswap32:
16161 case Builtin::BI__builtin_bswap64: {
16162 APSInt Val;
16163 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16164 return false;
16165 if (Val.getBitWidth() == 8 || Val.getBitWidth() == 1)
16166 return Success(SI: Val, E);
16167
16168 return Success(I: Val.byteSwap(), E);
16169 }
16170
16171 case Builtin::BI__builtin_classify_type:
16172 return Success(Value: (int)EvaluateBuiltinClassifyType(E, LangOpts: Info.getLangOpts()), E);
16173
16174 case Builtin::BI__builtin_clrsb:
16175 case Builtin::BI__builtin_clrsbl:
16176 case Builtin::BI__builtin_clrsbll: {
16177 APSInt Val;
16178 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16179 return false;
16180
16181 return Success(Value: Val.getBitWidth() - Val.getSignificantBits(), E);
16182 }
16183
16184 case Builtin::BI__builtin_clz:
16185 case Builtin::BI__builtin_clzl:
16186 case Builtin::BI__builtin_clzll:
16187 case Builtin::BI__builtin_clzs:
16188 case Builtin::BI__builtin_clzg:
16189 case Builtin::BI__builtin_elementwise_clzg:
16190 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
16191 case Builtin::BI__lzcnt:
16192 case Builtin::BI__lzcnt64: {
16193 APSInt Val;
16194 if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) {
16195 APValue Vec;
16196 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
16197 return false;
16198 Val = ConvertBoolVectorToInt(Val: Vec);
16199 } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) {
16200 return false;
16201 }
16202
16203 std::optional<APSInt> Fallback;
16204 if ((BuiltinOp == Builtin::BI__builtin_clzg ||
16205 BuiltinOp == Builtin::BI__builtin_elementwise_clzg) &&
16206 E->getNumArgs() > 1) {
16207 APSInt FallbackTemp;
16208 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: FallbackTemp, Info))
16209 return false;
16210 Fallback = FallbackTemp;
16211 }
16212
16213 if (!Val) {
16214 if (Fallback)
16215 return Success(SI: *Fallback, E);
16216
16217 // When the argument is 0, the result of GCC builtins is undefined,
16218 // whereas for Microsoft intrinsics, the result is the bit-width of the
16219 // argument.
16220 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
16221 BuiltinOp != Builtin::BI__lzcnt &&
16222 BuiltinOp != Builtin::BI__lzcnt64;
16223
16224 if (BuiltinOp == Builtin::BI__builtin_elementwise_clzg) {
16225 Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero)
16226 << /*IsTrailing=*/false;
16227 }
16228
16229 if (ZeroIsUndefined)
16230 return Error(E);
16231 }
16232
16233 return Success(Value: Val.countl_zero(), E);
16234 }
16235
16236 case Builtin::BI__builtin_constant_p: {
16237 const Expr *Arg = E->getArg(Arg: 0);
16238 if (EvaluateBuiltinConstantP(Info, Arg))
16239 return Success(Value: true, E);
16240 if (Info.InConstantContext || Arg->HasSideEffects(Ctx: Info.Ctx)) {
16241 // Outside a constant context, eagerly evaluate to false in the presence
16242 // of side-effects in order to avoid -Wunsequenced false-positives in
16243 // a branch on __builtin_constant_p(expr).
16244 return Success(Value: false, E);
16245 }
16246 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
16247 return false;
16248 }
16249
16250 case Builtin::BI__noop:
16251 // __noop always evaluates successfully and returns 0.
16252 return Success(Value: 0, E);
16253
16254 case Builtin::BI__builtin_is_constant_evaluated: {
16255 const auto *Callee = Info.CurrentCall->getCallee();
16256 if (Info.InConstantContext && !Info.CheckingPotentialConstantExpression &&
16257 (Info.CallStackDepth == 1 ||
16258 (Info.CallStackDepth == 2 && Callee->isInStdNamespace() &&
16259 Callee->getIdentifier() &&
16260 Callee->getIdentifier()->isStr(Str: "is_constant_evaluated")))) {
16261 // FIXME: Find a better way to avoid duplicated diagnostics.
16262 if (Info.EvalStatus.Diag)
16263 Info.report(Loc: (Info.CallStackDepth == 1)
16264 ? E->getExprLoc()
16265 : Info.CurrentCall->getCallRange().getBegin(),
16266 DiagId: diag::warn_is_constant_evaluated_always_true_constexpr)
16267 << (Info.CallStackDepth == 1 ? "__builtin_is_constant_evaluated"
16268 : "std::is_constant_evaluated");
16269 }
16270
16271 return Success(Value: Info.InConstantContext, E);
16272 }
16273
16274 case Builtin::BI__builtin_is_within_lifetime:
16275 if (auto result = EvaluateBuiltinIsWithinLifetime(*this, E))
16276 return Success(Value: *result, E);
16277 return false;
16278
16279 case Builtin::BI__builtin_ctz:
16280 case Builtin::BI__builtin_ctzl:
16281 case Builtin::BI__builtin_ctzll:
16282 case Builtin::BI__builtin_ctzs:
16283 case Builtin::BI__builtin_ctzg:
16284 case Builtin::BI__builtin_elementwise_ctzg: {
16285 APSInt Val;
16286 if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) {
16287 APValue Vec;
16288 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
16289 return false;
16290 Val = ConvertBoolVectorToInt(Val: Vec);
16291 } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) {
16292 return false;
16293 }
16294
16295 std::optional<APSInt> Fallback;
16296 if ((BuiltinOp == Builtin::BI__builtin_ctzg ||
16297 BuiltinOp == Builtin::BI__builtin_elementwise_ctzg) &&
16298 E->getNumArgs() > 1) {
16299 APSInt FallbackTemp;
16300 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: FallbackTemp, Info))
16301 return false;
16302 Fallback = FallbackTemp;
16303 }
16304
16305 if (!Val) {
16306 if (Fallback)
16307 return Success(SI: *Fallback, E);
16308
16309 if (BuiltinOp == Builtin::BI__builtin_elementwise_ctzg) {
16310 Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero)
16311 << /*IsTrailing=*/true;
16312 }
16313 return Error(E);
16314 }
16315
16316 return Success(Value: Val.countr_zero(), E);
16317 }
16318
16319 case Builtin::BI__builtin_eh_return_data_regno: {
16320 int Operand = E->getArg(Arg: 0)->EvaluateKnownConstInt(Ctx: Info.Ctx).getZExtValue();
16321 Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(RegNo: Operand);
16322 return Success(Value: Operand, E);
16323 }
16324
16325 case Builtin::BI__builtin_elementwise_abs: {
16326 APSInt Val;
16327 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16328 return false;
16329
16330 return Success(I: Val.abs(), E);
16331 }
16332
16333 case Builtin::BI__builtin_expect:
16334 case Builtin::BI__builtin_expect_with_probability:
16335 return Visit(S: E->getArg(Arg: 0));
16336
16337 case Builtin::BI__builtin_ptrauth_string_discriminator: {
16338 const auto *Literal =
16339 cast<StringLiteral>(Val: E->getArg(Arg: 0)->IgnoreParenImpCasts());
16340 uint64_t Result = getPointerAuthStableSipHash(S: Literal->getString());
16341 return Success(Value: Result, E);
16342 }
16343
16344 case Builtin::BI__builtin_infer_alloc_token: {
16345 // If we fail to infer a type, this fails to be a constant expression; this
16346 // can be checked with __builtin_constant_p(...).
16347 QualType AllocType = infer_alloc::inferPossibleType(E, Ctx: Info.Ctx, CastE: nullptr);
16348 if (AllocType.isNull())
16349 return Error(
16350 E, D: diag::note_constexpr_infer_alloc_token_type_inference_failed);
16351 auto ATMD = infer_alloc::getAllocTokenMetadata(T: AllocType, Ctx: Info.Ctx);
16352 if (!ATMD)
16353 return Error(E, D: diag::note_constexpr_infer_alloc_token_no_metadata);
16354 auto Mode =
16355 Info.getLangOpts().AllocTokenMode.value_or(u: llvm::DefaultAllocTokenMode);
16356 uint64_t BitWidth = Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType());
16357 auto MaxTokensOpt = Info.getLangOpts().AllocTokenMax;
16358 uint64_t MaxTokens =
16359 MaxTokensOpt.value_or(u: 0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));
16360 auto MaybeToken = llvm::getAllocToken(Mode, Metadata: *ATMD, MaxTokens);
16361 if (!MaybeToken)
16362 return Error(E, D: diag::note_constexpr_infer_alloc_token_stateful_mode);
16363 return Success(I: llvm::APInt(BitWidth, *MaybeToken), E);
16364 }
16365
16366 case Builtin::BI__builtin_ffs:
16367 case Builtin::BI__builtin_ffsl:
16368 case Builtin::BI__builtin_ffsll: {
16369 APSInt Val;
16370 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16371 return false;
16372
16373 unsigned N = Val.countr_zero();
16374 return Success(Value: N == Val.getBitWidth() ? 0 : N + 1, E);
16375 }
16376
16377 case Builtin::BI__builtin_fpclassify: {
16378 APFloat Val(0.0);
16379 if (!EvaluateFloat(E: E->getArg(Arg: 5), Result&: Val, Info))
16380 return false;
16381 unsigned Arg;
16382 switch (Val.getCategory()) {
16383 case APFloat::fcNaN: Arg = 0; break;
16384 case APFloat::fcInfinity: Arg = 1; break;
16385 case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break;
16386 case APFloat::fcZero: Arg = 4; break;
16387 }
16388 return Visit(S: E->getArg(Arg));
16389 }
16390
16391 case Builtin::BI__builtin_isinf_sign: {
16392 APFloat Val(0.0);
16393 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16394 Success(Value: Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E);
16395 }
16396
16397 case Builtin::BI__builtin_isinf: {
16398 APFloat Val(0.0);
16399 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16400 Success(Value: Val.isInfinity() ? 1 : 0, E);
16401 }
16402
16403 case Builtin::BI__builtin_isfinite: {
16404 APFloat Val(0.0);
16405 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16406 Success(Value: Val.isFinite() ? 1 : 0, E);
16407 }
16408
16409 case Builtin::BI__builtin_isnan: {
16410 APFloat Val(0.0);
16411 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16412 Success(Value: Val.isNaN() ? 1 : 0, E);
16413 }
16414
16415 case Builtin::BI__builtin_isnormal: {
16416 APFloat Val(0.0);
16417 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16418 Success(Value: Val.isNormal() ? 1 : 0, E);
16419 }
16420
16421 case Builtin::BI__builtin_issubnormal: {
16422 APFloat Val(0.0);
16423 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16424 Success(Value: Val.isDenormal() ? 1 : 0, E);
16425 }
16426
16427 case Builtin::BI__builtin_iszero: {
16428 APFloat Val(0.0);
16429 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16430 Success(Value: Val.isZero() ? 1 : 0, E);
16431 }
16432
16433 case Builtin::BI__builtin_signbit:
16434 case Builtin::BI__builtin_signbitf:
16435 case Builtin::BI__builtin_signbitl: {
16436 APFloat Val(0.0);
16437 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16438 Success(Value: Val.isNegative() ? 1 : 0, E);
16439 }
16440
16441 case Builtin::BI__builtin_isgreater:
16442 case Builtin::BI__builtin_isgreaterequal:
16443 case Builtin::BI__builtin_isless:
16444 case Builtin::BI__builtin_islessequal:
16445 case Builtin::BI__builtin_islessgreater:
16446 case Builtin::BI__builtin_isunordered: {
16447 APFloat LHS(0.0);
16448 APFloat RHS(0.0);
16449 if (!EvaluateFloat(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
16450 !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
16451 return false;
16452
16453 return Success(
16454 Value: [&] {
16455 switch (BuiltinOp) {
16456 case Builtin::BI__builtin_isgreater:
16457 return LHS > RHS;
16458 case Builtin::BI__builtin_isgreaterequal:
16459 return LHS >= RHS;
16460 case Builtin::BI__builtin_isless:
16461 return LHS < RHS;
16462 case Builtin::BI__builtin_islessequal:
16463 return LHS <= RHS;
16464 case Builtin::BI__builtin_islessgreater: {
16465 APFloat::cmpResult cmp = LHS.compare(RHS);
16466 return cmp == APFloat::cmpResult::cmpLessThan ||
16467 cmp == APFloat::cmpResult::cmpGreaterThan;
16468 }
16469 case Builtin::BI__builtin_isunordered:
16470 return LHS.compare(RHS) == APFloat::cmpResult::cmpUnordered;
16471 default:
16472 llvm_unreachable("Unexpected builtin ID: Should be a floating "
16473 "point comparison function");
16474 }
16475 }()
16476 ? 1
16477 : 0,
16478 E);
16479 }
16480
16481 case Builtin::BI__builtin_issignaling: {
16482 APFloat Val(0.0);
16483 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16484 Success(Value: Val.isSignaling() ? 1 : 0, E);
16485 }
16486
16487 case Builtin::BI__builtin_isfpclass: {
16488 APSInt MaskVal;
16489 if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: MaskVal, Info))
16490 return false;
16491 unsigned Test = static_cast<llvm::FPClassTest>(MaskVal.getZExtValue());
16492 APFloat Val(0.0);
16493 return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) &&
16494 Success(Value: (Val.classify() & Test) ? 1 : 0, E);
16495 }
16496
16497 case Builtin::BI__builtin_parity:
16498 case Builtin::BI__builtin_parityl:
16499 case Builtin::BI__builtin_parityll: {
16500 APSInt Val;
16501 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16502 return false;
16503
16504 return Success(Value: Val.popcount() % 2, E);
16505 }
16506
16507 case Builtin::BI__builtin_abs:
16508 case Builtin::BI__builtin_labs:
16509 case Builtin::BI__builtin_llabs: {
16510 APSInt Val;
16511 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
16512 return false;
16513 if (Val == APSInt(APInt::getSignedMinValue(numBits: Val.getBitWidth()),
16514 /*IsUnsigned=*/false))
16515 return false;
16516 if (Val.isNegative())
16517 Val.negate();
16518 return Success(SI: Val, E);
16519 }
16520
16521 case Builtin::BI__builtin_popcount:
16522 case Builtin::BI__builtin_popcountl:
16523 case Builtin::BI__builtin_popcountll:
16524 case Builtin::BI__builtin_popcountg:
16525 case Builtin::BI__builtin_elementwise_popcount:
16526 case Builtin::BI__popcnt16: // Microsoft variants of popcount
16527 case Builtin::BI__popcnt:
16528 case Builtin::BI__popcnt64: {
16529 APSInt Val;
16530 if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) {
16531 APValue Vec;
16532 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
16533 return false;
16534 Val = ConvertBoolVectorToInt(Val: Vec);
16535 } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) {
16536 return false;
16537 }
16538
16539 return Success(Value: Val.popcount(), E);
16540 }
16541
16542 case Builtin::BI__builtin_rotateleft8:
16543 case Builtin::BI__builtin_rotateleft16:
16544 case Builtin::BI__builtin_rotateleft32:
16545 case Builtin::BI__builtin_rotateleft64:
16546 case Builtin::BI__builtin_rotateright8:
16547 case Builtin::BI__builtin_rotateright16:
16548 case Builtin::BI__builtin_rotateright32:
16549 case Builtin::BI__builtin_rotateright64:
16550 case Builtin::BI__builtin_stdc_rotate_left:
16551 case Builtin::BI__builtin_stdc_rotate_right:
16552 case Builtin::BI_rotl8: // Microsoft variants of rotate left
16553 case Builtin::BI_rotl16:
16554 case Builtin::BI_rotl:
16555 case Builtin::BI_lrotl:
16556 case Builtin::BI_rotl64:
16557 case Builtin::BI_rotr8: // Microsoft variants of rotate right
16558 case Builtin::BI_rotr16:
16559 case Builtin::BI_rotr:
16560 case Builtin::BI_lrotr:
16561 case Builtin::BI_rotr64: {
16562 APSInt Value, Amount;
16563 if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Value, Info) ||
16564 !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Amount, Info))
16565 return false;
16566
16567 Amount = NormalizeRotateAmount(Value, Amount);
16568
16569 switch (BuiltinOp) {
16570 case Builtin::BI__builtin_rotateright8:
16571 case Builtin::BI__builtin_rotateright16:
16572 case Builtin::BI__builtin_rotateright32:
16573 case Builtin::BI__builtin_rotateright64:
16574 case Builtin::BI__builtin_stdc_rotate_right:
16575 case Builtin::BI_rotr8:
16576 case Builtin::BI_rotr16:
16577 case Builtin::BI_rotr:
16578 case Builtin::BI_lrotr:
16579 case Builtin::BI_rotr64:
16580 return Success(
16581 SI: APSInt(Value.rotr(rotateAmt: Amount.getZExtValue()), Value.isUnsigned()), E);
16582 default:
16583 return Success(
16584 SI: APSInt(Value.rotl(rotateAmt: Amount.getZExtValue()), Value.isUnsigned()), E);
16585 }
16586 }
16587
  case Builtin::BI__builtin_elementwise_add_sat: {
    // Saturating addition: the result is clamped to the integer type's
    // min/max instead of wrapping on overflow.
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    // Use the signed or unsigned saturating form to match the operand type.
    APInt Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_sub_sat: {
    // Saturating subtraction; mirrors the add_sat handling directly above.
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    APInt Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_max: {
    // Elementwise max on scalars: APSInt's operator< respects signedness.
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    APInt Result = std::max(a: LHS, b: RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_min: {
    // Elementwise min on scalars; same signedness-aware comparison as max.
    APSInt LHS, RHS;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;

    APInt Result = std::min(a: LHS, b: RHS);
    return Success(SI: APSInt(Result, !LHS.isSigned()), E);
  }
  case Builtin::BI__builtin_elementwise_fshl:
  case Builtin::BI__builtin_elementwise_fshr: {
    // Funnel shift: concatenate Hi:Lo and shift left (fshl) or right (fshr)
    // by Shift, keeping a result of the original bit width.
    APSInt Hi, Lo, Shift;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Hi, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Lo, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Shift, Info))
      return false;

    switch (BuiltinOp) {
    case Builtin::BI__builtin_elementwise_fshl: {
      APSInt Result(llvm::APIntOps::fshl(Hi, Lo, Shift), Hi.isUnsigned());
      return Success(SI: Result, E);
    }
    case Builtin::BI__builtin_elementwise_fshr: {
      APSInt Result(llvm::APIntOps::fshr(Hi, Lo, Shift), Hi.isUnsigned());
      return Success(SI: Result, E);
    }
    }
    llvm_unreachable("Fully covered switch above");
  }
  case Builtin::BIstrlen:
  case Builtin::BIwcslen:
    // A call to strlen is not a constant expression.
    // Emit a CCE (constant-core-expression) note, but still fall through and
    // attempt to fold the call as an extension.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_strlen:
  case Builtin::BI__builtin_wcslen: {
    // As an extension, we support __builtin_strlen() as a constant expression,
    // and support folding strlen() to a constant.
    if (std::optional<uint64_t> StrLen =
            EvaluateBuiltinStrLen(E: E->getArg(Arg: 0), Info))
      return Success(Value: *StrLen, E);
    return false;
  }
16663
  case Builtin::BIstrcmp:
  case Builtin::BIwcscmp:
  case Builtin::BIstrncmp:
  case Builtin::BIwcsncmp:
  case Builtin::BImemcmp:
  case Builtin::BIbcmp:
  case Builtin::BIwmemcmp:
    // A call to these library comparison functions is not a constant
    // expression; note that, then fall through and fold as an extension.
    if (Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function)
          << /*isConstexpr*/ 0 << /*isConstructor*/ 0
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp);
    else
      Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    [[fallthrough]];
  case Builtin::BI__builtin_strcmp:
  case Builtin::BI__builtin_wcscmp:
  case Builtin::BI__builtin_strncmp:
  case Builtin::BI__builtin_wcsncmp:
  case Builtin::BI__builtin_memcmp:
  case Builtin::BI__builtin_bcmp:
  case Builtin::BI__builtin_wmemcmp: {
    LValue String1, String2;
    if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: String1, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 1), Result&: String2, Info))
      return false;

    // The 'n' variants carry an explicit length in argument 2; the plain
    // strcmp/wcscmp forms compare until a null terminator.
    uint64_t MaxLength = uint64_t(-1);
    if (BuiltinOp != Builtin::BIstrcmp &&
        BuiltinOp != Builtin::BIwcscmp &&
        BuiltinOp != Builtin::BI__builtin_strcmp &&
        BuiltinOp != Builtin::BI__builtin_wcscmp) {
      APSInt N;
      if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info))
        return false;
      MaxLength = N.getZExtValue();
    }

    // Empty substrings compare equal by definition.
    if (MaxLength == 0u)
      return Success(Value: 0, E);

    if (!String1.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) ||
        !String2.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) ||
        String1.Designator.Invalid || String2.Designator.Invalid)
      return false;

    QualType CharTy1 = String1.Designator.getType(Ctx&: Info.Ctx);
    QualType CharTy2 = String2.Designator.getType(Ctx&: Info.Ctx);

    // memcmp/bcmp compare raw bytes regardless of the pointee type.
    bool IsRawByte = BuiltinOp == Builtin::BImemcmp ||
                     BuiltinOp == Builtin::BIbcmp ||
                     BuiltinOp == Builtin::BI__builtin_memcmp ||
                     BuiltinOp == Builtin::BI__builtin_bcmp;

    assert(IsRawByte ||
           (Info.Ctx.hasSameUnqualifiedType(
                CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
            Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));

    // For memcmp, allow comparing any arrays of '[[un]signed] char' or
    // 'char8_t', but no other types.
    if (IsRawByte &&
        !(isOneByteCharacterType(T: CharTy1) && isOneByteCharacterType(T: CharTy2))) {
      // FIXME: Consider using our bit_cast implementation to support this.
      Info.FFDiag(E, DiagId: diag::note_constexpr_memcmp_unsupported)
          << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp) << CharTy1
          << CharTy2;
      return false;
    }

    // Load the current element of each string; fails if either element is
    // unreadable or not an integer.
    const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
      return handleLValueToRValueConversion(Info, Conv: E, Type: CharTy1, LVal: String1, RVal&: Char1) &&
             handleLValueToRValueConversion(Info, Conv: E, Type: CharTy2, LVal: String2, RVal&: Char2) &&
             Char1.isInt() && Char2.isInt();
    };
    // Step both pointers to the next element, with full bounds checking.
    const auto &AdvanceElems = [&] {
      return HandleLValueArrayAdjustment(Info, E, LVal&: String1, EltTy: CharTy1, Adjustment: 1) &&
             HandleLValueArrayAdjustment(Info, E, LVal&: String2, EltTy: CharTy2, Adjustment: 1);
    };

    // String-style comparisons stop at the null terminator; the mem* forms
    // compare exactly MaxLength elements.
    bool StopAtNull =
        (BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp &&
         BuiltinOp != Builtin::BIwmemcmp &&
         BuiltinOp != Builtin::BI__builtin_memcmp &&
         BuiltinOp != Builtin::BI__builtin_bcmp &&
         BuiltinOp != Builtin::BI__builtin_wmemcmp);
    bool IsWide = BuiltinOp == Builtin::BIwcscmp ||
                  BuiltinOp == Builtin::BIwcsncmp ||
                  BuiltinOp == Builtin::BIwmemcmp ||
                  BuiltinOp == Builtin::BI__builtin_wcscmp ||
                  BuiltinOp == Builtin::BI__builtin_wcsncmp ||
                  BuiltinOp == Builtin::BI__builtin_wmemcmp;

    for (; MaxLength; --MaxLength) {
      APValue Char1, Char2;
      if (!ReadCurElems(Char1, Char2))
        return false;
      if (Char1.getInt().ne(RHS: Char2.getInt())) {
        if (IsWide) // wmemcmp compares with wchar_t signedness.
          return Success(Value: Char1.getInt() < Char2.getInt() ? -1 : 1, E);
        // memcmp always compares unsigned chars.
        return Success(Value: Char1.getInt().ult(RHS: Char2.getInt()) ? -1 : 1, E);
      }
      if (StopAtNull && !Char1.getInt())
        return Success(Value: 0, E);
      assert(!(StopAtNull && !Char2.getInt()));
      if (!AdvanceElems())
        return false;
    }
    // We hit the strncmp / memcmp limit.
    return Success(Value: 0, E);
  }
16777
  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
  case Builtin::BI__c11_atomic_is_lock_free: {
    APSInt SizeVal;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: SizeVal, Info))
      return false;

    // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
    // of two less than or equal to the maximum inline atomic width, we know it
    // is lock-free. If the size isn't a power of two, or greater than the
    // maximum alignment where we promote atomics, we know it is not lock-free
    // (at least not in the sense of atomic_is_lock_free). Otherwise,
    // the answer can only be determined at runtime; for example, 16-byte
    // atomics have lock-free implementations on some, but not all,
    // x86-64 processors.

    // Check power-of-two.
    CharUnits Size = CharUnits::fromQuantity(Quantity: SizeVal.getZExtValue());
    if (Size.isPowerOfTwo()) {
      // Check against inlining width.
      unsigned InlineWidthBits =
          Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
      if (Size <= Info.Ctx.toCharUnitsFromBits(BitSize: InlineWidthBits)) {
        // The C11 form ignores alignment of the pointer operand; a
        // single-byte object is trivially aligned.
        if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
            Size == CharUnits::One())
          return Success(Value: 1, E);

        // If the pointer argument can be evaluated to a compile-time constant
        // integer (or nullptr), check if that value is appropriately aligned.
        const Expr *PtrArg = E->getArg(Arg: 1);
        Expr::EvalResult ExprResult;
        APSInt IntResult;
        if (PtrArg->EvaluateAsRValue(Result&: ExprResult, Ctx: Info.Ctx) &&
            ExprResult.Val.toIntegralConstant(Result&: IntResult, SrcTy: PtrArg->getType(),
                                              Ctx: Info.Ctx) &&
            IntResult.isAligned(A: Size.getAsAlign()))
          return Success(Value: 1, E);

        // Otherwise, check the pointee type's alignment against Size.
        if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: PtrArg)) {
          // Drop the potential implicit-cast to 'const volatile void*', getting
          // the underlying type.
          if (ICE->getCastKind() == CK_BitCast)
            PtrArg = ICE->getSubExpr();
        }

        if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
          QualType PointeeType = PtrTy->getPointeeType();
          if (!PointeeType->isIncompleteType() &&
              Info.Ctx.getTypeAlignInChars(T: PointeeType) >= Size) {
            // OK, we will inline operations on this object.
            return Success(Value: 1, E);
          }
        }
      }
    }

    // always_lock_free must give a definite answer (0); the is_lock_free
    // forms may need a runtime query, so folding fails instead.
    return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
        Success(Value: 0, E) : Error(E);
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {
    // Add/subtract with carry-in (arg 2) and carry-out written through the
    // pointer in arg 3; the arithmetic result is the call's value.
    LValue CarryOutLValue;
    APSInt LHS, RHS, CarryIn, CarryOut, Result;
    QualType ResultType = E->getArg(Arg: 0)->getType();
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: CarryIn, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 3), Result&: CarryOutLValue, Info))
      return false;
    // Copy the number of bits and sign.
    Result = LHS;
    CarryOut = LHS;

    // Two unsigned operations are performed (op RHS, then op CarryIn); either
    // may overflow independently.
    bool FirstOverflowed = false;
    bool SecondOverflowed = false;
    switch (BuiltinOp) {
    default:
      llvm_unreachable("Invalid value for BuiltinOp");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      Result =
          LHS.uadd_ov(RHS, Overflow&: FirstOverflowed).uadd_ov(RHS: CarryIn, Overflow&: SecondOverflowed);
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      Result =
          LHS.usub_ov(RHS, Overflow&: FirstOverflowed).usub_ov(RHS: CarryIn, Overflow&: SecondOverflowed);
      break;
    }

    // It is possible for both overflows to happen but CGBuiltin uses an OR so
    // this is consistent.
    CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
    APValue APV{CarryOut};
    if (!handleAssignment(Info, E, LVal: CarryOutLValue, LValType: ResultType, Val&: APV))
      return false;
    return Success(SI: Result, E);
  }
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {
    // Checked arithmetic: the result is stored through arg 2 and the call
    // evaluates to whether overflow occurred.
    LValue ResultLValue;
    APSInt LHS, RHS;

    QualType ResultType = E->getArg(Arg: 2)->getType()->getPointeeType();
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 2), Result&: ResultLValue, Info))
      return false;

    APSInt Result;
    bool DidOverflow = false;

    // If the types don't have to match, enlarge all 3 to the largest of them.
    if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
        BuiltinOp == Builtin::BI__builtin_sub_overflow ||
        BuiltinOp == Builtin::BI__builtin_mul_overflow) {
      bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                      ResultType->isSignedIntegerOrEnumerationType();
      bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                       ResultType->isSignedIntegerOrEnumerationType();
      uint64_t LHSSize = LHS.getBitWidth();
      uint64_t RHSSize = RHS.getBitWidth();
      uint64_t ResultSize = Info.Ctx.getTypeSize(T: ResultType);
      uint64_t MaxBits = std::max(a: std::max(a: LHSSize, b: RHSSize), b: ResultSize);

      // Add an additional bit if the signedness isn't uniformly agreed to. We
      // could do this ONLY if there is a signed and an unsigned that both have
      // MaxBits, but the code to check that is pretty nasty. The issue will be
      // caught in the shrink-to-result later anyway.
      if (IsSigned && !AllSigned)
        ++MaxBits;

      LHS = APSInt(LHS.extOrTrunc(width: MaxBits), !IsSigned);
      RHS = APSInt(RHS.extOrTrunc(width: MaxBits), !IsSigned);
      Result = APSInt(MaxBits, !IsSigned);
    }

    // Find largest int.
    switch (BuiltinOp) {
    default:
      llvm_unreachable("Invalid value for BuiltinOp");
    case Builtin::BI__builtin_add_overflow:
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow&: DidOverflow)
                              : LHS.uadd_ov(RHS, Overflow&: DidOverflow);
      break;
    case Builtin::BI__builtin_sub_overflow:
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow&: DidOverflow)
                              : LHS.usub_ov(RHS, Overflow&: DidOverflow);
      break;
    case Builtin::BI__builtin_mul_overflow:
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow&: DidOverflow)
                              : LHS.umul_ov(RHS, Overflow&: DidOverflow);
      break;
    }

    // In the case where multiple sizes are allowed, truncate and see if
    // the values are the same.
    if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
        BuiltinOp == Builtin::BI__builtin_sub_overflow ||
        BuiltinOp == Builtin::BI__builtin_mul_overflow) {
      // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
      // since it will give us the behavior of a TruncOrSelf in the case where
      // its parameter <= its size. We previously set Result to be at least the
      // type-size of the result, so getTypeSize(ResultType) <= Result.BitWidth
      // will work exactly like TruncOrSelf.
      APSInt Temp = Result.extOrTrunc(width: Info.Ctx.getTypeSize(T: ResultType));
      Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

      if (!APSInt::isSameValue(I1: Temp, I2: Result))
        DidOverflow = true;
      Result = Temp;
    }

    APValue APV{Result};
    if (!handleAssignment(Info, E, LVal: ResultLValue, LValType: ResultType, Val&: APV))
      return false;
    return Success(Value: DidOverflow, E);
  }
17008
  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_and:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_xor:
  case Builtin::BI__builtin_reduce_min:
  case Builtin::BI__builtin_reduce_max: {
    // Horizontal reduction over an integer vector: fold all elements into a
    // single scalar with the builtin's operation.
    APValue Source;
    if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source))
      return false;

    // Seed the accumulator with element 0, then fold in the rest.
    unsigned SourceLen = Source.getVectorLength();
    APSInt Reduced = Source.getVectorElt(I: 0).getInt();
    for (unsigned EltNum = 1; EltNum < SourceLen; ++EltNum) {
      switch (BuiltinOp) {
      default:
        return false;
      case Builtin::BI__builtin_reduce_add: {
        // Widen by one bit so CheckedIntArithmetic can diagnose overflow.
        if (!CheckedIntArithmetic(
                Info, E, LHS: Reduced, RHS: Source.getVectorElt(I: EltNum).getInt(),
                BitWidth: Reduced.getBitWidth() + 1, Op: std::plus<APSInt>(), Result&: Reduced))
          return false;
        break;
      }
      case Builtin::BI__builtin_reduce_mul: {
        // Doubling the width is enough to hold any single multiplication.
        if (!CheckedIntArithmetic(
                Info, E, LHS: Reduced, RHS: Source.getVectorElt(I: EltNum).getInt(),
                BitWidth: Reduced.getBitWidth() * 2, Op: std::multiplies<APSInt>(), Result&: Reduced))
          return false;
        break;
      }
      case Builtin::BI__builtin_reduce_and: {
        Reduced &= Source.getVectorElt(I: EltNum).getInt();
        break;
      }
      case Builtin::BI__builtin_reduce_or: {
        Reduced |= Source.getVectorElt(I: EltNum).getInt();
        break;
      }
      case Builtin::BI__builtin_reduce_xor: {
        Reduced ^= Source.getVectorElt(I: EltNum).getInt();
        break;
      }
      case Builtin::BI__builtin_reduce_min: {
        Reduced = std::min(a: Reduced, b: Source.getVectorElt(I: EltNum).getInt());
        break;
      }
      case Builtin::BI__builtin_reduce_max: {
        Reduced = std::max(a: Reduced, b: Source.getVectorElt(I: EltNum).getInt());
        break;
      }
      }
    }

    return Success(SI: Reduced, E);
  }
17065
  case clang::X86::BI__builtin_ia32_addcarryx_u32:
  case clang::X86::BI__builtin_ia32_addcarryx_u64:
  case clang::X86::BI__builtin_ia32_subborrow_u32:
  case clang::X86::BI__builtin_ia32_subborrow_u64: {
    // x86 ADCX/SBB-style builtins: compute LHS +/- (RHS + carry-in), store
    // the low bits through arg 3, and return the carry/borrow-out bit.
    LValue ResultLValue;
    APSInt CarryIn, LHS, RHS;
    QualType ResultType = E->getArg(Arg: 3)->getType()->getPointeeType();
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: CarryIn, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: LHS, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 2), Result&: RHS, Info) ||
        !EvaluatePointer(E: E->getArg(Arg: 3), Result&: ResultLValue, Info))
      return false;

    bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
                 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

    // Any nonzero carry-in counts as 1. Work one bit wider so the carry-out
    // lands in the extra top bit.
    unsigned BitWidth = LHS.getBitWidth();
    unsigned CarryInBit = CarryIn.ugt(RHS: 0) ? 1 : 0;
    APInt ExResult =
        IsAdd
            ? (LHS.zext(width: BitWidth + 1) + (RHS.zext(width: BitWidth + 1) + CarryInBit))
            : (LHS.zext(width: BitWidth + 1) - (RHS.zext(width: BitWidth + 1) + CarryInBit));

    APInt Result = ExResult.extractBits(numBits: BitWidth, bitPosition: 0);
    uint64_t CarryOut = ExResult.extractBitsAsZExtValue(numBits: 1, bitPosition: BitWidth);

    APValue APV{APSInt(Result, /*isUnsigned=*/true)};
    if (!handleAssignment(Info, E, LVal: ResultLValue, LValType: ResultType, Val&: APV))
      return false;
    return Success(Value: CarryOut, E);
  }
17097
  case clang::X86::BI__builtin_ia32_movmskps:
  case clang::X86::BI__builtin_ia32_movmskpd:
  case clang::X86::BI__builtin_ia32_pmovmskb128:
  case clang::X86::BI__builtin_ia32_pmovmskb256:
  case clang::X86::BI__builtin_ia32_movmskps256:
  case clang::X86::BI__builtin_ia32_movmskpd256: {
    // movmsk: gather the sign bit of every vector element into the low bits
    // of an integer result.
    APValue Source;
    if (!Evaluate(Result&: Source, Info, E: E->getArg(Arg: 0)))
      return false;
    unsigned SourceLen = Source.getVectorLength();
    const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>();
    QualType ElemQT = VT->getElementType();
    unsigned ResultLen = Info.Ctx.getTypeSize(
        T: E->getCallReturnType(Ctx: Info.Ctx)); // Always 32-bit integer.
    APInt Result(ResultLen, 0);

    for (unsigned I = 0; I != SourceLen; ++I) {
      APInt Elem;
      // Float elements are reinterpreted as raw bits so isNegative() reads
      // the IEEE sign bit.
      if (ElemQT->isIntegerType()) {
        Elem = Source.getVectorElt(I).getInt();
      } else if (ElemQT->isRealFloatingType()) {
        Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt();
      } else {
        return false;
      }
      Result.setBitVal(BitPosition: I, BitValue: Elem.isNegative());
    }
    return Success(I: Result, E);
  }
17127
  case clang::X86::BI__builtin_ia32_bextr_u32:
  case clang::X86::BI__builtin_ia32_bextr_u64:
  case clang::X86::BI__builtin_ia32_bextri_u32:
  case clang::X86::BI__builtin_ia32_bextri_u64: {
    // BEXTR: extract a bit field; control word packs the start bit in bits
    // [7:0] and the field length in bits [15:8].
    APSInt Val, Idx;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Idx, Info))
      return false;

    unsigned BitWidth = Val.getBitWidth();
    uint64_t Shift = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0);
    uint64_t Length = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 8);
    // Clamp the length to the operand width, matching hardware behavior.
    Length = Length > BitWidth ? BitWidth : Length;

    // Handle out of bounds cases.
    if (Length == 0 || Shift >= BitWidth)
      return Success(Value: 0, E);

    uint64_t Result = Val.getZExtValue() >> Shift;
    Result &= llvm::maskTrailingOnes<uint64_t>(N: Length);
    return Success(Value: Result, E);
  }

  case clang::X86::BI__builtin_ia32_bzhi_si:
  case clang::X86::BI__builtin_ia32_bzhi_di: {
    // BZHI: zero all bits at position Index and above; an out-of-range index
    // leaves the value untouched (hardware behavior).
    APSInt Val, Idx;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Idx, Info))
      return false;

    unsigned BitWidth = Val.getBitWidth();
    unsigned Index = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0);
    if (Index < BitWidth)
      Val.clearHighBits(hiBits: BitWidth - Index);
    return Success(SI: Val, E);
  }
17164
  case clang::X86::BI__builtin_ia32_ktestcqi:
  case clang::X86::BI__builtin_ia32_ktestchi:
  case clang::X86::BI__builtin_ia32_ktestcsi:
  case clang::X86::BI__builtin_ia32_ktestcdi: {
    // KTEST carry flag: set when A covers every bit of B (~A & B == 0).
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: (~A & B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_ktestzqi:
  case clang::X86::BI__builtin_ia32_ktestzhi:
  case clang::X86::BI__builtin_ia32_ktestzsi:
  case clang::X86::BI__builtin_ia32_ktestzdi: {
    // KTEST zero flag: set when A and B share no bits (A & B == 0).
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: (A & B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_kortestcqi:
  case clang::X86::BI__builtin_ia32_kortestchi:
  case clang::X86::BI__builtin_ia32_kortestcsi:
  case clang::X86::BI__builtin_ia32_kortestcdi: {
    // KORTEST carry flag: set when A | B is all-ones.
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: ~(A | B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_kortestzqi:
  case clang::X86::BI__builtin_ia32_kortestzhi:
  case clang::X86::BI__builtin_ia32_kortestzsi:
  case clang::X86::BI__builtin_ia32_kortestzdi: {
    // KORTEST zero flag: set when A | B is all-zeros.
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    return Success(Value: (A | B) == 0, E);
  }

  case clang::X86::BI__builtin_ia32_kunpckhi:
  case clang::X86::BI__builtin_ia32_kunpckdi:
  case clang::X86::BI__builtin_ia32_kunpcksi: {
    APSInt A, B;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info))
      return false;

    // Generic kunpack: extract lower half of each operand and concatenate
    // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
    unsigned BW = A.getBitWidth();
    APSInt Result(A.trunc(width: BW / 2).concat(NewLSB: B.trunc(width: BW / 2)), A.isUnsigned());
    return Success(SI: Result, E);
  }
17227
  case clang::X86::BI__builtin_ia32_lzcnt_u16:
  case clang::X86::BI__builtin_ia32_lzcnt_u32:
  case clang::X86::BI__builtin_ia32_lzcnt_u64: {
    // LZCNT: leading-zero count; a zero input yields the full bit width.
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    return Success(Value: Val.countLeadingZeros(), E);
  }

  case clang::X86::BI__builtin_ia32_tzcnt_u16:
  case clang::X86::BI__builtin_ia32_tzcnt_u32:
  case clang::X86::BI__builtin_ia32_tzcnt_u64: {
    // TZCNT: trailing-zero count; a zero input yields the full bit width.
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    return Success(Value: Val.countTrailingZeros(), E);
  }
17245
  case clang::X86::BI__builtin_ia32_pdep_si:
  case clang::X86::BI__builtin_ia32_pdep_di: {
    // PDEP: scatter the low bits of Val into the positions selected by the
    // set bits of Msk; all other result bits are zero.
    APSInt Val, Msk;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Msk, Info))
      return false;

    unsigned BitWidth = Val.getBitWidth();
    APInt Result = APInt::getZero(numBits: BitWidth);
    // P walks Val's low bits; I walks the mask positions.
    for (unsigned I = 0, P = 0; I != BitWidth; ++I)
      if (Msk[I])
        Result.setBitVal(BitPosition: I, BitValue: Val[P++]);
    return Success(I: Result, E);
  }

  case clang::X86::BI__builtin_ia32_pext_si:
  case clang::X86::BI__builtin_ia32_pext_di: {
    // PEXT: the inverse of PDEP — gather the bits of Val at the mask's set
    // positions and pack them into the low bits of the result.
    APSInt Val, Msk;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Msk, Info))
      return false;

    unsigned BitWidth = Val.getBitWidth();
    APInt Result = APInt::getZero(numBits: BitWidth);
    for (unsigned I = 0, P = 0; I != BitWidth; ++I)
      if (Msk[I])
        Result.setBitVal(BitPosition: P++, BitValue: Val[I]);
    return Success(I: Result, E);
  }
  case X86::BI__builtin_ia32_ptestz128:
  case X86::BI__builtin_ia32_ptestz256:
  case X86::BI__builtin_ia32_vtestzps:
  case X86::BI__builtin_ia32_vtestzps256:
  case X86::BI__builtin_ia32_vtestzpd:
  case X86::BI__builtin_ia32_vtestzpd256: {
    // PTEST/VTEST zero flag: set when A & B has no bits set.
    return EvalTestOp(
        [](const APInt &A, const APInt &B) { return (A & B) == 0; });
  }
  case X86::BI__builtin_ia32_ptestc128:
  case X86::BI__builtin_ia32_ptestc256:
  case X86::BI__builtin_ia32_vtestcps:
  case X86::BI__builtin_ia32_vtestcps256:
  case X86::BI__builtin_ia32_vtestcpd:
  case X86::BI__builtin_ia32_vtestcpd256: {
    // PTEST/VTEST carry flag: set when ~A & B has no bits set.
    return EvalTestOp(
        [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
  }
  case X86::BI__builtin_ia32_ptestnzc128:
  case X86::BI__builtin_ia32_ptestnzc256:
  case X86::BI__builtin_ia32_vtestnzcps:
  case X86::BI__builtin_ia32_vtestnzcps256:
  case X86::BI__builtin_ia32_vtestnzcpd:
  case X86::BI__builtin_ia32_vtestnzcpd256: {
    // PTEST/VTEST "not zero and not carry": both flags above are clear.
    return EvalTestOp([](const APInt &A, const APInt &B) {
      return ((A & B) != 0) && ((~A & B) != 0);
    });
  }
  case X86::BI__builtin_ia32_kandqi:
  case X86::BI__builtin_ia32_kandhi:
  case X86::BI__builtin_ia32_kandsi:
  case X86::BI__builtin_ia32_kanddi: {
    // KAND: bitwise AND of two mask registers.
    return HandleMaskBinOp(
        [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
  }

  case X86::BI__builtin_ia32_kandnqi:
  case X86::BI__builtin_ia32_kandnhi:
  case X86::BI__builtin_ia32_kandnsi:
  case X86::BI__builtin_ia32_kandndi: {
    // KANDN: AND-NOT — the first operand is complemented.
    return HandleMaskBinOp(
        [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
  }

  case X86::BI__builtin_ia32_korqi:
  case X86::BI__builtin_ia32_korhi:
  case X86::BI__builtin_ia32_korsi:
  case X86::BI__builtin_ia32_kordi: {
    // KOR: bitwise OR of two mask registers.
    return HandleMaskBinOp(
        [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
  }

  case X86::BI__builtin_ia32_kxnorqi:
  case X86::BI__builtin_ia32_kxnorhi:
  case X86::BI__builtin_ia32_kxnorsi:
  case X86::BI__builtin_ia32_kxnordi: {
    // KXNOR: complemented XOR.
    return HandleMaskBinOp(
        [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
  }

  case X86::BI__builtin_ia32_kxorqi:
  case X86::BI__builtin_ia32_kxorhi:
  case X86::BI__builtin_ia32_kxorsi:
  case X86::BI__builtin_ia32_kxordi: {
    // KXOR: bitwise XOR of two mask registers.
    return HandleMaskBinOp(
        [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
  }

  case X86::BI__builtin_ia32_knotqi:
  case X86::BI__builtin_ia32_knothi:
  case X86::BI__builtin_ia32_knotsi:
  case X86::BI__builtin_ia32_knotdi: {
    // KNOT: bitwise complement of a single mask register.
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    APSInt Result = ~Val;
    return Success(V: APValue(Result), E);
  }

  case X86::BI__builtin_ia32_kaddqi:
  case X86::BI__builtin_ia32_kaddhi:
  case X86::BI__builtin_ia32_kaddsi:
  case X86::BI__builtin_ia32_kadddi: {
    // KADD: wrapping addition of two mask registers.
    return HandleMaskBinOp(
        [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
  }
17361
  case X86::BI__builtin_ia32_kmovb:
  case X86::BI__builtin_ia32_kmovw:
  case X86::BI__builtin_ia32_kmovd:
  case X86::BI__builtin_ia32_kmovq: {
    // KMOV: a mask-register move is an identity at the value level.
    APSInt Val;
    if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info))
      return false;
    return Success(SI: Val, E);
  }

  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi: {
    // KSHIFTL: shift the mask left; the 8-bit immediate saturates to zero
    // when it reaches or exceeds the mask width.
    return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) {
      unsigned Amt = RHS.getZExtValue() & 0xFF;
      if (Amt >= LHS.getBitWidth())
        return APSInt(APInt::getZero(numBits: LHS.getBitWidth()), LHS.isUnsigned());
      return APSInt(LHS.shl(shiftAmt: Amt), LHS.isUnsigned());
    });
  }

  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi: {
    // KSHIFTR: logical right shift with the same zero-saturation as KSHIFTL.
    return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) {
      unsigned Amt = RHS.getZExtValue() & 0xFF;
      if (Amt >= LHS.getBitWidth())
        return APSInt(APInt::getZero(numBits: LHS.getBitWidth()), LHS.isUnsigned());
      return APSInt(LHS.lshr(shiftAmt: Amt), LHS.isUnsigned());
    });
  }
17395
  case clang::X86::BI__builtin_ia32_vec_ext_v4hi:
  case clang::X86::BI__builtin_ia32_vec_ext_v16qi:
  case clang::X86::BI__builtin_ia32_vec_ext_v8hi:
  case clang::X86::BI__builtin_ia32_vec_ext_v4si:
  case clang::X86::BI__builtin_ia32_vec_ext_v2di:
  case clang::X86::BI__builtin_ia32_vec_ext_v32qi:
  case clang::X86::BI__builtin_ia32_vec_ext_v16hi:
  case clang::X86::BI__builtin_ia32_vec_ext_v8si:
  case clang::X86::BI__builtin_ia32_vec_ext_v4di: {
    // vec_ext: extract one element from an integer vector.
    APValue Vec;
    APSInt IdxAPS;
    if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: IdxAPS, Info))
      return false;
    unsigned N = Vec.getVectorLength();
    // The index is taken modulo the (power-of-two) vector length.
    unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
    return Success(SI: Vec.getVectorElt(I: Idx).getInt(), E);
  }
17414
17415 case clang::X86::BI__builtin_ia32_cvtb2mask128:
17416 case clang::X86::BI__builtin_ia32_cvtb2mask256:
17417 case clang::X86::BI__builtin_ia32_cvtb2mask512:
17418 case clang::X86::BI__builtin_ia32_cvtw2mask128:
17419 case clang::X86::BI__builtin_ia32_cvtw2mask256:
17420 case clang::X86::BI__builtin_ia32_cvtw2mask512:
17421 case clang::X86::BI__builtin_ia32_cvtd2mask128:
17422 case clang::X86::BI__builtin_ia32_cvtd2mask256:
17423 case clang::X86::BI__builtin_ia32_cvtd2mask512:
17424 case clang::X86::BI__builtin_ia32_cvtq2mask128:
17425 case clang::X86::BI__builtin_ia32_cvtq2mask256:
17426 case clang::X86::BI__builtin_ia32_cvtq2mask512: {
17427 assert(E->getNumArgs() == 1);
17428 APValue Vec;
17429 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info))
17430 return false;
17431
17432 unsigned VectorLen = Vec.getVectorLength();
17433 unsigned RetWidth = Info.Ctx.getIntWidth(T: E->getType());
17434 llvm::APInt Bits(RetWidth, 0);
17435
17436 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
17437 const APSInt &A = Vec.getVectorElt(I: ElemNum).getInt();
17438 unsigned MSB = A[A.getBitWidth() - 1];
17439 Bits.setBitVal(BitPosition: ElemNum, BitValue: MSB);
17440 }
17441
17442 APSInt RetMask(Bits, /*isUnsigned=*/true);
17443 return Success(V: APValue(RetMask), E);
17444 }
17445
17446 case clang::X86::BI__builtin_ia32_cmpb128_mask:
17447 case clang::X86::BI__builtin_ia32_cmpw128_mask:
17448 case clang::X86::BI__builtin_ia32_cmpd128_mask:
17449 case clang::X86::BI__builtin_ia32_cmpq128_mask:
17450 case clang::X86::BI__builtin_ia32_cmpb256_mask:
17451 case clang::X86::BI__builtin_ia32_cmpw256_mask:
17452 case clang::X86::BI__builtin_ia32_cmpd256_mask:
17453 case clang::X86::BI__builtin_ia32_cmpq256_mask:
17454 case clang::X86::BI__builtin_ia32_cmpb512_mask:
17455 case clang::X86::BI__builtin_ia32_cmpw512_mask:
17456 case clang::X86::BI__builtin_ia32_cmpd512_mask:
17457 case clang::X86::BI__builtin_ia32_cmpq512_mask:
17458 case clang::X86::BI__builtin_ia32_ucmpb128_mask:
17459 case clang::X86::BI__builtin_ia32_ucmpw128_mask:
17460 case clang::X86::BI__builtin_ia32_ucmpd128_mask:
17461 case clang::X86::BI__builtin_ia32_ucmpq128_mask:
17462 case clang::X86::BI__builtin_ia32_ucmpb256_mask:
17463 case clang::X86::BI__builtin_ia32_ucmpw256_mask:
17464 case clang::X86::BI__builtin_ia32_ucmpd256_mask:
17465 case clang::X86::BI__builtin_ia32_ucmpq256_mask:
17466 case clang::X86::BI__builtin_ia32_ucmpb512_mask:
17467 case clang::X86::BI__builtin_ia32_ucmpw512_mask:
17468 case clang::X86::BI__builtin_ia32_ucmpd512_mask:
17469 case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
17470 assert(E->getNumArgs() == 4);
17471
17472 bool IsUnsigned =
17473 (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
17474 BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpw512_mask);
17475
17476 APValue LHS, RHS;
17477 APSInt Mask, Opcode;
17478 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: LHS, Info) ||
17479 !EvaluateVector(E: E->getArg(Arg: 1), Result&: RHS, Info) ||
17480 !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Opcode, Info) ||
17481 !EvaluateInteger(E: E->getArg(Arg: 3), Result&: Mask, Info))
17482 return false;
17483
17484 assert(LHS.getVectorLength() == RHS.getVectorLength());
17485
17486 unsigned VectorLen = LHS.getVectorLength();
17487 unsigned RetWidth = Mask.getBitWidth();
17488
17489 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
17490
17491 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
17492 const APSInt &A = LHS.getVectorElt(I: ElemNum).getInt();
17493 const APSInt &B = RHS.getVectorElt(I: ElemNum).getInt();
17494 bool Result = false;
17495
17496 switch (Opcode.getExtValue() & 0x7) {
17497 case 0: // _MM_CMPINT_EQ
17498 Result = (A == B);
17499 break;
17500 case 1: // _MM_CMPINT_LT
17501 Result = IsUnsigned ? A.ult(RHS: B) : A.slt(RHS: B);
17502 break;
17503 case 2: // _MM_CMPINT_LE
17504 Result = IsUnsigned ? A.ule(RHS: B) : A.sle(RHS: B);
17505 break;
17506 case 3: // _MM_CMPINT_FALSE
17507 Result = false;
17508 break;
17509 case 4: // _MM_CMPINT_NE
17510 Result = (A != B);
17511 break;
17512 case 5: // _MM_CMPINT_NLT (>=)
17513 Result = IsUnsigned ? A.uge(RHS: B) : A.sge(RHS: B);
17514 break;
17515 case 6: // _MM_CMPINT_NLE (>)
17516 Result = IsUnsigned ? A.ugt(RHS: B) : A.sgt(RHS: B);
17517 break;
17518 case 7: // _MM_CMPINT_TRUE
17519 Result = true;
17520 break;
17521 }
17522
17523 RetMask.setBitVal(BitPosition: ElemNum, BitValue: Mask[ElemNum] && Result);
17524 }
17525
17526 return Success(V: APValue(RetMask), E);
17527 }
17528 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
17529 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
17530 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
17531 assert(E->getNumArgs() == 3);
17532
17533 APValue Source, ShuffleMask;
17534 APSInt ZeroMask;
17535 if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Source, Info) ||
17536 !EvaluateVector(E: E->getArg(Arg: 1), Result&: ShuffleMask, Info) ||
17537 !EvaluateInteger(E: E->getArg(Arg: 2), Result&: ZeroMask, Info))
17538 return false;
17539
17540 assert(Source.getVectorLength() == ShuffleMask.getVectorLength());
17541 assert(ZeroMask.getBitWidth() == Source.getVectorLength());
17542
17543 unsigned NumBytesInQWord = 8;
17544 unsigned NumBitsInByte = 8;
17545 unsigned NumBytes = Source.getVectorLength();
17546 unsigned NumQWords = NumBytes / NumBytesInQWord;
17547 unsigned RetWidth = ZeroMask.getBitWidth();
17548 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
17549
17550 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
17551 APInt SourceQWord(64, 0);
17552 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
17553 uint64_t Byte = Source.getVectorElt(I: QWordId * NumBytesInQWord + ByteIdx)
17554 .getInt()
17555 .getZExtValue();
17556 SourceQWord.insertBits(SubBits: APInt(8, Byte & 0xFF), bitPosition: ByteIdx * NumBitsInByte);
17557 }
17558
17559 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
17560 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
17561 unsigned M =
17562 ShuffleMask.getVectorElt(I: SelIdx).getInt().getZExtValue() & 0x3F;
17563 if (ZeroMask[SelIdx]) {
17564 RetMask.setBitVal(BitPosition: SelIdx, BitValue: SourceQWord[M]);
17565 }
17566 }
17567 }
17568 return Success(V: APValue(RetMask), E);
17569 }
17570 }
17571}
17572
17573/// Determine whether this is a pointer past the end of the complete
17574/// object referred to by the lvalue.
17575static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
17576 const LValue &LV) {
17577 // A null pointer can be viewed as being "past the end" but we don't
17578 // choose to look at it that way here.
17579 if (!LV.getLValueBase())
17580 return false;
17581
17582 // If the designator is valid and refers to a subobject, we're not pointing
17583 // past the end.
17584 if (!LV.getLValueDesignator().Invalid &&
17585 !LV.getLValueDesignator().isOnePastTheEnd())
17586 return false;
17587
17588 // A pointer to an incomplete type might be past-the-end if the type's size is
17589 // zero. We cannot tell because the type is incomplete.
17590 QualType Ty = getType(B: LV.getLValueBase());
17591 if (Ty->isIncompleteType())
17592 return true;
17593
17594 // Can't be past the end of an invalid object.
17595 if (LV.getLValueDesignator().Invalid)
17596 return false;
17597
17598 // We're a past-the-end pointer if we point to the byte after the object,
17599 // no matter what our type or path is.
17600 auto Size = Ctx.getTypeSizeInChars(T: Ty);
17601 return LV.getLValueOffset() == Size;
17602}
17603
namespace {

/// Data recursive integer evaluator of certain binary operators.
///
/// We use a data recursive algorithm for binary operators so that we are able
/// to handle extreme cases of chained binary operators without causing stack
/// overflow.
class DataRecursiveIntBinOpEvaluator {
  /// The outcome of evaluating one subexpression: the computed value plus a
  /// flag recording whether evaluation failed.
  struct EvalResult {
    APValue Val;
    bool Failed = false;

    EvalResult() = default;

    // Move RHS's value and failure flag into this result, clearing RHS's
    // failure flag so the RHS object can be reused for the next job.
    void swap(EvalResult &RHS) {
      Val.swap(RHS&: RHS.Val);
      Failed = RHS.Failed;
      RHS.Failed = false;
    }
  };

  /// One work item on the explicit evaluation stack: an expression together
  /// with how far its evaluation has progressed (see Kind).
  struct Job {
    const Expr *E;
    EvalResult LHSResult; // meaningful only for binary operator expression.
    enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;

    Job() = default;
    Job(Job &&) = default;

    // Begin speculative evaluation for this job's RHS: diagnostics emitted
    // while the RAII member is live are suppressed (used when the LHS failed
    // but the RHS may still determine a logical operator's result).
    void startSpeculativeEval(EvalInfo &Info) {
      SpecEvalRAII = SpeculativeEvaluationRAII(Info);
    }

  private:
    SpeculativeEvaluationRAII SpecEvalRAII;
  };

  /// Explicit work stack replacing the C++ call stack; the back element is
  /// the job currently being processed.
  SmallVector<Job, 16> Queue;

  IntExprEvaluator &IntEval;
  EvalInfo &Info;
  APValue &FinalResult;

public:
  DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
    : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }

  /// True if \p E is a binary operator that we are going to handle
  /// data recursively.
  /// We handle binary operators that are comma, logical, or that have operands
  /// with integral or enumeration type.
  static bool shouldEnqueue(const BinaryOperator *E) {
    return E->getOpcode() == BO_Comma || E->isLogicalOp() ||
           (E->isPRValue() && E->getType()->isIntegralOrEnumerationType() &&
            E->getLHS()->getType()->isIntegralOrEnumerationType() &&
            E->getRHS()->getType()->isIntegralOrEnumerationType());
  }

  /// Evaluate \p E iteratively, draining the work queue, and store the value
  /// in FinalResult. Returns false if evaluation failed.
  bool Traverse(const BinaryOperator *E) {
    enqueue(E);
    EvalResult PrevResult;
    while (!Queue.empty())
      process(Result&: PrevResult);

    if (PrevResult.Failed) return false;

    FinalResult.swap(RHS&: PrevResult.Val);
    return true;
  }

private:
  // Thin forwarding wrappers so process()/VisitBinOp() can report results and
  // diagnostics through the owning IntExprEvaluator.
  bool Success(uint64_t Value, const Expr *E, APValue &Result) {
    return IntEval.Success(Value, E, Result);
  }
  bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
    return IntEval.Success(SI: Value, E, Result);
  }
  bool Error(const Expr *E) {
    return IntEval.Error(E);
  }
  bool Error(const Expr *E, diag::kind D) {
    return IntEval.Error(E, D);
  }

  OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
    return Info.CCEDiag(E, DiagId: D);
  }

  // Returns true if visiting the RHS is necessary, false otherwise.
  bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
                         bool &SuppressRHSDiags);

  // Combine fully evaluated LHS/RHS results for E into Result.
  bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
                  const BinaryOperator *E, APValue &Result);

  // Evaluate E non-recursively (it is not a binop we handle data-recursively),
  // recording failure in Result.
  void EvaluateExpr(const Expr *E, EvalResult &Result) {
    Result.Failed = !Evaluate(Result&: Result.Val, Info, E);
    if (Result.Failed)
      Result.Val = APValue();
  }

  void process(EvalResult &Result);

  // Push a new job for E (with parens stripped) onto the work stack.
  // Note: this may grow Queue and invalidate references into it.
  void enqueue(const Expr *E) {
    E = E->IgnoreParens();
    Queue.resize(N: Queue.size()+1);
    Queue.back().E = E;
    Queue.back().Kind = Job::AnyExprKind;
  }
};

}
17716
// Inspect the evaluated LHS of a comma, logical, or integral binary operator
// and decide whether the RHS still needs to be visited. Returns true if
// visiting the RHS is necessary, false otherwise. For a short-circuited
// logical operator the final value is written back into LHSResult.Val. If the
// LHS failed but the RHS might still determine the result (X && 0, X || 1),
// SuppressRHSDiags is set so the caller evaluates the RHS speculatively.
bool DataRecursiveIntBinOpEvaluator::
    VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
                      bool &SuppressRHSDiags) {
  if (E->getOpcode() == BO_Comma) {
    // Ignore LHS but note if we could not evaluate it.
    if (LHSResult.Failed)
      return Info.noteSideEffect();
    return true;
  }

  if (E->isLogicalOp()) {
    bool LHSAsBool;
    if (!LHSResult.Failed && HandleConversionToBool(Val: LHSResult.Val, Result&: LHSAsBool)) {
      // We were able to evaluate the LHS, see if we can get away with not
      // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
      if (LHSAsBool == (E->getOpcode() == BO_LOr)) {
        Success(Value: LHSAsBool, E, Result&: LHSResult.Val);
        return false; // Ignore RHS
      }
    } else {
      LHSResult.Failed = true;

      // Since we weren't able to evaluate the left hand side, it
      // might have had side effects.
      if (!Info.noteSideEffect())
        return false;

      // We can't evaluate the LHS; however, sometimes the result
      // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
      // Don't ignore RHS and suppress diagnostics from this arm.
      SuppressRHSDiags = true;
    }

    return true;
  }

  // Remaining case: a plain arithmetic/relational binop over integral
  // operands (guaranteed by shouldEnqueue).
  assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
         E->getRHS()->getType()->isIntegralOrEnumerationType());

  // Still evaluate the RHS even if the LHS failed, so further notes can be
  // produced, unless diagnostics say there is no point in continuing.
  if (LHSResult.Failed && !Info.noteFailure())
    return false; // Ignore RHS;

  return true;
}
17761
17762static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index,
17763 bool IsSub) {
17764 // Compute the new offset in the appropriate width, wrapping at 64 bits.
17765 // FIXME: When compiling for a 32-bit target, we should use 32-bit
17766 // offsets.
17767 assert(!LVal.hasLValuePath() && "have designator for integer lvalue");
17768 CharUnits &Offset = LVal.getLValueOffset();
17769 uint64_t Offset64 = Offset.getQuantity();
17770 uint64_t Index64 = Index.extOrTrunc(width: 64).getZExtValue();
17771 Offset = CharUnits::fromQuantity(Quantity: IsSub ? Offset64 - Index64
17772 : Offset64 + Index64);
17773}
17774
// Combine the fully evaluated LHS and RHS of binary operator E into Result.
// Handles, in order: comma, logical operators (including the case where only
// one side could be evaluated), lvalue +/- integer arithmetic on pointers
// cast to integers, subtraction of two label addresses, and finally ordinary
// integer arithmetic. Returns false on failure.
bool DataRecursiveIntBinOpEvaluator::
    VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
               const BinaryOperator *E, APValue &Result) {
  // Comma: the value is simply the RHS value.
  if (E->getOpcode() == BO_Comma) {
    if (RHSResult.Failed)
      return false;
    Result = RHSResult.Val;
    return true;
  }

  if (E->isLogicalOp()) {
    bool lhsResult, rhsResult;
    bool LHSIsOK = HandleConversionToBool(Val: LHSResult.Val, Result&: lhsResult);
    bool RHSIsOK = HandleConversionToBool(Val: RHSResult.Val, Result&: rhsResult);

    if (LHSIsOK) {
      if (RHSIsOK) {
        if (E->getOpcode() == BO_LOr)
          return Success(Value: lhsResult || rhsResult, E, Result);
        else
          return Success(Value: lhsResult && rhsResult, E, Result);
      }
    } else {
      if (RHSIsOK) {
        // We can't evaluate the LHS; however, sometimes the result
        // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
        if (rhsResult == (E->getOpcode() == BO_LOr))
          return Success(Value: rhsResult, E, Result);
      }
    }

    return false;
  }

  assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
         E->getRHS()->getType()->isIntegralOrEnumerationType());

  if (LHSResult.Failed || RHSResult.Failed)
    return false;

  const APValue &LHSVal = LHSResult.Val;
  const APValue &RHSVal = RHSResult.Val;

  // Handle cases like (unsigned long)&a + 4.
  if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
    Result = LHSVal;
    addOrSubLValueAsInteger(LVal&: Result, Index: RHSVal.getInt(), IsSub: E->getOpcode() == BO_Sub);
    return true;
  }

  // Handle cases like 4 + (unsigned long)&a
  if (E->getOpcode() == BO_Add &&
      RHSVal.isLValue() && LHSVal.isInt()) {
    Result = RHSVal;
    addOrSubLValueAsInteger(LVal&: Result, Index: LHSVal.getInt(), /*IsSub*/false);
    return true;
  }

  if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
    // Handle (intptr_t)&&A - (intptr_t)&&B.
    // Only the exact &&A - &&B form (zero offsets, both bases AddrLabelExprs)
    // is representable; anything else is not a constant.
    if (!LHSVal.getLValueOffset().isZero() ||
        !RHSVal.getLValueOffset().isZero())
      return false;
    const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
    const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
    if (!LHSExpr || !RHSExpr)
      return false;
    const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: LHSExpr);
    const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: RHSExpr);
    if (!LHSAddrExpr || !RHSAddrExpr)
      return false;
    // Make sure both labels come from the same function.
    if (LHSAddrExpr->getLabel()->getDeclContext() !=
        RHSAddrExpr->getLabel()->getDeclContext())
      return false;
    Result = APValue(LHSAddrExpr, RHSAddrExpr);
    return true;
  }

  // All the remaining cases expect both operands to be an integer
  if (!LHSVal.isInt() || !RHSVal.isInt())
    return Error(E);

  // Set up the width and signedness manually, in case it can't be deduced
  // from the operation we're performing.
  // FIXME: Don't do this in the cases where we can deduce it.
  APSInt Value(Info.Ctx.getIntWidth(T: E->getType()),
               E->getType()->isUnsignedIntegerOrEnumerationType());
  if (!handleIntIntBinOp(Info, E, LHS: LHSVal.getInt(), Opcode: E->getOpcode(),
                         RHS: RHSVal.getInt(), Result&: Value))
    return false;
  return Success(Value, E, Result);
}
17868
// Advance the state machine of the job on top of the queue:
//   AnyExprKind         - not yet examined: either expand into a BinOpKind
//                         job (enqueueing its LHS) or evaluate it directly.
//   BinOpKind           - the LHS has just been evaluated (in Result); decide
//                         whether to visit the RHS and enqueue it.
//   BinOpVisitedLHSKind - the RHS has just been evaluated; combine it with
//                         the saved LHS result.
// On return, Result holds the value of the most recently completed job.
// NOTE: enqueue() may grow Queue and invalidate the 'job' reference, so each
// case finishes all accesses to 'job' before calling enqueue().
void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
  Job &job = Queue.back();

  switch (job.Kind) {
    case Job::AnyExprKind: {
      if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(Val: job.E)) {
        if (shouldEnqueue(E: Bop)) {
          job.Kind = Job::BinOpKind;
          enqueue(E: Bop->getLHS());
          return;
        }
      }

      // Not a data-recursive binop: evaluate it whole and retire the job.
      EvaluateExpr(E: job.E, Result);
      Queue.pop_back();
      return;
    }

    case Job::BinOpKind: {
      const BinaryOperator *Bop = cast<BinaryOperator>(Val: job.E);
      bool SuppressRHSDiags = false;
      if (!VisitBinOpLHSOnly(LHSResult&: Result, E: Bop, SuppressRHSDiags)) {
        // Short-circuited (or aborted): Result already holds the value.
        Queue.pop_back();
        return;
      }
      if (SuppressRHSDiags)
        job.startSpeculativeEval(Info);
      // Stash the LHS value in the job and go evaluate the RHS.
      job.LHSResult.swap(RHS&: Result);
      job.Kind = Job::BinOpVisitedLHSKind;
      enqueue(E: Bop->getRHS());
      return;
    }

    case Job::BinOpVisitedLHSKind: {
      const BinaryOperator *Bop = cast<BinaryOperator>(Val: job.E);
      EvalResult RHS;
      RHS.swap(RHS&: Result);
      Result.Failed = !VisitBinOp(LHSResult: job.LHSResult, RHSResult: RHS, E: Bop, Result&: Result.Val);
      Queue.pop_back();
      return;
    }
  }

  llvm_unreachable("Invalid Job::Kind!");
}
17914
namespace {
/// Result of a comparison, shared by the integral, floating-point, pointer,
/// and member-pointer comparison paths below.
enum class CmpResult {
  Unequal,   // Equality comparison that is known to be unequal.
  Less,
  Equal,
  Greater,
  Unordered, // Floating-point comparison involving a NaN operand.
};
}
17924
17925template <class SuccessCB, class AfterCB>
17926static bool
17927EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
17928 SuccessCB &&Success, AfterCB &&DoAfter) {
17929 assert(!E->isValueDependent());
17930 assert(E->isComparisonOp() && "expected comparison operator");
17931 assert((E->getOpcode() == BO_Cmp ||
17932 E->getType()->isIntegralOrEnumerationType()) &&
17933 "unsupported binary expression evaluation");
17934 auto Error = [&](const Expr *E) {
17935 Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
17936 return false;
17937 };
17938
17939 bool IsRelational = E->isRelationalOp() || E->getOpcode() == BO_Cmp;
17940 bool IsEquality = E->isEqualityOp();
17941
17942 QualType LHSTy = E->getLHS()->getType();
17943 QualType RHSTy = E->getRHS()->getType();
17944
17945 if (LHSTy->isIntegralOrEnumerationType() &&
17946 RHSTy->isIntegralOrEnumerationType()) {
17947 APSInt LHS, RHS;
17948 bool LHSOK = EvaluateInteger(E: E->getLHS(), Result&: LHS, Info);
17949 if (!LHSOK && !Info.noteFailure())
17950 return false;
17951 if (!EvaluateInteger(E: E->getRHS(), Result&: RHS, Info) || !LHSOK)
17952 return false;
17953 if (LHS < RHS)
17954 return Success(CmpResult::Less, E);
17955 if (LHS > RHS)
17956 return Success(CmpResult::Greater, E);
17957 return Success(CmpResult::Equal, E);
17958 }
17959
17960 if (LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) {
17961 APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(Ty: LHSTy));
17962 APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(Ty: RHSTy));
17963
17964 bool LHSOK = EvaluateFixedPointOrInteger(E: E->getLHS(), Result&: LHSFX, Info);
17965 if (!LHSOK && !Info.noteFailure())
17966 return false;
17967 if (!EvaluateFixedPointOrInteger(E: E->getRHS(), Result&: RHSFX, Info) || !LHSOK)
17968 return false;
17969 if (LHSFX < RHSFX)
17970 return Success(CmpResult::Less, E);
17971 if (LHSFX > RHSFX)
17972 return Success(CmpResult::Greater, E);
17973 return Success(CmpResult::Equal, E);
17974 }
17975
17976 if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
17977 ComplexValue LHS, RHS;
17978 bool LHSOK;
17979 if (E->isAssignmentOp()) {
17980 LValue LV;
17981 EvaluateLValue(E: E->getLHS(), Result&: LV, Info);
17982 LHSOK = false;
17983 } else if (LHSTy->isRealFloatingType()) {
17984 LHSOK = EvaluateFloat(E: E->getLHS(), Result&: LHS.FloatReal, Info);
17985 if (LHSOK) {
17986 LHS.makeComplexFloat();
17987 LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics());
17988 }
17989 } else {
17990 LHSOK = EvaluateComplex(E: E->getLHS(), Res&: LHS, Info);
17991 }
17992 if (!LHSOK && !Info.noteFailure())
17993 return false;
17994
17995 if (E->getRHS()->getType()->isRealFloatingType()) {
17996 if (!EvaluateFloat(E: E->getRHS(), Result&: RHS.FloatReal, Info) || !LHSOK)
17997 return false;
17998 RHS.makeComplexFloat();
17999 RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics());
18000 } else if (!EvaluateComplex(E: E->getRHS(), Res&: RHS, Info) || !LHSOK)
18001 return false;
18002
18003 if (LHS.isComplexFloat()) {
18004 APFloat::cmpResult CR_r =
18005 LHS.getComplexFloatReal().compare(RHS: RHS.getComplexFloatReal());
18006 APFloat::cmpResult CR_i =
18007 LHS.getComplexFloatImag().compare(RHS: RHS.getComplexFloatImag());
18008 bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual;
18009 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
18010 } else {
18011 assert(IsEquality && "invalid complex comparison");
18012 bool IsEqual = LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
18013 LHS.getComplexIntImag() == RHS.getComplexIntImag();
18014 return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
18015 }
18016 }
18017
18018 if (LHSTy->isRealFloatingType() &&
18019 RHSTy->isRealFloatingType()) {
18020 APFloat RHS(0.0), LHS(0.0);
18021
18022 bool LHSOK = EvaluateFloat(E: E->getRHS(), Result&: RHS, Info);
18023 if (!LHSOK && !Info.noteFailure())
18024 return false;
18025
18026 if (!EvaluateFloat(E: E->getLHS(), Result&: LHS, Info) || !LHSOK)
18027 return false;
18028
18029 assert(E->isComparisonOp() && "Invalid binary operator!");
18030 llvm::APFloatBase::cmpResult APFloatCmpResult = LHS.compare(RHS);
18031 if (!Info.InConstantContext &&
18032 APFloatCmpResult == APFloat::cmpUnordered &&
18033 E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()).isFPConstrained()) {
18034 // Note: Compares may raise invalid in some cases involving NaN or sNaN.
18035 Info.FFDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict);
18036 return false;
18037 }
18038 auto GetCmpRes = [&]() {
18039 switch (APFloatCmpResult) {
18040 case APFloat::cmpEqual:
18041 return CmpResult::Equal;
18042 case APFloat::cmpLessThan:
18043 return CmpResult::Less;
18044 case APFloat::cmpGreaterThan:
18045 return CmpResult::Greater;
18046 case APFloat::cmpUnordered:
18047 return CmpResult::Unordered;
18048 }
18049 llvm_unreachable("Unrecognised APFloat::cmpResult enum");
18050 };
18051 return Success(GetCmpRes(), E);
18052 }
18053
18054 if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
18055 LValue LHSValue, RHSValue;
18056
18057 bool LHSOK = EvaluatePointer(E: E->getLHS(), Result&: LHSValue, Info);
18058 if (!LHSOK && !Info.noteFailure())
18059 return false;
18060
18061 if (!EvaluatePointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK)
18062 return false;
18063
18064 // Reject differing bases from the normal codepath; we special-case
18065 // comparisons to null.
18066 if (!HasSameBase(A: LHSValue, B: RHSValue)) {
18067 // Bail out early if we're checking potential constant expression.
18068 // Otherwise, prefer to diagnose other issues.
18069 if (Info.checkingPotentialConstantExpression() &&
18070 (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown))
18071 return false;
18072 auto DiagComparison = [&] (unsigned DiagID, bool Reversed = false) {
18073 std::string LHS = LHSValue.toString(Ctx&: Info.Ctx, T: E->getLHS()->getType());
18074 std::string RHS = RHSValue.toString(Ctx&: Info.Ctx, T: E->getRHS()->getType());
18075 Info.FFDiag(E, DiagId: DiagID)
18076 << (Reversed ? RHS : LHS) << (Reversed ? LHS : RHS);
18077 return false;
18078 };
18079 // Inequalities and subtractions between unrelated pointers have
18080 // unspecified or undefined behavior.
18081 if (!IsEquality)
18082 return DiagComparison(
18083 diag::note_constexpr_pointer_comparison_unspecified);
18084 // A constant address may compare equal to the address of a symbol.
18085 // The one exception is that address of an object cannot compare equal
18086 // to a null pointer constant.
18087 // TODO: Should we restrict this to actual null pointers, and exclude the
18088 // case of zero cast to pointer type?
18089 if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
18090 (!RHSValue.Base && !RHSValue.Offset.isZero()))
18091 return DiagComparison(diag::note_constexpr_pointer_constant_comparison,
18092 !RHSValue.Base);
18093 // C++2c [intro.object]/10:
18094 // Two objects [...] may have the same address if [...] they are both
18095 // potentially non-unique objects.
18096 // C++2c [intro.object]/9:
18097 // An object is potentially non-unique if it is a string literal object,
18098 // the backing array of an initializer list, or a subobject thereof.
18099 //
18100 // This makes the comparison result unspecified, so it's not a constant
18101 // expression.
18102 //
18103 // TODO: Do we need to handle the initializer list case here?
18104 if (ArePotentiallyOverlappingStringLiterals(Info, LHS: LHSValue, RHS: RHSValue))
18105 return DiagComparison(diag::note_constexpr_literal_comparison);
18106 if (IsOpaqueConstantCall(LVal: LHSValue) || IsOpaqueConstantCall(LVal: RHSValue))
18107 return DiagComparison(diag::note_constexpr_opaque_call_comparison,
18108 !IsOpaqueConstantCall(LVal: LHSValue));
18109 // We can't tell whether weak symbols will end up pointing to the same
18110 // object.
18111 if (IsWeakLValue(Value: LHSValue) || IsWeakLValue(Value: RHSValue))
18112 return DiagComparison(diag::note_constexpr_pointer_weak_comparison,
18113 !IsWeakLValue(Value: LHSValue));
18114 // We can't compare the address of the start of one object with the
18115 // past-the-end address of another object, per C++ DR1652.
18116 if (LHSValue.Base && LHSValue.Offset.isZero() &&
18117 isOnePastTheEndOfCompleteObject(Ctx: Info.Ctx, LV: RHSValue))
18118 return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
18119 true);
18120 if (RHSValue.Base && RHSValue.Offset.isZero() &&
18121 isOnePastTheEndOfCompleteObject(Ctx: Info.Ctx, LV: LHSValue))
18122 return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
18123 false);
18124 // We can't tell whether an object is at the same address as another
18125 // zero sized object.
18126 if ((RHSValue.Base && isZeroSized(Value: LHSValue)) ||
18127 (LHSValue.Base && isZeroSized(Value: RHSValue)))
18128 return DiagComparison(
18129 diag::note_constexpr_pointer_comparison_zero_sized);
18130 if (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown)
18131 return DiagComparison(
18132 diag::note_constexpr_pointer_comparison_unspecified);
18133 // FIXME: Verify both variables are live.
18134 return Success(CmpResult::Unequal, E);
18135 }
18136
18137 const CharUnits &LHSOffset = LHSValue.getLValueOffset();
18138 const CharUnits &RHSOffset = RHSValue.getLValueOffset();
18139
18140 SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
18141 SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
18142
18143 // C++11 [expr.rel]p2:
18144 // - If two pointers point to non-static data members of the same object,
  //   or to subobjects or array elements of such members, recursively, the
18146 // pointer to the later declared member compares greater provided the
18147 // two members have the same access control and provided their class is
18148 // not a union.
18149 // [...]
18150 // - Otherwise pointer comparisons are unspecified.
18151 if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
18152 bool WasArrayIndex;
18153 unsigned Mismatch = FindDesignatorMismatch(
18154 ObjType: LHSValue.Base.isNull() ? QualType()
18155 : getType(B: LHSValue.Base).getNonReferenceType(),
18156 A: LHSDesignator, B: RHSDesignator, WasArrayIndex);
18157 // At the point where the designators diverge, the comparison has a
18158 // specified value if:
18159 // - we are comparing array indices
18160 // - we are comparing fields of a union, or fields with the same access
18161 // Otherwise, the result is unspecified and thus the comparison is not a
18162 // constant expression.
18163 if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
18164 Mismatch < RHSDesignator.Entries.size()) {
18165 const FieldDecl *LF = getAsField(E: LHSDesignator.Entries[Mismatch]);
18166 const FieldDecl *RF = getAsField(E: RHSDesignator.Entries[Mismatch]);
18167 if (!LF && !RF)
18168 Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_classes);
18169 else if (!LF)
18170 Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_field)
18171 << getAsBaseClass(E: LHSDesignator.Entries[Mismatch])
18172 << RF->getParent() << RF;
18173 else if (!RF)
18174 Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_field)
18175 << getAsBaseClass(E: RHSDesignator.Entries[Mismatch])
18176 << LF->getParent() << LF;
18177 else if (!LF->getParent()->isUnion() &&
18178 LF->getAccess() != RF->getAccess())
18179 Info.CCEDiag(E,
18180 DiagId: diag::note_constexpr_pointer_comparison_differing_access)
18181 << LF << LF->getAccess() << RF << RF->getAccess()
18182 << LF->getParent();
18183 }
18184 }
18185
18186 // The comparison here must be unsigned, and performed with the same
18187 // width as the pointer.
18188 unsigned PtrSize = Info.Ctx.getTypeSize(T: LHSTy);
18189 uint64_t CompareLHS = LHSOffset.getQuantity();
18190 uint64_t CompareRHS = RHSOffset.getQuantity();
18191 assert(PtrSize <= 64 && "Unexpected pointer width");
18192 uint64_t Mask = ~0ULL >> (64 - PtrSize);
18193 CompareLHS &= Mask;
18194 CompareRHS &= Mask;
18195
18196 // If there is a base and this is a relational operator, we can only
18197 // compare pointers within the object in question; otherwise, the result
18198 // depends on where the object is located in memory.
18199 if (!LHSValue.Base.isNull() && IsRelational) {
18200 QualType BaseTy = getType(B: LHSValue.Base).getNonReferenceType();
18201 if (BaseTy->isIncompleteType())
18202 return Error(E);
18203 CharUnits Size = Info.Ctx.getTypeSizeInChars(T: BaseTy);
18204 uint64_t OffsetLimit = Size.getQuantity();
18205 if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
18206 return Error(E);
18207 }
18208
18209 if (CompareLHS < CompareRHS)
18210 return Success(CmpResult::Less, E);
18211 if (CompareLHS > CompareRHS)
18212 return Success(CmpResult::Greater, E);
18213 return Success(CmpResult::Equal, E);
18214 }
18215
18216 if (LHSTy->isMemberPointerType()) {
18217 assert(IsEquality && "unexpected member pointer operation");
18218 assert(RHSTy->isMemberPointerType() && "invalid comparison");
18219
18220 MemberPtr LHSValue, RHSValue;
18221
18222 bool LHSOK = EvaluateMemberPointer(E: E->getLHS(), Result&: LHSValue, Info);
18223 if (!LHSOK && !Info.noteFailure())
18224 return false;
18225
18226 if (!EvaluateMemberPointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK)
18227 return false;
18228
18229 // If either operand is a pointer to a weak function, the comparison is not
18230 // constant.
18231 if (LHSValue.getDecl() && LHSValue.getDecl()->isWeak()) {
18232 Info.FFDiag(E, DiagId: diag::note_constexpr_mem_pointer_weak_comparison)
18233 << LHSValue.getDecl();
18234 return false;
18235 }
18236 if (RHSValue.getDecl() && RHSValue.getDecl()->isWeak()) {
18237 Info.FFDiag(E, DiagId: diag::note_constexpr_mem_pointer_weak_comparison)
18238 << RHSValue.getDecl();
18239 return false;
18240 }
18241
18242 // C++11 [expr.eq]p2:
18243 // If both operands are null, they compare equal. Otherwise if only one is
18244 // null, they compare unequal.
18245 if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
18246 bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
18247 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
18248 }
18249
18250 // Otherwise if either is a pointer to a virtual member function, the
18251 // result is unspecified.
18252 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: LHSValue.getDecl()))
18253 if (MD->isVirtual())
18254 Info.CCEDiag(E, DiagId: diag::note_constexpr_compare_virtual_mem_ptr) << MD;
18255 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: RHSValue.getDecl()))
18256 if (MD->isVirtual())
18257 Info.CCEDiag(E, DiagId: diag::note_constexpr_compare_virtual_mem_ptr) << MD;
18258
18259 // Otherwise they compare equal if and only if they would refer to the
18260 // same member of the same most derived object or the same subobject if
18261 // they were dereferenced with a hypothetical object of the associated
18262 // class type.
18263 bool Equal = LHSValue == RHSValue;
18264 return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E);
18265 }
18266
18267 if (LHSTy->isNullPtrType()) {
18268 assert(E->isComparisonOp() && "unexpected nullptr operation");
18269 assert(RHSTy->isNullPtrType() && "missing pointer conversion");
18270 // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
18271 // are compared, the result is true of the operator is <=, >= or ==, and
18272 // false otherwise.
18273 LValue Res;
18274 if (!EvaluatePointer(E: E->getLHS(), Result&: Res, Info) ||
18275 !EvaluatePointer(E: E->getRHS(), Result&: Res, Info))
18276 return false;
18277 return Success(CmpResult::Equal, E);
18278 }
18279
18280 return DoAfter();
18281}
18282
/// Evaluate a three-way comparison (operator<=>) whose result is an object of
/// a comparison category type (std::strong_ordering etc.).
///
/// The operand comparison itself is delegated to
/// EvaluateComparisonBinaryOperator; on success the abstract CmpResult is
/// mapped onto the category type's corresponding static data member
/// (e.g. std::strong_ordering::less) and that member's value is loaded.
bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
  if (!CheckLiteralType(Info, E))
    return false;

  auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
    // Translate the generic comparison result into a comparison-category
    // result. Unequal is only produced for equality operators, never for a
    // three-way comparison, so it is unreachable here.
    ComparisonCategoryResult CCR;
    switch (CR) {
    case CmpResult::Unequal:
      llvm_unreachable("should never produce Unequal for three-way comparison");
    case CmpResult::Less:
      CCR = ComparisonCategoryResult::Less;
      break;
    case CmpResult::Equal:
      CCR = ComparisonCategoryResult::Equal;
      break;
    case CmpResult::Greater:
      CCR = ComparisonCategoryResult::Greater;
      break;
    case CmpResult::Unordered:
      CCR = ComparisonCategoryResult::Unordered;
      break;
    }
    // Evaluation succeeded. Lookup the information for the comparison category
    // type and fetch the VarDecl for the result. makeWeakResult() maps e.g.
    // Equal onto Equivalent when the category only supports the weaker form.
    const ComparisonCategoryInfo &CmpInfo =
        Info.Ctx.CompCategories.getInfoForType(Ty: E->getType());
    const VarDecl *VD = CmpInfo.getValueInfo(ValueKind: CmpInfo.makeWeakResult(Res: CCR))->VD;
    // Check and evaluate the result as a constant expression: load the value
    // of the category type's static member into Result.
    LValue LV;
    LV.set(B: VD);
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: LV, RVal&: Result))
      return false;
    return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result,
                                   Kind: ConstantExprKind::Normal);
  };
  // Fall back to the generic record-evaluation path if the comparison could
  // not be evaluated directly.
  return EvaluateComparisonBinaryOperator(Info, E, Success&: OnSuccess, DoAfter: [&]() {
    return ExprEvaluatorBaseTy::VisitBinCmp(S: E);
  });
}
18322
18323bool RecordExprEvaluator::VisitCXXParenListInitExpr(
18324 const CXXParenListInitExpr *E) {
18325 return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs());
18326}
18327
/// Evaluate a binary operator with an integer-typed result.
///
/// Purely integral operator trees are handled iteratively by
/// DataRecursiveIntBinOpEvaluator (avoiding deep recursion); builtin
/// comparisons are evaluated as three-way comparisons and translated back;
/// the remaining special case handled inline here is pointer subtraction.
bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // We don't support assignment in C. C++ assignments don't get here because
  // assignment is an lvalue in C++.
  if (E->isAssignmentOp()) {
    Error(E);
    // Keep evaluating in failure-noting mode so further diagnostics can be
    // collected from the operands.
    if (!Info.noteFailure())
      return false;
  }

  if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
    return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);

  assert((!E->getLHS()->getType()->isIntegralOrEnumerationType() ||
          !E->getRHS()->getType()->isIntegralOrEnumerationType()) &&
         "DataRecursiveIntBinOpEvaluator should have handled integral types");

  if (E->isComparisonOp()) {
    // Evaluate builtin binary comparisons by evaluating them as three-way
    // comparisons and then translating the result.
    auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
      assert((CR != CmpResult::Unequal || E->isEqualityOp()) &&
             "should only produce Unequal for equality comparisons");
      bool IsEqual = CR == CmpResult::Equal,
           IsLess = CR == CmpResult::Less,
           IsGreater = CR == CmpResult::Greater;
      auto Op = E->getOpcode();
      switch (Op) {
      default:
        llvm_unreachable("unsupported binary operator");
      case BO_EQ:
      case BO_NE:
        return Success(Value: IsEqual == (Op == BO_EQ), E);
      case BO_LT:
        return Success(Value: IsLess, E);
      case BO_GT:
        return Success(Value: IsGreater, E);
      case BO_LE:
        return Success(Value: IsEqual || IsLess, E);
      case BO_GE:
        return Success(Value: IsEqual || IsGreater, E);
      }
    };
    return EvaluateComparisonBinaryOperator(Info, E, Success&: OnSuccess, DoAfter: [&]() {
      return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
    });
  }

  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();

  // Pointer subtraction: ptr - ptr yielding a ptrdiff_t.
  if (LHSTy->isPointerType() && RHSTy->isPointerType() &&
      E->getOpcode() == BO_Sub) {
    LValue LHSValue, RHSValue;

    bool LHSOK = EvaluatePointer(E: E->getLHS(), Result&: LHSValue, Info);
    if (!LHSOK && !Info.noteFailure())
      return false;

    // Evaluate the RHS even if the LHS failed, to gather its diagnostics.
    if (!EvaluatePointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK)
      return false;

    // Reject differing bases from the normal codepath; we special-case
    // comparisons to null.
    if (!HasSameBase(A: LHSValue, B: RHSValue)) {
      if (Info.checkingPotentialConstantExpression() &&
          (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown))
        return false;

      const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr *>();
      const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr *>();

      // Emit the "unspecified arithmetic" diagnostic, noting the special case
      // where both operands derive from the same literal expression (which
      // may be evaluated to distinct objects each time it appears).
      auto DiagArith = [&](unsigned DiagID) {
        std::string LHS = LHSValue.toString(Ctx&: Info.Ctx, T: E->getLHS()->getType());
        std::string RHS = RHSValue.toString(Ctx&: Info.Ctx, T: E->getRHS()->getType());
        Info.FFDiag(E, DiagId: DiagID) << LHS << RHS;
        if (LHSExpr && LHSExpr == RHSExpr)
          Info.Note(Loc: LHSExpr->getExprLoc(),
                    DiagId: diag::note_constexpr_repeated_literal_eval)
              << LHSExpr->getSourceRange();
        return false;
      };

      if (!LHSExpr || !RHSExpr)
        return DiagArith(diag::note_constexpr_pointer_arith_unspecified);

      if (ArePotentiallyOverlappingStringLiterals(Info, LHS: LHSValue, RHS: RHSValue))
        return DiagArith(diag::note_constexpr_literal_arith);

      // &&label1 - &&label2 is folded to an AddrLabelDiff APValue, provided
      // both labels belong to the same function.
      const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: LHSExpr);
      const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: RHSExpr);
      if (!LHSAddrExpr || !RHSAddrExpr)
        return Error(E);
      // Make sure both labels come from the same function.
      if (LHSAddrExpr->getLabel()->getDeclContext() !=
          RHSAddrExpr->getLabel()->getDeclContext())
        return Error(E);
      return Success(V: APValue(LHSAddrExpr, RHSAddrExpr), E);
    }
    const CharUnits &LHSOffset = LHSValue.getLValueOffset();
    const CharUnits &RHSOffset = RHSValue.getLValueOffset();

    SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
    SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();

    // C++11 [expr.add]p6:
    //   Unless both pointers point to elements of the same array object, or
    //   one past the last element of the array object, the behavior is
    //   undefined.
    if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
        !AreElementsOfSameArray(ObjType: getType(B: LHSValue.Base), A: LHSDesignator,
                                B: RHSDesignator))
      Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_subtraction_not_same_array);

    QualType Type = E->getLHS()->getType();
    QualType ElementType = Type->castAs<PointerType>()->getPointeeType();

    CharUnits ElementSize;
    if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: ElementType, Size&: ElementSize))
      return false;

    // As an extension, a type may have zero size (empty struct or union in
    // C, array of zero length). Pointer subtraction in such cases has
    // undefined behavior, so is not constant.
    if (ElementSize.isZero()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_pointer_subtraction_zero_size)
          << ElementType;
      return false;
    }

    // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
    // and produce incorrect results when it overflows. Such behavior
    // appears to be non-conforming, but is common, so perhaps we should
    // assume the standard intended for such cases to be undefined behavior
    // and check for them.

    // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
    // overflow in the final conversion to ptrdiff_t. 65-bit signed
    // arithmetic is wide enough that neither the subtraction of two 64-bit
    // offsets nor the division can itself overflow; only the truncation to
    // the result type below can change the value.
    APSInt LHS(llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
    APSInt RHS(llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
    APSInt ElemSize(llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true),
                    false);
    APSInt TrueResult = (LHS - RHS) / ElemSize;
    APSInt Result = TrueResult.trunc(width: Info.Ctx.getIntWidth(T: E->getType()));

    // If truncation lost information, the subtraction overflowed ptrdiff_t.
    if (Result.extend(width: 65) != TrueResult &&
        !HandleOverflow(Info, E, SrcValue: TrueResult, DestType: E->getType()))
      return false;
    return Success(SI: Result, E);
  }

  return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
}
18480
/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with
/// a result as the expression's type. Also handles the related traits:
/// __datasizeof, _Countof, __builtin_vectorelements, the pointer-auth type
/// discriminator, and the OpenMP required SIMD alignment.
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  switch(E->getKind()) {
  case UETT_PreferredAlignOf:
  case UETT_AlignOf: {
    // alignof(type) vs. alignof(expr) take different query paths; the
    // expression form can be affected by e.g. aligned declarations.
    if (E->isArgumentType())
      return Success(
          Size: GetAlignOfType(Ctx: Info.Ctx, T: E->getArgumentType(), ExprKind: E->getKind()), E);
    else
      return Success(
          Size: GetAlignOfExpr(Ctx: Info.Ctx, E: E->getArgumentExpr(), ExprKind: E->getKind()), E);
  }

  case UETT_PtrAuthTypeDiscriminator: {
    // Cannot compute a discriminator for a dependent type.
    if (E->getArgumentType()->isDependentType())
      return false;
    return Success(
        Value: Info.Ctx.getPointerAuthTypeDiscriminator(T: E->getArgumentType()), E);
  }
  case UETT_VecStep: {
    QualType Ty = E->getTypeOfArgument();

    if (Ty->isVectorType()) {
      unsigned n = Ty->castAs<VectorType>()->getNumElements();

      // The vec_step built-in functions that take a 3-component
      // vector return 4. (OpenCL 1.1 spec 6.11.12)
      if (n == 3)
        n = 4;

      return Success(Value: n, E);
    } else
      // vec_step of a scalar is 1.
      return Success(Value: 1, E);
  }

  case UETT_DataSizeOf:
  case UETT_SizeOf: {
    QualType SrcTy = E->getTypeOfArgument();
    // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
    // the result is the size of the referenced type."
    if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
      SrcTy = Ref->getPointeeType();

    // __datasizeof excludes tail padding; sizeof includes it.
    CharUnits Sizeof;
    if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: SrcTy, Size&: Sizeof,
                      SOT: E->getKind() == UETT_DataSizeOf ? SizeOfType::DataSizeOf
                                                       : SizeOfType::SizeOf)) {
      return false;
    }
    return Success(Size: Sizeof, E);
  }
  case UETT_OpenMPRequiredSimdAlign:
    assert(E->isArgumentType());
    // The target's default SIMD alignment is reported in bits; the trait's
    // value is in bytes.
    return Success(
        Value: Info.Ctx.toCharUnitsFromBits(
                   BitSize: Info.Ctx.getOpenMPDefaultSimdAlign(T: E->getArgumentType()))
                   .getQuantity(),
        E);
  case UETT_VectorElements: {
    QualType Ty = E->getTypeOfArgument();
    // If the vector has a fixed size, we can determine the number of elements
    // at compile time.
    if (const auto *VT = Ty->getAs<VectorType>())
      return Success(Value: VT->getNumElements(), E);

    // Scalable (sizeless) vectors have a runtime-only element count.
    assert(Ty->isSizelessVectorType());
    if (Info.InConstantContext)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_non_const_vectorelements)
          << E->getSourceRange();

    return false;
  }
  case UETT_CountOf: {
    QualType Ty = E->getTypeOfArgument();
    assert(Ty->isArrayType());

    // We don't need to worry about array element qualifiers, so getting the
    // unsafe array type is fine.
    if (const auto *CAT =
            dyn_cast<ConstantArrayType>(Val: Ty->getAsArrayTypeUnsafe())) {
      return Success(I: CAT->getSize(), E);
    }

    assert(!Ty->isConstantSizeType());

    // If it's a variable-length array type, we need to check whether it is a
    // multidimensional array. If so, we need to check the size expression of
    // the VLA to see if it's a constant size. If so, we can return that value.
    const auto *VAT = Info.Ctx.getAsVariableArrayType(T: Ty);
    assert(VAT);
    if (VAT->getElementType()->isArrayType()) {
      // Variable array size expression could be missing (e.g. int a[*][10]) In
      // that case, it can't be a constant expression.
      if (!VAT->getSizeExpr()) {
        Info.FFDiag(Loc: E->getBeginLoc());
        return false;
      }

      std::optional<APSInt> Res =
          VAT->getSizeExpr()->getIntegerConstantExpr(Ctx: Info.Ctx);
      if (Res) {
        // The resulting value always has type size_t, so we need to make the
        // returned APInt have the correct sign and bit-width.
        APInt Val{
            static_cast<unsigned>(Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType())),
            Res->getZExtValue()};
        return Success(I: Val, E);
      }
    }

    // Definitely a variable-length type, which is not an ICE.
    // FIXME: Better diagnostic.
    Info.FFDiag(Loc: E->getBeginLoc());
    return false;
  }
  }

  llvm_unreachable("unknown expr/type trait");
}
18602
18603bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
18604 Info.Ctx.recordOffsetOfEvaluation(E: OOE);
18605 CharUnits Result;
18606 unsigned n = OOE->getNumComponents();
18607 if (n == 0)
18608 return Error(E: OOE);
18609 QualType CurrentType = OOE->getTypeSourceInfo()->getType();
18610 for (unsigned i = 0; i != n; ++i) {
18611 OffsetOfNode ON = OOE->getComponent(Idx: i);
18612 switch (ON.getKind()) {
18613 case OffsetOfNode::Array: {
18614 const Expr *Idx = OOE->getIndexExpr(Idx: ON.getArrayExprIndex());
18615 APSInt IdxResult;
18616 if (!EvaluateInteger(E: Idx, Result&: IdxResult, Info))
18617 return false;
18618 const ArrayType *AT = Info.Ctx.getAsArrayType(T: CurrentType);
18619 if (!AT)
18620 return Error(E: OOE);
18621 CurrentType = AT->getElementType();
18622 CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(T: CurrentType);
18623 Result += IdxResult.getSExtValue() * ElementSize;
18624 break;
18625 }
18626
18627 case OffsetOfNode::Field: {
18628 FieldDecl *MemberDecl = ON.getField();
18629 const auto *RD = CurrentType->getAsRecordDecl();
18630 if (!RD)
18631 return Error(E: OOE);
18632 if (RD->isInvalidDecl()) return false;
18633 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(D: RD);
18634 unsigned i = MemberDecl->getFieldIndex();
18635 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
18636 Result += Info.Ctx.toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: i));
18637 CurrentType = MemberDecl->getType().getNonReferenceType();
18638 break;
18639 }
18640
18641 case OffsetOfNode::Identifier:
18642 llvm_unreachable("dependent __builtin_offsetof");
18643
18644 case OffsetOfNode::Base: {
18645 CXXBaseSpecifier *BaseSpec = ON.getBase();
18646 if (BaseSpec->isVirtual())
18647 return Error(E: OOE);
18648
18649 // Find the layout of the class whose base we are looking into.
18650 const auto *RD = CurrentType->getAsCXXRecordDecl();
18651 if (!RD)
18652 return Error(E: OOE);
18653 if (RD->isInvalidDecl()) return false;
18654 const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(D: RD);
18655
18656 // Find the base class itself.
18657 CurrentType = BaseSpec->getType();
18658 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
18659 if (!BaseRD)
18660 return Error(E: OOE);
18661
18662 // Add the offset to the base.
18663 Result += RL.getBaseClassOffset(Base: BaseRD);
18664 break;
18665 }
18666 }
18667 }
18668 return Success(Size: Result, E: OOE);
18669}
18670
18671bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
18672 switch (E->getOpcode()) {
18673 default:
18674 // Address, indirect, pre/post inc/dec, etc are not valid constant exprs.
18675 // See C99 6.6p3.
18676 return Error(E);
18677 case UO_Extension:
18678 // FIXME: Should extension allow i-c-e extension expressions in its scope?
18679 // If so, we could clear the diagnostic ID.
18680 return Visit(S: E->getSubExpr());
18681 case UO_Plus:
18682 // The result is just the value.
18683 return Visit(S: E->getSubExpr());
18684 case UO_Minus: {
18685 if (!Visit(S: E->getSubExpr()))
18686 return false;
18687 if (!Result.isInt()) return Error(E);
18688 const APSInt &Value = Result.getInt();
18689 if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
18690 !E->getType().isWrapType()) {
18691 if (Info.checkingForUndefinedBehavior())
18692 Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
18693 DiagID: diag::warn_integer_constant_overflow)
18694 << toString(I: Value, Radix: 10, Signed: Value.isSigned(), /*formatAsCLiteral=*/false,
18695 /*UpperCase=*/true, /*InsertSeparators=*/true)
18696 << E->getType() << E->getSourceRange();
18697
18698 if (!HandleOverflow(Info, E, SrcValue: -Value.extend(width: Value.getBitWidth() + 1),
18699 DestType: E->getType()))
18700 return false;
18701 }
18702 return Success(SI: -Value, E);
18703 }
18704 case UO_Not: {
18705 if (!Visit(S: E->getSubExpr()))
18706 return false;
18707 if (!Result.isInt()) return Error(E);
18708 return Success(SI: ~Result.getInt(), E);
18709 }
18710 case UO_LNot: {
18711 bool bres;
18712 if (!EvaluateAsBooleanCondition(E: E->getSubExpr(), Result&: bres, Info))
18713 return false;
18714 return Success(Value: !bres, E);
18715 }
18716 }
18717}
18718
/// HandleCast - This is used to evaluate implicit or explicit casts where the
/// result type is integer. Cast kinds that can never yield an integral value
/// are unreachable; kinds that cannot be constant-folded report an error; the
/// rest are evaluated and converted.
bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const Expr *SubExpr = E->getSubExpr();
  QualType DestType = E->getType();
  QualType SrcType = SubExpr->getType();

  switch (E->getCastKind()) {
  // These cast kinds produce non-integral results, so Sema should never have
  // routed them into the integer evaluator.
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_ReinterpretMemberPointer:
  case CK_ConstructorConversion:
  case CK_IntegralToPointer:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralToFloating:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_NonAtomicToAtomic:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLAggregateSplatCast:
    llvm_unreachable("invalid cast kind for integral value");

  // These casts cannot be constant-folded to an integer.
  case CK_BitCast:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
    return Error(E);

  // Value-preserving casts: evaluate the operand through the generic path.
  case CK_UserDefinedConversion:
  case CK_LValueToRValue:
  case CK_AtomicToNonAtomic:
  case CK_NoOp:
  case CK_LValueToRValueBitCast:
  case CK_HLSLArrayRValue:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_MemberPointerToBoolean:
  case CK_PointerToBoolean:
  case CK_IntegralToBoolean:
  case CK_FloatingToBoolean:
  case CK_BooleanToSignedIntegral:
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    bool BoolResult;
    if (!EvaluateAsBooleanCondition(E: SubExpr, Result&: BoolResult, Info))
      return false;
    uint64_t IntResult = BoolResult;
    // BooleanToSignedIntegral maps true to -1 (all bits set), not 1.
    if (BoolResult && E->getCastKind() == CK_BooleanToSignedIntegral)
      IntResult = (uint64_t)-1;
    return Success(Value: IntResult, E);
  }

  case CK_FixedPointToIntegral: {
    APFixedPoint Src(Info.Ctx.getFixedPointSemantics(Ty: SrcType));
    if (!EvaluateFixedPoint(E: SubExpr, Result&: Src, Info))
      return false;
    bool Overflowed;
    llvm::APSInt Result = Src.convertToInt(
        DstWidth: Info.Ctx.getIntWidth(T: DestType),
        DstSign: DestType->isSignedIntegerOrEnumerationType(), Overflow: &Overflowed);
    if (Overflowed && !HandleOverflow(Info, E, SrcValue: Result, DestType))
      return false;
    return Success(SI: Result, E);
  }

  case CK_FixedPointToBoolean: {
    // Unsigned padding does not affect this.
    APValue Val;
    if (!Evaluate(Result&: Val, Info, E: SubExpr))
      return false;
    return Success(Value: Val.getFixedPoint().getBoolValue(), E);
  }

  case CK_IntegralCast: {
    if (!Visit(S: SubExpr))
      return false;

    if (!Result.isInt()) {
      // Allow casts of address-of-label differences if they are no-ops
      // or narrowing, if the result is at least 32 bits wide.
      // (The narrowing case isn't actually guaranteed to
      // be constant-evaluatable except in some narrow cases which are hard
      // to detect here. We let it through on the assumption the user knows
      // what they are doing.)
      if (Result.isAddrLabelDiff()) {
        unsigned DestBits = Info.Ctx.getTypeSize(T: DestType);
        return DestBits >= 32 && DestBits <= Info.Ctx.getTypeSize(T: SrcType);
      }
      // Only allow casts of lvalues if they are lossless.
      return Info.Ctx.getTypeSize(T: DestType) == Info.Ctx.getTypeSize(T: SrcType);
    }

    if (Info.Ctx.getLangOpts().CPlusPlus && DestType->isEnumeralType()) {
      const auto *ED = DestType->getAsEnumDecl();
      // Check that the value is within the range of the enumeration values.
      //
      // This corressponds to [expr.static.cast]p10 which says:
      // A value of integral or enumeration type can be explicitly converted
      // to a complete enumeration type ... If the enumeration type does not
      // have a fixed underlying type, the value is unchanged if the original
      // value is within the range of the enumeration values ([dcl.enum]), and
      // otherwise, the behavior is undefined.
      //
      // This was resolved as part of DR2338 which has CD5 status.
      if (!ED->isFixed()) {
        llvm::APInt Min;
        llvm::APInt Max;

        // getValueRange reports [Min, Max) — make Max inclusive.
        ED->getValueRange(Max, Min);
        --Max;

        // Signed vs. unsigned comparison depends on whether the enum has any
        // negative enumerators.
        if (ED->getNumNegativeBits() &&
            (Max.slt(RHS: Result.getInt().getSExtValue()) ||
             Min.sgt(RHS: Result.getInt().getSExtValue())))
          Info.CCEDiag(E, DiagId: diag::note_constexpr_unscoped_enum_out_of_range)
              << llvm::toString(I: Result.getInt(), Radix: 10) << Min.getSExtValue()
              << Max.getSExtValue() << ED;
        else if (!ED->getNumNegativeBits() &&
                 Max.ult(RHS: Result.getInt().getZExtValue()))
          Info.CCEDiag(E, DiagId: diag::note_constexpr_unscoped_enum_out_of_range)
              << llvm::toString(I: Result.getInt(), Radix: 10) << Min.getZExtValue()
              << Max.getZExtValue() << ED;
      }
    }

    return Success(SI: HandleIntToIntCast(Info, E, DestType, SrcType,
                                        Value: Result.getInt()), E);
  }

  case CK_PointerToIntegral: {
    // Pointer-to-integer conversion is not a core constant expression; note
    // that before attempting to fold it.
    CCEDiag(E, D: diag::note_constexpr_invalid_cast)
        << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
        << Info.Ctx.getLangOpts().CPlusPlus << E->getSourceRange();

    LValue LV;
    if (!EvaluatePointer(E: SubExpr, Result&: LV, Info))
      return false;

    if (LV.getLValueBase()) {
      // Only allow based lvalue casts if they are lossless.
      // FIXME: Allow a larger integer size than the pointer size, and allow
      // narrowing back down to pointer width in subsequent integral casts.
      // FIXME: Check integer type's active bits, not its type size.
      if (Info.Ctx.getTypeSize(T: DestType) != Info.Ctx.getTypeSize(T: SrcType))
        return Error(E);

      LV.Designator.setInvalid();
      LV.moveInto(V&: Result);
      return true;
    }

    // A null or integer-valued pointer converts to its numeric value.
    APSInt AsInt;
    APValue V;
    LV.moveInto(V);
    if (!V.toIntegralConstant(Result&: AsInt, SrcTy: SrcType, Ctx: Info.Ctx))
      llvm_unreachable("Can't cast this!");

    return Success(SI: HandleIntToIntCast(Info, E, DestType, SrcType, Value: AsInt), E);
  }

  case CK_IntegralComplexToReal: {
    ComplexValue C;
    if (!EvaluateComplex(E: SubExpr, Res&: C, Info))
      return false;
    return Success(SI: C.getComplexIntReal(), E);
  }

  case CK_FloatingToIntegral: {
    APFloat F(0.0);
    if (!EvaluateFloat(E: SubExpr, Result&: F, Info))
      return false;

    APSInt Value;
    if (!HandleFloatToIntCast(Info, E, SrcType, Value: F, DestType, Result&: Value))
      return false;
    return Success(SI: Value, E);
  }
  case CK_HLSLVectorTruncation: {
    // Truncating a vector to a scalar keeps element 0.
    APValue Val;
    if (!EvaluateVector(E: SubExpr, Result&: Val, Info))
      return Error(E);
    return Success(V: Val.getVectorElt(I: 0), E);
  }
  case CK_HLSLMatrixTruncation: {
    // TODO: See #168935. Add matrix truncation support to expr constant.
    return Error(E);
  }
  case CK_HLSLElementwiseCast: {
    SmallVector<APValue> SrcVals;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: SubExpr, DestTy: DestType, SrcVals, SrcTypes))
      return false;

    // cast our single element
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    APValue ResultVal;
    if (!handleScalarCast(Info, FPO, E, SourceTy: SrcTypes[0], DestTy: DestType, Original: SrcVals[0],
                          Result&: ResultVal))
      return false;
    return Success(V: ResultVal, E);
  }
  }

  llvm_unreachable("unknown cast resulting in integral value");
}
18957
18958bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
18959 if (E->getSubExpr()->getType()->isAnyComplexType()) {
18960 ComplexValue LV;
18961 if (!EvaluateComplex(E: E->getSubExpr(), Res&: LV, Info))
18962 return false;
18963 if (!LV.isComplexInt())
18964 return Error(E);
18965 return Success(SI: LV.getComplexIntReal(), E);
18966 }
18967
18968 return Visit(S: E->getSubExpr());
18969}
18970
18971bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
18972 if (E->getSubExpr()->getType()->isComplexIntegerType()) {
18973 ComplexValue LV;
18974 if (!EvaluateComplex(E: E->getSubExpr(), Res&: LV, Info))
18975 return false;
18976 if (!LV.isComplexInt())
18977 return Error(E);
18978 return Success(SI: LV.getComplexIntImag(), E);
18979 }
18980
18981 VisitIgnoredValue(E: E->getSubExpr());
18982 return Success(Value: 0, E);
18983}
18984
18985bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
18986 return Success(Value: E->getPackLength(), E);
18987}
18988
18989bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
18990 return Success(Value: E->getValue(), E);
18991}
18992
18993bool IntExprEvaluator::VisitConceptSpecializationExpr(
18994 const ConceptSpecializationExpr *E) {
18995 return Success(Value: E->isSatisfied(), E);
18996}
18997
18998bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) {
18999 return Success(Value: E->isSatisfied(), E);
19000}
19001
19002bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
19003 switch (E->getOpcode()) {
19004 default:
19005 // Invalid unary operators
19006 return Error(E);
19007 case UO_Plus:
19008 // The result is just the value.
19009 return Visit(S: E->getSubExpr());
19010 case UO_Minus: {
19011 if (!Visit(S: E->getSubExpr())) return false;
19012 if (!Result.isFixedPoint())
19013 return Error(E);
19014 bool Overflowed;
19015 APFixedPoint Negated = Result.getFixedPoint().negate(Overflow: &Overflowed);
19016 if (Overflowed && !HandleOverflow(Info, E, SrcValue: Negated, DestType: E->getType()))
19017 return false;
19018 return Success(V: Negated, E);
19019 }
19020 case UO_LNot: {
19021 bool bres;
19022 if (!EvaluateAsBooleanCondition(E: E->getSubExpr(), Result&: bres, Info))
19023 return false;
19024 return Success(Value: !bres, E);
19025 }
19026 }
19027}
19028
/// Evaluate a cast whose result type is a fixed-point type.
///
/// Handles fixed-point -> fixed-point, integer -> fixed-point and
/// floating-point -> fixed-point conversions. Any overflow during the
/// conversion is surfaced through HandleOverflow (and additionally as a
/// warning when we are only checking for undefined behavior rather than
/// requiring a constant).
bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const Expr *SubExpr = E->getSubExpr();
  QualType DestType = E->getType();
  assert(DestType->isFixedPointType() &&
         "Expected destination type to be a fixed point type");
  auto DestFXSema = Info.Ctx.getFixedPointSemantics(Ty: DestType);

  switch (E->getCastKind()) {
  case CK_FixedPointCast: {
    // Evaluate the operand in its own fixed-point semantics, then convert
    // to the destination semantics while tracking overflow.
    APFixedPoint Src(Info.Ctx.getFixedPointSemantics(Ty: SubExpr->getType()));
    if (!EvaluateFixedPoint(E: SubExpr, Result&: Src, Info))
      return false;
    bool Overflowed;
    APFixedPoint Result = Src.convert(DstSema: DestFXSema, Overflow: &Overflowed);
    if (Overflowed) {
      // In UB-checking mode, report the overflow as a warning as well.
      if (Info.checkingForUndefinedBehavior())
        Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                         DiagID: diag::warn_fixedpoint_constant_overflow)
            << Result.toString() << E->getType();
      if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType()))
        return false;
    }
    return Success(V: Result, E);
  }
  case CK_IntegralToFixedPoint: {
    // Evaluate the integer operand, then reinterpret it in the destination
    // fixed-point semantics; getFromIntValue reports range overflow.
    APSInt Src;
    if (!EvaluateInteger(E: SubExpr, Result&: Src, Info))
      return false;

    bool Overflowed;
    APFixedPoint IntResult = APFixedPoint::getFromIntValue(
        Value: Src, DstFXSema: Info.Ctx.getFixedPointSemantics(Ty: DestType), Overflow: &Overflowed);

    if (Overflowed) {
      if (Info.checkingForUndefinedBehavior())
        Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                         DiagID: diag::warn_fixedpoint_constant_overflow)
            << IntResult.toString() << E->getType();
      if (!HandleOverflow(Info, E, SrcValue: IntResult, DestType: E->getType()))
        return false;
    }

    return Success(V: IntResult, E);
  }
  case CK_FloatingToFixedPoint: {
    // Evaluate the floating-point operand and convert it to fixed point;
    // getFromFloatValue reports values outside the representable range.
    APFloat Src(0.0);
    if (!EvaluateFloat(E: SubExpr, Result&: Src, Info))
      return false;

    bool Overflowed;
    APFixedPoint Result = APFixedPoint::getFromFloatValue(
        Value: Src, DstFXSema: Info.Ctx.getFixedPointSemantics(Ty: DestType), Overflow: &Overflowed);

    if (Overflowed) {
      if (Info.checkingForUndefinedBehavior())
        Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                         DiagID: diag::warn_fixedpoint_constant_overflow)
            << Result.toString() << E->getType();
      if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType()))
        return false;
    }

    return Success(V: Result, E);
  }
  case CK_NoOp:
  case CK_LValueToRValue:
    // Value-preserving casts: defer to the generic cast handling.
    return ExprEvaluatorBaseTy::VisitCastExpr(E);
  default:
    // Any other cast kind cannot yield a fixed-point constant.
    return Error(E);
  }
}
19100
/// Evaluate a binary operator producing a fixed-point result.
///
/// Both operands are evaluated (an integer operand is promoted to fixed
/// point), the operation is performed in the common semantics, and the
/// result is converted to the expression's type. Overflow from either the
/// operation or the final conversion is diagnosed via HandleOverflow.
bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // Pointer-to-member, assignment, and comma operators are handled
  // generically by the base class.
  if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  const Expr *LHS = E->getLHS();
  const Expr *RHS = E->getRHS();
  FixedPointSemantics ResultFXSema =
      Info.Ctx.getFixedPointSemantics(Ty: E->getType());

  APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(Ty: LHS->getType()));
  if (!EvaluateFixedPointOrInteger(E: LHS, Result&: LHSFX, Info))
    return false;
  APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(Ty: RHS->getType()));
  if (!EvaluateFixedPointOrInteger(E: RHS, Result&: RHSFX, Info))
    return false;

  // Overflow of the arithmetic itself vs. overflow of the conversion of the
  // (exact) result into the destination semantics are tracked separately.
  bool OpOverflow = false, ConversionOverflow = false;
  APFixedPoint Result(LHSFX.getSemantics());
  switch (E->getOpcode()) {
  case BO_Add: {
    Result = LHSFX.add(Other: RHSFX, Overflow: &OpOverflow)
                 .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Sub: {
    Result = LHSFX.sub(Other: RHSFX, Overflow: &OpOverflow)
                 .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Mul: {
    Result = LHSFX.mul(Other: RHSFX, Overflow: &OpOverflow)
                 .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Div: {
    // Division by zero is never a constant expression.
    if (RHSFX.getValue() == 0) {
      Info.FFDiag(E, DiagId: diag::note_expr_divide_by_zero);
      return false;
    }
    Result = LHSFX.div(Other: RHSFX, Overflow: &OpOverflow)
                 .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow);
    break;
  }
  case BO_Shl:
  case BO_Shr: {
    FixedPointSemantics LHSSema = LHSFX.getSemantics();
    llvm::APSInt RHSVal = RHSFX.getValue();

    // Number of non-padding bits of the shifted operand; shift amounts are
    // clamped to ShiftBW - 1 after diagnosing out-of-range values.
    unsigned ShiftBW =
        LHSSema.getWidth() - (unsigned)LHSSema.hasUnsignedPadding();
    unsigned Amt = RHSVal.getLimitedValue(Limit: ShiftBW - 1);
    // Embedded-C 4.1.6.2.2:
    //   The right operand must be nonnegative and less than the total number
    //   of (nonpadding) bits of the fixed-point operand ...
    if (RHSVal.isNegative())
      Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHSVal;
    else if (Amt != RHSVal)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift)
          << RHSVal << E->getType() << ShiftBW;

    if (E->getOpcode() == BO_Shl)
      Result = LHSFX.shl(Amt, Overflow: &OpOverflow);
    else
      Result = LHSFX.shr(Amt, Overflow: &OpOverflow);
    break;
  }
  default:
    return false;
  }
  if (OpOverflow || ConversionOverflow) {
    // In UB-checking mode also report the overflow as a warning.
    if (Info.checkingForUndefinedBehavior())
      Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(),
                                       DiagID: diag::warn_fixedpoint_constant_overflow)
          << Result.toString() << E->getType();
    if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType()))
      return false;
  }
  return Success(V: Result, E);
}
19180
19181//===----------------------------------------------------------------------===//
19182// Float Evaluation
19183//===----------------------------------------------------------------------===//
19184
namespace {
/// Evaluator for expressions of real floating-point type.
///
/// Visit methods write the evaluated value into the referenced APFloat and
/// return true on success; false indicates the expression could not be
/// constant-folded (diagnostics, if any, are emitted through EvalInfo).
class FloatExprEvaluator
  : public ExprEvaluatorBase<FloatExprEvaluator> {
  APFloat &Result; // Destination for the evaluated floating-point value.
public:
  FloatExprEvaluator(EvalInfo &info, APFloat &result)
    : ExprEvaluatorBaseTy(info), Result(result) {}

  // Accept a generic APValue known to hold a float.
  bool Success(const APValue &V, const Expr *e) {
    Result = V.getFloat();
    return true;
  }

  // Zero-initialization yields positive zero in the expression's semantics.
  bool ZeroInitialization(const Expr *E) {
    Result = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: E->getType()));
    return true;
  }

  bool VisitCallExpr(const CallExpr *E);

  bool VisitUnaryOperator(const UnaryOperator *E);
  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitFloatingLiteral(const FloatingLiteral *E);
  bool VisitCastExpr(const CastExpr *E);

  bool VisitUnaryReal(const UnaryOperator *E);
  bool VisitUnaryImag(const UnaryOperator *E);

  // FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
19216
19217static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
19218 assert(!E->isValueDependent());
19219 assert(E->isPRValue() && E->getType()->isRealFloatingType());
19220 return FloatExprEvaluator(Info, Result).Visit(S: E);
19221}
19222
/// Try to evaluate a __builtin_nan/__builtin_nans call.
///
/// \param Arg  The builtin's string argument; it must be a (possibly
///             parenthesized/cast) string literal whose contents parse as an
///             integer NaN payload. An empty string means payload zero.
/// \param SNaN True for the signaling-NaN builtins (__builtin_nans*).
/// \returns false if the argument isn't a usable payload string, in which
///          case the call cannot be folded.
static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
                                  QualType ResultTy,
                                  const Expr *Arg,
                                  bool SNaN,
                                  llvm::APFloat &Result) {
  const StringLiteral *S = dyn_cast<StringLiteral>(Val: Arg->IgnoreParenCasts());
  if (!S) return false;

  const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: ResultTy);

  llvm::APInt fill;

  // Treat empty strings as if they were zero.
  if (S->getString().empty())
    fill = llvm::APInt(32, 0);
  else if (S->getString().getAsInteger(Radix: 0, Result&: fill))
    // Radix 0: accept decimal, 0x hex, or 0 octal payloads. A string that
    // doesn't parse as an integer makes the call non-foldable.
    return false;

  if (Context.getTargetInfo().isNan2008()) {
    if (SNaN)
      Result = llvm::APFloat::getSNaN(Sem, Negative: false, payload: &fill);
    else
      Result = llvm::APFloat::getQNaN(Sem, Negative: false, payload: &fill);
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (SNaN)
      Result = llvm::APFloat::getQNaN(Sem, Negative: false, payload: &fill);
    else
      Result = llvm::APFloat::getSNaN(Sem, Negative: false, payload: &fill);
  }

  return true;
}
19260
/// Constant-fold calls to builtins that produce a floating-point value.
/// Non-builtin calls (and builtins not evaluated in a constant context) are
/// delegated to the generic call handling in the base class.
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
  if (!IsConstantEvaluatedBuiltinCall(E))
    return ExprEvaluatorBaseTy::VisitCallExpr(E);

  switch (E->getBuiltinCallee()) {
  default:
    // Unknown builtin: not foldable here.
    return false;

  // HUGE_VAL* and INF* builtins all yield positive infinity in the
  // call's result semantics.
  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128: {
    const llvm::fltSemantics &Sem =
        Info.Ctx.getFloatTypeSemantics(T: E->getType());
    Result = llvm::APFloat::getInf(Sem);
    return true;
  }

  // Signaling NaN with the payload given by the string argument.
  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    if (!TryEvaluateBuiltinNaN(Context: Info.Ctx, ResultTy: E->getType(), Arg: E->getArg(Arg: 0),
                               SNaN: true, Result))
      return Error(E);
    return true;

  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    // If this is __builtin_nan() turn this into a nan, otherwise we
    // can't constant fold it.
    if (!TryEvaluateBuiltinNaN(Context: Info.Ctx, ResultTy: E->getType(), Arg: E->getArg(Arg: 0),
                               SNaN: false, Result))
      return Error(E);
    return true;

  case Builtin::BI__builtin_elementwise_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    // The C standard says "fabs raises no floating-point exceptions,
    // even if x is a signaling NaN. The returned value is independent of
    // the current rounding direction mode." Therefore constant folding can
    // proceed without regard to the floating point settings.
    // Reference, WG14 N2478 F.10.4.3
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info))
      return false;

    if (Result.isNegative())
      Result.changeSign();
    return true;

  // __arithmetic_fence is an optimization barrier only; the value passes
  // through unchanged during constant evaluation.
  case Builtin::BI__arithmetic_fence:
    return EvaluateFloat(E: E->getArg(Arg: 0), Result, Info);

  // FIXME: Builtin::BI__builtin_powi
  // FIXME: Builtin::BI__builtin_powif
  // FIXME: Builtin::BI__builtin_powil

  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result.copySign(RHS);
    return true;
  }

  // fmax/fmin use the IEEE maxNum/minNum semantics (quiet-NaN operands are
  // treated as missing data).
  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = maxnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = minnum(A: Result, B: RHS);
    return true;
  }

  // fmaximum_num/fminimum_num follow IEEE 754-2019 maximumNumber /
  // minimumNumber as implemented by llvm::maximumnum / llvm::minimumnum.
  case Builtin::BI__builtin_fmaximum_num:
  case Builtin::BI__builtin_fmaximum_numf:
  case Builtin::BI__builtin_fmaximum_numl:
  case Builtin::BI__builtin_fmaximum_numf16:
  case Builtin::BI__builtin_fmaximum_numf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = maximumnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_fminimum_num:
  case Builtin::BI__builtin_fminimum_numf:
  case Builtin::BI__builtin_fminimum_numl:
  case Builtin::BI__builtin_fminimum_numf16:
  case Builtin::BI__builtin_fminimum_numf128: {
    APFloat RHS(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info))
      return false;
    Result = minimumnum(A: Result, B: RHS);
    return true;
  }

  case Builtin::BI__builtin_elementwise_fma: {
    // Only the scalar (prvalue float) form is folded here.
    if (!E->getArg(Arg: 0)->isPRValue() || !E->getArg(Arg: 1)->isPRValue() ||
        !E->getArg(Arg: 2)->isPRValue()) {
      return false;
    }
    APFloat SourceY(0.), SourceZ(0.);
    if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 1), Result&: SourceY, Info) ||
        !EvaluateFloat(E: E->getArg(Arg: 2), Result&: SourceZ, Info))
      return false;
    // Fused multiply-add in the active rounding mode (no intermediate
    // rounding of the product).
    llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E);
    (void)Result.fusedMultiplyAdd(Multiplicand: SourceY, Addend: SourceZ, RM);
    return true;
  }

  case clang::X86::BI__builtin_ia32_vec_ext_v4sf: {
    APValue Vec;
    APSInt IdxAPS;
    if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info) ||
        !EvaluateInteger(E: E->getArg(Arg: 1), Result&: IdxAPS, Info))
      return false;
    unsigned N = Vec.getVectorLength();
    // N is 4 (v4sf), a power of two, so masking with N-1 wraps the index
    // into range the same way the hardware extract does.
    unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
    return Success(V: Vec.getVectorElt(I: Idx), e: E);
  }
  }
}
19422
19423bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
19424 if (E->getSubExpr()->getType()->isAnyComplexType()) {
19425 ComplexValue CV;
19426 if (!EvaluateComplex(E: E->getSubExpr(), Res&: CV, Info))
19427 return false;
19428 Result = CV.FloatReal;
19429 return true;
19430 }
19431
19432 return Visit(S: E->getSubExpr());
19433}
19434
19435bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
19436 if (E->getSubExpr()->getType()->isAnyComplexType()) {
19437 ComplexValue CV;
19438 if (!EvaluateComplex(E: E->getSubExpr(), Res&: CV, Info))
19439 return false;
19440 Result = CV.FloatImag;
19441 return true;
19442 }
19443
19444 VisitIgnoredValue(E: E->getSubExpr());
19445 const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(T: E->getType());
19446 Result = llvm::APFloat::getZero(Sem);
19447 return true;
19448}
19449
19450bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
19451 switch (E->getOpcode()) {
19452 default: return Error(E);
19453 case UO_Plus:
19454 return EvaluateFloat(E: E->getSubExpr(), Result, Info);
19455 case UO_Minus:
19456 // In C standard, WG14 N2478 F.3 p4
19457 // "the unary - raises no floating point exceptions,
19458 // even if the operand is signalling."
19459 if (!EvaluateFloat(E: E->getSubExpr(), Result, Info))
19460 return false;
19461 Result.changeSign();
19462 return true;
19463 }
19464}
19465
/// Evaluate a binary operator with a floating-point result.
bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // Pointer-to-member, assignment, and comma operators get generic handling.
  if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  APFloat RHS(0.0);
  bool LHSOK = EvaluateFloat(E: E->getLHS(), Result, Info);
  if (!LHSOK && !Info.noteFailure())
    return false;
  // Note the evaluation order: the RHS is still evaluated when the LHS
  // failed but we're collecting notes (noteFailure), so diagnostics for both
  // operands are produced; the operation itself only runs if both succeeded.
  return EvaluateFloat(E: E->getRHS(), Result&: RHS, Info) && LHSOK &&
         handleFloatFloatBinOp(Info, E, LHS&: Result, Opcode: E->getOpcode(), RHS);
}
19477
/// A floating-point literal is already a constant; copy its value through.
bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
  Result = E->getValue();
  return true;
}
19482
/// Evaluate a cast whose result is a real floating-point value.
bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
  const Expr* SubExpr = E->getSubExpr();

  switch (E->getCastKind()) {
  default:
    // Value-preserving and otherwise-generic casts are handled by the base.
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  case CK_HLSLAggregateSplatCast:
    // A splat cast never produces a scalar float result.
    llvm_unreachable("invalid cast kind for floating value");

  case CK_IntegralToFloating: {
    // Convert an evaluated integer using the FP options in effect at E
    // (rounding mode etc.).
    APSInt IntResult;
    const FPOptions FPO = E->getFPFeaturesInEffect(
        LO: Info.Ctx.getLangOpts());
    return EvaluateInteger(E: SubExpr, Result&: IntResult, Info) &&
           HandleIntToFloatCast(Info, E, FPO, SrcType: SubExpr->getType(),
                                Value: IntResult, DestType: E->getType(), Result);
  }

  case CK_FixedPointToFloating: {
    // Evaluate the fixed-point operand in its own semantics, then convert
    // to the destination's floating-point semantics.
    APFixedPoint FixResult(Info.Ctx.getFixedPointSemantics(Ty: SubExpr->getType()));
    if (!EvaluateFixedPoint(E: SubExpr, Result&: FixResult, Info))
      return false;
    Result =
        FixResult.convertToFloat(FloatSema: Info.Ctx.getFloatTypeSemantics(T: E->getType()));
    return true;
  }

  case CK_FloatingCast: {
    // Float-to-float conversion (may narrow; HandleFloatToFloatCast
    // performs the rounding and any diagnostics).
    if (!Visit(S: SubExpr))
      return false;
    return HandleFloatToFloatCast(Info, E, SrcType: SubExpr->getType(), DestType: E->getType(),
                                  Result);
  }

  case CK_FloatingComplexToReal: {
    // Discard the imaginary component of the evaluated complex operand.
    ComplexValue V;
    if (!EvaluateComplex(E: SubExpr, Res&: V, Info))
      return false;
    Result = V.getComplexFloatReal();
    return true;
  }
  case CK_HLSLVectorTruncation: {
    // Truncating a vector to a scalar keeps element 0.
    APValue Val;
    if (!EvaluateVector(E: SubExpr, Result&: Val, Info))
      return Error(E);
    return Success(V: Val.getVectorElt(I: 0), e: E);
  }
  case CK_HLSLMatrixTruncation: {
    // TODO: See #168935. Add matrix truncation support to expr constant.
    return Error(E);
  }
  case CK_HLSLElementwiseCast: {
    // Flatten the source aggregate, then scalar-cast its first element to
    // the (scalar float) destination type.
    SmallVector<APValue> SrcVals;
    SmallVector<QualType> SrcTypes;

    if (!hlslElementwiseCastHelper(Info, E: SubExpr, DestTy: E->getType(), SrcVals,
                                   SrcTypes))
      return false;
    APValue Val;

    // cast our single element
    const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts());
    APValue ResultVal;
    if (!handleScalarCast(Info, FPO, E, SourceTy: SrcTypes[0], DestTy: E->getType(), Original: SrcVals[0],
                          Result&: ResultVal))
      return false;
    return Success(V: ResultVal, e: E);
  }
  }
}
19554
19555//===----------------------------------------------------------------------===//
19556// Complex Evaluation (for float and integer)
19557//===----------------------------------------------------------------------===//
19558
namespace {
/// Evaluator for expressions of complex type (both _Complex float and
/// _Complex int flavors). The evaluated components are written into the
/// referenced ComplexValue; Visit methods return true on success.
class ComplexExprEvaluator
  : public ExprEvaluatorBase<ComplexExprEvaluator> {
  ComplexValue &Result; // Destination for the evaluated complex value.

public:
  ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
    : ExprEvaluatorBaseTy(info), Result(Result) {}

  // Accept a generic APValue known to hold a complex value.
  bool Success(const APValue &V, const Expr *e) {
    Result.setFrom(V);
    return true;
  }

  bool ZeroInitialization(const Expr *E);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
  bool VisitCastExpr(const CastExpr *E);
  bool VisitBinaryOperator(const BinaryOperator *E);
  bool VisitUnaryOperator(const UnaryOperator *E);
  bool VisitInitListExpr(const InitListExpr *E);
  bool VisitCallExpr(const CallExpr *E);
};
} // end anonymous namespace
19587
19588static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
19589 EvalInfo &Info) {
19590 assert(!E->isValueDependent());
19591 assert(E->isPRValue() && E->getType()->isAnyComplexType());
19592 return ComplexExprEvaluator(Info, Result).Visit(S: E);
19593}
19594
19595bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
19596 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
19597 if (ElemTy->isRealFloatingType()) {
19598 Result.makeComplexFloat();
19599 APFloat Zero = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: ElemTy));
19600 Result.FloatReal = Zero;
19601 Result.FloatImag = Zero;
19602 } else {
19603 Result.makeComplexInt();
19604 APSInt Zero = Info.Ctx.MakeIntValue(Value: 0, Type: ElemTy);
19605 Result.IntReal = Zero;
19606 Result.IntImag = Zero;
19607 }
19608 return true;
19609}
19610
19611bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
19612 const Expr* SubExpr = E->getSubExpr();
19613
19614 if (SubExpr->getType()->isRealFloatingType()) {
19615 Result.makeComplexFloat();
19616 APFloat &Imag = Result.FloatImag;
19617 if (!EvaluateFloat(E: SubExpr, Result&: Imag, Info))
19618 return false;
19619
19620 Result.FloatReal = APFloat(Imag.getSemantics());
19621 return true;
19622 } else {
19623 assert(SubExpr->getType()->isIntegerType() &&
19624 "Unexpected imaginary literal.");
19625
19626 Result.makeComplexInt();
19627 APSInt &Imag = Result.IntImag;
19628 if (!EvaluateInteger(E: SubExpr, Result&: Imag, Info))
19629 return false;
19630
19631 Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
19632 return true;
19633 }
19634}
19635
/// Evaluate a cast whose result is a complex value.
///
/// Real-to-complex casts evaluate the operand and pair it with a zero
/// imaginary part; complex-to-complex casts convert both components
/// independently.
bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {

  switch (E->getCastKind()) {
  // None of these cast kinds can ever produce a complex prvalue; reaching
  // one of them here indicates broken AST invariants.
  case CK_BitCast:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_Dynamic:
  case CK_ToUnion:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_ConstructorConversion:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_NonAtomicToAtomic:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLMatrixTruncation:
  case CK_HLSLElementwiseCast:
  case CK_HLSLAggregateSplatCast:
    llvm_unreachable("invalid cast kind for complex value");

  // Value-preserving casts: generic handling suffices.
  case CK_LValueToRValue:
  case CK_AtomicToNonAtomic:
  case CK_NoOp:
  case CK_LValueToRValueBitCast:
  case CK_HLSLArrayRValue:
    return ExprEvaluatorBaseTy::VisitCastExpr(E);

  // Casts we cannot fold to a complex constant.
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_UserDefinedConversion:
    return Error(E);

  case CK_FloatingRealToComplex: {
    // Real operand becomes the real component; the imaginary component is a
    // positive zero in the same semantics.
    APFloat &Real = Result.FloatReal;
    if (!EvaluateFloat(E: E->getSubExpr(), Result&: Real, Info))
      return false;

    Result.makeComplexFloat();
    Result.FloatImag = APFloat(Real.getSemantics());
    return true;
  }

  case CK_FloatingComplexCast: {
    // Convert each floating component independently.
    if (!Visit(S: E->getSubExpr()))
      return false;

    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();

    return HandleFloatToFloatCast(Info, E, SrcType: From, DestType: To, Result&: Result.FloatReal) &&
           HandleFloatToFloatCast(Info, E, SrcType: From, DestType: To, Result&: Result.FloatImag);
  }

  case CK_FloatingComplexToIntegralComplex: {
    // Convert each floating component to the integral element type.
    if (!Visit(S: E->getSubExpr()))
      return false;

    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
    Result.makeComplexInt();
    return HandleFloatToIntCast(Info, E, SrcType: From, Value: Result.FloatReal,
                                DestType: To, Result&: Result.IntReal) &&
           HandleFloatToIntCast(Info, E, SrcType: From, Value: Result.FloatImag,
                                DestType: To, Result&: Result.IntImag);
  }

  case CK_IntegralRealToComplex: {
    // Integer operand becomes the real component; the imaginary component is
    // zero with matching width and signedness.
    APSInt &Real = Result.IntReal;
    if (!EvaluateInteger(E: E->getSubExpr(), Result&: Real, Info))
      return false;

    Result.makeComplexInt();
    Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
    return true;
  }

  case CK_IntegralComplexCast: {
    // Convert each integral component independently.
    if (!Visit(S: E->getSubExpr()))
      return false;

    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();

    Result.IntReal = HandleIntToIntCast(Info, E, DestType: To, SrcType: From, Value: Result.IntReal);
    Result.IntImag = HandleIntToIntCast(Info, E, DestType: To, SrcType: From, Value: Result.IntImag);
    return true;
  }

  case CK_IntegralComplexToFloatingComplex: {
    // Convert each integral component to floating point using the FP options
    // in effect at E.
    if (!Visit(S: E->getSubExpr()))
      return false;

    const FPOptions FPO = E->getFPFeaturesInEffect(
        LO: Info.Ctx.getLangOpts());
    QualType To = E->getType()->castAs<ComplexType>()->getElementType();
    QualType From
      = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
    Result.makeComplexFloat();
    return HandleIntToFloatCast(Info, E, FPO, SrcType: From, Value: Result.IntReal,
                                DestType: To, Result&: Result.FloatReal) &&
           HandleIntToFloatCast(Info, E, FPO, SrcType: From, Value: Result.IntImag,
                                DestType: To, Result&: Result.FloatImag);
  }
  }

  llvm_unreachable("unknown cast resulting in complex value");
}
19787
/// Return the multiplicative inverse of \p Byte in GF(2^8) with the GFNI/AES
/// reduction polynomial x^8 + x^4 + x^3 + x + 1 (0x11B).
///
/// Zero has no inverse; by convention the table maps 0 -> 0, matching the
/// hardware behavior of GF2P8AFFINEINVQB.
uint8_t GFNIMultiplicativeInverse(uint8_t Byte) {
  // Lookup Table for Multiplicative Inverse in GF(2^8).
  // 'static constexpr' keeps the 256-byte table in read-only storage instead
  // of copying it onto the stack on every call.
  static constexpr uint8_t GFInv[256] = {
      0x00, 0x01, 0x8d, 0xf6, 0xcb, 0x52, 0x7b, 0xd1, 0xe8, 0x4f, 0x29, 0xc0,
      0xb0, 0xe1, 0xe5, 0xc7, 0x74, 0xb4, 0xaa, 0x4b, 0x99, 0x2b, 0x60, 0x5f,
      0x58, 0x3f, 0xfd, 0xcc, 0xff, 0x40, 0xee, 0xb2, 0x3a, 0x6e, 0x5a, 0xf1,
      0x55, 0x4d, 0xa8, 0xc9, 0xc1, 0x0a, 0x98, 0x15, 0x30, 0x44, 0xa2, 0xc2,
      0x2c, 0x45, 0x92, 0x6c, 0xf3, 0x39, 0x66, 0x42, 0xf2, 0x35, 0x20, 0x6f,
      0x77, 0xbb, 0x59, 0x19, 0x1d, 0xfe, 0x37, 0x67, 0x2d, 0x31, 0xf5, 0x69,
      0xa7, 0x64, 0xab, 0x13, 0x54, 0x25, 0xe9, 0x09, 0xed, 0x5c, 0x05, 0xca,
      0x4c, 0x24, 0x87, 0xbf, 0x18, 0x3e, 0x22, 0xf0, 0x51, 0xec, 0x61, 0x17,
      0x16, 0x5e, 0xaf, 0xd3, 0x49, 0xa6, 0x36, 0x43, 0xf4, 0x47, 0x91, 0xdf,
      0x33, 0x93, 0x21, 0x3b, 0x79, 0xb7, 0x97, 0x85, 0x10, 0xb5, 0xba, 0x3c,
      0xb6, 0x70, 0xd0, 0x06, 0xa1, 0xfa, 0x81, 0x82, 0x83, 0x7e, 0x7f, 0x80,
      0x96, 0x73, 0xbe, 0x56, 0x9b, 0x9e, 0x95, 0xd9, 0xf7, 0x02, 0xb9, 0xa4,
      0xde, 0x6a, 0x32, 0x6d, 0xd8, 0x8a, 0x84, 0x72, 0x2a, 0x14, 0x9f, 0x88,
      0xf9, 0xdc, 0x89, 0x9a, 0xfb, 0x7c, 0x2e, 0xc3, 0x8f, 0xb8, 0x65, 0x48,
      0x26, 0xc8, 0x12, 0x4a, 0xce, 0xe7, 0xd2, 0x62, 0x0c, 0xe0, 0x1f, 0xef,
      0x11, 0x75, 0x78, 0x71, 0xa5, 0x8e, 0x76, 0x3d, 0xbd, 0xbc, 0x86, 0x57,
      0x0b, 0x28, 0x2f, 0xa3, 0xda, 0xd4, 0xe4, 0x0f, 0xa9, 0x27, 0x53, 0x04,
      0x1b, 0xfc, 0xac, 0xe6, 0x7a, 0x07, 0xae, 0x63, 0xc5, 0xdb, 0xe2, 0xea,
      0x94, 0x8b, 0xc4, 0xd5, 0x9d, 0xf8, 0x90, 0x6b, 0xb1, 0x0d, 0xd6, 0xeb,
      0xc6, 0x0e, 0xcf, 0xad, 0x08, 0x4e, 0xd7, 0xe3, 0x5d, 0x50, 0x1e, 0xb3,
      0x5b, 0x23, 0x38, 0x34, 0x68, 0x46, 0x03, 0x8c, 0xdd, 0x9c, 0x7d, 0xa0,
      0xcd, 0x1a, 0x41, 0x1c};

  return GFInv[Byte];
}
19816
/// Apply the GFNI affine transformation to one byte (the per-byte operation
/// of the x86 GF2P8AFFINEQB / GF2P8AFFINEINVQB instructions).
///
/// Each result bit BitIdx is the GF(2) dot product (parity of the bitwise
/// AND) of one matrix row packed in \p AQword with \p XByte — or with the
/// GF(2^8) multiplicative inverse of \p XByte when \p Inverse is set —
/// XORed with bit BitIdx of the immediate \p Imm.
uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm,
                   bool Inverse) {
  unsigned NumBitsInByte = 8;
  // Computing the affine transformation
  uint8_t RetByte = 0;
  for (uint32_t BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
    // Extract the matrix row for this result bit; rows are stored most
    // significant byte first within the qword.
    uint8_t AByte =
        AQword.lshr(shiftAmt: (7 - static_cast<int32_t>(BitIdx)) * NumBitsInByte)
            .getLoBits(numBits: 8)
            .getZExtValue();
    uint8_t Product;
    if (Inverse) {
      // Inverse variant transforms inv(X) rather than X itself.
      Product = AByte & GFNIMultiplicativeInverse(Byte: XByte);
    } else {
      Product = AByte & XByte;
    }
    uint8_t Parity = 0;

    // Dot product in GF(2) uses XOR instead of addition
    for (unsigned PBitIdx = 0; PBitIdx != NumBitsInByte; ++PBitIdx) {
      Parity = Parity ^ ((Product >> PBitIdx) & 0x1);
    }

    // Fold in the corresponding bit of the immediate (the affine constant).
    uint8_t Temp = Imm[BitIdx] ? 1 : 0;
    RetByte |= (Temp ^ Parity) << BitIdx;
  }
  return RetByte;
}
19845
/// Multiply two GF(2^8) elements (the per-byte operation of GF2P8MULB):
/// a carry-less polynomial product followed by reduction modulo
/// x^8 + x^4 + x^3 + x + 1 (0x11B).
uint8_t GFNIMul(uint8_t AByte, uint8_t BByte) {
  const unsigned BitsPerByte = 8;

  // Carry-less multiply: for every set bit of B, accumulate a shifted copy
  // of A. Addition in GF(2) is XOR, so no carries propagate. Two degree-7
  // polynomials give a product of degree at most 14, which fits in 16 bits.
  uint16_t Product = 0;
  for (unsigned Bit = 0; Bit != BitsPerByte; ++Bit)
    if (BByte & (1u << Bit))
      Product ^= static_cast<uint16_t>(AByte) << Bit;

  // Reduce the degree-<=14 product back into the field: cancel each set bit
  // above degree 7 by XORing in the reduction polynomial aligned under it.
  for (int Bit = 14; Bit > 7; --Bit)
    if (Product & (1u << Bit))
      Product ^= static_cast<uint16_t>(0x11B << (Bit - 8));

  return static_cast<uint8_t>(Product & 0xFF);
}
19869
/// Multiply two complex floating-point values, (A + iB) * (C + iD), writing
/// the real part to ResR and the imaginary part to ResI.
///
/// If the naive product is (NaN, NaN), the C11 Annex G recovery rules are
/// applied: infinite operands are canonicalized to signed unit/zero values,
/// NaN components are replaced with signed zeros, and the product is
/// recomputed scaled by infinity so that (inf * finite) cases still yield a
/// correctly signed infinite result.
void HandleComplexComplexMul(APFloat A, APFloat B, APFloat C, APFloat D,
                             APFloat &ResR, APFloat &ResI) {
  // This is an implementation of complex multiplication according to the
  // constraints laid out in C11 Annex G. The implementation uses the
  // following naming scheme:
  //   (a + ib) * (c + id)

  APFloat AC = A * C;
  APFloat BD = B * D;
  APFloat AD = A * D;
  APFloat BC = B * C;
  ResR = AC - BD;
  ResI = AD + BC;
  if (ResR.isNaN() && ResI.isNaN()) {
    bool Recalc = false;
    if (A.isInfinity() || B.isInfinity()) {
      // Box the infinity: replace infinite components with +/-1 and NaN
      // components of the other operand with +/-0, keeping signs.
      A = APFloat::copySign(Value: APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0),
                            Sign: A);
      B = APFloat::copySign(Value: APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0),
                            Sign: B);
      if (C.isNaN())
        C = APFloat::copySign(Value: APFloat(C.getSemantics()), Sign: C);
      if (D.isNaN())
        D = APFloat::copySign(Value: APFloat(D.getSemantics()), Sign: D);
      Recalc = true;
    }
    if (C.isInfinity() || D.isInfinity()) {
      // Same boxing for an infinite right-hand operand.
      C = APFloat::copySign(Value: APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0),
                            Sign: C);
      D = APFloat::copySign(Value: APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0),
                            Sign: D);
      if (A.isNaN())
        A = APFloat::copySign(Value: APFloat(A.getSemantics()), Sign: A);
      if (B.isNaN())
        B = APFloat::copySign(Value: APFloat(B.getSemantics()), Sign: B);
      Recalc = true;
    }
    if (!Recalc && (AC.isInfinity() || BD.isInfinity() || AD.isInfinity() ||
                    BC.isInfinity())) {
      // Neither operand was infinite but a partial product overflowed:
      // quiet the NaN components and recompute.
      if (A.isNaN())
        A = APFloat::copySign(Value: APFloat(A.getSemantics()), Sign: A);
      if (B.isNaN())
        B = APFloat::copySign(Value: APFloat(B.getSemantics()), Sign: B);
      if (C.isNaN())
        C = APFloat::copySign(Value: APFloat(C.getSemantics()), Sign: C);
      if (D.isNaN())
        D = APFloat::copySign(Value: APFloat(D.getSemantics()), Sign: D);
      Recalc = true;
    }
    if (Recalc) {
      // Scale by infinity so any nonzero recomputed part becomes a signed
      // infinity, per Annex G.
      ResR = APFloat::getInf(Sem: A.getSemantics()) * (A * C - B * D);
      ResI = APFloat::getInf(Sem: A.getSemantics()) * (A * D + B * C);
    }
  }
}
19925
void HandleComplexComplexDiv(APFloat A, APFloat B, APFloat C, APFloat D,
                             APFloat &ResR, APFloat &ResI) {
  // This is an implementation of complex division according to the
  // constraints laid out in C11 Annex G. The implementation uses the
  // following naming scheme:
  //   (a + ib) / (c + id)
  //
  // The denominator is rescaled by a power of two (ilogb/scalbn) before
  // computing c*c + d*d, so the quotient does not spuriously overflow or
  // underflow for finite inputs; the scaling is undone on the results.

  int DenomLogB = 0;
  APFloat MaxCD = maxnum(A: abs(X: C), B: abs(X: D));
  if (MaxCD.isFinite()) {
    // Scale C and D by 2^-DenomLogB so the larger has magnitude near 1.
    DenomLogB = ilogb(Arg: MaxCD);
    C = scalbn(X: C, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
    D = scalbn(X: D, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
  }
  APFloat Denom = C * C + D * D;
  // Apply the standard formula, then undo the denominator scaling.
  ResR =
      scalbn(X: (A * C + B * D) / Denom, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
  ResI =
      scalbn(X: (B * C - A * D) / Denom, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven);
  if (ResR.isNaN() && ResI.isNaN()) {
    // Both parts came out NaN: recover the special results Annex G requires.
    if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) {
      // Nonzero / zero: an infinity whose sign follows c and the numerator.
      ResR = APFloat::getInf(Sem: ResR.getSemantics(), Negative: C.isNegative()) * A;
      ResI = APFloat::getInf(Sem: ResR.getSemantics(), Negative: C.isNegative()) * B;
    } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() &&
               D.isFinite()) {
      // Infinite numerator / finite denominator: an infinite result. The
      // infinite components are "boxed" to +/-1 to compute the direction.
      A = APFloat::copySign(Value: APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0),
                            Sign: A);
      B = APFloat::copySign(Value: APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0),
                            Sign: B);
      ResR = APFloat::getInf(Sem: ResR.getSemantics()) * (A * C + B * D);
      ResI = APFloat::getInf(Sem: ResI.getSemantics()) * (B * C - A * D);
    } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) {
      // Finite numerator / infinite denominator: an appropriately signed zero.
      C = APFloat::copySign(Value: APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0),
                            Sign: C);
      D = APFloat::copySign(Value: APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0),
                            Sign: D);
      ResR = APFloat::getZero(Sem: ResR.getSemantics()) * (A * C + B * D);
      ResI = APFloat::getZero(Sem: ResI.getSemantics()) * (B * C - A * D);
    }
  }
}
19967
/// Reduce a rotate amount into the range [0, BitWidth) of the rotated value,
/// so constant evaluation of rotate builtins matches runtime behavior.
/// Handles mismatched bit widths between Value and Amount, and negative
/// (signed) amounts, which wrap around modulo BitWidth.
APSInt NormalizeRotateAmount(const APSInt &Value, const APSInt &Amount) {
  // Normalize shift amount to [0, BitWidth) range to match runtime behavior
  APSInt NormAmt = Amount;
  unsigned BitWidth = Value.getBitWidth();
  unsigned AmtBitWidth = NormAmt.getBitWidth();
  if (BitWidth == 1) {
    // Rotating a 1-bit value is always a no-op
    NormAmt = APSInt(APInt(AmtBitWidth, 0), NormAmt.isUnsigned());
  } else if (BitWidth == 2) {
    // For 2-bit values: rotation amount is 0 or 1 based on
    // whether the amount is even or odd. We can't use srem here because
    // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
    NormAmt =
        APSInt(APInt(AmtBitWidth, NormAmt[0] ? 1 : 0), NormAmt.isUnsigned());
  } else {
    // General case: NormAmt mod BitWidth. The divisor must be built at the
    // wider of the two widths so that neither operand is truncated; extend
    // the amount when it is the narrower one.
    APInt Divisor;
    if (AmtBitWidth > BitWidth) {
      Divisor = llvm::APInt(AmtBitWidth, BitWidth);
    } else {
      Divisor = llvm::APInt(BitWidth, BitWidth);
      if (AmtBitWidth < BitWidth) {
        NormAmt = NormAmt.extend(width: BitWidth);
      }
    }

    // Normalize to [0, BitWidth)
    if (NormAmt.isSigned()) {
      // srem may yield a negative remainder; add one divisor to land in
      // [0, BitWidth).
      NormAmt = APSInt(NormAmt.srem(RHS: Divisor), /*isUnsigned=*/false);
      if (NormAmt.isNegative()) {
        APSInt SignedDivisor(Divisor, /*isUnsigned=*/false);
        NormAmt += SignedDivisor;
      }
    } else {
      NormAmt = APSInt(NormAmt.urem(RHS: Divisor), /*isUnsigned=*/true);
    }
  }

  return NormAmt;
}
20007
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
  // Pointer-to-member, assignment, and comma operators are not complex
  // arithmetic; defer to the generic evaluator machinery.
  if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
    return ExprEvaluatorBaseTy::VisitBinaryOperator(E);

  // Track whether the LHS or RHS is real at the type system level. When this is
  // the case we can simplify our evaluation strategy.
  bool LHSReal = false, RHSReal = false;

  bool LHSOK;
  if (E->getLHS()->getType()->isRealFloatingType()) {
    // Promote a real LHS to a complex value with a zero imaginary part.
    LHSReal = true;
    APFloat &Real = Result.FloatReal;
    LHSOK = EvaluateFloat(E: E->getLHS(), Result&: Real, Info);
    if (LHSOK) {
      Result.makeComplexFloat();
      Result.FloatImag = APFloat(Real.getSemantics());
    }
  } else {
    LHSOK = Visit(S: E->getLHS());
  }
  // If the LHS failed, keep evaluating the RHS only when the caller wants
  // additional failure notes; otherwise stop now.
  if (!LHSOK && !Info.noteFailure())
    return false;

  ComplexValue RHS;
  if (E->getRHS()->getType()->isRealFloatingType()) {
    // Same promotion for a real RHS.
    RHSReal = true;
    APFloat &Real = RHS.FloatReal;
    if (!EvaluateFloat(E: E->getRHS(), Result&: Real, Info) || !LHSOK)
      return false;
    RHS.makeComplexFloat();
    RHS.FloatImag = APFloat(Real.getSemantics());
  } else if (!EvaluateComplex(E: E->getRHS(), Result&: RHS, Info) || !LHSOK)
    return false;

  // If both operands were real, the expression itself would have real type
  // and we would not be in the complex evaluator at all.
  assert(!(LHSReal && RHSReal) &&
         "Cannot have both operands of a complex operation be real.");
  switch (E->getOpcode()) {
  default: return Error(E);
  case BO_Add:
    if (Result.isComplexFloat()) {
      Result.getComplexFloatReal().add(RHS: RHS.getComplexFloatReal(),
                                       RM: APFloat::rmNearestTiesToEven);
      // A real operand contributes nothing to the imaginary part.
      if (LHSReal)
        Result.getComplexFloatImag() = RHS.getComplexFloatImag();
      else if (!RHSReal)
        Result.getComplexFloatImag().add(RHS: RHS.getComplexFloatImag(),
                                         RM: APFloat::rmNearestTiesToEven);
    } else {
      Result.getComplexIntReal() += RHS.getComplexIntReal();
      Result.getComplexIntImag() += RHS.getComplexIntImag();
    }
    break;
  case BO_Sub:
    if (Result.isComplexFloat()) {
      Result.getComplexFloatReal().subtract(RHS: RHS.getComplexFloatReal(),
                                            RM: APFloat::rmNearestTiesToEven);
      if (LHSReal) {
        // (a + i0) - (c + id) has imaginary part -d.
        Result.getComplexFloatImag() = RHS.getComplexFloatImag();
        Result.getComplexFloatImag().changeSign();
      } else if (!RHSReal) {
        Result.getComplexFloatImag().subtract(RHS: RHS.getComplexFloatImag(),
                                              RM: APFloat::rmNearestTiesToEven);
      }
    } else {
      Result.getComplexIntReal() -= RHS.getComplexIntReal();
      Result.getComplexIntImag() -= RHS.getComplexIntImag();
    }
    break;
  case BO_Mul:
    if (Result.isComplexFloat()) {
      // This is an implementation of complex multiplication according to the
      // constraints laid out in C11 Annex G. The implementation uses the
      // following naming scheme:
      //   (a + ib) * (c + id)
      ComplexValue LHS = Result;
      APFloat &A = LHS.getComplexFloatReal();
      APFloat &B = LHS.getComplexFloatImag();
      APFloat &C = RHS.getComplexFloatReal();
      APFloat &D = RHS.getComplexFloatImag();
      APFloat &ResR = Result.getComplexFloatReal();
      APFloat &ResI = Result.getComplexFloatImag();
      if (LHSReal) {
        assert(!RHSReal && "Cannot have two real operands for a complex op!");
        // Real * complex: scale both parts; handleFloatFloatBinOp diagnoses
        // any FP evaluation issues (e.g. under strict FP semantics).
        ResR = A;
        ResI = A;
        // ResR = A * C;
        // ResI = A * D;
        if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Mul, RHS: C) ||
            !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Mul, RHS: D))
          return false;
      } else if (RHSReal) {
        // Complex * real: symmetric to the case above.
        // ResR = C * A;
        // ResI = C * B;
        ResR = C;
        ResI = C;
        if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Mul, RHS: A) ||
            !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Mul, RHS: B))
          return false;
      } else {
        // Full complex * complex, with Annex G special-value handling.
        HandleComplexComplexMul(A, B, C, D, ResR, ResI);
      }
    } else {
      // Integer complex multiply: (ac - bd) + i(ad + bc). LHS is copied
      // first because Result is overwritten component by component.
      ComplexValue LHS = Result;
      Result.getComplexIntReal() =
        (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
         LHS.getComplexIntImag() * RHS.getComplexIntImag());
      Result.getComplexIntImag() =
        (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
         LHS.getComplexIntImag() * RHS.getComplexIntReal());
    }
    break;
  case BO_Div:
    if (Result.isComplexFloat()) {
      // This is an implementation of complex division according to the
      // constraints laid out in C11 Annex G. The implementation uses the
      // following naming scheme:
      //   (a + ib) / (c + id)
      ComplexValue LHS = Result;
      APFloat &A = LHS.getComplexFloatReal();
      APFloat &B = LHS.getComplexFloatImag();
      APFloat &C = RHS.getComplexFloatReal();
      APFloat &D = RHS.getComplexFloatImag();
      APFloat &ResR = Result.getComplexFloatReal();
      APFloat &ResI = Result.getComplexFloatImag();
      if (RHSReal) {
        // Complex / real: divide both components by the real divisor.
        ResR = A;
        ResI = B;
        // ResR = A / C;
        // ResI = B / C;
        if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Div, RHS: C) ||
            !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Div, RHS: C))
          return false;
      } else {
        if (LHSReal) {
          // No real optimizations we can do here, stub out with zero.
          B = APFloat::getZero(Sem: A.getSemantics());
        }
        HandleComplexComplexDiv(A, B, C, D, ResR, ResI);
      }
    } else {
      // Integer complex divide via the conjugate: multiply numerator and
      // denominator by (c - id), giving denominator c*c + d*d.
      ComplexValue LHS = Result;
      APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
        RHS.getComplexIntImag() * RHS.getComplexIntImag();
      if (Den.isZero())
        return Error(E, D: diag::note_expr_divide_by_zero);

      Result.getComplexIntReal() =
        (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
         LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
      Result.getComplexIntImag() =
        (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
         LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
    }
    break;
  }

  return true;
}
20166
20167bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
20168 // Get the operand value into 'Result'.
20169 if (!Visit(S: E->getSubExpr()))
20170 return false;
20171
20172 switch (E->getOpcode()) {
20173 default:
20174 return Error(E);
20175 case UO_Extension:
20176 return true;
20177 case UO_Plus:
20178 // The result is always just the subexpr.
20179 return true;
20180 case UO_Minus:
20181 if (Result.isComplexFloat()) {
20182 Result.getComplexFloatReal().changeSign();
20183 Result.getComplexFloatImag().changeSign();
20184 }
20185 else {
20186 Result.getComplexIntReal() = -Result.getComplexIntReal();
20187 Result.getComplexIntImag() = -Result.getComplexIntImag();
20188 }
20189 return true;
20190 case UO_Not:
20191 if (Result.isComplexFloat())
20192 Result.getComplexFloatImag().changeSign();
20193 else
20194 Result.getComplexIntImag() = -Result.getComplexIntImag();
20195 return true;
20196 }
20197}
20198
20199bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
20200 if (E->getNumInits() == 2) {
20201 if (E->getType()->isComplexType()) {
20202 Result.makeComplexFloat();
20203 if (!EvaluateFloat(E: E->getInit(Init: 0), Result&: Result.FloatReal, Info))
20204 return false;
20205 if (!EvaluateFloat(E: E->getInit(Init: 1), Result&: Result.FloatImag, Info))
20206 return false;
20207 } else {
20208 Result.makeComplexInt();
20209 if (!EvaluateInteger(E: E->getInit(Init: 0), Result&: Result.IntReal, Info))
20210 return false;
20211 if (!EvaluateInteger(E: E->getInit(Init: 1), Result&: Result.IntImag, Info))
20212 return false;
20213 }
20214 return true;
20215 }
20216 return ExprEvaluatorBaseTy::VisitInitListExpr(E);
20217}
20218
20219bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) {
20220 if (!IsConstantEvaluatedBuiltinCall(E))
20221 return ExprEvaluatorBaseTy::VisitCallExpr(E);
20222
20223 switch (E->getBuiltinCallee()) {
20224 case Builtin::BI__builtin_complex:
20225 Result.makeComplexFloat();
20226 if (!EvaluateFloat(E: E->getArg(Arg: 0), Result&: Result.FloatReal, Info))
20227 return false;
20228 if (!EvaluateFloat(E: E->getArg(Arg: 1), Result&: Result.FloatImag, Info))
20229 return false;
20230 return true;
20231
20232 default:
20233 return false;
20234 }
20235}
20236
20237//===----------------------------------------------------------------------===//
20238// Atomic expression evaluation, essentially just handling the NonAtomicToAtomic
20239// implicit conversion.
20240//===----------------------------------------------------------------------===//
20241
namespace {
/// Evaluator for prvalues of _Atomic type. Atomic types add no value
/// representation of their own, so this mainly unwraps the
/// NonAtomicToAtomic implicit conversion and forwards to the evaluator for
/// the underlying value type.
class AtomicExprEvaluator :
    public ExprEvaluatorBase<AtomicExprEvaluator> {
  const LValue *This;
  APValue &Result;
public:
  AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result)
      : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}

  bool Success(const APValue &V, const Expr *E) {
    Result = V;
    return true;
  }

  bool ZeroInitialization(const Expr *E) {
    // Zero-initialize via an implicit value-initialization of the wrapped
    // (non-atomic) value type.
    ImplicitValueInitExpr VIE(
        E->getType()->castAs<AtomicType>()->getValueType());
    // For atomic-qualified class (and array) types in C++, initialize the
    // _Atomic-wrapped subobject directly, in-place.
    return This ? EvaluateInPlace(Result, Info, This: *This, E: &VIE)
                : Evaluate(Result, Info, E: &VIE);
  }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);
    case CK_NullToPointer:
      // Evaluate the operand only for its effects, then zero-initialize
      // (a null pointer is the zero value of the pointer type).
      VisitIgnoredValue(E: E->getSubExpr());
      return ZeroInitialization(E);
    case CK_NonAtomicToAtomic:
      // The atomic object takes its value from the non-atomic operand.
      return This ? EvaluateInPlace(Result, Info, This: *This, E: E->getSubExpr())
                  : Evaluate(Result, Info, E: E->getSubExpr());
    }
  }
};
} // end anonymous namespace
20279
/// Evaluate a prvalue of _Atomic type. 'This' is the object being
/// initialized in place (used for class/array atomics), or null when the
/// value can be produced directly into Result.
static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
                           EvalInfo &Info) {
  assert(!E->isValueDependent());
  assert(E->isPRValue() && E->getType()->isAtomicType());
  return AtomicExprEvaluator(Info, This, Result).Visit(S: E);
}
20286
20287//===----------------------------------------------------------------------===//
20288// Void expression evaluation, primarily for a cast to void on the LHS of a
20289// comma operator
20290//===----------------------------------------------------------------------===//
20291
namespace {
/// Evaluator for expressions of void type, primarily a cast to void on the
/// LHS of a comma operator. There is no value to produce; evaluation
/// succeeds if the expression's effects are permitted in a constant context.
class VoidExprEvaluator
  : public ExprEvaluatorBase<VoidExprEvaluator> {
public:
  VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}

  // There is no value; reaching a "result" is simply success.
  bool Success(const APValue &V, const Expr *e) { return true; }

  bool ZeroInitialization(const Expr *E) { return true; }

  bool VisitCastExpr(const CastExpr *E) {
    switch (E->getCastKind()) {
    default:
      return ExprEvaluatorBaseTy::VisitCastExpr(E);
    case CK_ToVoid:
      // Evaluate the operand only for its effect diagnostics; the result
      // is discarded.
      VisitIgnoredValue(E: E->getSubExpr());
      return true;
    }
  }

  bool VisitCallExpr(const CallExpr *E) {
    if (!IsConstantEvaluatedBuiltinCall(E))
      return ExprEvaluatorBaseTy::VisitCallExpr(E);

    switch (E->getBuiltinCallee()) {
    case Builtin::BI__assume:
    case Builtin::BI__builtin_assume:
      // The argument is not evaluated!
      return true;

    case Builtin::BI__builtin_operator_delete:
      return HandleOperatorDeleteCall(Info, E);

    default:
      return false;
    }
  }

  bool VisitCXXDeleteExpr(const CXXDeleteExpr *E);
};
} // end anonymous namespace
20333
/// Constant-evaluate a delete-expression: verify the pointer designates a
/// dynamic allocation made during this evaluation, run the destructor, and
/// remove the allocation from the evaluation's heap.
bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
  // We cannot speculatively evaluate a delete expression.
  if (Info.SpeculativeEvaluationDepth)
    return false;

  // The selected operator delete must be usable in constant evaluation
  // (i.e. a replaceable global deallocation function).
  FunctionDecl *OperatorDelete = E->getOperatorDelete();
  if (!OperatorDelete
           ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable)
        << isa<CXXMethodDecl>(Val: OperatorDelete) << OperatorDelete;
    return false;
  }

  const Expr *Arg = E->getArgument();

  LValue Pointer;
  if (!EvaluatePointer(E: Arg, Result&: Pointer, Info))
    return false;
  if (Pointer.Designator.Invalid)
    return false;

  // Deleting a null pointer has no effect.
  if (Pointer.isNullPointer()) {
    // This is the only case where we need to produce an extension warning:
    // the only other way we can succeed is if we find a dynamic allocation,
    // and we will have warned when we allocated it in that case.
    if (!Info.getLangOpts().CPlusPlus20)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_new);
    return true;
  }

  // Check the pointer designates a matching (new vs new[]) heap allocation.
  std::optional<DynAlloc *> Alloc = CheckDeleteKind(
      Info, E, Pointer, DeallocKind: E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New);
  if (!Alloc)
    return false;
  QualType AllocType = Pointer.Base.getDynamicAllocType();

  // For the non-array case, the designator must be empty if the static type
  // does not have a virtual destructor.
  if (!E->isArrayForm() && Pointer.Designator.Entries.size() != 0 &&
      !hasVirtualDestructor(T: Arg->getType()->getPointeeType())) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_delete_base_nonvirt_dtor)
        << Arg->getType()->getPointeeType() << AllocType;
    return false;
  }

  // For a class type with a virtual destructor, the selected operator delete
  // is the one looked up when building the destructor.
  if (!E->isArrayForm() && !E->isGlobalDelete()) {
    const FunctionDecl *VirtualDelete = getVirtualOperatorDelete(T: AllocType);
    if (VirtualDelete &&
        !VirtualDelete
             ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
      Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable)
          << isa<CXXMethodDecl>(Val: VirtualDelete) << VirtualDelete;
      return false;
    }
  }

  // Run the destructor on the allocated object.
  if (!HandleDestruction(Info, Loc: E->getExprLoc(), LVBase: Pointer.getLValueBase(),
                         Value&: (*Alloc)->Value, T: AllocType))
    return false;

  if (!Info.HeapAllocs.erase(x: Pointer.Base.dyn_cast<DynamicAllocLValue>())) {
    // The element was already erased. This means the destructor call also
    // deleted the object.
    // FIXME: This probably results in undefined behavior before we get this
    // far, and should be diagnosed elsewhere first.
    Info.FFDiag(E, DiagId: diag::note_constexpr_double_delete);
    return false;
  }

  return true;
}
20408
/// Evaluate a prvalue of void type, checking that its effects are permitted
/// in a constant context. There is no result value.
static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
  assert(!E->isValueDependent());
  assert(E->isPRValue() && E->getType()->isVoidType());
  return VoidExprEvaluator(Info).Visit(S: E);
}
20414
20415//===----------------------------------------------------------------------===//
20416// Top level Expr::EvaluateAsRValue method.
20417//===----------------------------------------------------------------------===//
20418
/// Evaluate E into Result, dispatching on E's value category and type to the
/// matching specialized evaluator. Glvalues evaluate to their designated
/// object (no lvalue-to-rvalue conversion happens here); prvalues evaluate
/// to a value.
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
  assert(!E->isValueDependent());
  // In C, function designators are not lvalues, but we evaluate them as if they
  // are.
  QualType T = E->getType();
  if (E->isGLValue() || T->isFunctionType()) {
    LValue LV;
    if (!EvaluateLValue(E, Result&: LV, Info))
      return false;
    LV.moveInto(V&: Result);
  } else if (T->isVectorType()) {
    if (!EvaluateVector(E, Result, Info))
      return false;
  } else if (T->isIntegralOrEnumerationType()) {
    if (!IntExprEvaluator(Info, Result).Visit(S: E))
      return false;
  } else if (T->hasPointerRepresentation()) {
    LValue LV;
    if (!EvaluatePointer(E, Result&: LV, Info))
      return false;
    LV.moveInto(V&: Result);
  } else if (T->isRealFloatingType()) {
    llvm::APFloat F(0.0);
    if (!EvaluateFloat(E, Result&: F, Info))
      return false;
    Result = APValue(F);
  } else if (T->isAnyComplexType()) {
    ComplexValue C;
    if (!EvaluateComplex(E, Result&: C, Info))
      return false;
    C.moveInto(v&: Result);
  } else if (T->isFixedPointType()) {
    if (!FixedPointExprEvaluator(Info, Result).Visit(S: E)) return false;
  } else if (T->isMemberPointerType()) {
    MemberPtr P;
    if (!EvaluateMemberPointer(E, Result&: P, Info))
      return false;
    P.moveInto(V&: Result);
    return true;
  } else if (T->isArrayType()) {
    // Arrays and records need an object to evaluate into; create a
    // full-expression-scoped temporary and copy the value out.
    LValue LV;
    APValue &Value =
        Info.CurrentCall->createTemporary(Key: E, T, Scope: ScopeKind::FullExpression, LV);
    if (!EvaluateArray(E, This: LV, Result&: Value, Info))
      return false;
    Result = Value;
  } else if (T->isRecordType()) {
    LValue LV;
    APValue &Value =
        Info.CurrentCall->createTemporary(Key: E, T, Scope: ScopeKind::FullExpression, LV);
    if (!EvaluateRecord(E, This: LV, Result&: Value, Info))
      return false;
    Result = Value;
  } else if (T->isVoidType()) {
    // Void expressions are a C++11 extension to constant evaluation.
    if (!Info.getLangOpts().CPlusPlus11)
      Info.CCEDiag(E, DiagId: diag::note_constexpr_nonliteral)
        << E->getType();
    if (!EvaluateVoid(E, Info))
      return false;
  } else if (T->isAtomicType()) {
    // Atomic class/array types are evaluated in place like their non-atomic
    // counterparts; scalar atomics evaluate directly into Result.
    QualType Unqual = T.getAtomicUnqualifiedType();
    if (Unqual->isArrayType() || Unqual->isRecordType()) {
      LValue LV;
      APValue &Value = Info.CurrentCall->createTemporary(
          Key: E, T: Unqual, Scope: ScopeKind::FullExpression, LV);
      if (!EvaluateAtomic(E, This: &LV, Result&: Value, Info))
        return false;
      Result = Value;
    } else {
      if (!EvaluateAtomic(E, This: nullptr, Result, Info))
        return false;
    }
  } else if (Info.getLangOpts().CPlusPlus11) {
    Info.FFDiag(E, DiagId: diag::note_constexpr_nonliteral) << E->getType();
    return false;
  } else {
    Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr);
    return false;
  }

  return true;
}
20501
/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
/// cases, the in-place evaluation is essential, since later initializers for
/// an object can indirectly refer to subobjects which were initialized earlier.
/// 'This' designates the object being initialized; Result is its value slot.
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
                            const Expr *E, bool AllowNonLiteralTypes) {
  assert(!E->isValueDependent());

  // Normally expressions passed to EvaluateInPlace have a type, but not when
  // a VarDecl initializer is evaluated before the untyped ParenListExpr is
  // replaced with a CXXConstructExpr. This can happen in LLDB.
  if (E->getType().isNull())
    return false;

  if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, This: &This))
    return false;

  if (E->isPRValue()) {
    // Evaluate arrays and record types in-place, so that later initializers can
    // refer to earlier-initialized members of the object.
    QualType T = E->getType();
    if (T->isArrayType())
      return EvaluateArray(E, This, Result, Info);
    else if (T->isRecordType())
      return EvaluateRecord(E, This, Result, Info);
    else if (T->isAtomicType()) {
      // Atomic-wrapped class/array types are also evaluated in place.
      QualType Unqual = T.getAtomicUnqualifiedType();
      if (Unqual->isArrayType() || Unqual->isRecordType())
        return EvaluateAtomic(E, This: &This, Result, Info);
    }
  }

  // For any other type, in-place evaluation is unimportant.
  return Evaluate(Result, Info, E);
}
20536
/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
/// lvalue-to-rvalue cast if it is an lvalue. On success, Result holds a value
/// that satisfies the constant-expression requirements for a normal constant.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
  assert(!E->isValueDependent());

  if (E->getType().isNull())
    return false;

  if (!CheckLiteralType(Info, E))
    return false;

  // The bytecode interpreter path performs its own evaluation and its own
  // lvalue-to-rvalue handling; only the final constant check is shared.
  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluateAsRValue(Parent&: Info, E, Result))
      return false;
    return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result,
                                   Kind: ConstantExprKind::Normal);
  }

  if (!::Evaluate(Result, Info, E))
    return false;

  // Implicit lvalue-to-rvalue cast.
  if (E->isGLValue()) {
    LValue LV;
    LV.setFrom(Ctx: Info.Ctx, V: Result);
    if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: LV, RVal&: Result))
      return false;
  }

  // Check this core constant expression is a constant expression.
  return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result,
                                 Kind: ConstantExprKind::Normal) &&
         CheckMemoryLeaks(Info);
}
20571
/// Fast path covering common literal cases without spinning up the full
/// evaluator. Returns true when a verdict was reached, with IsConst saying
/// whether the expression folded (and Result holding the value if so);
/// returns false to fall back to the full evaluator.
static bool FastEvaluateAsRValue(const Expr *Exp, APValue &Result,
                                 const ASTContext &Ctx, bool &IsConst) {
  // Fast-path evaluations of integer literals, since we sometimes see files
  // containing vast quantities of these.
  if (const auto *L = dyn_cast<IntegerLiteral>(Val: Exp)) {
    Result =
        APValue(APSInt(L->getValue(), L->getType()->isUnsignedIntegerType()));
    IsConst = true;
    return true;
  }

  if (const auto *L = dyn_cast<CXXBoolLiteralExpr>(Val: Exp)) {
    Result = APValue(APSInt(APInt(1, L->getValue())));
    IsConst = true;
    return true;
  }

  if (const auto *FL = dyn_cast<FloatingLiteral>(Val: Exp)) {
    Result = APValue(FL->getValue());
    IsConst = true;
    return true;
  }

  if (const auto *L = dyn_cast<CharacterLiteral>(Val: Exp)) {
    Result = APValue(Ctx.MakeIntValue(Value: L->getValue(), Type: L->getType()));
    IsConst = true;
    return true;
  }

  if (const auto *CE = dyn_cast<ConstantExpr>(Val: Exp)) {
    // Reuse a previously-computed result when one is cached on the
    // ConstantExpr. LValue results are skipped: they may need further
    // checking against the current evaluation context.
    if (CE->hasAPValueResult()) {
      APValue APV = CE->getAPValueResult();
      if (!APV.isLValue()) {
        Result = std::move(APV);
        IsConst = true;
        return true;
      }
    }

    // The SubExpr is usually just an IntegerLiteral.
    return FastEvaluateAsRValue(Exp: CE->getSubExpr(), Result, Ctx, IsConst);
  }

  // This case should be rare, but we need to check it before we check on
  // the type below.
  if (Exp->getType().isNull()) {
    IsConst = false;
    return true;
  }

  return false;
}
20624
20625static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
20626 Expr::SideEffectsKind SEK) {
20627 return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
20628 (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
20629}
20630
/// Evaluate E as an rvalue, trying the literal fast path first and falling
/// back to the full evaluator.
static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result,
                             const ASTContext &Ctx, EvalInfo &Info) {
  assert(!E->isValueDependent());
  bool IsConst;
  if (FastEvaluateAsRValue(Exp: E, Result&: Result.Val, Ctx, IsConst))
    return IsConst;

  return EvaluateAsRValue(Info, E, Result&: Result.Val);
}
20640
20641static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
20642 const ASTContext &Ctx,
20643 Expr::SideEffectsKind AllowSideEffects,
20644 EvalInfo &Info) {
20645 assert(!E->isValueDependent());
20646 if (!E->getType()->isIntegralOrEnumerationType())
20647 return false;
20648
20649 if (!::EvaluateAsRValue(E, Result&: ExprResult, Ctx, Info) ||
20650 !ExprResult.Val.isInt() ||
20651 hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects))
20652 return false;
20653
20654 return true;
20655}
20656
20657static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult,
20658 const ASTContext &Ctx,
20659 Expr::SideEffectsKind AllowSideEffects,
20660 EvalInfo &Info) {
20661 assert(!E->isValueDependent());
20662 if (!E->getType()->isFixedPointType())
20663 return false;
20664
20665 if (!::EvaluateAsRValue(E, Result&: ExprResult, Ctx, Info))
20666 return false;
20667
20668 if (!ExprResult.Val.isFixedPoint() ||
20669 hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects))
20670 return false;
20671
20672 return true;
20673}
20674
/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
/// will be applied to the result. Side effects are ignored while folding.
bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
                            bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsRValue");
  EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = InConstantContext;
  return ::EvaluateAsRValue(E: this, Result, Ctx, Info);
}
20689
/// Fold this expression and convert the folded value to bool, as when the
/// expression is used as a condition.
bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx,
                                      bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsBooleanCondition");
  EvalResult Scratch;
  return EvaluateAsRValue(Result&: Scratch, Ctx, InConstantContext) &&
         HandleConversionToBool(Val: Scratch.Val, Result);
}
20699
/// Fold this expression to an integer, subject to the given side-effect
/// policy. Fails for non-integral/enumeration types.
bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
                         SideEffectsKind AllowSideEffects,
                         bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsInt");
  EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = InConstantContext;
  return ::EvaluateAsInt(E: this, ExprResult&: Result, Ctx, AllowSideEffects, Info);
}
20710
/// Fold this expression to a fixed-point value, subject to the given
/// side-effect policy. Fails for non-fixed-point types.
bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
                                SideEffectsKind AllowSideEffects,
                                bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFixedPoint");
  EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects);
  Info.InConstantContext = InConstantContext;
  return ::EvaluateAsFixedPoint(E: this, ExprResult&: Result, Ctx, AllowSideEffects, Info);
}
20721
20722bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
20723 SideEffectsKind AllowSideEffects,
20724 bool InConstantContext) const {
20725 assert(!isValueDependent() &&
20726 "Expression evaluator can't be called on a dependent expression.");
20727
20728 if (!getType()->isRealFloatingType())
20729 return false;
20730
20731 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFloat");
20732 EvalResult ExprResult;
20733 if (!EvaluateAsRValue(Result&: ExprResult, Ctx, InConstantContext) ||
20734 !ExprResult.Val.isFloat() ||
20735 hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects))
20736 return false;
20737
20738 Result = ExprResult.Val.getFloat();
20739 return true;
20740}
20741
/// Fold this glvalue expression to an lvalue that is a permitted constant
/// (e.g. the address of a global), using constant-folding mode. Fails if
/// evaluation had side effects or the lvalue does not satisfy the
/// constant-expression requirements.
bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
                            bool InConstantContext) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsLValue");
  EvalInfo Info(Ctx, Result, EvaluationMode::ConstantFold);
  Info.InConstantContext = InConstantContext;
  LValue LV;
  CheckedTemporaries CheckedTemps;

  // Bytecode interpreter path: evaluate, then validate the resulting lvalue.
  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluate(Parent&: Info, E: this, Result&: Result.Val,
                                              Kind: ConstantExprKind::Normal))
      return false;

    LV.setFrom(Ctx, V: Result.Val);
    return CheckLValueConstantExpression(
        Info, Loc: getExprLoc(), Type: Ctx.getLValueReferenceType(T: getType()), LVal: LV,
        Kind: ConstantExprKind::Normal, CheckedTemps);
  }

  // Classic evaluator path: the result is checked as if it bound a reference
  // of this expression's type.
  if (!EvaluateLValue(E: this, Result&: LV, Info) || !Info.discardCleanups() ||
      Result.HasSideEffects ||
      !CheckLValueConstantExpression(Info, Loc: getExprLoc(),
                                     Type: Ctx.getLValueReferenceType(T: getType()), LVal: LV,
                                     Kind: ConstantExprKind::Normal, CheckedTemps))
    return false;

  LV.moveInto(V&: Result.Val);
  return true;
}
20774
/// Evaluate the destruction of \p DestroyedValue, an object of type \p Type
/// rooted at \p Base. \p IsConstantDestruction selects strict
/// constant-expression rules rather than mere constant folding. Returns
/// false on failure or if the destruction had side effects; diagnostics are
/// accumulated into \p EStatus.
static bool EvaluateDestruction(const ASTContext &Ctx, APValue::LValueBase Base,
                                APValue DestroyedValue, QualType Type,
                                SourceLocation Loc, Expr::EvalStatus &EStatus,
                                bool IsConstantDestruction) {
  EvalInfo Info(Ctx, EStatus,
                IsConstantDestruction ? EvaluationMode::ConstantExpression
                                      : EvaluationMode::ConstantFold);
  // Register the object as the declaration being evaluated, in destructor
  // mode, so lifetime checks and diagnostics know what is being destroyed.
  Info.setEvaluatingDecl(Base, Value&: DestroyedValue,
                         EDK: EvalInfo::EvaluatingDeclKind::Dtor);
  Info.InConstantContext = IsConstantDestruction;

  LValue LVal;
  LVal.set(B: Base);

  if (!HandleDestruction(Info, Loc, LVBase: Base, Value&: DestroyedValue, T: Type) ||
      EStatus.HasSideEffects)
    return false;

  // Destroying a complete object should leave no cleanups behind.
  if (!Info.discardCleanups())
    llvm_unreachable("Unhandled cleanup; missing full expression marker?");

  return true;
}
20798
bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
                                  ConstantExprKind Kind) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");
  // Fast path: trivially foldable expressions (e.g. literals) need no full
  // evaluation machinery.
  bool IsConst;
  if (FastEvaluateAsRValue(Exp: this, Result&: Result.Val, Ctx, IsConst) &&
      Result.Val.hasValue())
    return true;

  ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsConstantExpr");
  EvaluationMode EM = EvaluationMode::ConstantExpression;
  EvalInfo Info(Ctx, Result, EM);
  Info.InConstantContext = true;

  // New (bytecode) constant interpreter path.
  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluate(Parent&: Info, E: this, Result&: Result.Val, Kind))
      return false;
    return CheckConstantExpression(Info, DiagLoc: getExprLoc(),
                                   Type: getStorageType(Ctx, E: this), Value: Result.Val, Kind);
  }

  // The type of the object we're initializing is 'const T' for a class NTTP.
  QualType T = getType();
  if (Kind == ConstantExprKind::ClassTemplateArgument)
    T.addConst();

  // If we're evaluating a prvalue, fake up a MaterializeTemporaryExpr to
  // represent the result of the evaluation. CheckConstantExpression ensures
  // this doesn't escape.
  MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true);
  APValue::LValueBase Base(&BaseMTE);
  Info.setEvaluatingDecl(Base, Value&: Result.Val);

  LValue LVal;
  LVal.set(B: Base);
  // C++23 [intro.execution]/p5
  // A full-expression is [...] a constant-expression
  // So we need to make sure temporary objects are destroyed after having
  // evaluating the expression (per C++23 [class.temporary]/p4).
  FullExpressionRAII Scope(Info);
  if (!::EvaluateInPlace(Result&: Result.Val, Info, This: LVal, E: this) ||
      Result.HasSideEffects || !Scope.destroy())
    return false;

  if (!Info.discardCleanups())
    llvm_unreachable("Unhandled cleanup; missing full expression marker?");

  // The evaluated value must itself be a valid constant expression of the
  // expression's storage type, and must not leak dynamic allocations.
  if (!CheckConstantExpression(Info, DiagLoc: getExprLoc(), Type: getStorageType(Ctx, E: this),
                               Value: Result.Val, Kind))
    return false;
  if (!CheckMemoryLeaks(Info))
    return false;

  // If this is a class template argument, it's required to have constant
  // destruction too.
  if (Kind == ConstantExprKind::ClassTemplateArgument &&
      (!EvaluateDestruction(Ctx, Base, DestroyedValue: Result.Val, Type: T, Loc: getBeginLoc(), EStatus&: Result,
                            IsConstantDestruction: true) ||
       Result.HasSideEffects)) {
    // FIXME: Prefix a note to indicate that the problem is lack of constant
    // destruction.
    return false;
  }

  return true;
}
20865
20866bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
20867 const VarDecl *VD,
20868 SmallVectorImpl<PartialDiagnosticAt> &Notes,
20869 bool IsConstantInitialization) const {
20870 assert(!isValueDependent() &&
20871 "Expression evaluator can't be called on a dependent expression.");
20872 assert(VD && "Need a valid VarDecl");
20873
20874 llvm::TimeTraceScope TimeScope("EvaluateAsInitializer", [&] {
20875 std::string Name;
20876 llvm::raw_string_ostream OS(Name);
20877 VD->printQualifiedName(OS);
20878 return Name;
20879 });
20880
20881 Expr::EvalStatus EStatus;
20882 EStatus.Diag = &Notes;
20883
20884 EvalInfo Info(Ctx, EStatus,
20885 (IsConstantInitialization &&
20886 (Ctx.getLangOpts().CPlusPlus || Ctx.getLangOpts().C23))
20887 ? EvaluationMode::ConstantExpression
20888 : EvaluationMode::ConstantFold);
20889 Info.setEvaluatingDecl(Base: VD, Value);
20890 Info.InConstantContext = IsConstantInitialization;
20891
20892 SourceLocation DeclLoc = VD->getLocation();
20893 QualType DeclTy = VD->getType();
20894
20895 if (Info.EnableNewConstInterp) {
20896 auto &InterpCtx = Ctx.getInterpContext();
20897 if (!InterpCtx.evaluateAsInitializer(Parent&: Info, VD, Init: this, Result&: Value))
20898 return false;
20899
20900 return CheckConstantExpression(Info, DiagLoc: DeclLoc, Type: DeclTy, Value,
20901 Kind: ConstantExprKind::Normal);
20902 } else {
20903 LValue LVal;
20904 LVal.set(B: VD);
20905
20906 {
20907 // C++23 [intro.execution]/p5
20908 // A full-expression is ... an init-declarator ([dcl.decl]) or a
20909 // mem-initializer.
20910 // So we need to make sure temporary objects are destroyed after having
20911 // evaluated the expression (per C++23 [class.temporary]/p4).
20912 //
20913 // FIXME: Otherwise this may break test/Modules/pr68702.cpp because the
20914 // serialization code calls ParmVarDecl::getDefaultArg() which strips the
20915 // outermost FullExpr, such as ExprWithCleanups.
20916 FullExpressionRAII Scope(Info);
20917 if (!EvaluateInPlace(Result&: Value, Info, This: LVal, E: this,
20918 /*AllowNonLiteralTypes=*/true) ||
20919 EStatus.HasSideEffects)
20920 return false;
20921 }
20922
20923 // At this point, any lifetime-extended temporaries are completely
20924 // initialized.
20925 Info.performLifetimeExtension();
20926
20927 if (!Info.discardCleanups())
20928 llvm_unreachable("Unhandled cleanup; missing full expression marker?");
20929 }
20930
20931 return CheckConstantExpression(Info, DiagLoc: DeclLoc, Type: DeclTy, Value,
20932 Kind: ConstantExprKind::Normal) &&
20933 CheckMemoryLeaks(Info);
20934}
20935
20936bool VarDecl::evaluateDestruction(
20937 SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
20938 Expr::EvalStatus EStatus;
20939 EStatus.Diag = &Notes;
20940
20941 // Only treat the destruction as constant destruction if we formally have
20942 // constant initialization (or are usable in a constant expression).
20943 bool IsConstantDestruction = hasConstantInitialization();
20944
20945 // Make a copy of the value for the destructor to mutate, if we know it.
20946 // Otherwise, treat the value as default-initialized; if the destructor works
20947 // anyway, then the destruction is constant (and must be essentially empty).
20948 APValue DestroyedValue;
20949 if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent())
20950 DestroyedValue = *getEvaluatedValue();
20951 else if (!handleDefaultInitValue(T: getType(), Result&: DestroyedValue))
20952 return false;
20953
20954 if (!EvaluateDestruction(Ctx: getASTContext(), Base: this, DestroyedValue: std::move(DestroyedValue),
20955 Type: getType(), Loc: getLocation(), EStatus,
20956 IsConstantDestruction) ||
20957 EStatus.HasSideEffects)
20958 return false;
20959
20960 ensureEvaluatedStmt()->HasConstantDestruction = true;
20961 return true;
20962}
20963
20964/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
20965/// constant folded, but discard the result.
20966bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
20967 assert(!isValueDependent() &&
20968 "Expression evaluator can't be called on a dependent expression.");
20969
20970 EvalResult Result;
20971 return EvaluateAsRValue(Result, Ctx, /* in constant context */ InConstantContext: true) &&
20972 !hasUnacceptableSideEffect(Result, SEK);
20973}
20974
20975APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
20976 assert(!isValueDependent() &&
20977 "Expression evaluator can't be called on a dependent expression.");
20978
20979 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstInt");
20980 EvalResult EVResult;
20981 EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects);
20982 Info.InConstantContext = true;
20983
20984 bool Result = ::EvaluateAsRValue(E: this, Result&: EVResult, Ctx, Info);
20985 (void)Result;
20986 assert(Result && "Could not evaluate expression");
20987 assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
20988
20989 return EVResult.Val.getInt();
20990}
20991
20992APSInt Expr::EvaluateKnownConstIntCheckOverflow(
20993 const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
20994 assert(!isValueDependent() &&
20995 "Expression evaluator can't be called on a dependent expression.");
20996
20997 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstIntCheckOverflow");
20998 EvalResult EVResult;
20999 EVResult.Diag = Diag;
21000 EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects);
21001 Info.InConstantContext = true;
21002 Info.CheckingForUndefinedBehavior = true;
21003
21004 bool Result = ::EvaluateAsRValue(Info, E: this, Result&: EVResult.Val);
21005 (void)Result;
21006 assert(Result && "Could not evaluate expression");
21007 assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
21008
21009 return EVResult.Val.getInt();
21010}
21011
21012void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
21013 assert(!isValueDependent() &&
21014 "Expression evaluator can't be called on a dependent expression.");
21015
21016 ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateForOverflow");
21017 bool IsConst;
21018 EvalResult EVResult;
21019 if (!FastEvaluateAsRValue(Exp: this, Result&: EVResult.Val, Ctx, IsConst)) {
21020 EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects);
21021 Info.CheckingForUndefinedBehavior = true;
21022 (void)::EvaluateAsRValue(Info, E: this, Result&: EVResult.Val);
21023 }
21024}
21025
/// Returns whether the evaluated result is an lvalue rooted in a global.
/// Requires that the stored value is an lvalue.
bool Expr::EvalResult::isGlobalLValue() const {
  assert(Val.isLValue());
  return IsGlobalLValue(B: Val.getLValueBase());
}
21030
21031/// isIntegerConstantExpr - this recursive routine will test if an expression is
21032/// an integer constant expression.
21033
21034/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
21035/// comma, etc
21036
21037// CheckICE - This function does the fundamental ICE checking: the returned
21038// ICEDiag contains an ICEKind indicating whether the expression is an ICE.
21039//
21040// Note that to reduce code duplication, this helper does no evaluation
21041// itself; the caller checks whether the expression is evaluatable, and
21042// in the rare cases where CheckICE actually cares about the evaluated
21043// value, it calls into Evaluate.
21044
namespace {

/// Classification result for the C / C++98 integer-constant-expression check.
enum ICEKind {
  /// This expression is an ICE.
  IK_ICE,
  /// This expression is not an ICE, but if it isn't evaluated, it's
  /// a legal subexpression for an ICE. This return value is used to handle
  /// the comma operator in C99 mode, and non-constant subexpressions.
  IK_ICEIfUnevaluated,
  /// This expression is not an ICE, and is not a legal subexpression for one.
  IK_NotICE
};

/// An ICE classification paired with the source location to diagnose if the
/// overall expression turns out not to be an ICE.
struct ICEDiag {
  ICEKind Kind;
  SourceLocation Loc;

  ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {}
};

} // namespace
21066
/// Convenience: "this expression is an ICE", with no diagnostic location.
static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); }
21068
21069static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
21070
21071static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
21072 Expr::EvalResult EVResult;
21073 Expr::EvalStatus Status;
21074 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
21075
21076 Info.InConstantContext = true;
21077 if (!::EvaluateAsRValue(E, Result&: EVResult, Ctx, Info) || EVResult.HasSideEffects ||
21078 !EVResult.Val.isInt())
21079 return ICEDiag(IK_NotICE, E->getBeginLoc());
21080
21081 return NoDiag();
21082}
21083
static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
  assert(!E->isValueDependent() && "Should not see value dependent exprs!");
  // An ICE must have integral or enumeration type.
  if (!E->getType()->isIntegralOrEnumerationType())
    return ICEDiag(IK_NotICE, E->getBeginLoc());

  switch (E->getStmtClass()) {
  // Non-expression statements can never be ICEs; the macros below generate a
  // case for every statement class that is not an expression.
#define ABSTRACT_STMT(Node)
#define STMT(Node, Base) case Expr::Node##Class:
#define EXPR(Node, Base)
#include "clang/AST/StmtNodes.inc"
  // Expression kinds that are never ICEs: they either cannot have integral
  // type, or involve operations forbidden in an integer constant expression
  // (lvalues, calls, throws, allocation, Objective-C constructs, ...).
  case Expr::PredefinedExprClass:
  case Expr::FloatingLiteralClass:
  case Expr::ImaginaryLiteralClass:
  case Expr::StringLiteralClass:
  case Expr::ArraySubscriptExprClass:
  case Expr::MatrixSingleSubscriptExprClass:
  case Expr::MatrixSubscriptExprClass:
  case Expr::ArraySectionExprClass:
  case Expr::OMPArrayShapingExprClass:
  case Expr::OMPIteratorExprClass:
  case Expr::MemberExprClass:
  case Expr::CompoundAssignOperatorClass:
  case Expr::CompoundLiteralExprClass:
  case Expr::ExtVectorElementExprClass:
  case Expr::MatrixElementExprClass:
  case Expr::DesignatedInitExprClass:
  case Expr::ArrayInitLoopExprClass:
  case Expr::ArrayInitIndexExprClass:
  case Expr::NoInitExprClass:
  case Expr::DesignatedInitUpdateExprClass:
  case Expr::ImplicitValueInitExprClass:
  case Expr::ParenListExprClass:
  case Expr::VAArgExprClass:
  case Expr::AddrLabelExprClass:
  case Expr::StmtExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CUDAKernelCallExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXTypeidExprClass:
  case Expr::CXXUuidofExprClass:
  case Expr::MSPropertyRefExprClass:
  case Expr::MSPropertySubscriptExprClass:
  case Expr::CXXNullPtrLiteralExprClass:
  case Expr::UserDefinedLiteralClass:
  case Expr::CXXThisExprClass:
  case Expr::CXXThrowExprClass:
  case Expr::CXXNewExprClass:
  case Expr::CXXDeleteExprClass:
  case Expr::CXXPseudoDestructorExprClass:
  case Expr::UnresolvedLookupExprClass:
  case Expr::RecoveryExprClass:
  case Expr::DependentScopeDeclRefExprClass:
  case Expr::CXXConstructExprClass:
  case Expr::CXXInheritedCtorInitExprClass:
  case Expr::CXXStdInitializerListExprClass:
  case Expr::CXXBindTemporaryExprClass:
  case Expr::ExprWithCleanupsClass:
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXUnresolvedConstructExprClass:
  case Expr::CXXDependentScopeMemberExprClass:
  case Expr::UnresolvedMemberExprClass:
  case Expr::ObjCStringLiteralClass:
  case Expr::ObjCBoxedExprClass:
  case Expr::ObjCArrayLiteralClass:
  case Expr::ObjCDictionaryLiteralClass:
  case Expr::ObjCEncodeExprClass:
  case Expr::ObjCMessageExprClass:
  case Expr::ObjCSelectorExprClass:
  case Expr::ObjCProtocolExprClass:
  case Expr::ObjCIvarRefExprClass:
  case Expr::ObjCPropertyRefExprClass:
  case Expr::ObjCSubscriptRefExprClass:
  case Expr::ObjCIsaExprClass:
  case Expr::ObjCAvailabilityCheckExprClass:
  case Expr::ShuffleVectorExprClass:
  case Expr::ConvertVectorExprClass:
  case Expr::BlockExprClass:
  case Expr::NoStmtClass:
  case Expr::OpaqueValueExprClass:
  case Expr::PackExpansionExprClass:
  case Expr::SubstNonTypeTemplateParmPackExprClass:
  case Expr::FunctionParmPackExprClass:
  case Expr::AsTypeExprClass:
  case Expr::ObjCIndirectCopyRestoreExprClass:
  case Expr::MaterializeTemporaryExprClass:
  case Expr::PseudoObjectExprClass:
  case Expr::AtomicExprClass:
  case Expr::LambdaExprClass:
  case Expr::CXXFoldExprClass:
  case Expr::CoawaitExprClass:
  case Expr::DependentCoawaitExprClass:
  case Expr::CoyieldExprClass:
  case Expr::SYCLUniqueStableNameExprClass:
  case Expr::CXXParenListInitExprClass:
  case Expr::HLSLOutArgExprClass:
    return ICEDiag(IK_NotICE, E->getBeginLoc());

  case Expr::InitListExprClass: {
    // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
    // form "T x = { a };" is equivalent to "T x = a;".
    // Unless we're initializing a reference, T is a scalar as it is known to be
    // of integral or enumeration type.
    if (E->isPRValue())
      if (cast<InitListExpr>(Val: E)->getNumInits() == 1)
        return CheckICE(E: cast<InitListExpr>(Val: E)->getInit(Init: 0), Ctx);
    return ICEDiag(IK_NotICE, E->getBeginLoc());
  }

  // Compile-time constants by construction.
  case Expr::SizeOfPackExprClass:
  case Expr::GNUNullExprClass:
  case Expr::SourceLocExprClass:
  case Expr::EmbedExprClass:
  case Expr::OpenACCAsteriskSizeExprClass:
    return NoDiag();

  // Wrappers: classify the underlying expression.
  case Expr::PackIndexingExprClass:
    return CheckICE(E: cast<PackIndexingExpr>(Val: E)->getSelectedExpr(), Ctx);

  case Expr::SubstNonTypeTemplateParmExprClass:
    return
      CheckICE(E: cast<SubstNonTypeTemplateParmExpr>(Val: E)->getReplacement(), Ctx);

  case Expr::ConstantExprClass:
    return CheckICE(E: cast<ConstantExpr>(Val: E)->getSubExpr(), Ctx);

  case Expr::ParenExprClass:
    return CheckICE(E: cast<ParenExpr>(Val: E)->getSubExpr(), Ctx);
  case Expr::GenericSelectionExprClass:
    return CheckICE(E: cast<GenericSelectionExpr>(Val: E)->getResultExpr(), Ctx);
  case Expr::IntegerLiteralClass:
  case Expr::FixedPointLiteralClass:
  case Expr::CharacterLiteralClass:
  case Expr::ObjCBoolLiteralExprClass:
  case Expr::CXXBoolLiteralExprClass:
  case Expr::CXXScalarValueInitExprClass:
  case Expr::TypeTraitExprClass:
  case Expr::ConceptSpecializationExprClass:
  case Expr::RequiresExprClass:
  case Expr::ArrayTypeTraitExprClass:
  case Expr::ExpressionTraitExprClass:
  case Expr::CXXNoexceptExprClass:
  case Expr::CXXReflectExprClass:
    return NoDiag();
  case Expr::CallExprClass:
  case Expr::CXXOperatorCallExprClass: {
    // C99 6.6/3 allows function calls within unevaluated subexpressions of
    // constant expressions, but they can never be ICEs because an ICE cannot
    // contain an operand of (pointer to) function type.
    const CallExpr *CE = cast<CallExpr>(Val: E);
    if (CE->getBuiltinCallee())
      return CheckEvalInICE(E, Ctx);
    return ICEDiag(IK_NotICE, E->getBeginLoc());
  }
  case Expr::CXXRewrittenBinaryOperatorClass:
    return CheckICE(E: cast<CXXRewrittenBinaryOperator>(Val: E)->getSemanticForm(),
                    Ctx);
  case Expr::DeclRefExprClass: {
    const NamedDecl *D = cast<DeclRefExpr>(Val: E)->getDecl();
    if (isa<EnumConstantDecl>(Val: D))
      return NoDiag();

    // C++ and OpenCL (FIXME: spec reference?) allow reading const-qualified
    // integer variables in constant expressions:
    //
    // C++ 7.1.5.1p2
    //   A variable of non-volatile const-qualified integral or enumeration
    //   type initialized by an ICE can be used in ICEs.
    //
    // We sometimes use CheckICE to check the C++98 rules in C++11 mode. In
    // that mode, use of reference variables should not be allowed.
    const VarDecl *VD = dyn_cast<VarDecl>(Val: D);
    if (VD && VD->isUsableInConstantExpressions(C: Ctx) &&
        !VD->getType()->isReferenceType())
      return NoDiag();

    return ICEDiag(IK_NotICE, E->getBeginLoc());
  }
  case Expr::UnaryOperatorClass: {
    const UnaryOperator *Exp = cast<UnaryOperator>(Val: E);
    switch (Exp->getOpcode()) {
    case UO_PostInc:
    case UO_PostDec:
    case UO_PreInc:
    case UO_PreDec:
    case UO_AddrOf:
    case UO_Deref:
    case UO_Coawait:
      // C99 6.6/3 allows increment and decrement within unevaluated
      // subexpressions of constant expressions, but they can never be ICEs
      // because an ICE cannot contain an lvalue operand.
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    case UO_Extension:
    case UO_LNot:
    case UO_Plus:
    case UO_Minus:
    case UO_Not:
    case UO_Real:
    case UO_Imag:
      return CheckICE(E: Exp->getSubExpr(), Ctx);
    }
    llvm_unreachable("invalid unary operator class");
  }
  case Expr::OffsetOfExprClass: {
    // Note that per C99, offsetof must be an ICE. And AFAIK, using
    // EvaluateAsRValue matches the proposed gcc behavior for cases like
    // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
    // compliance: we should warn earlier for offsetof expressions with
    // array subscripts that aren't ICEs, and if the array subscripts
    // are ICEs, the value of the offsetof must be an integer constant.
    return CheckEvalInICE(E, Ctx);
  }
  case Expr::UnaryExprOrTypeTraitExprClass: {
    const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(Val: E);
    // sizeof(VLA) is a runtime quantity, never an ICE.
    if ((Exp->getKind() == UETT_SizeOf) &&
        Exp->getTypeOfArgument()->isVariableArrayType())
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    if (Exp->getKind() == UETT_CountOf) {
      QualType ArgTy = Exp->getTypeOfArgument();
      if (ArgTy->isVariableArrayType()) {
        // We need to look whether the array is multidimensional. If it is,
        // then we want to check the size expression manually to see whether
        // it is an ICE or not.
        const auto *VAT = Ctx.getAsVariableArrayType(T: ArgTy);
        if (VAT->getElementType()->isArrayType())
          // Variable array size expression could be missing (e.g. int a[*][10])
          // In that case, it can't be a constant expression.
          return VAT->getSizeExpr() ? CheckICE(E: VAT->getSizeExpr(), Ctx)
                                    : ICEDiag(IK_NotICE, E->getBeginLoc());

        // Otherwise, this is a regular VLA, which is definitely not an ICE.
        return ICEDiag(IK_NotICE, E->getBeginLoc());
      }
    }
    return NoDiag();
  }
  case Expr::BinaryOperatorClass: {
    const BinaryOperator *Exp = cast<BinaryOperator>(Val: E);
    switch (Exp->getOpcode()) {
    case BO_PtrMemD:
    case BO_PtrMemI:
    case BO_Assign:
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_ShlAssign:
    case BO_ShrAssign:
    case BO_AndAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // C99 6.6/3 allows assignments within unevaluated subexpressions of
      // constant expressions, but they can never be ICEs because an ICE cannot
      // contain an lvalue operand.
      return ICEDiag(IK_NotICE, E->getBeginLoc());

    case BO_Mul:
    case BO_Div:
    case BO_Rem:
    case BO_Add:
    case BO_Sub:
    case BO_Shl:
    case BO_Shr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
    case BO_And:
    case BO_Xor:
    case BO_Or:
    case BO_Comma:
    case BO_Cmp: {
      ICEDiag LHSResult = CheckICE(E: Exp->getLHS(), Ctx);
      ICEDiag RHSResult = CheckICE(E: Exp->getRHS(), Ctx);
      if (Exp->getOpcode() == BO_Div ||
          Exp->getOpcode() == BO_Rem) {
        // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
        // we don't evaluate one.
        if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
          llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
          if (REval == 0)
            return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
          if (REval.isSigned() && REval.isAllOnes()) {
            // INT_MIN / -1 (and INT_MIN % -1) overflow; also only an ICE if
            // unevaluated.
            llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
            if (LEval.isMinSignedValue())
              return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
          }
        }
      }
      if (Exp->getOpcode() == BO_Comma) {
        if (Ctx.getLangOpts().C99) {
          // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
          // if it isn't evaluated.
          if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
            return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
        } else {
          // In both C89 and C++, commas in ICEs are illegal.
          return ICEDiag(IK_NotICE, E->getBeginLoc());
        }
      }
      return Worst(A: LHSResult, B: RHSResult);
    }
    case BO_LAnd:
    case BO_LOr: {
      ICEDiag LHSResult = CheckICE(E: Exp->getLHS(), Ctx);
      ICEDiag RHSResult = CheckICE(E: Exp->getRHS(), Ctx);
      if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) {
        // Rare case where the RHS has a comma "side-effect"; we need
        // to actually check the condition to see whether the side
        // with the comma is evaluated.
        if ((Exp->getOpcode() == BO_LAnd) !=
            (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
          return RHSResult;
        return NoDiag();
      }

      return Worst(A: LHSResult, B: RHSResult);
    }
    }
    llvm_unreachable("invalid binary operator kind");
  }
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass: {
    const Expr *SubExpr = cast<CastExpr>(Val: E)->getSubExpr();
    if (isa<ExplicitCastExpr>(Val: E)) {
      if (const FloatingLiteral *FL
            = dyn_cast<FloatingLiteral>(Val: SubExpr->IgnoreParenImpCasts())) {
        unsigned DestWidth = Ctx.getIntWidth(T: E->getType());
        bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
        APSInt IgnoredVal(DestWidth, !DestSigned);
        bool Ignored;
        // If the value does not fit in the destination type, the behavior is
        // undefined, so we are not required to treat it as a constant
        // expression.
        if (FL->getValue().convertToInteger(Result&: IgnoredVal,
                                            RM: llvm::APFloat::rmTowardZero,
                                            IsExact: &Ignored) & APFloat::opInvalidOp)
          return ICEDiag(IK_NotICE, E->getBeginLoc());
        return NoDiag();
      }
    }
    switch (cast<CastExpr>(Val: E)->getCastKind()) {
    case CK_LValueToRValue:
    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_IntegralToBoolean:
    case CK_IntegralCast:
      return CheckICE(E: SubExpr, Ctx);
    default:
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    }
  }
  case Expr::BinaryConditionalOperatorClass: {
    const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(Val: E);
    ICEDiag CommonResult = CheckICE(E: Exp->getCommon(), Ctx);
    if (CommonResult.Kind == IK_NotICE) return CommonResult;
    ICEDiag FalseResult = CheckICE(E: Exp->getFalseExpr(), Ctx);
    if (FalseResult.Kind == IK_NotICE) return FalseResult;
    if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
    // GNU ?: — the false arm only matters if the condition is zero.
    if (FalseResult.Kind == IK_ICEIfUnevaluated &&
        Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
    return FalseResult;
  }
  case Expr::ConditionalOperatorClass: {
    const ConditionalOperator *Exp = cast<ConditionalOperator>(Val: E);
    // If the condition (ignoring parens) is a __builtin_constant_p call,
    // then only the true side is actually considered in an integer constant
    // expression, and it is fully evaluated. This is an important GNU
    // extension. See GCC PR38377 for discussion.
    if (const CallExpr *CallCE
          = dyn_cast<CallExpr>(Val: Exp->getCond()->IgnoreParenCasts()))
      if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
        return CheckEvalInICE(E, Ctx);
    ICEDiag CondResult = CheckICE(E: Exp->getCond(), Ctx);
    if (CondResult.Kind == IK_NotICE)
      return CondResult;

    ICEDiag TrueResult = CheckICE(E: Exp->getTrueExpr(), Ctx);
    ICEDiag FalseResult = CheckICE(E: Exp->getFalseExpr(), Ctx);

    if (TrueResult.Kind == IK_NotICE)
      return TrueResult;
    if (FalseResult.Kind == IK_NotICE)
      return FalseResult;
    if (CondResult.Kind == IK_ICEIfUnevaluated)
      return CondResult;
    if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE)
      return NoDiag();
    // Rare case where the diagnostics depend on which side is evaluated
    // Note that if we get here, CondResult is 0, and at least one of
    // TrueResult and FalseResult is non-zero.
    if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0)
      return FalseResult;
    return TrueResult;
  }
  case Expr::CXXDefaultArgExprClass:
    return CheckICE(E: cast<CXXDefaultArgExpr>(Val: E)->getExpr(), Ctx);
  case Expr::CXXDefaultInitExprClass:
    return CheckICE(E: cast<CXXDefaultInitExpr>(Val: E)->getExpr(), Ctx);
  case Expr::ChooseExprClass: {
    return CheckICE(E: cast<ChooseExpr>(Val: E)->getChosenSubExpr(), Ctx);
  }
  case Expr::BuiltinBitCastExprClass: {
    if (!checkBitCastConstexprEligibility(Info: nullptr, Ctx, BCE: cast<CastExpr>(Val: E)))
      return ICEDiag(IK_NotICE, E->getBeginLoc());
    return CheckICE(E: cast<CastExpr>(Val: E)->getSubExpr(), Ctx);
  }
  }

  llvm_unreachable("Invalid StmtClass!");
}
21504
21505/// Evaluate an expression as a C++11 integral constant expression.
21506static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
21507 const Expr *E,
21508 llvm::APSInt *Value) {
21509 if (!E->getType()->isIntegralOrUnscopedEnumerationType())
21510 return false;
21511
21512 APValue Result;
21513 if (!E->isCXX11ConstantExpr(Ctx, Result: &Result))
21514 return false;
21515
21516 if (!Result.isInt())
21517 return false;
21518
21519 if (Value) *Value = Result.getInt();
21520 return true;
21521}
21522
21523bool Expr::isIntegerConstantExpr(const ASTContext &Ctx) const {
21524 assert(!isValueDependent() &&
21525 "Expression evaluator can't be called on a dependent expression.");
21526
21527 ExprTimeTraceScope TimeScope(this, Ctx, "isIntegerConstantExpr");
21528
21529 if (Ctx.getLangOpts().CPlusPlus11)
21530 return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, E: this, Value: nullptr);
21531
21532 ICEDiag D = CheckICE(E: this, Ctx);
21533 if (D.Kind != IK_ICE)
21534 return false;
21535 return true;
21536}
21537
21538std::optional<llvm::APSInt>
21539Expr::getIntegerConstantExpr(const ASTContext &Ctx) const {
21540 if (isValueDependent()) {
21541 // Expression evaluator can't succeed on a dependent expression.
21542 return std::nullopt;
21543 }
21544
21545 if (Ctx.getLangOpts().CPlusPlus11) {
21546 APSInt Value;
21547 if (EvaluateCPlusPlus11IntegralConstantExpr(Ctx, E: this, Value: &Value))
21548 return Value;
21549 return std::nullopt;
21550 }
21551
21552 if (!isIntegerConstantExpr(Ctx))
21553 return std::nullopt;
21554
21555 // The only possible side-effects here are due to UB discovered in the
21556 // evaluation (for instance, INT_MAX + 1). In such a case, we are still
21557 // required to treat the expression as an ICE, so we produce the folded
21558 // value.
21559 EvalResult ExprResult;
21560 Expr::EvalStatus Status;
21561 EvalInfo Info(Ctx, Status, EvaluationMode::IgnoreSideEffects);
21562 Info.InConstantContext = true;
21563
21564 if (!::EvaluateAsInt(E: this, ExprResult, Ctx, AllowSideEffects: SE_AllowSideEffects, Info))
21565 llvm_unreachable("ICE cannot be evaluated!");
21566
21567 return ExprResult.Val.getInt();
21568}
21569
21570bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const {
21571 assert(!isValueDependent() &&
21572 "Expression evaluator can't be called on a dependent expression.");
21573
21574 return CheckICE(E: this, Ctx).Kind == IK_ICE;
21575}
21576
21577bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result) const {
21578 assert(!isValueDependent() &&
21579 "Expression evaluator can't be called on a dependent expression.");
21580
21581 // We support this checking in C++98 mode in order to diagnose compatibility
21582 // issues.
21583 assert(Ctx.getLangOpts().CPlusPlus);
21584
21585 bool IsConst;
21586 APValue Scratch;
21587 if (FastEvaluateAsRValue(Exp: this, Result&: Scratch, Ctx, IsConst) && Scratch.hasValue()) {
21588 if (Result)
21589 *Result = Scratch;
21590 return true;
21591 }
21592
21593 // Build evaluation settings.
21594 Expr::EvalStatus Status;
21595 SmallVector<PartialDiagnosticAt, 8> Diags;
21596 Status.Diag = &Diags;
21597 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
21598
21599 bool IsConstExpr =
21600 ::EvaluateAsRValue(Info, E: this, Result&: Result ? *Result : Scratch) &&
21601 // FIXME: We don't produce a diagnostic for this, but the callers that
21602 // call us on arbitrary full-expressions should generally not care.
21603 Info.discardCleanups() && !Status.HasSideEffects;
21604
21605 return IsConstExpr && Diags.empty();
21606}
21607
/// Evaluate this expression as if Callee were invoked with Args substituted
/// for its parameters (and, for implicit-object member functions, `This` as
/// the object argument), without building an actual CallExpr. Each argument
/// is evaluated independently: a failed or side-effecting argument evaluation
/// only clears that argument's slot, it does not abort the whole evaluation.
/// Returns true and fills Value only if the final evaluation succeeds with no
/// side effects.
bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
                                    const FunctionDecl *Callee,
                                    ArrayRef<const Expr*> Args,
                                    const Expr *This) const {
  assert(!isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  // Record time spent here when -ftime-trace is enabled, keyed by the
  // callee's qualified name.
  llvm::TimeTraceScope TimeScope("EvaluateWithSubstitution", [&] {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    Callee->getNameForDiagnostic(OS, Policy: Ctx.getPrintingPolicy(),
                                 /*Qualified=*/true);
    return Name;
  });

  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpressionUnevaluated);
  Info.InConstantContext = true;

  // Evaluate the object argument first, if provided. On failure ThisPtr stays
  // null and the call below is still attempted without it.
  LValue ThisVal;
  const LValue *ThisPtr = nullptr;
  if (This) {
#ifndef NDEBUG
    auto *MD = dyn_cast<CXXMethodDecl>(Callee);
    assert(MD && "Don't provide `this` for non-methods.");
    assert(MD->isImplicitObjectMemberFunction() &&
           "Don't provide `this` for methods without an implicit object.");
#endif
    if (!This->isValueDependent() &&
        EvaluateObjectArgument(Info, Object: This, This&: ThisVal) &&
        !Info.EvalStatus.HasSideEffects)
      ThisPtr = &ThisVal;

    // Ignore any side-effects from a failed evaluation. This is safe because
    // they can't interfere with any other argument evaluation.
    Info.EvalStatus.HasSideEffects = false;
  }

  // Evaluate each argument into the fabricated call's parameter slots.
  // Surplus arguments beyond the parameter count are ignored.
  CallRef Call = Info.CurrentCall->createCall(Callee);
  for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
       I != E; ++I) {
    unsigned Idx = I - Args.begin();
    if (Idx >= Callee->getNumParams())
      break;
    const ParmVarDecl *PVD = Callee->getParamDecl(i: Idx);
    if ((*I)->isValueDependent() ||
        !EvaluateCallArg(PVD, Arg: *I, Call, Info) ||
        Info.EvalStatus.HasSideEffects) {
      // If evaluation fails, throw away the argument entirely.
      if (APValue *Slot = Info.getParamSlot(Call, PVD))
        *Slot = APValue();
    }

    // Ignore any side-effects from a failed evaluation. This is safe because
    // they can't interfere with any other argument evaluation.
    Info.EvalStatus.HasSideEffects = false;
  }

  // Parameter cleanups happen in the caller and are not part of this
  // evaluation.
  Info.discardCleanups();
  Info.EvalStatus.HasSideEffects = false;

  // Build fake call to Callee.
  CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, This,
                       Call);
  // FIXME: Missing ExprWithCleanups in enable_if conditions?
  FullExpressionRAII Scope(Info);
  return Evaluate(Result&: Value, Info, E: this) && Scope.destroy() &&
         !Info.EvalStatus.HasSideEffects;
}
21679
/// Check whether the definition of FD could possibly produce a constant
/// expression for some set of argument values (with parameters treated as
/// unknown). Reasons it cannot are appended to Diags; success is reported as
/// "no diagnostics were produced". Dependent contexts are conservatively
/// accepted.
bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
                                   SmallVectorImpl<
                                     PartialDiagnosticAt> &Diags) {
  // FIXME: It would be useful to check constexpr function templates, but at the
  // moment the constant expression evaluator cannot cope with the non-rigorous
  // ASTs which we build for dependent expressions.
  if (FD->isDependentContext())
    return true;

  // Record time spent here when -ftime-trace is enabled.
  llvm::TimeTraceScope TimeScope("isPotentialConstantExpr", [&] {
    std::string Name;
    llvm::raw_string_ostream OS(Name);
    FD->getNameForDiagnostic(OS, Policy: FD->getASTContext().getPrintingPolicy(),
                             /*Qualified=*/true);
    return Name;
  });

  Expr::EvalStatus Status;
  Status.Diag = &Diags;

  EvalInfo Info(FD->getASTContext(), Status,
                EvaluationMode::ConstantExpression);
  Info.InConstantContext = true;
  Info.CheckingPotentialConstantExpression = true;

  // The constexpr VM attempts to compile all methods to bytecode here.
  if (Info.EnableNewConstInterp) {
    Info.Ctx.getInterpContext().isPotentialConstantExpr(Parent&: Info, FD);
    return Diags.empty();
  }

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD);
  const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;

  // Fabricate an arbitrary expression on the stack and pretend that it
  // is a temporary being used as the 'this' pointer.
  LValue This;
  ImplicitValueInitExpr VIE(RD ? Info.Ctx.getCanonicalTagType(TD: RD)
                               : Info.Ctx.IntTy);
  This.set(B: {&VIE, Info.CurrentCall->Index});

  // No concrete arguments are supplied; parameter reads stay unknown.
  ArrayRef<const Expr*> Args;

  APValue Scratch;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Val: FD)) {
    // Evaluate the call as a constant initializer, to allow the construction
    // of objects of non-literal types.
    Info.setEvaluatingDecl(Base: This.getLValueBase(), Value&: Scratch);
    HandleConstructorCall(E: &VIE, This, Args, Definition: CD, Info, Result&: Scratch);
  } else {
    SourceLocation Loc = FD->getLocation();
    // Only pass an object argument for implicit-object member functions.
    HandleFunctionCall(
        CallLoc: Loc, Callee: FD, ObjectArg: (MD && MD->isImplicitObjectMemberFunction()) ? &This : nullptr,
        E: &VIE, Args, Call: CallRef(), Body: FD->getBody(), Info, Result&: Scratch,
        /*ResultSlot=*/nullptr);
  }

  // The evaluated value is irrelevant; only the diagnostics matter.
  return Diags.empty();
}
21739
/// Check whether E, evaluated in an unevaluated constant-expression context
/// inside FD (with FD's parameters treated as unknown), could potentially be
/// a constant expression. Diagnostic notes for failures are appended to
/// Diags; success is reported as "no diagnostics were produced".
bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
                                              const FunctionDecl *FD,
                                              SmallVectorImpl<
                                                PartialDiagnosticAt> &Diags) {
  assert(!E->isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  Expr::EvalStatus Status;
  Status.Diag = &Diags;

  EvalInfo Info(FD->getASTContext(), Status,
                EvaluationMode::ConstantExpressionUnevaluated);
  Info.InConstantContext = true;
  Info.CheckingPotentialConstantExpression = true;

  // Dispatch to the bytecode interpreter when it is enabled.
  if (Info.EnableNewConstInterp) {
    Info.Ctx.getInterpContext().isPotentialConstantExprUnevaluated(Parent&: Info, E, FD);
    return Diags.empty();
  }

  // Fabricate a call stack frame to give the arguments a plausible cover story.
  CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr,
                       /*CallExpr=*/nullptr, CallRef());

  // The evaluated value is discarded; only the diagnostics matter.
  APValue ResultScratch;
  Evaluate(Result&: ResultScratch, Info, E);
  return Diags.empty();
}
21768
21769std::optional<uint64_t> Expr::tryEvaluateObjectSize(const ASTContext &Ctx,
21770 unsigned Type) const {
21771 if (!getType()->isPointerType())
21772 return std::nullopt;
21773
21774 Expr::EvalStatus Status;
21775 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
21776 if (Info.EnableNewConstInterp)
21777 return Info.Ctx.getInterpContext().tryEvaluateObjectSize(Parent&: Info, E: this, Kind: Type);
21778 return tryEvaluateBuiltinObjectSize(E: this, Type, Info);
21779}
21780
21781static std::optional<uint64_t>
21782EvaluateBuiltinStrLen(const Expr *E, EvalInfo &Info,
21783 std::string *StringResult) {
21784 if (!E->getType()->hasPointerRepresentation() || !E->isPRValue())
21785 return std::nullopt;
21786
21787 LValue String;
21788
21789 if (!EvaluatePointer(E, Result&: String, Info))
21790 return std::nullopt;
21791
21792 QualType CharTy = E->getType()->getPointeeType();
21793
21794 // Fast path: if it's a string literal, search the string value.
21795 if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
21796 Val: String.getLValueBase().dyn_cast<const Expr *>())) {
21797 StringRef Str = S->getBytes();
21798 int64_t Off = String.Offset.getQuantity();
21799 if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
21800 S->getCharByteWidth() == 1 &&
21801 // FIXME: Add fast-path for wchar_t too.
21802 Info.Ctx.hasSameUnqualifiedType(T1: CharTy, T2: Info.Ctx.CharTy)) {
21803 Str = Str.substr(Start: Off);
21804
21805 StringRef::size_type Pos = Str.find(C: 0);
21806 if (Pos != StringRef::npos)
21807 Str = Str.substr(Start: 0, N: Pos);
21808
21809 if (StringResult)
21810 *StringResult = Str;
21811 return Str.size();
21812 }
21813
21814 // Fall through to slow path.
21815 }
21816
21817 // Slow path: scan the bytes of the string looking for the terminating 0.
21818 for (uint64_t Strlen = 0; /**/; ++Strlen) {
21819 APValue Char;
21820 if (!handleLValueToRValueConversion(Info, Conv: E, Type: CharTy, LVal: String, RVal&: Char) ||
21821 !Char.isInt())
21822 return std::nullopt;
21823 if (!Char.getInt())
21824 return Strlen;
21825 else if (StringResult)
21826 StringResult->push_back(c: Char.getInt().getExtValue());
21827 if (!HandleLValueArrayAdjustment(Info, E, LVal&: String, EltTy: CharTy, Adjustment: 1))
21828 return std::nullopt;
21829 }
21830}
21831
21832std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const {
21833 Expr::EvalStatus Status;
21834 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
21835 std::string StringResult;
21836
21837 if (Info.EnableNewConstInterp) {
21838 if (!Info.Ctx.getInterpContext().evaluateString(Parent&: Info, E: this, Result&: StringResult))
21839 return std::nullopt;
21840 return StringResult;
21841 }
21842
21843 if (EvaluateBuiltinStrLen(E: this, Info, StringResult: &StringResult))
21844 return StringResult;
21845 return std::nullopt;
21846}
21847
/// Shared implementation for Expr::EvaluateCharRangeAsString. Evaluates
/// SizeExpression to obtain a length, then reads that many characters
/// starting at the pointer produced by PtrExpression. T is either
/// std::string (characters are appended as chars) or APValue (an array of
/// the element values is built in place).
template <typename T>
static bool EvaluateCharRangeAsStringImpl(const Expr *, T &Result,
                                          const Expr *SizeExpression,
                                          const Expr *PtrExpression,
                                          ASTContext &Ctx,
                                          Expr::EvalResult &Status) {
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
  Info.InConstantContext = true;

  // Dispatch to the bytecode interpreter when it is enabled.
  if (Info.EnableNewConstInterp)
    return Info.Ctx.getInterpContext().evaluateCharRange(Info, SizeExpression,
                                                         PtrExpression, Result);

  LValue String;
  FullExpressionRAII Scope(Info);
  APSInt SizeValue;
  if (!::EvaluateInteger(E: SizeExpression, Result&: SizeValue, Info))
    return false;

  uint64_t Size = SizeValue.getZExtValue();

  // FIXME: better protect against invalid or excessive sizes
  if constexpr (std::is_same_v<APValue, T>)
    // APValue target: allocate the array elements up front.
    Result = APValue(APValue::UninitArray{}, Size, Size);
  else {
    // std::string target: reserve when the requested size is representable.
    if (Size < Result.max_size())
      Result.reserve(Size);
  }
  if (!::EvaluatePointer(E: PtrExpression, Result&: String, Info))
    return false;

  // Walk the range one element at a time, reading each character through an
  // lvalue-to-rvalue conversion and advancing the pointer by one element.
  QualType CharTy = PtrExpression->getType()->getPointeeType();
  for (uint64_t I = 0; I < Size; ++I) {
    APValue Char;
    if (!handleLValueToRValueConversion(Info, Conv: PtrExpression, Type: CharTy, LVal: String,
                                        RVal&: Char))
      return false;

    if constexpr (std::is_same_v<APValue, T>) {
      Result.getArrayInitializedElt(I) = std::move(Char);
    } else {
      APSInt C = Char.getInt();

      assert(C.getBitWidth() <= 8 &&
             "string element not representable in char");

      Result.push_back(static_cast<char>(C.getExtValue()));
    }

    if (!HandleLValueArrayAdjustment(Info, E: PtrExpression, LVal&: String, EltTy: CharTy, Adjustment: 1))
      return false;
  }

  // Run end-of-full-expression cleanups and verify no allocation leaked.
  return Scope.destroy() && CheckMemoryLeaks(Info);
}
21903
21904bool Expr::EvaluateCharRangeAsString(std::string &Result,
21905 const Expr *SizeExpression,
21906 const Expr *PtrExpression, ASTContext &Ctx,
21907 EvalResult &Status) const {
21908 return EvaluateCharRangeAsStringImpl(this, Result, SizeExpression,
21909 PtrExpression, Ctx, Status);
21910}
21911
21912bool Expr::EvaluateCharRangeAsString(APValue &Result,
21913 const Expr *SizeExpression,
21914 const Expr *PtrExpression, ASTContext &Ctx,
21915 EvalResult &Status) const {
21916 return EvaluateCharRangeAsStringImpl(this, Result, SizeExpression,
21917 PtrExpression, Ctx, Status);
21918}
21919
21920std::optional<uint64_t> Expr::tryEvaluateStrLen(const ASTContext &Ctx) const {
21921 Expr::EvalStatus Status;
21922 EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
21923
21924 if (Info.EnableNewConstInterp)
21925 return Info.Ctx.getInterpContext().evaluateStrlen(Parent&: Info, E: this);
21926 return EvaluateBuiltinStrLen(E: this, Info);
21927}
21928
21929namespace {
/// Subobject access handler for __builtin_is_within_lifetime, passed to
/// findSubobject below: successfully reaching any subobject means the object
/// is within its lifetime, so `found` unconditionally reports true, while an
/// access failure yields std::nullopt (evaluation failure).
struct IsWithinLifetimeHandler {
  EvalInfo &Info;
  // Access kind used by the subobject walk for diagnostics/checks.
  static constexpr AccessKinds AccessKind = AccessKinds::AK_IsWithinLifetime;
  using result_type = std::optional<bool>;
  std::optional<bool> failed() { return std::nullopt; }
  template <typename T>
  std::optional<bool> found(T &Subobj, QualType SubobjType) {
    return true;
  }
};
21940
/// Evaluate __builtin_is_within_lifetime (the implementation of C++2c
/// std::is_within_lifetime): determine whether the pointer argument points to
/// an object within its lifetime. Returns std::nullopt when the call cannot
/// be evaluated (with a diagnostic emitted for ill-formed uses).
std::optional<bool> EvaluateBuiltinIsWithinLifetime(IntExprEvaluator &IEE,
                                                    const CallExpr *E) {
  EvalInfo &Info = IEE.Info;
  // Sometimes this is called during some sorts of constant folding / early
  // evaluation. These are meant for non-constant expressions and are not
  // necessary since this consteval builtin will never be evaluated at runtime.
  // Just fail to evaluate when not in a constant context.
  if (!Info.InConstantContext)
    return std::nullopt;
  assert(E->getBuiltinCallee() == Builtin::BI__builtin_is_within_lifetime);
  const Expr *Arg = E->getArg(Arg: 0);
  if (Arg->isValueDependent())
    return std::nullopt;
  LValue Val;
  if (!EvaluatePointer(E: Arg, Result&: Val, Info))
    return std::nullopt;

  // A constexpr-unknown pointer is treated as pointing at a live object.
  if (Val.allowConstexprUnknown())
    return true;

  // Emit the ill-formed-call diagnostic. When invoked via the std wrapper,
  // point at the user's call site rather than inside std::is_within_lifetime.
  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = Info.CurrentCall->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr(Str: "is_within_lifetime");
    }
    Info.CCEDiag(Loc: CalledFromStd ? Info.CurrentCall->getCallRange().getBegin()
                            : E->getExprLoc(),
                 DiagId: diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return std::nullopt;
  };
  // C++2c [meta.const.eval]p4:
  // During the evaluation of an expression E as a core constant expression, a
  // call to this function is ill-formed unless p points to an object that is
  // usable in constant expressions or whose complete object's lifetime began
  // within E.

  // Make sure it points to an object
  // nullptr does not point to an object
  if (Val.isNullPointer() || Val.getLValueBase().isNull())
    return Error(0);
  QualType T = Val.getLValueBase().getType();
  assert(!T->isFunctionType() &&
         "Pointers to functions should have been typed as function pointers "
         "which would have been rejected earlier");
  assert(T->isObjectType());
  // Hypothetical array element is not an object
  if (Val.getLValueDesignator().isOnePastTheEnd())
    return Error(1);
  assert(Val.getLValueDesignator().isValidSubobject() &&
         "Unchecked case for valid subobject");
  // All other ill-formed values should have failed EvaluatePointer, so the
  // object should be a pointer to an object that is usable in a constant
  // expression or whose complete lifetime began within the expression
  CompleteObject CO =
      findCompleteObject(Info, E, AK: AccessKinds::AK_IsWithinLifetime, LVal: Val, LValType: T);
  // The lifetime hasn't begun yet if we are still evaluating the
  // initializer ([basic.life]p(1.2))
  if (Info.EvaluatingDeclValue && CO.Value == Info.EvaluatingDeclValue)
    return Error(2);

  // An inaccessible complete object means the pointee is not within lifetime.
  if (!CO)
    return false;
  // Walk down to the designated subobject; reaching it means "alive".
  IsWithinLifetimeHandler handler{.Info: Info};
  return findSubobject(Info, E, Obj: CO, Sub: Val.getLValueDesignator(), handler);
}
22011} // namespace
22012