| 1 | //===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the Expr constant evaluator. |
| 10 | // |
| 11 | // Constant expression evaluation produces four main results: |
| 12 | // |
| 13 | // * A success/failure flag indicating whether constant folding was successful. |
| 14 | // This is the 'bool' return value used by most of the code in this file. A |
| 15 | // 'false' return value indicates that constant folding has failed, and any |
| 16 | // appropriate diagnostic has already been produced. |
| 17 | // |
| 18 | // * An evaluated result, valid only if constant folding has not failed. |
| 19 | // |
| 20 | // * A flag indicating if evaluation encountered (unevaluated) side-effects. |
| 21 | // These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1), |
| 22 | // where it is possible to determine the evaluated result regardless. |
| 23 | // |
| 24 | // * A set of notes indicating why the evaluation was not a constant expression |
| 25 | // (under the C++11 / C++1y rules only, at the moment), or, if folding failed |
| 26 | // too, why the expression could not be folded. |
| 27 | // |
| 28 | // If we are checking for a potential constant expression, failure to constant |
| 29 | // fold a potential constant sub-expression will be indicated by a 'false' |
| 30 | // return value (the expression could not be folded) and no diagnostic (the |
| 31 | // expression is not necessarily non-constant). |
| 32 | // |
| 33 | //===----------------------------------------------------------------------===// |
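| | // Illustrative sketch (not part of the evaluator itself): an expression |
| | // such as (sideEffect(), 42) can be folded to the value 42 even though the |
| | // call cannot be evaluated; the fold succeeds, and the side-effects flag |
| | // records that an unevaluated side-effect was encountered. |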
| 34 | |
| 35 | #include "ByteCode/Context.h" |
| 36 | #include "ByteCode/Frame.h" |
| 37 | #include "ByteCode/State.h" |
| 38 | #include "ExprConstShared.h" |
| 39 | #include "clang/AST/APValue.h" |
| 40 | #include "clang/AST/ASTContext.h" |
| 41 | #include "clang/AST/ASTLambda.h" |
| 42 | #include "clang/AST/Attr.h" |
| 43 | #include "clang/AST/CXXInheritance.h" |
| 44 | #include "clang/AST/CharUnits.h" |
| 45 | #include "clang/AST/CurrentSourceLocExprScope.h" |
| 46 | #include "clang/AST/Expr.h" |
| 47 | #include "clang/AST/InferAlloc.h" |
| 48 | #include "clang/AST/OSLog.h" |
| 49 | #include "clang/AST/OptionalDiagnostic.h" |
| 50 | #include "clang/AST/RecordLayout.h" |
| 51 | #include "clang/AST/StmtVisitor.h" |
| 52 | #include "clang/AST/Type.h" |
| 53 | #include "clang/AST/TypeLoc.h" |
| 54 | #include "clang/Basic/Builtins.h" |
| 55 | #include "clang/Basic/DiagnosticSema.h" |
| 56 | #include "clang/Basic/TargetBuiltins.h" |
| 57 | #include "clang/Basic/TargetInfo.h" |
| 58 | #include "llvm/ADT/APFixedPoint.h" |
| 59 | #include "llvm/ADT/Sequence.h" |
| 60 | #include "llvm/ADT/SmallBitVector.h" |
| 61 | #include "llvm/ADT/StringExtras.h" |
| 62 | #include "llvm/Support/Casting.h" |
| 63 | #include "llvm/Support/Debug.h" |
| 64 | #include "llvm/Support/SaveAndRestore.h" |
| 65 | #include "llvm/Support/SipHash.h" |
| 66 | #include "llvm/Support/TimeProfiler.h" |
| 67 | #include "llvm/Support/raw_ostream.h" |
| 68 | #include <cstring> |
| 69 | #include <functional> |
| 70 | #include <limits> |
| 71 | #include <optional> |
| 72 | |
| 73 | #define DEBUG_TYPE "exprconstant" |
| 74 | |
| 75 | using namespace clang; |
| 76 | using llvm::APFixedPoint; |
| 77 | using llvm::APInt; |
| 78 | using llvm::APSInt; |
| 79 | using llvm::APFloat; |
| 80 | using llvm::FixedPointSemantics; |
| 81 | |
| 82 | namespace { |
| 83 | struct LValue; |
| 84 | class CallStackFrame; |
| 85 | class EvalInfo; |
| 86 | |
| 87 | using SourceLocExprScopeGuard = |
| 88 | CurrentSourceLocExprScope::SourceLocExprScopeGuard; |
| 89 | |
| 90 | static QualType getType(APValue::LValueBase B) { |
| 91 | return B.getType(); |
| 92 | } |
| 93 | |
| 94 | /// Get an LValue path entry, which is known to not be an array index, as a |
| 95 | /// field declaration. |
| 96 | static const FieldDecl *getAsField(APValue::LValuePathEntry E) { |
| 97 | return dyn_cast_or_null<FieldDecl>(Val: E.getAsBaseOrMember().getPointer()); |
| 98 | } |
| 99 | /// Get an LValue path entry, which is known to not be an array index, as a |
| 100 | /// base class declaration. |
| 101 | static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) { |
| 102 | return dyn_cast_or_null<CXXRecordDecl>(Val: E.getAsBaseOrMember().getPointer()); |
| 103 | } |
| 104 | /// Determine whether this LValue path entry for a base class names a virtual |
| 105 | /// base class. |
| 106 | static bool isVirtualBaseClass(APValue::LValuePathEntry E) { |
| 107 | return E.getAsBaseOrMember().getInt(); |
| 108 | } |
| 109 | |
| 110 | /// Given an expression, determine the type used to store the result of |
| 111 | /// evaluating that expression. |
| 112 | static QualType getStorageType(const ASTContext &Ctx, const Expr *E) { |
| 113 | if (E->isPRValue()) |
| 114 | return E->getType(); |
| 115 | return Ctx.getLValueReferenceType(T: E->getType()); |
| 116 | } |
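| | // For example, a prvalue of type 'int' is stored as 'int', while a glvalue |
| | // of type 'int' is stored as 'int &'. |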
| 117 | |
| 118 | /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr. |
| 119 | /// This will look through a single cast. |
| 120 | /// |
| 121 | /// Returns null if we couldn't unwrap a function with alloc_size. |
| 122 | static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) { |
| 123 | if (!E->getType()->isPointerType()) |
| 124 | return nullptr; |
| 125 | |
| 126 | E = E->IgnoreParens(); |
| 127 | // If we're doing a variable assignment from e.g. malloc(N), there will |
| 128 | // probably be a cast of some kind. In exotic cases, we might also see a |
| 129 | // top-level ExprWithCleanups. Ignore them either way. |
| 130 | if (const auto *FE = dyn_cast<FullExpr>(Val: E)) |
| 131 | E = FE->getSubExpr()->IgnoreParens(); |
| 132 | |
| 133 | if (const auto *Cast = dyn_cast<CastExpr>(Val: E)) |
| 134 | E = Cast->getSubExpr()->IgnoreParens(); |
| 135 | |
| 136 | if (const auto *CE = dyn_cast<CallExpr>(Val: E)) |
| 137 | return CE->getCalleeAllocSizeAttr() ? CE : nullptr; |
| 138 | return nullptr; |
| 139 | } |
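| | // Illustrative example ('my_malloc' is a hypothetical function declared |
| | // with __attribute__((alloc_size(1)))): for an initializer such as |
| | // |
| | //   void *p = (int *)my_malloc(n); |
| | // |
| | // the evaluated expression is a CastExpr wrapping the CallExpr, and the |
| | // call is unwrapped by the function above. |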
| 140 | |
| 141 | /// Determines whether or not the given Base contains a call to a function |
| 142 | /// with the alloc_size attribute. |
| 143 | static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) { |
| 144 | const auto *E = Base.dyn_cast<const Expr *>(); |
| 145 | return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E); |
| 146 | } |
| 147 | |
| 148 | /// Determines whether the given kind of constant expression is only ever |
| 149 | /// used for name mangling. If so, it's permitted to reference things that we |
| 150 | /// can't generate code for (in particular, dllimported functions). |
| 151 | static bool isForManglingOnly(ConstantExprKind Kind) { |
| 152 | switch (Kind) { |
| 153 | case ConstantExprKind::Normal: |
| 154 | case ConstantExprKind::ClassTemplateArgument: |
| 155 | case ConstantExprKind::ImmediateInvocation: |
| 156 | // Note that non-type template arguments of class type are emitted as |
| 157 | // template parameter objects. |
| 158 | return false; |
| 159 | |
| 160 | case ConstantExprKind::NonClassTemplateArgument: |
| 161 | return true; |
| 162 | } |
| 163 | llvm_unreachable("unknown ConstantExprKind" ); |
| 164 | } |
| 165 | |
| 166 | static bool isTemplateArgument(ConstantExprKind Kind) { |
| 167 | switch (Kind) { |
| 168 | case ConstantExprKind::Normal: |
| 169 | case ConstantExprKind::ImmediateInvocation: |
| 170 | return false; |
| 171 | |
| 172 | case ConstantExprKind::ClassTemplateArgument: |
| 173 | case ConstantExprKind::NonClassTemplateArgument: |
| 174 | return true; |
| 175 | } |
| 176 | llvm_unreachable("unknown ConstantExprKind" ); |
| 177 | } |
| 178 | |
| 179 | /// The bound to claim that an array of unknown bound has. |
| 180 | /// The value in MostDerivedArraySize is undefined in this case. So, set it |
| 181 | /// to an arbitrary value that's likely to loudly break things if it's used. |
| 182 | static const uint64_t AssumedSizeForUnsizedArray = |
| 183 | std::numeric_limits<uint64_t>::max() / 2; |
| 184 | |
| 185 | /// Determines if an LValue with the given LValueBase will have an unsized |
| 186 | /// array in its designator. |
| 187 | /// Find the path length and type of the most-derived subobject in the given |
| 188 | /// path, and find the size of the containing array, if any. |
| 189 | static unsigned |
| 190 | findMostDerivedSubobject(const ASTContext &Ctx, APValue::LValueBase Base, |
| 191 | ArrayRef<APValue::LValuePathEntry> Path, |
| 192 | uint64_t &ArraySize, QualType &Type, bool &IsArray, |
| 193 | bool &FirstEntryIsUnsizedArray) { |
| 194 | // This only accepts LValueBases from APValues, and APValues don't support |
| 195 | // arrays that lack size info. |
| 196 | assert(!isBaseAnAllocSizeCall(Base) && |
| 197 | "Unsized arrays shouldn't appear here" ); |
| 198 | unsigned MostDerivedLength = 0; |
| 199 | // The type of Base is a reference type if the base is a constexpr-unknown |
| 200 | // variable. In that case, look through the reference type. |
| 201 | Type = getType(B: Base).getNonReferenceType(); |
| 202 | |
| 203 | for (unsigned I = 0, N = Path.size(); I != N; ++I) { |
| 204 | if (Type->isArrayType()) { |
| 205 | const ArrayType *AT = Ctx.getAsArrayType(T: Type); |
| 206 | Type = AT->getElementType(); |
| 207 | MostDerivedLength = I + 1; |
| 208 | IsArray = true; |
| 209 | |
| 210 | if (auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) { |
| 211 | ArraySize = CAT->getZExtSize(); |
| 212 | } else { |
| 213 | assert(I == 0 && "unexpected unsized array designator" ); |
| 214 | FirstEntryIsUnsizedArray = true; |
| 215 | ArraySize = AssumedSizeForUnsizedArray; |
| 216 | } |
| 217 | } else if (Type->isAnyComplexType()) { |
| 218 | const ComplexType *CT = Type->castAs<ComplexType>(); |
| 219 | Type = CT->getElementType(); |
| 220 | ArraySize = 2; |
| 221 | MostDerivedLength = I + 1; |
| 222 | IsArray = true; |
| 223 | } else if (const auto *VT = Type->getAs<VectorType>()) { |
| 224 | Type = VT->getElementType(); |
| 225 | ArraySize = VT->getNumElements(); |
| 226 | MostDerivedLength = I + 1; |
| 227 | IsArray = true; |
| 228 | } else if (const FieldDecl *FD = getAsField(E: Path[I])) { |
| 229 | Type = FD->getType(); |
| 230 | ArraySize = 0; |
| 231 | MostDerivedLength = I + 1; |
| 232 | IsArray = false; |
| 233 | } else { |
| 234 | // Path[I] describes a base class. |
| 235 | ArraySize = 0; |
| 236 | IsArray = false; |
| 237 | } |
| 238 | } |
| 239 | return MostDerivedLength; |
| 240 | } |
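| | // Illustrative example: for a base declared as 'int arr[3][4]' and the path |
| | // {1, 2}, both array levels are walked; the function returns a most-derived |
| | // path length of 2 with Type == 'int', ArraySize == 4, and IsArray == true. |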
| 241 | |
| 242 | /// A path from a glvalue to a subobject of that glvalue. |
| 243 | struct SubobjectDesignator { |
| 244 | /// True if the subobject was named in a manner not supported by C++11. Such |
| 245 | /// lvalues can still be folded, but they are not core constant expressions |
| 246 | /// and we cannot perform lvalue-to-rvalue conversions on them. |
| 247 | LLVM_PREFERRED_TYPE(bool) |
| 248 | unsigned Invalid : 1; |
| 249 | |
| 250 | /// Is this a pointer one past the end of an object? |
| 251 | LLVM_PREFERRED_TYPE(bool) |
| 252 | unsigned IsOnePastTheEnd : 1; |
| 253 | |
| 254 | /// Indicator of whether the first entry is an unsized array. |
| 255 | LLVM_PREFERRED_TYPE(bool) |
| 256 | unsigned FirstEntryIsAnUnsizedArray : 1; |
| 257 | |
| 258 | /// Indicator of whether the most-derived object is an array element. |
| 259 | LLVM_PREFERRED_TYPE(bool) |
| 260 | unsigned MostDerivedIsArrayElement : 1; |
| 261 | |
| 262 | /// The length of the path to the most-derived object of which this is a |
| 263 | /// subobject. |
| 264 | unsigned MostDerivedPathLength : 28; |
| 265 | |
| 266 | /// The size of the array of which the most-derived object is an element. |
| 267 | /// This will always be 0 if the most-derived object is not an array |
| 268 | /// element. 0 is not an indicator of whether or not the most-derived object |
| 269 | /// is an array, however, because 0-length arrays are allowed. |
| 270 | /// |
| 271 | /// If the current array is an unsized array, the value of this is |
| 272 | /// undefined. |
| 273 | uint64_t MostDerivedArraySize; |
| 274 | /// The type of the most derived object referred to by this address. |
| 275 | QualType MostDerivedType; |
| 276 | |
| 277 | typedef APValue::LValuePathEntry PathEntry; |
| 278 | |
| 279 | /// The entries on the path from the glvalue to the designated subobject. |
| 280 | SmallVector<PathEntry, 8> Entries; |
| 281 | |
| 282 | SubobjectDesignator() : Invalid(true) {} |
| 283 | |
| 284 | explicit SubobjectDesignator(QualType T) |
| 285 | : Invalid(false), IsOnePastTheEnd(false), |
| 286 | FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false), |
| 287 | MostDerivedPathLength(0), MostDerivedArraySize(0), |
| 288 | MostDerivedType(T.isNull() ? QualType() : T.getNonReferenceType()) {} |
| 289 | |
| 290 | SubobjectDesignator(const ASTContext &Ctx, const APValue &V) |
| 291 | : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false), |
| 292 | FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false), |
| 293 | MostDerivedPathLength(0), MostDerivedArraySize(0) { |
| 294 | assert(V.isLValue() && "Non-LValue used to make an LValue designator?" ); |
| 295 | if (!Invalid) { |
| 296 | IsOnePastTheEnd = V.isLValueOnePastTheEnd(); |
| 297 | llvm::append_range(C&: Entries, R: V.getLValuePath()); |
| 298 | if (V.getLValueBase()) { |
| 299 | bool IsArray = false; |
| 300 | bool FirstIsUnsizedArray = false; |
| 301 | MostDerivedPathLength = findMostDerivedSubobject( |
| 302 | Ctx, Base: V.getLValueBase(), Path: V.getLValuePath(), ArraySize&: MostDerivedArraySize, |
| 303 | Type&: MostDerivedType, IsArray, FirstEntryIsUnsizedArray&: FirstIsUnsizedArray); |
| 304 | MostDerivedIsArrayElement = IsArray; |
| 305 | FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray; |
| 306 | } |
| 307 | } |
| 308 | } |
| 309 | |
| 310 | void truncate(ASTContext &Ctx, APValue::LValueBase Base, |
| 311 | unsigned NewLength) { |
| 312 | if (Invalid) |
| 313 | return; |
| 314 | |
| 315 | assert(Base && "cannot truncate path for null pointer" ); |
| 316 | assert(NewLength <= Entries.size() && "not a truncation" ); |
| 317 | |
| 318 | if (NewLength == Entries.size()) |
| 319 | return; |
| 320 | Entries.resize(N: NewLength); |
| 321 | |
| 322 | bool IsArray = false; |
| 323 | bool FirstIsUnsizedArray = false; |
| 324 | MostDerivedPathLength = findMostDerivedSubobject( |
| 325 | Ctx, Base, Path: Entries, ArraySize&: MostDerivedArraySize, Type&: MostDerivedType, IsArray, |
| 326 | FirstEntryIsUnsizedArray&: FirstIsUnsizedArray); |
| 327 | MostDerivedIsArrayElement = IsArray; |
| 328 | FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray; |
| 329 | } |
| 330 | |
| 331 | void setInvalid() { |
| 332 | Invalid = true; |
| 333 | Entries.clear(); |
| 334 | } |
| 335 | |
| 336 | /// Determine whether the most derived subobject is an array without a |
| 337 | /// known bound. |
| 338 | bool isMostDerivedAnUnsizedArray() const { |
| 339 | assert(!Invalid && "Calling this makes no sense on invalid designators" ); |
| 340 | return Entries.size() == 1 && FirstEntryIsAnUnsizedArray; |
| 341 | } |
| 342 | |
| 343 | /// Determine what the most derived array's size is. Results in an assertion |
| 344 | /// failure if the most derived array lacks a size. |
| 345 | uint64_t getMostDerivedArraySize() const { |
| 346 | assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size" ); |
| 347 | return MostDerivedArraySize; |
| 348 | } |
| 349 | |
| 350 | /// Determine whether this is a one-past-the-end pointer. |
| 351 | bool isOnePastTheEnd() const { |
| 352 | assert(!Invalid); |
| 353 | if (IsOnePastTheEnd) |
| 354 | return true; |
| 355 | if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement && |
| 356 | Entries[MostDerivedPathLength - 1].getAsArrayIndex() == |
| 357 | MostDerivedArraySize) |
| 358 | return true; |
| 359 | return false; |
| 360 | } |
| 361 | |
| 362 | /// Get the range of valid index adjustments in the form |
| 363 | /// {maximum value that can be subtracted from this pointer, |
| 364 | /// maximum value that can be added to this pointer} |
| 365 | std::pair<uint64_t, uint64_t> validIndexAdjustments() { |
| 366 | if (Invalid || isMostDerivedAnUnsizedArray()) |
| 367 | return {0, 0}; |
| 368 | |
| 369 | // [expr.add]p4: For the purposes of these operators, a pointer to a |
| 370 | // nonarray object behaves the same as a pointer to the first element of |
| 371 | // an array of length one with the type of the object as its element type. |
| 372 | bool IsArray = MostDerivedPathLength == Entries.size() && |
| 373 | MostDerivedIsArrayElement; |
| 374 | uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex() |
| 375 | : (uint64_t)IsOnePastTheEnd; |
| 376 | uint64_t ArraySize = |
| 377 | IsArray ? getMostDerivedArraySize() : (uint64_t)1; |
| 378 | return {ArrayIndex, ArraySize - ArrayIndex}; |
| 379 | } |
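| | // Illustrative example: for a designator naming a[3] within 'int a[10]', |
| | // the result is {3, 7}; for a pointer to a non-array object it is {0, 1}, |
| | // or {1, 0} if the pointer is one past the end. |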
| 380 | |
| 381 | /// Check that this refers to a valid subobject. |
| 382 | bool isValidSubobject() const { |
| 383 | if (Invalid) |
| 384 | return false; |
| 385 | return !isOnePastTheEnd(); |
| 386 | } |
| 387 | /// Check that this refers to a valid subobject, and if not, produce a |
| 388 | /// relevant diagnostic and set the designator as invalid. |
| 389 | bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK); |
| 390 | |
| 391 | /// Get the type of the designated object. |
| 392 | QualType getType(ASTContext &Ctx) const { |
| 393 | assert(!Invalid && "invalid designator has no subobject type" ); |
| 394 | return MostDerivedPathLength == Entries.size() |
| 395 | ? MostDerivedType |
| 396 | : Ctx.getCanonicalTagType(TD: getAsBaseClass(E: Entries.back())); |
| 397 | } |
| 398 | |
| 399 | /// Update this designator to refer to the first element within this array. |
| 400 | void addArrayUnchecked(const ConstantArrayType *CAT) { |
| 401 | Entries.push_back(Elt: PathEntry::ArrayIndex(Index: 0)); |
| 402 | |
| 403 | // This is a most-derived object. |
| 404 | MostDerivedType = CAT->getElementType(); |
| 405 | MostDerivedIsArrayElement = true; |
| 406 | MostDerivedArraySize = CAT->getZExtSize(); |
| 407 | MostDerivedPathLength = Entries.size(); |
| 408 | } |
| 409 | /// Update this designator to refer to the first element within the array of |
| 410 | /// elements of type T. This is an array of unknown size. |
| 411 | void addUnsizedArrayUnchecked(QualType ElemTy) { |
| 412 | Entries.push_back(Elt: PathEntry::ArrayIndex(Index: 0)); |
| 413 | |
| 414 | MostDerivedType = ElemTy; |
| 415 | MostDerivedIsArrayElement = true; |
| 416 | // The value in MostDerivedArraySize is undefined in this case. So, set it |
| 417 | // to an arbitrary value that's likely to loudly break things if it's |
| 418 | // used. |
| 419 | MostDerivedArraySize = AssumedSizeForUnsizedArray; |
| 420 | MostDerivedPathLength = Entries.size(); |
| 421 | } |
| 422 | /// Update this designator to refer to the given base or member of this |
| 423 | /// object. |
| 424 | void addDeclUnchecked(const Decl *D, bool Virtual = false) { |
| 425 | Entries.push_back(Elt: APValue::BaseOrMemberType(D, Virtual)); |
| 426 | |
| 427 | // If this isn't a base class, it's a new most-derived object. |
| 428 | if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: D)) { |
| 429 | MostDerivedType = FD->getType(); |
| 430 | MostDerivedIsArrayElement = false; |
| 431 | MostDerivedArraySize = 0; |
| 432 | MostDerivedPathLength = Entries.size(); |
| 433 | } |
| 434 | } |
| 435 | /// Update this designator to refer to the given complex component. |
| 436 | void addComplexUnchecked(QualType EltTy, bool Imag) { |
| 437 | Entries.push_back(Elt: PathEntry::ArrayIndex(Index: Imag)); |
| 438 | |
| 439 | // This is technically a most-derived object, though in practice this |
| 440 | // is unlikely to matter. |
| 441 | MostDerivedType = EltTy; |
| 442 | MostDerivedIsArrayElement = true; |
| 443 | MostDerivedArraySize = 2; |
| 444 | MostDerivedPathLength = Entries.size(); |
| 445 | } |
| 446 | |
| 447 | void addVectorElementUnchecked(QualType EltTy, uint64_t Size, |
| 448 | uint64_t Idx) { |
| 449 | Entries.push_back(Elt: PathEntry::ArrayIndex(Index: Idx)); |
| 450 | MostDerivedType = EltTy; |
| 451 | MostDerivedPathLength = Entries.size(); |
| 452 | MostDerivedArraySize = 0; |
| 453 | MostDerivedIsArrayElement = false; |
| 454 | } |
| 455 | |
| 456 | void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E); |
| 457 | void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, |
| 458 | const APSInt &N); |
| 459 | /// Add N to the address of this subobject. |
| 460 | void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N, const LValue &LV); |
| 461 | }; |
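| | // Illustrative example: for an lvalue such as 's.arr[2]' where 's' has type |
| | // 'struct S { int arr[4]; }', Entries holds the member 'arr' followed by |
| | // array index 2, MostDerivedType is 'int', MostDerivedIsArrayElement is |
| | // true, and MostDerivedArraySize is 4. |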
| 462 | |
| 463 | /// A scope at the end of which an object can need to be destroyed. |
| 464 | enum class ScopeKind { |
| 465 | Block, |
| 466 | FullExpression, |
| 467 | Call |
| 468 | }; |
| 469 | |
| 470 | /// A reference to a particular call and its arguments. |
| 471 | struct CallRef { |
| 472 | CallRef() : OrigCallee(), CallIndex(0), Version() {} |
| 473 | CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version) |
| 474 | : OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {} |
| 475 | |
| 476 | explicit operator bool() const { return OrigCallee; } |
| 477 | |
| 478 | /// Get the parameter that the caller initialized, corresponding to the |
| 479 | /// given parameter in the callee. |
| 480 | const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const { |
| 481 | return OrigCallee ? OrigCallee->getParamDecl(i: PVD->getFunctionScopeIndex()) |
| 482 | : PVD; |
| 483 | } |
| 484 | |
| 485 | /// The callee at the point where the arguments were evaluated. This might |
| 486 | /// be different from the actual callee (a different redeclaration, or a |
| 487 | /// virtual override), but this function's parameters are the ones that |
| 488 | /// appear in the parameter map. |
| 489 | const FunctionDecl *OrigCallee; |
| 490 | /// The call index of the frame that holds the argument values. |
| 491 | unsigned CallIndex; |
| 492 | /// The version of the parameters corresponding to this call. |
| 493 | unsigned Version; |
| 494 | }; |
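| | // Illustrative note: the APValues for a call's arguments live in the frame |
| | // whose Index equals CallIndex (the caller's frame), stored in its |
| | // Temporaries map keyed by the callee's ParmVarDecls and this Version. |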
| 495 | |
| 496 | /// A stack frame in the constexpr call stack. |
| 497 | class CallStackFrame : public interp::Frame { |
| 498 | public: |
| 499 | EvalInfo &Info; |
| 500 | |
| 501 | /// Parent - The caller of this stack frame. |
| 502 | CallStackFrame *Caller; |
| 503 | |
| 504 | /// Callee - The function which was called. |
| 505 | const FunctionDecl *Callee; |
| 506 | |
| 507 | /// This - The binding for the this pointer in this call, if any. |
| 508 | const LValue *This; |
| 509 | |
| 510 | /// CallExpr - The syntactical structure of member function calls |
| 511 | const Expr *CallExpr; |
| 512 | |
| 513 | /// Information on how to find the arguments to this call. Our arguments |
| 514 | /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a |
| 515 | /// key and this value as the version. |
| 516 | CallRef Arguments; |
| 517 | |
| 518 | /// Source location information about the default argument or default |
| 519 | /// initializer expression we're evaluating, if any. |
| 520 | CurrentSourceLocExprScope CurSourceLocExprScope; |
| 521 | |
| 522 | // Note that we intentionally use std::map here so that references to |
| 523 | // values are stable. |
| 524 | typedef std::pair<const void *, unsigned> MapKeyTy; |
| 525 | typedef std::map<MapKeyTy, APValue> MapTy; |
| 526 | /// Temporaries - Temporary lvalues materialized within this stack frame. |
| 527 | MapTy Temporaries; |
| 528 | |
| 529 | /// CallRange - The source range of the call expression for this call. |
| 530 | SourceRange CallRange; |
| 531 | |
| 532 | /// Index - The call index of this call. |
| 533 | unsigned Index; |
| 534 | |
| 535 | /// The stack of integers for tracking version numbers for temporaries. |
| 536 | SmallVector<unsigned, 2> TempVersionStack = {1}; |
| 537 | unsigned CurTempVersion = TempVersionStack.back(); |
| 538 | |
| 539 | unsigned getTempVersion() const { return TempVersionStack.back(); } |
| 540 | |
| 541 | void pushTempVersion() { |
| 542 | TempVersionStack.push_back(Elt: ++CurTempVersion); |
| 543 | } |
| 544 | |
| 545 | void popTempVersion() { |
| 546 | TempVersionStack.pop_back(); |
| 547 | } |
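| | // Explanatory note: versions distinguish distinct objects that share the |
| | // same Key; for example, a temporary materialized afresh in each iteration |
| | // of a loop gets a new version via pushTempVersion(), so each iteration's |
| | // value occupies its own slot in Temporaries. |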
| 548 | |
| 549 | CallRef createCall(const FunctionDecl *Callee) { |
| 550 | return {Callee, Index, ++CurTempVersion}; |
| 551 | } |
| 552 | |
| 553 | // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact |
| 554 | // on the overall stack usage of deeply-recursing constexpr evaluations. |
| 555 | // (We should cache this map rather than recomputing it repeatedly.) |
| 556 | // But let's try this and see how it goes; we can look into caching the map |
| 557 | // as a later change. |
| 558 | |
| 559 | /// LambdaCaptureFields - Mapping from captured variables/this to |
| 560 | /// corresponding data members in the closure class. |
| 561 | llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields; |
| 562 | FieldDecl *LambdaThisCaptureField = nullptr; |
| 563 | |
| 564 | CallStackFrame(EvalInfo &Info, SourceRange CallRange, |
| 565 | const FunctionDecl *Callee, const LValue *This, |
| 566 | const Expr *CallExpr, CallRef Arguments); |
| 567 | ~CallStackFrame(); |
| 568 | |
| 569 | // Return the temporary for Key whose version number is Version. |
| 570 | APValue *getTemporary(const void *Key, unsigned Version) { |
| 571 | MapKeyTy KV(Key, Version); |
| 572 | auto LB = Temporaries.lower_bound(x: KV); |
| 573 | if (LB != Temporaries.end() && LB->first == KV) |
| 574 | return &LB->second; |
| 575 | return nullptr; |
| 576 | } |
| 577 | |
| 578 | // Return the current temporary for Key in the map. |
| 579 | APValue *getCurrentTemporary(const void *Key) { |
| 580 | auto UB = Temporaries.upper_bound(x: MapKeyTy(Key, UINT_MAX)); |
| 581 | if (UB != Temporaries.begin() && std::prev(x: UB)->first.first == Key) |
| 582 | return &std::prev(x: UB)->second; |
| 583 | return nullptr; |
| 584 | } |
| 585 | |
| 586 | // Return the version number of the current temporary for Key. |
| 587 | unsigned getCurrentTemporaryVersion(const void *Key) const { |
| 588 | auto UB = Temporaries.upper_bound(x: MapKeyTy(Key, UINT_MAX)); |
| 589 | if (UB != Temporaries.begin() && std::prev(x: UB)->first.first == Key) |
| 590 | return std::prev(x: UB)->first.second; |
| 591 | return 0; |
| 592 | } |
| 593 | |
| 594 | /// Allocate storage for an object of type T in this stack frame. |
| 595 | /// Populates LV with a handle to the created object. Key identifies |
| 596 | /// the temporary within the stack frame, and must not be reused without |
| 597 | /// bumping the temporary version number. |
| 598 | template<typename KeyT> |
| 599 | APValue &createTemporary(const KeyT *Key, QualType T, |
| 600 | ScopeKind Scope, LValue &LV); |
| 601 | |
| 602 | /// Allocate storage for a parameter of a function call made in this frame. |
| 603 | APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV); |
| 604 | |
| 605 | void describe(llvm::raw_ostream &OS) const override; |
| 606 | |
| 607 | Frame *getCaller() const override { return Caller; } |
| 608 | SourceRange getCallRange() const override { return CallRange; } |
| 609 | const FunctionDecl *getCallee() const override { return Callee; } |
| 610 | |
| 611 | bool isStdFunction() const { |
| 612 | for (const DeclContext *DC = Callee; DC; DC = DC->getParent()) |
| 613 | if (DC->isStdNamespace()) |
| 614 | return true; |
| 615 | return false; |
| 616 | } |
| 617 | |
| 618 | /// Whether we're in a context where [[msvc::constexpr]] evaluation is |
| 619 | /// permitted. See MSConstexprDocs for description of permitted contexts. |
| 620 | bool CanEvalMSConstexpr = false; |
| 621 | |
| 622 | private: |
| 623 | APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T, |
| 624 | ScopeKind Scope); |
| 625 | }; |
| 626 | |
| 627 | /// Temporarily override 'this'. |
| 628 | class ThisOverrideRAII { |
| 629 | public: |
| 630 | ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable) |
| 631 | : Frame(Frame), OldThis(Frame.This) { |
| 632 | if (Enable) |
| 633 | Frame.This = NewThis; |
| 634 | } |
| 635 | ~ThisOverrideRAII() { |
| 636 | Frame.This = OldThis; |
| 637 | } |
| 638 | private: |
| 639 | CallStackFrame &Frame; |
| 640 | const LValue *OldThis; |
| 641 | }; |
| 642 | |
| 643 | // A shorthand time trace scope struct, prints source range, for example |
| 644 | // {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}} |
| 645 | class ExprTimeTraceScope { |
| 646 | public: |
| 647 | ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name) |
| 648 | : TimeScope(Name, [E, &Ctx] { |
| 649 | return E->getSourceRange().printToString(SM: Ctx.getSourceManager()); |
| 650 | }) {} |
| 651 | |
| 652 | private: |
| 653 | llvm::TimeTraceScope TimeScope; |
| 654 | }; |
| 655 | |
| 656 | /// RAII object used to change the current ability of |
| 657 | /// [[msvc::constexpr]] evaluation. |
| 658 | struct MSConstexprContextRAII { |
| 659 | CallStackFrame &Frame; |
| 660 | bool OldValue; |
| 661 | explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value) |
| 662 | : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) { |
| 663 | Frame.CanEvalMSConstexpr = Value; |
| 664 | } |
| 665 | |
| 666 | ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; } |
| 667 | }; |
| 668 | } |
| 669 | |
| 670 | static bool HandleDestruction(EvalInfo &Info, const Expr *E, |
| 671 | const LValue &This, QualType ThisType); |
| 672 | static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc, |
| 673 | APValue::LValueBase LVBase, APValue &Value, |
| 674 | QualType T); |
| 675 | |
| 676 | namespace { |
| 677 | /// A cleanup, and a flag indicating whether it is lifetime-extended. |
| 678 | class Cleanup { |
| 679 | llvm::PointerIntPair<APValue*, 2, ScopeKind> Value; |
| 680 | APValue::LValueBase Base; |
| 681 | QualType T; |
| 682 | |
| 683 | public: |
| 684 | Cleanup(APValue *Val, APValue::LValueBase Base, QualType T, |
| 685 | ScopeKind Scope) |
| 686 | : Value(Val, Scope), Base(Base), T(T) {} |
| 687 | |
| 688 | /// Determine whether this cleanup should be performed at the end of the |
| 689 | /// given kind of scope. |
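| | /// This relies on the ScopeKind enumerators being ordered |
| | /// Block < FullExpression < Call: isDestroyedAtEndOf(K) is true iff the |
| | /// cleanup's registered scope S satisfies S >= K, so e.g. a |
| | /// lifetime-extended (Block) cleanup is not destroyed when the enclosing |
| | /// full-expression ends. |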
| 690 | bool isDestroyedAtEndOf(ScopeKind K) const { |
| 691 | return (int)Value.getInt() >= (int)K; |
| 692 | } |
| 693 | bool endLifetime(EvalInfo &Info, bool RunDestructors) { |
| 694 | if (RunDestructors) { |
| 695 | SourceLocation Loc; |
| 696 | if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) |
| 697 | Loc = VD->getLocation(); |
| 698 | else if (const Expr *E = Base.dyn_cast<const Expr*>()) |
| 699 | Loc = E->getExprLoc(); |
| 700 | return HandleDestruction(Info, Loc, LVBase: Base, Value&: *Value.getPointer(), T); |
| 701 | } |
| 702 | *Value.getPointer() = APValue(); |
| 703 | return true; |
| 704 | } |
| 705 | |
| 706 | bool hasSideEffect() { |
| 707 | return T.isDestructedType(); |
| 708 | } |
| 709 | }; |
| 710 | |
| 711 | /// A reference to an object whose construction we are currently evaluating. |
| 712 | struct ObjectUnderConstruction { |
| 713 | APValue::LValueBase Base; |
| 714 | ArrayRef<APValue::LValuePathEntry> Path; |
| 715 | friend bool operator==(const ObjectUnderConstruction &LHS, |
| 716 | const ObjectUnderConstruction &RHS) { |
| 717 | return LHS.Base == RHS.Base && LHS.Path == RHS.Path; |
| 718 | } |
| 719 | friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) { |
| 720 | return llvm::hash_combine(args: Obj.Base, args: Obj.Path); |
| 721 | } |
| 722 | }; |
| 723 | enum class ConstructionPhase { |
| 724 | None, |
| 725 | Bases, |
| 726 | AfterBases, |
| 727 | AfterFields, |
| 728 | Destroying, |
| 729 | DestroyingBases |
| 730 | }; |
| 731 | } |
| 732 | |
| 733 | namespace llvm { |
| 734 | template<> struct DenseMapInfo<ObjectUnderConstruction> { |
| 735 | using Base = DenseMapInfo<APValue::LValueBase>; |
| 736 | static ObjectUnderConstruction getEmptyKey() { |
| 737 | return {.Base: Base::getEmptyKey(), .Path: {}}; } |
| 738 | static ObjectUnderConstruction getTombstoneKey() { |
| 739 | return {.Base: Base::getTombstoneKey(), .Path: {}}; |
| 740 | } |
| 741 | static unsigned getHashValue(const ObjectUnderConstruction &Object) { |
| 742 | return hash_value(Obj: Object); |
| 743 | } |
| 744 | static bool isEqual(const ObjectUnderConstruction &LHS, |
| 745 | const ObjectUnderConstruction &RHS) { |
| 746 | return LHS == RHS; |
| 747 | } |
| 748 | }; |
| 749 | } |
| 750 | |
| 751 | namespace { |
| 752 | /// A dynamically-allocated heap object. |
| 753 | struct DynAlloc { |
| 754 | /// The value of this heap-allocated object. |
| 755 | APValue Value; |
| 756 | /// The allocating expression; used for diagnostics. Either a CXXNewExpr |
| 757 | /// or a CallExpr (the latter is for direct calls to operator new inside |
| 758 | /// std::allocator<T>::allocate). |
| 759 | const Expr *AllocExpr = nullptr; |
| 760 | |
| 761 | enum Kind { |
| 762 | New, |
| 763 | ArrayNew, |
| 764 | StdAllocator |
| 765 | }; |
| 766 | |
| 767 | /// Get the kind of the allocation. This must match between allocation |
| 768 | /// and deallocation. |
| 769 | Kind getKind() const { |
| 770 | if (auto *NE = dyn_cast<CXXNewExpr>(Val: AllocExpr)) |
| 771 | return NE->isArray() ? ArrayNew : New; |
| 772 | assert(isa<CallExpr>(AllocExpr)); |
| 773 | return StdAllocator; |
| 774 | } |
| 775 | }; |
| 776 | |
| 777 | struct DynAllocOrder { |
| 778 | bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const { |
| 779 | return L.getIndex() < R.getIndex(); |
| 780 | } |
| 781 | }; |
| 782 | |
| 783 | /// EvalInfo - This is a private struct used by the evaluator to capture |
| 784 | /// information about a subexpression as it is folded. It retains information |
| 785 | /// about the AST context, but also maintains information about the folded |
| 786 | /// expression. |
| 787 | /// |
| 788 | /// If an expression could be evaluated, it is still possible it is not a C |
| 789 | /// "integer constant expression" or constant expression. If not, this struct |
| 790 | /// captures information about how and why not. |
| 791 | /// |
| 792 | /// One bit of information passed *into* the request for constant folding |
| 793 | /// indicates whether the subexpression is "evaluated" or not according to C |
| 794 | /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can |
| 795 | /// evaluate the expression regardless of what the RHS is, but C only allows |
| 796 | /// certain things in certain situations. |
| 797 | class EvalInfo final : public interp::State { |
| 798 | public: |
| 799 | /// CurrentCall - The top of the constexpr call stack. |
| 800 | CallStackFrame *CurrentCall; |
| 801 | |
| 802 | /// CallStackDepth - The number of calls in the call stack right now. |
| 803 | unsigned CallStackDepth; |
| 804 | |
| 805 | /// NextCallIndex - The next call index to assign. |
| 806 | unsigned NextCallIndex; |
| 807 | |
| 808 | /// StepsLeft - The remaining number of evaluation steps we're permitted |
| 809 | /// to perform. This is essentially a limit for the number of statements |
| 810 | /// we will evaluate. |
| 811 | unsigned StepsLeft; |
| 812 | |
| 813 | /// Enable the experimental new constant interpreter. If an expression is |
| 814 | /// not supported by the interpreter, an error is triggered. |
| 815 | bool EnableNewConstInterp; |
| 816 | |
| 817 | /// BottomFrame - The frame in which evaluation started. This must be |
| 818 | /// initialized after CurrentCall and CallStackDepth. |
| 819 | CallStackFrame BottomFrame; |
| 820 | |
| 821 | /// A stack of values whose lifetimes end at the end of some surrounding |
| 822 | /// evaluation frame. |
| 823 | llvm::SmallVector<Cleanup, 16> CleanupStack; |
| 824 | |
| 825 | /// EvaluatingDecl - This is the declaration whose initializer is being |
| 826 | /// evaluated, if any. |
| 827 | APValue::LValueBase EvaluatingDecl; |
| 828 | |
| 829 | enum class EvaluatingDeclKind { |
| 830 | None, |
| 831 | /// We're evaluating the construction of EvaluatingDecl. |
| 832 | Ctor, |
| 833 | /// We're evaluating the destruction of EvaluatingDecl. |
| 834 | Dtor, |
| 835 | }; |
| 836 | EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None; |
| 837 | |
| 838 | /// EvaluatingDeclValue - This is the value being constructed for the |
| 839 | /// declaration whose initializer is being evaluated, if any. |
| 840 | APValue *EvaluatingDeclValue; |
| 841 | |
| 842 | /// Stack of loops and 'switch' statements which we're currently |
| 843 | /// breaking/continuing; null entries are used to mark unlabeled |
| 844 | /// break/continue. |
| 845 | SmallVector<const Stmt *> BreakContinueStack; |
| 846 | |
| 847 | /// Set of objects that are currently being constructed. |
| 848 | llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase> |
| 849 | ObjectsUnderConstruction; |
| 850 | |
| 851 | /// Current heap allocations, along with the location where each was |
| 852 | /// allocated. We use std::map here because we need stable addresses |
| 853 | /// for the stored APValues. |
| 854 | std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs; |
| 855 | |
| 856 | /// The number of heap allocations performed so far in this evaluation. |
| 857 | unsigned NumHeapAllocs = 0; |
| 858 | |
| 859 | struct EvaluatingConstructorRAII { |
| 860 | EvalInfo &EI; |
| 861 | ObjectUnderConstruction Object; |
| 862 | bool DidInsert; |
| 863 | EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object, |
| 864 | bool HasBases) |
| 865 | : EI(EI), Object(Object) { |
| 866 | DidInsert = |
| 867 | EI.ObjectsUnderConstruction |
| 868 | .insert(KV: {Object, HasBases ? ConstructionPhase::Bases |
| 869 | : ConstructionPhase::AfterBases}) |
| 870 | .second; |
| 871 | } |
| 872 | void finishedConstructingBases() { |
| 873 | EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases; |
| 874 | } |
| 875 | void finishedConstructingFields() { |
| 876 | EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields; |
| 877 | } |
| 878 | ~EvaluatingConstructorRAII() { |
| 879 | if (DidInsert) EI.ObjectsUnderConstruction.erase(Val: Object); |
| 880 | } |
| 881 | }; |
| 882 | |
| 883 | struct EvaluatingDestructorRAII { |
| 884 | EvalInfo &EI; |
| 885 | ObjectUnderConstruction Object; |
| 886 | bool DidInsert; |
| 887 | EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object) |
| 888 | : EI(EI), Object(Object) { |
| 889 | DidInsert = EI.ObjectsUnderConstruction |
| 890 | .insert(KV: {Object, ConstructionPhase::Destroying}) |
| 891 | .second; |
| 892 | } |
| 893 | void startedDestroyingBases() { |
| 894 | EI.ObjectsUnderConstruction[Object] = |
| 895 | ConstructionPhase::DestroyingBases; |
| 896 | } |
| 897 | ~EvaluatingDestructorRAII() { |
| 898 | if (DidInsert) |
| 899 | EI.ObjectsUnderConstruction.erase(Val: Object); |
| 900 | } |
| 901 | }; |
| 902 | |
| 903 | ConstructionPhase |
| 904 | isEvaluatingCtorDtor(APValue::LValueBase Base, |
| 905 | ArrayRef<APValue::LValuePathEntry> Path) { |
| 906 | return ObjectsUnderConstruction.lookup(Val: {.Base: Base, .Path: Path}); |
| 907 | } |
| 908 | |
| 909 | /// If we're currently speculatively evaluating, the outermost call stack |
| 910 | /// depth at which we can mutate state, otherwise 0. |
| 911 | unsigned SpeculativeEvaluationDepth = 0; |
| 912 | |
| 913 | /// The current array initialization index, if we're performing array |
| 914 | /// initialization. |
| 915 | uint64_t ArrayInitIndex = -1; |
| 916 | |
| 917 | EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode) |
| 918 | : State(const_cast<ASTContext &>(C), S), CurrentCall(nullptr), |
| 919 | CallStackDepth(0), NextCallIndex(1), |
| 920 | StepsLeft(C.getLangOpts().ConstexprStepLimit), |
| 921 | EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp), |
| 922 | BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr, |
| 923 | /*This=*/nullptr, |
| 924 | /*CallExpr=*/nullptr, CallRef()), |
| 925 | EvaluatingDecl((const ValueDecl *)nullptr), |
| 926 | EvaluatingDeclValue(nullptr) { |
| 927 | EvalMode = Mode; |
| 928 | } |
| 929 | |
| 930 | ~EvalInfo() { |
| 931 | discardCleanups(); |
| 932 | } |
| 933 | |
| 934 | void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value, |
| 935 | EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) { |
| 936 | EvaluatingDecl = Base; |
| 937 | IsEvaluatingDecl = EDK; |
| 938 | EvaluatingDeclValue = &Value; |
| 939 | } |
| 940 | |
| 941 | bool CheckCallLimit(SourceLocation Loc) { |
| 942 | // Don't perform any constexpr calls (other than the call we're checking) |
| 943 | // when checking a potential constant expression. |
| 944 | if (checkingPotentialConstantExpression() && CallStackDepth > 1) |
| 945 | return false; |
| 946 | if (NextCallIndex == 0) { |
| 947 | // NextCallIndex has wrapped around. |
| 948 | FFDiag(Loc, DiagId: diag::note_constexpr_call_limit_exceeded); |
| 949 | return false; |
| 950 | } |
| 951 | if (CallStackDepth <= getLangOpts().ConstexprCallDepth) |
| 952 | return true; |
| 953 | FFDiag(Loc, DiagId: diag::note_constexpr_depth_limit_exceeded) |
| 954 | << getLangOpts().ConstexprCallDepth; |
| 955 | return false; |
| 956 | } |
| 957 | |
| 958 | bool CheckArraySize(SourceLocation Loc, unsigned BitWidth, |
| 959 | uint64_t ElemCount, bool Diag) { |
| 960 | // FIXME: GH63562 |
| 961 | // APValue stores array extents as unsigned, |
| 962 | // so anything that is greater that unsigned would overflow when |
| 963 | // constructing the array, we catch this here. |
| 964 | if (BitWidth > ConstantArrayType::getMaxSizeBits(Context: Ctx) || |
| 965 | ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) { |
| 966 | if (Diag) |
| 967 | FFDiag(Loc, DiagId: diag::note_constexpr_new_too_large) << ElemCount; |
| 968 | return false; |
| 969 | } |
| 970 | |
| 971 | // FIXME: GH63562 |
| 972 | // Arrays allocate an APValue per element. |
| 973 | // We use the number of constexpr steps as a proxy for the maximum size |
| 974 | // of arrays to avoid exhausting the system resources, as initialization |
| 975 | // of each element is likely to take some number of steps anyway. |
| 976 | uint64_t Limit = getLangOpts().ConstexprStepLimit; |
| 977 | if (Limit != 0 && ElemCount > Limit) { |
| 978 | if (Diag) |
| 979 | FFDiag(Loc, DiagId: diag::note_constexpr_new_exceeds_limits) |
| 980 | << ElemCount << Limit; |
| 981 | return false; |
| 982 | } |
| 983 | return true; |
| 984 | } |
| 985 | |
| 986 | std::pair<CallStackFrame *, unsigned> |
| 987 | getCallFrameAndDepth(unsigned CallIndex) { |
| 988 | assert(CallIndex && "no call index in getCallFrameAndDepth" ); |
| 989 | // We will eventually hit BottomFrame, which has Index 1, so Frame can't |
| 990 | // be null in this loop. |
| 991 | unsigned Depth = CallStackDepth; |
| 992 | CallStackFrame *Frame = CurrentCall; |
| 993 | while (Frame->Index > CallIndex) { |
| 994 | Frame = Frame->Caller; |
| 995 | --Depth; |
| 996 | } |
| 997 | if (Frame->Index == CallIndex) |
| 998 | return {Frame, Depth}; |
| 999 | return {nullptr, 0}; |
| 1000 | } |
| 1001 | |
| 1002 | bool nextStep(const Stmt *S) { |
| 1003 | if (getLangOpts().ConstexprStepLimit == 0) |
| 1004 | return true; |
| 1005 | |
| 1006 | if (!StepsLeft) { |
| 1007 | FFDiag(Loc: S->getBeginLoc(), DiagId: diag::note_constexpr_step_limit_exceeded); |
| 1008 | return false; |
| 1009 | } |
| 1010 | --StepsLeft; |
| 1011 | return true; |
| 1012 | } |
| 1013 | |
| 1014 | APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV); |
| 1015 | |
| 1016 | std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) { |
| 1017 | std::optional<DynAlloc *> Result; |
| 1018 | auto It = HeapAllocs.find(x: DA); |
| 1019 | if (It != HeapAllocs.end()) |
| 1020 | Result = &It->second; |
| 1021 | return Result; |
| 1022 | } |
| 1023 | |
| 1024 | /// Get the allocated storage for the given parameter of the given call. |
| 1025 | APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) { |
| 1026 | CallStackFrame *Frame = getCallFrameAndDepth(CallIndex: Call.CallIndex).first; |
| 1027 | return Frame ? Frame->getTemporary(Key: Call.getOrigParam(PVD), Version: Call.Version) |
| 1028 | : nullptr; |
| 1029 | } |
| 1030 | |
| 1031 | /// Information about a stack frame for std::allocator<T>::[de]allocate. |
| 1032 | struct StdAllocatorCaller { |
| 1033 | unsigned FrameIndex; |
| 1034 | QualType ElemType; |
| 1035 | const Expr *Call; |
| 1036 | explicit operator bool() const { return FrameIndex != 0; }; |
| 1037 | }; |
| 1038 | |
| 1039 | StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const { |
| 1040 | for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame; |
| 1041 | Call = Call->Caller) { |
| 1042 | const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Val: Call->Callee); |
| 1043 | if (!MD) |
| 1044 | continue; |
| 1045 | const IdentifierInfo *FnII = MD->getIdentifier(); |
| 1046 | if (!FnII || !FnII->isStr(Str: FnName)) |
| 1047 | continue; |
| 1048 | |
| 1049 | const auto *CTSD = |
| 1050 | dyn_cast<ClassTemplateSpecializationDecl>(Val: MD->getParent()); |
| 1051 | if (!CTSD) |
| 1052 | continue; |
| 1053 | |
| 1054 | const IdentifierInfo *ClassII = CTSD->getIdentifier(); |
| 1055 | const TemplateArgumentList &TAL = CTSD->getTemplateArgs(); |
| 1056 | if (CTSD->isInStdNamespace() && ClassII && |
| 1057 | ClassII->isStr(Str: "allocator" ) && TAL.size() >= 1 && |
| 1058 | TAL[0].getKind() == TemplateArgument::Type) |
| 1059 | return {.FrameIndex: Call->Index, .ElemType: TAL[0].getAsType(), .Call: Call->CallExpr}; |
| 1060 | } |
| 1061 | |
| 1062 | return {}; |
| 1063 | } |
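| | // Explanatory note: this walks the constexpr call stack looking for a |
| | // frame whose callee is std::allocator<T>::<FnName> (e.g. 'allocate'); the |
| | // element type is taken from the first template argument of the enclosing |
| | // allocator specialization. |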
| 1064 | |
| 1065 | void performLifetimeExtension() { |
| 1066 | // Disable the cleanups for lifetime-extended temporaries. |
| 1067 | llvm::erase_if(C&: CleanupStack, P: [](Cleanup &C) { |
| 1068 | return !C.isDestroyedAtEndOf(K: ScopeKind::FullExpression); |
| 1069 | }); |
| 1070 | } |
| 1071 | |
| 1072 | /// Throw away any remaining cleanups at the end of evaluation. If any |
| 1073 | /// cleanups would have had a side-effect, note that as an unmodeled |
| 1074 | /// side-effect and return false. Otherwise, return true. |
| 1075 | bool discardCleanups() { |
| 1076 | for (Cleanup &C : CleanupStack) { |
| 1077 | if (C.hasSideEffect() && !noteSideEffect()) { |
| 1078 | CleanupStack.clear(); |
| 1079 | return false; |
| 1080 | } |
| 1081 | } |
| 1082 | CleanupStack.clear(); |
| 1083 | return true; |
| 1084 | } |
| 1085 | |
| 1086 | private: |
| 1087 | const interp::Frame *getCurrentFrame() override { return CurrentCall; } |
| 1088 | const interp::Frame *getBottomFrame() const override { return &BottomFrame; } |
| 1089 | |
| 1090 | unsigned getCallStackDepth() override { return CallStackDepth; } |
| 1091 | bool stepsLeft() const override { return StepsLeft > 0; } |
| 1092 | |
| 1093 | public: |
| 1094 | /// Notes that we failed to evaluate an expression that other expressions |
| 1095 | /// directly depend on, and determine if we should keep evaluating. This |
| 1096 | /// should only be called if we actually intend to keep evaluating. |
| 1097 | /// |
| 1098 | /// Call noteSideEffect() instead if we may be able to ignore the value that |
| 1099 | /// we failed to evaluate, e.g. if we failed to evaluate Foo() in: |
| 1100 | /// |
| 1101 | /// (Foo(), 1) // use noteSideEffect |
| 1102 | /// (Foo() || true) // use noteSideEffect |
| 1103 | /// Foo() + 1 // use noteFailure |
| 1104 | [[nodiscard]] bool noteFailure() { |
| 1105 | // Failure when evaluating some expression often means there is some |
| 1106 | // subexpression whose evaluation was skipped. Therefore, (because we |
| 1107 | // don't track whether we skipped an expression when unwinding after an |
| 1108 | // evaluation failure) every evaluation failure that bubbles up from a |
| 1109 | // subexpression implies that a side-effect has potentially happened. We |
| 1110 | // skip setting the HasSideEffects flag to true until we decide to |
| 1111 | // continue evaluating after that point, which happens here. |
| 1112 | bool KeepGoing = keepEvaluatingAfterFailure(); |
| 1113 | EvalStatus.HasSideEffects |= KeepGoing; |
| 1114 | return KeepGoing; |
| 1115 | } |
| 1116 | |
| 1117 | class ArrayInitLoopIndex { |
| 1118 | EvalInfo &Info; |
| 1119 | uint64_t OuterIndex; |
| 1120 | |
| 1121 | public: |
| 1122 | ArrayInitLoopIndex(EvalInfo &Info) |
| 1123 | : Info(Info), OuterIndex(Info.ArrayInitIndex) { |
| 1124 | Info.ArrayInitIndex = 0; |
| 1125 | } |
| 1126 | ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; } |
| 1127 | |
| 1128 | operator uint64_t&() { return Info.ArrayInitIndex; } |
| 1129 | }; |
| 1130 | }; |
| 1131 | |
| 1132 | /// Object used to treat all foldable expressions as constant expressions. |
| 1133 | struct FoldConstant { |
| 1134 | EvalInfo &Info; |
| 1135 | bool Enabled; |
| 1136 | bool HadNoPriorDiags; |
| 1137 | EvaluationMode OldMode; |
| 1138 | |
| 1139 | explicit FoldConstant(EvalInfo &Info, bool Enabled) |
| 1140 | : Info(Info), |
| 1141 | Enabled(Enabled), |
| 1142 | HadNoPriorDiags(Info.EvalStatus.Diag && |
| 1143 | Info.EvalStatus.Diag->empty() && |
| 1144 | !Info.EvalStatus.HasSideEffects), |
| 1145 | OldMode(Info.EvalMode) { |
| 1146 | if (Enabled) |
| 1147 | Info.EvalMode = EvaluationMode::ConstantFold; |
| 1148 | } |
| 1149 | void keepDiagnostics() { Enabled = false; } |
| 1150 | ~FoldConstant() { |
| 1151 | if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() && |
| 1152 | !Info.EvalStatus.HasSideEffects) |
| 1153 | Info.EvalStatus.Diag->clear(); |
| 1154 | Info.EvalMode = OldMode; |
| 1155 | } |
| 1156 | }; |
| 1157 | |
| 1158 | /// RAII object used to set the current evaluation mode to ignore |
| 1159 | /// side-effects. |
| 1160 | struct IgnoreSideEffectsRAII { |
| 1161 | EvalInfo &Info; |
| 1162 | EvaluationMode OldMode; |
| 1163 | explicit IgnoreSideEffectsRAII(EvalInfo &Info) |
| 1164 | : Info(Info), OldMode(Info.EvalMode) { |
| 1165 | Info.EvalMode = EvaluationMode::IgnoreSideEffects; |
| 1166 | } |
| 1167 | |
| 1168 | ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; } |
| 1169 | }; |
| 1170 | |
| 1171 | /// RAII object used to optionally suppress diagnostics and side-effects from |
| 1172 | /// a speculative evaluation. |
| 1173 | class SpeculativeEvaluationRAII { |
| 1174 | EvalInfo *Info = nullptr; |
| 1175 | Expr::EvalStatus OldStatus; |
| 1176 | unsigned OldSpeculativeEvaluationDepth = 0; |
| 1177 | |
| 1178 | void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) { |
| 1179 | Info = Other.Info; |
| 1180 | OldStatus = Other.OldStatus; |
| 1181 | OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth; |
| 1182 | Other.Info = nullptr; |
| 1183 | } |
| 1184 | |
| 1185 | void maybeRestoreState() { |
| 1186 | if (!Info) |
| 1187 | return; |
| 1188 | |
| 1189 | Info->EvalStatus = OldStatus; |
| 1190 | Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth; |
| 1191 | } |
| 1192 | |
| 1193 | public: |
| 1194 | SpeculativeEvaluationRAII() = default; |
| 1195 | |
| 1196 | SpeculativeEvaluationRAII( |
| 1197 | EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr) |
| 1198 | : Info(&Info), OldStatus(Info.EvalStatus), |
| 1199 | OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) { |
| 1200 | Info.EvalStatus.Diag = NewDiag; |
| 1201 | Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1; |
| 1202 | } |
| 1203 | |
| 1204 | SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete; |
| 1205 | SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) { |
| 1206 | moveFromAndCancel(Other: std::move(Other)); |
| 1207 | } |
| 1208 | |
| 1209 | SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) { |
| 1210 | maybeRestoreState(); |
| 1211 | moveFromAndCancel(Other: std::move(Other)); |
| 1212 | return *this; |
| 1213 | } |
| 1214 | |
| 1215 | ~SpeculativeEvaluationRAII() { maybeRestoreState(); } |
| 1216 | }; |
| 1217 | |
| 1218 | /// RAII object wrapping a full-expression or block scope, and handling |
| 1219 | /// the ending of the lifetime of temporaries created within it. |
| 1220 | template<ScopeKind Kind> |
| 1221 | class ScopeRAII { |
| 1222 | EvalInfo &Info; |
| 1223 | unsigned OldStackSize; |
| 1224 | public: |
| 1225 | ScopeRAII(EvalInfo &Info) |
| 1226 | : Info(Info), OldStackSize(Info.CleanupStack.size()) { |
| 1227 | // Push a new temporary version. This is needed to distinguish between |
| 1228 | // temporaries created in different iterations of a loop. |
| 1229 | Info.CurrentCall->pushTempVersion(); |
| 1230 | } |
| 1231 | bool destroy(bool RunDestructors = true) { |
| 1232 | bool OK = cleanup(Info, RunDestructors, OldStackSize); |
| 1233 | OldStackSize = std::numeric_limits<unsigned>::max(); |
| 1234 | return OK; |
| 1235 | } |
| 1236 | ~ScopeRAII() { |
| 1237 | if (OldStackSize != std::numeric_limits<unsigned>::max()) |
| 1238 | destroy(RunDestructors: false); |
| 1239 | // Body moved to a static method to encourage the compiler to inline away |
| 1240 | // instances of this class. |
| 1241 | Info.CurrentCall->popTempVersion(); |
| 1242 | } |
| 1243 | private: |
| 1244 | static bool cleanup(EvalInfo &Info, bool RunDestructors, |
| 1245 | unsigned OldStackSize) { |
| 1246 | assert(OldStackSize <= Info.CleanupStack.size() && |
| 1247 | "running cleanups out of order?" ); |
| 1248 | |
| 1249 | // Run all cleanups for a block scope, and non-lifetime-extended cleanups |
| 1250 | // for a full-expression scope. |
| 1251 | bool Success = true; |
| 1252 | for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) { |
| 1253 | if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(K: Kind)) { |
| 1254 | if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) { |
| 1255 | Success = false; |
| 1256 | break; |
| 1257 | } |
| 1258 | } |
| 1259 | } |
| 1260 | |
| 1261 | // Compact any retained cleanups. |
| 1262 | auto NewEnd = Info.CleanupStack.begin() + OldStackSize; |
| 1263 | if (Kind != ScopeKind::Block) |
| 1264 | NewEnd = |
| 1265 | std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) { |
| 1266 | return C.isDestroyedAtEndOf(K: Kind); |
| 1267 | }); |
| 1268 | Info.CleanupStack.erase(CS: NewEnd, CE: Info.CleanupStack.end()); |
| 1269 | return Success; |
| 1270 | } |
| 1271 | }; |
| 1272 | typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII; |
| 1273 | typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII; |
| 1274 | typedef ScopeRAII<ScopeKind::Call> CallScopeRAII; |
| 1275 | } |
| 1276 | |
| 1277 | bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E, |
| 1278 | CheckSubobjectKind CSK) { |
| 1279 | if (Invalid) |
| 1280 | return false; |
| 1281 | if (isOnePastTheEnd()) { |
| 1282 | Info.CCEDiag(E, DiagId: diag::note_constexpr_past_end_subobject) |
| 1283 | << CSK; |
| 1284 | setInvalid(); |
| 1285 | return false; |
| 1286 | } |
| 1287 | // Note, we do not diagnose if isMostDerivedAnUnsizedArray(), because there |
| 1288 | // must actually be at least one array element; even a VLA cannot have a |
| 1289 | // bound of zero. And if our index is nonzero, we already had a CCEDiag. |
| 1290 | return true; |
| 1291 | } |
| 1292 | |
| 1293 | void SubobjectDesignator::diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, |
| 1294 | const Expr *E) { |
| 1295 | Info.CCEDiag(E, DiagId: diag::note_constexpr_unsized_array_indexed); |
| 1296 | // Do not set the designator as invalid: we can represent this situation, |
| 1297 | // and correct handling of __builtin_object_size requires us to do so. |
| 1298 | } |
| 1299 | |
| 1300 | void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info, |
| 1301 | const Expr *E, |
| 1302 | const APSInt &N) { |
| 1303 | // If we're complaining, we must be able to statically determine the size of |
| 1304 | // the most derived array. |
| 1305 | if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement) |
| 1306 | Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index) |
| 1307 | << N << /*array*/ 0 |
| 1308 | << static_cast<unsigned>(getMostDerivedArraySize()); |
| 1309 | else |
| 1310 | Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index) |
| 1311 | << N << /*non-array*/ 1; |
| 1312 | setInvalid(); |
| 1313 | } |
| 1314 | |
| 1315 | CallStackFrame::CallStackFrame(EvalInfo &Info, SourceRange CallRange, |
| 1316 | const FunctionDecl *Callee, const LValue *This, |
| 1317 | const Expr *CallExpr, CallRef Call) |
| 1318 | : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This), |
| 1319 | CallExpr(CallExpr), Arguments(Call), CallRange(CallRange), |
| 1320 | Index(Info.NextCallIndex++) { |
| 1321 | Info.CurrentCall = this; |
| 1322 | ++Info.CallStackDepth; |
| 1323 | } |
| 1324 | |
| 1325 | CallStackFrame::~CallStackFrame() { |
assert(Info.CurrentCall == this && "calls retired out of order");
| 1327 | --Info.CallStackDepth; |
| 1328 | Info.CurrentCall = Caller; |
| 1329 | } |
| 1330 | |
| 1331 | static bool isRead(AccessKinds AK) { |
| 1332 | return AK == AK_Read || AK == AK_ReadObjectRepresentation || |
| 1333 | AK == AK_IsWithinLifetime || AK == AK_Dereference; |
| 1334 | } |
| 1335 | |
| 1336 | static bool isModification(AccessKinds AK) { |
| 1337 | switch (AK) { |
| 1338 | case AK_Read: |
| 1339 | case AK_ReadObjectRepresentation: |
| 1340 | case AK_MemberCall: |
| 1341 | case AK_DynamicCast: |
| 1342 | case AK_TypeId: |
| 1343 | case AK_IsWithinLifetime: |
| 1344 | case AK_Dereference: |
| 1345 | return false; |
| 1346 | case AK_Assign: |
| 1347 | case AK_Increment: |
| 1348 | case AK_Decrement: |
| 1349 | case AK_Construct: |
| 1350 | case AK_Destroy: |
| 1351 | return true; |
| 1352 | } |
llvm_unreachable("unknown access kind");
| 1354 | } |
| 1355 | |
| 1356 | static bool isAnyAccess(AccessKinds AK) { |
| 1357 | return isRead(AK) || isModification(AK); |
| 1358 | } |
| 1359 | |
| 1360 | /// Is this an access per the C++ definition? |
| 1361 | static bool isFormalAccess(AccessKinds AK) { |
| 1362 | return isAnyAccess(AK) && AK != AK_Construct && AK != AK_Destroy && |
| 1363 | AK != AK_IsWithinLifetime && AK != AK_Dereference; |
| 1364 | } |
| 1365 | |
| 1366 | /// Is this kind of access valid on an indeterminate object value? |
| 1367 | static bool isValidIndeterminateAccess(AccessKinds AK) { |
| 1368 | switch (AK) { |
| 1369 | case AK_Read: |
| 1370 | case AK_Increment: |
| 1371 | case AK_Decrement: |
| 1372 | case AK_Dereference: |
| 1373 | // These need the object's value. |
| 1374 | return false; |
| 1375 | |
| 1376 | case AK_IsWithinLifetime: |
| 1377 | case AK_ReadObjectRepresentation: |
| 1378 | case AK_Assign: |
| 1379 | case AK_Construct: |
| 1380 | case AK_Destroy: |
| 1381 | // Construction and destruction don't need the value. |
| 1382 | return true; |
| 1383 | |
| 1384 | case AK_MemberCall: |
| 1385 | case AK_DynamicCast: |
| 1386 | case AK_TypeId: |
| 1387 | // These aren't really meaningful on scalars. |
| 1388 | return true; |
| 1389 | } |
llvm_unreachable("unknown access kind");
| 1391 | } |
| 1392 | |
| 1393 | namespace { |
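// A scratch representation of a complex value during evaluation: it holds
// either an integer pair or a floating-point pair, selected by IsInt.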
| 1394 | struct ComplexValue { |
| 1395 | private: |
| 1396 | bool IsInt; |
| 1397 | |
| 1398 | public: |
| 1399 | APSInt IntReal, IntImag; |
| 1400 | APFloat FloatReal, FloatImag; |
| 1401 | |
| 1402 | ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {} |
| 1403 | |
| 1404 | void makeComplexFloat() { IsInt = false; } |
| 1405 | bool isComplexFloat() const { return !IsInt; } |
| 1406 | APFloat &getComplexFloatReal() { return FloatReal; } |
| 1407 | APFloat &getComplexFloatImag() { return FloatImag; } |
| 1408 | |
| 1409 | void makeComplexInt() { IsInt = true; } |
| 1410 | bool isComplexInt() const { return IsInt; } |
| 1411 | APSInt &getComplexIntReal() { return IntReal; } |
| 1412 | APSInt &getComplexIntImag() { return IntImag; } |
| 1413 | |
| 1414 | void moveInto(APValue &v) const { |
| 1415 | if (isComplexFloat()) |
| 1416 | v = APValue(FloatReal, FloatImag); |
| 1417 | else |
| 1418 | v = APValue(IntReal, IntImag); |
| 1419 | } |
| 1420 | void setFrom(const APValue &v) { |
| 1421 | assert(v.isComplexFloat() || v.isComplexInt()); |
| 1422 | if (v.isComplexFloat()) { |
| 1423 | makeComplexFloat(); |
| 1424 | FloatReal = v.getComplexFloatReal(); |
| 1425 | FloatImag = v.getComplexFloatImag(); |
| 1426 | } else { |
| 1427 | makeComplexInt(); |
| 1428 | IntReal = v.getComplexIntReal(); |
| 1429 | IntImag = v.getComplexIntImag(); |
| 1430 | } |
| 1431 | } |
| 1432 | }; |
| 1433 | |
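// An lvalue under evaluation: an LValueBase plus a byte offset and a
// subobject designator describing the path to the designated subobject.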
| 1434 | struct LValue { |
| 1435 | APValue::LValueBase Base; |
| 1436 | CharUnits Offset; |
| 1437 | SubobjectDesignator Designator; |
| 1438 | bool IsNullPtr : 1; |
| 1439 | bool InvalidBase : 1; |
| 1440 | // P2280R4 track if we have an unknown reference or pointer. |
| 1441 | bool AllowConstexprUnknown = false; |
| 1442 | |
| 1443 | const APValue::LValueBase getLValueBase() const { return Base; } |
| 1444 | bool allowConstexprUnknown() const { return AllowConstexprUnknown; } |
| 1445 | CharUnits &getLValueOffset() { return Offset; } |
| 1446 | const CharUnits &getLValueOffset() const { return Offset; } |
| 1447 | SubobjectDesignator &getLValueDesignator() { return Designator; } |
| 1448 | const SubobjectDesignator &getLValueDesignator() const { return Designator;} |
| 1449 | bool isNullPointer() const { return IsNullPtr;} |
| 1450 | |
| 1451 | unsigned getLValueCallIndex() const { return Base.getCallIndex(); } |
| 1452 | unsigned getLValueVersion() const { return Base.getVersion(); } |
| 1453 | |
| 1454 | void moveInto(APValue &V) const { |
| 1455 | if (Designator.Invalid) |
| 1456 | V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr); |
| 1457 | else { |
assert(!InvalidBase && "APValues can't handle invalid LValue bases");
| 1459 | V = APValue(Base, Offset, Designator.Entries, |
| 1460 | Designator.IsOnePastTheEnd, IsNullPtr); |
| 1461 | } |
| 1462 | if (AllowConstexprUnknown) |
| 1463 | V.setConstexprUnknown(); |
| 1464 | } |
| 1465 | void setFrom(const ASTContext &Ctx, const APValue &V) { |
assert(V.isLValue() && "Setting LValue from a non-LValue?");
| 1467 | Base = V.getLValueBase(); |
| 1468 | Offset = V.getLValueOffset(); |
| 1469 | InvalidBase = false; |
| 1470 | Designator = SubobjectDesignator(Ctx, V); |
| 1471 | IsNullPtr = V.isNullPointer(); |
| 1472 | AllowConstexprUnknown = V.allowConstexprUnknown(); |
| 1473 | } |
| 1474 | |
| 1475 | void set(APValue::LValueBase B, bool BInvalid = false) { |
| 1476 | #ifndef NDEBUG |
| 1477 | // We only allow a few types of invalid bases. Enforce that here. |
| 1478 | if (BInvalid) { |
| 1479 | const auto *E = B.get<const Expr *>(); |
assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
"Unexpected type of invalid base");
| 1482 | } |
| 1483 | #endif |
| 1484 | |
| 1485 | Base = B; |
Offset = CharUnits::fromQuantity(0);
| 1487 | InvalidBase = BInvalid; |
| 1488 | Designator = SubobjectDesignator(getType(B)); |
| 1489 | IsNullPtr = false; |
| 1490 | AllowConstexprUnknown = false; |
| 1491 | } |
| 1492 | |
| 1493 | void setNull(ASTContext &Ctx, QualType PointerTy) { |
| 1494 | Base = (const ValueDecl *)nullptr; |
| 1495 | Offset = |
CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy));
| 1497 | InvalidBase = false; |
| 1498 | Designator = SubobjectDesignator(PointerTy->getPointeeType()); |
| 1499 | IsNullPtr = true; |
| 1500 | AllowConstexprUnknown = false; |
| 1501 | } |
| 1502 | |
| 1503 | void setInvalid(APValue::LValueBase B, unsigned I = 0) { |
set(B, /*BInvalid=*/true);
| 1505 | } |
| 1506 | |
| 1507 | std::string toString(ASTContext &Ctx, QualType T) const { |
| 1508 | APValue Printable; |
moveInto(Printable);
return Printable.getAsString(Ctx, T);
| 1511 | } |
| 1512 | |
| 1513 | private: |
| 1514 | // Check that this LValue is not based on a null pointer. If it is, produce |
| 1515 | // a diagnostic and mark the designator as invalid. |
| 1516 | template <typename GenDiagType> |
| 1517 | bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) { |
| 1518 | if (Designator.Invalid) |
| 1519 | return false; |
| 1520 | if (IsNullPtr) { |
| 1521 | GenDiag(); |
| 1522 | Designator.setInvalid(); |
| 1523 | return false; |
| 1524 | } |
| 1525 | return true; |
| 1526 | } |
| 1527 | |
| 1528 | public: |
| 1529 | bool checkNullPointer(EvalInfo &Info, const Expr *E, |
| 1530 | CheckSubobjectKind CSK) { |
return checkNullPointerDiagnosingWith([&Info, E, CSK] {
Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
| 1533 | }); |
| 1534 | } |
| 1535 | |
| 1536 | bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E, |
| 1537 | AccessKinds AK) { |
return checkNullPointerDiagnosingWith([&Info, E, AK] {
if (AK == AccessKinds::AK_Dereference)
Info.FFDiag(E, diag::note_constexpr_dereferencing_null);
else
Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
| 1543 | }); |
| 1544 | } |
| 1545 | |
| 1546 | // Check this LValue refers to an object. If not, set the designator to be |
| 1547 | // invalid and emit a diagnostic. |
| 1548 | bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) { |
| 1549 | return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) && |
| 1550 | Designator.checkSubobject(Info, E, CSK); |
| 1551 | } |
| 1552 | |
| 1553 | void addDecl(EvalInfo &Info, const Expr *E, |
| 1554 | const Decl *D, bool Virtual = false) { |
if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
| 1556 | Designator.addDeclUnchecked(D, Virtual); |
| 1557 | } |
| 1558 | void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) { |
| 1559 | if (!Designator.Entries.empty()) { |
Info.CCEDiag(E, diag::note_constexpr_unsupported_unsized_array);
| 1561 | Designator.setInvalid(); |
| 1562 | return; |
| 1563 | } |
if (checkSubobject(Info, E, CSK_ArrayToPointer)) {
| 1565 | assert(getType(Base).getNonReferenceType()->isPointerType() || |
| 1566 | getType(Base).getNonReferenceType()->isArrayType()); |
| 1567 | Designator.FirstEntryIsAnUnsizedArray = true; |
| 1568 | Designator.addUnsizedArrayUnchecked(ElemTy); |
| 1569 | } |
| 1570 | } |
| 1571 | void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) { |
if (checkSubobject(Info, E, CSK_ArrayToPointer))
| 1573 | Designator.addArrayUnchecked(CAT); |
| 1574 | } |
| 1575 | void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) { |
if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
| 1577 | Designator.addComplexUnchecked(EltTy, Imag); |
| 1578 | } |
| 1579 | void addVectorElement(EvalInfo &Info, const Expr *E, QualType EltTy, |
| 1580 | uint64_t Size, uint64_t Idx) { |
if (checkSubobject(Info, E, CSK_VectorElement))
| 1582 | Designator.addVectorElementUnchecked(EltTy, Size, Idx); |
| 1583 | } |
| 1584 | void clearIsNullPointer() { |
| 1585 | IsNullPtr = false; |
| 1586 | } |
| 1587 | void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E, |
| 1588 | const APSInt &Index, CharUnits ElementSize) { |
| 1589 | // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB, |
| 1590 | // but we're not required to diagnose it and it's valid in C++.) |
| 1591 | if (!Index) |
| 1592 | return; |
| 1593 | |
| 1594 | // Compute the new offset in the appropriate width, wrapping at 64 bits. |
| 1595 | // FIXME: When compiling for a 32-bit target, we should use 32-bit |
| 1596 | // offsets. |
| 1597 | uint64_t Offset64 = Offset.getQuantity(); |
| 1598 | uint64_t ElemSize64 = ElementSize.getQuantity(); |
uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);

if (checkNullPointer(Info, E, CSK_ArrayIndex))
Designator.adjustIndex(Info, E, Index, *this);
| 1604 | clearIsNullPointer(); |
| 1605 | } |
| 1606 | void adjustOffset(CharUnits N) { |
| 1607 | Offset += N; |
| 1608 | if (N.getQuantity()) |
| 1609 | clearIsNullPointer(); |
| 1610 | } |
| 1611 | }; |
| 1612 | |
| 1613 | struct MemberPtr { |
| 1614 | MemberPtr() {} |
| 1615 | explicit MemberPtr(const ValueDecl *Decl) |
| 1616 | : DeclAndIsDerivedMember(Decl, false) {} |
| 1617 | |
| 1618 | /// The member or (direct or indirect) field referred to by this member |
| 1619 | /// pointer, or 0 if this is a null member pointer. |
| 1620 | const ValueDecl *getDecl() const { |
| 1621 | return DeclAndIsDerivedMember.getPointer(); |
| 1622 | } |
| 1623 | /// Is this actually a member of some type derived from the relevant class? |
| 1624 | bool isDerivedMember() const { |
| 1625 | return DeclAndIsDerivedMember.getInt(); |
| 1626 | } |
| 1627 | /// Get the class which the declaration actually lives in. |
| 1628 | const CXXRecordDecl *getContainingRecord() const { |
return cast<CXXRecordDecl>(
DeclAndIsDerivedMember.getPointer()->getDeclContext());
| 1631 | } |
| 1632 | |
| 1633 | void moveInto(APValue &V) const { |
| 1634 | V = APValue(getDecl(), isDerivedMember(), Path); |
| 1635 | } |
| 1636 | void setFrom(const APValue &V) { |
| 1637 | assert(V.isMemberPointer()); |
| 1638 | DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl()); |
| 1639 | DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember()); |
| 1640 | Path.clear(); |
llvm::append_range(Path, V.getMemberPointerPath());
| 1642 | } |
| 1643 | |
| 1644 | /// DeclAndIsDerivedMember - The member declaration, and a flag indicating |
| 1645 | /// whether the member is a member of some class derived from the class type |
| 1646 | /// of the member pointer. |
| 1647 | llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember; |
| 1648 | /// Path - The path of base/derived classes from the member declaration's |
| 1649 | /// class (exclusive) to the class type of the member pointer (inclusive). |
| 1650 | SmallVector<const CXXRecordDecl*, 4> Path; |
| 1651 | |
| 1652 | /// Perform a cast towards the class of the Decl (either up or down the |
| 1653 | /// hierarchy). |
| 1654 | bool castBack(const CXXRecordDecl *Class) { |
| 1655 | assert(!Path.empty()); |
| 1656 | const CXXRecordDecl *Expected; |
| 1657 | if (Path.size() >= 2) |
| 1658 | Expected = Path[Path.size() - 2]; |
| 1659 | else |
| 1660 | Expected = getContainingRecord(); |
| 1661 | if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) { |
| 1662 | // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*), |
| 1663 | // if B does not contain the original member and is not a base or |
| 1664 | // derived class of the class containing the original member, the result |
| 1665 | // of the cast is undefined. |
| 1666 | // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to |
| 1667 | // (D::*). We consider that to be a language defect. |
| 1668 | return false; |
| 1669 | } |
| 1670 | Path.pop_back(); |
| 1671 | return true; |
| 1672 | } |
| 1673 | /// Perform a base-to-derived member pointer cast. |
| 1674 | bool castToDerived(const CXXRecordDecl *Derived) { |
| 1675 | if (!getDecl()) |
| 1676 | return true; |
| 1677 | if (!isDerivedMember()) { |
Path.push_back(Derived);
| 1679 | return true; |
| 1680 | } |
if (!castBack(Derived))
| 1682 | return false; |
| 1683 | if (Path.empty()) |
| 1684 | DeclAndIsDerivedMember.setInt(false); |
| 1685 | return true; |
| 1686 | } |
| 1687 | /// Perform a derived-to-base member pointer cast. |
| 1688 | bool castToBase(const CXXRecordDecl *Base) { |
| 1689 | if (!getDecl()) |
| 1690 | return true; |
| 1691 | if (Path.empty()) |
| 1692 | DeclAndIsDerivedMember.setInt(true); |
| 1693 | if (isDerivedMember()) { |
Path.push_back(Base);
| 1695 | return true; |
| 1696 | } |
return castBack(Base);
| 1698 | } |
| 1699 | }; |
| 1700 | |
| 1701 | /// Compare two member pointers, which are assumed to be of the same type. |
| 1702 | static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) { |
| 1703 | if (!LHS.getDecl() || !RHS.getDecl()) |
| 1704 | return !LHS.getDecl() && !RHS.getDecl(); |
| 1705 | if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl()) |
| 1706 | return false; |
| 1707 | return LHS.Path == RHS.Path; |
| 1708 | } |
| 1709 | } |
| 1710 | |
| 1711 | void SubobjectDesignator::adjustIndex(EvalInfo &Info, const Expr *E, APSInt N, |
| 1712 | const LValue &LV) { |
| 1713 | if (Invalid || !N) |
| 1714 | return; |
uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
| 1716 | if (isMostDerivedAnUnsizedArray()) { |
| 1717 | diagnoseUnsizedArrayPointerArithmetic(Info, E); |
| 1718 | // Can't verify -- trust that the user is doing the right thing (or if |
| 1719 | // not, trust that the caller will catch the bad behavior). |
| 1720 | // FIXME: Should we reject if this overflows, at least? |
| 1721 | Entries.back() = |
PathEntry::ArrayIndex(Entries.back().getAsArrayIndex() + TruncatedN);
| 1723 | return; |
| 1724 | } |
| 1725 | |
| 1726 | // [expr.add]p4: For the purposes of these operators, a pointer to a |
| 1727 | // nonarray object behaves the same as a pointer to the first element of |
| 1728 | // an array of length one with the type of the object as its element type. |
| 1729 | bool IsArray = |
| 1730 | MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement; |
| 1731 | uint64_t ArrayIndex = |
| 1732 | IsArray ? Entries.back().getAsArrayIndex() : (uint64_t)IsOnePastTheEnd; |
| 1733 | uint64_t ArraySize = IsArray ? getMostDerivedArraySize() : (uint64_t)1; |
| 1734 | |
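// The adjusted index must land in [0, ArraySize]; an index equal to
// ArraySize designates the one-past-the-end position.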
| 1735 | if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) { |
| 1736 | if (!Info.checkingPotentialConstantExpression() || |
| 1737 | !LV.AllowConstexprUnknown) { |
| 1738 | // Calculate the actual index in a wide enough type, so we can include |
| 1739 | // it in the note. |
N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
(llvm::APInt &)N += ArrayIndex;
assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
| 1743 | diagnosePointerArithmetic(Info, E, N); |
| 1744 | } |
| 1745 | setInvalid(); |
| 1746 | return; |
| 1747 | } |
| 1748 | |
| 1749 | ArrayIndex += TruncatedN; |
assert(ArrayIndex <= ArraySize &&
"bounds check succeeded for out-of-bounds index");
| 1752 | |
| 1753 | if (IsArray) |
Entries.back() = PathEntry::ArrayIndex(ArrayIndex);
| 1755 | else |
| 1756 | IsOnePastTheEnd = (ArrayIndex != 0); |
| 1757 | } |
| 1758 | |
| 1759 | static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E); |
| 1760 | static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, |
| 1761 | const LValue &This, const Expr *E, |
| 1762 | bool AllowNonLiteralTypes = false); |
| 1763 | static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info, |
| 1764 | bool InvalidBaseOK = false); |
| 1765 | static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info, |
| 1766 | bool InvalidBaseOK = false); |
| 1767 | static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result, |
| 1768 | EvalInfo &Info); |
| 1769 | static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info); |
| 1770 | static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info); |
| 1771 | static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result, |
| 1772 | EvalInfo &Info); |
| 1773 | static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info); |
| 1774 | static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info); |
| 1775 | static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result, |
| 1776 | EvalInfo &Info); |
| 1777 | static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result); |
| 1778 | static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result, |
| 1779 | EvalInfo &Info, |
| 1780 | std::string *StringResult = nullptr); |
| 1781 | |
| 1782 | /// Evaluate an integer or fixed point expression into an APResult. |
| 1783 | static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result, |
| 1784 | EvalInfo &Info); |
| 1785 | |
| 1786 | /// Evaluate only a fixed point expression into an APResult. |
| 1787 | static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result, |
| 1788 | EvalInfo &Info); |
| 1789 | |
| 1790 | //===----------------------------------------------------------------------===// |
| 1791 | // Misc utilities |
| 1792 | //===----------------------------------------------------------------------===// |
| 1793 | |
| 1794 | /// Negate an APSInt in place, converting it to a signed form if necessary, and |
| 1795 | /// preserving its value (by extending by up to one bit as needed). |
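/// For example, negating the unsigned 8-bit value 255 first widens it to a
/// 9-bit signed value, so the result -255 remains representable.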
| 1796 | static void negateAsSigned(APSInt &Int) { |
| 1797 | if (Int.isUnsigned() || Int.isMinSignedValue()) { |
Int = Int.extend(Int.getBitWidth() + 1);
| 1799 | Int.setIsSigned(true); |
| 1800 | } |
| 1801 | Int = -Int; |
| 1802 | } |
| 1803 | |
| 1804 | template<typename KeyT> |
| 1805 | APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T, |
| 1806 | ScopeKind Scope, LValue &LV) { |
| 1807 | unsigned Version = getTempVersion(); |
| 1808 | APValue::LValueBase Base(Key, Index, Version); |
LV.set(Base);
| 1810 | return createLocal(Base, Key, T, Scope); |
| 1811 | } |
| 1812 | |
| 1813 | /// Allocate storage for a parameter of a function call made in this frame. |
| 1814 | APValue &CallStackFrame::createParam(CallRef Args, const ParmVarDecl *PVD, |
| 1815 | LValue &LV) { |
assert(Args.CallIndex == Index && "creating parameter in wrong frame");
| 1817 | APValue::LValueBase Base(PVD, Index, Args.Version); |
LV.set(Base);
| 1819 | // We always destroy parameters at the end of the call, even if we'd allow |
| 1820 | // them to live to the end of the full-expression at runtime, in order to |
| 1821 | // give portable results and match other compilers. |
return createLocal(Base, PVD, PVD->getType(), ScopeKind::Call);
| 1823 | } |
| 1824 | |
| 1825 | APValue &CallStackFrame::createLocal(APValue::LValueBase Base, const void *Key, |
| 1826 | QualType T, ScopeKind Scope) { |
assert(Base.getCallIndex() == Index && "lvalue for wrong frame");
| 1828 | unsigned Version = Base.getVersion(); |
| 1829 | APValue &Result = Temporaries[MapKeyTy(Key, Version)]; |
assert(Result.isAbsent() && "local created multiple times");
| 1831 | |
| 1832 | // If we're creating a local immediately in the operand of a speculative |
| 1833 | // evaluation, don't register a cleanup to be run outside the speculative |
| 1834 | // evaluation context, since we won't actually be able to initialize this |
| 1835 | // object. |
| 1836 | if (Index <= Info.SpeculativeEvaluationDepth) { |
| 1837 | if (T.isDestructedType()) |
| 1838 | Info.noteSideEffect(); |
| 1839 | } else { |
Info.CleanupStack.push_back(Cleanup(&Result, Base, T, Scope));
| 1841 | } |
| 1842 | return Result; |
| 1843 | } |
| 1844 | |
| 1845 | APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) { |
| 1846 | if (NumHeapAllocs > DynamicAllocLValue::getMaxIndex()) { |
FFDiag(E, diag::note_constexpr_heap_alloc_limit_exceeded);
| 1848 | return nullptr; |
| 1849 | } |
| 1850 | |
| 1851 | DynamicAllocLValue DA(NumHeapAllocs++); |
LV.set(APValue::LValueBase::getDynamicAlloc(DA, T));
auto Result = HeapAllocs.emplace(std::piecewise_construct,
std::forward_as_tuple(DA), std::tuple<>());
assert(Result.second && "reused a heap alloc index?");
Result.first->second.AllocExpr = E;
| 1857 | return &Result.first->second.Value; |
| 1858 | } |
| 1859 | |
| 1860 | /// Produce a string describing the given constexpr call. |
| 1861 | void CallStackFrame::describe(raw_ostream &Out) const { |
| 1862 | bool IsMemberCall = false; |
| 1863 | bool ExplicitInstanceParam = false; |
if (const auto *MD = dyn_cast<CXXMethodDecl>(Callee)) {
IsMemberCall = !isa<CXXConstructorDecl>(MD) && !MD->isStatic();
ExplicitInstanceParam = MD->isExplicitObjectMemberFunction();
}

if (!IsMemberCall)
Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(),
/*Qualified=*/false);

if (This && IsMemberCall) {
if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(CallExpr)) {
const Expr *Object = MCE->getImplicitObjectArgument();
Object->printPretty(Out, /*Helper=*/nullptr, Info.Ctx.getPrintingPolicy(),
/*Indentation=*/0);
if (Object->getType()->isPointerType())
Out << "->";
else
Out << ".";
} else if (const auto *OCE =
dyn_cast_if_present<CXXOperatorCallExpr>(CallExpr)) {
OCE->getArg(0)->printPretty(Out, /*Helper=*/nullptr,
Info.Ctx.getPrintingPolicy(),
/*Indentation=*/0);
Out << ".";
} else {
APValue Val;
This->moveInto(Val);
Val.printPretty(
Out, Info.Ctx,
Info.Ctx.getLValueReferenceType(This->Designator.MostDerivedType));
Out << ".";
}
Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(),
/*Qualified=*/false);
}

Out << '(';

llvm::ListSeparator Comma;
for (const ParmVarDecl *Param :
Callee->parameters().slice(ExplicitInstanceParam)) {
Out << Comma;
const APValue *V = Info.getParamSlot(Arguments, Param);
if (V)
V->printPretty(Out, Info.Ctx, Param->getType());
else
Out << "<...>";
| 1911 | } |
| 1912 | |
| 1913 | Out << ')'; |
| 1914 | } |
| 1915 | |
| 1916 | /// Evaluate an expression to see if it had side-effects, and discard its |
| 1917 | /// result. |
| 1918 | /// \return \c true if the caller should keep evaluating. |
| 1919 | static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) { |
| 1920 | assert(!E->isValueDependent()); |
| 1921 | APValue Scratch; |
if (!Evaluate(Scratch, Info, E))
| 1923 | // We don't need the value, but we might have skipped a side effect here. |
| 1924 | return Info.noteSideEffect(); |
| 1925 | return true; |
| 1926 | } |
| 1927 | |
| 1928 | /// Should this call expression be treated as forming an opaque constant? |
| 1929 | static bool IsOpaqueConstantCall(const CallExpr *E) { |
| 1930 | unsigned Builtin = E->getBuiltinCallee(); |
| 1931 | return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString || |
| 1932 | Builtin == Builtin::BI__builtin___NSStringMakeConstantString || |
| 1933 | Builtin == Builtin::BI__builtin_ptrauth_sign_constant || |
| 1934 | Builtin == Builtin::BI__builtin_function_start); |
| 1935 | } |
| 1936 | |
| 1937 | static bool IsOpaqueConstantCall(const LValue &LVal) { |
| 1938 | const auto *BaseExpr = |
llvm::dyn_cast_if_present<CallExpr>(LVal.Base.dyn_cast<const Expr *>());
return BaseExpr && IsOpaqueConstantCall(BaseExpr);
| 1941 | } |
| 1942 | |
| 1943 | static bool IsGlobalLValue(APValue::LValueBase B) { |
| 1944 | // C++11 [expr.const]p3 An address constant expression is a prvalue core |
| 1945 | // constant expression of pointer type that evaluates to... |
| 1946 | |
| 1947 | // ... a null pointer value, or a prvalue core constant expression of type |
| 1948 | // std::nullptr_t. |
| 1949 | if (!B) |
| 1950 | return true; |
| 1951 | |
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
// ... the address of an object with static storage duration,
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
if (isa<TemplateParamObjectDecl>(D))
return true;
// ... the address of a function,
// ... the address of a GUID [MS extension],
// ... the address of an unnamed global constant
return isa<FunctionDecl, MSGuidDecl, UnnamedGlobalConstantDecl>(D);
| 1962 | } |
| 1963 | |
| 1964 | if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>()) |
| 1965 | return true; |
| 1966 | |
| 1967 | const Expr *E = B.get<const Expr*>(); |
| 1968 | switch (E->getStmtClass()) { |
| 1969 | default: |
| 1970 | return false; |
| 1971 | case Expr::CompoundLiteralExprClass: { |
const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
| 1973 | return CLE->isFileScope() && CLE->isLValue(); |
| 1974 | } |
| 1975 | case Expr::MaterializeTemporaryExprClass: |
| 1976 | // A materialized temporary might have been lifetime-extended to static |
| 1977 | // storage duration. |
return cast<MaterializeTemporaryExpr>(E)->getStorageDuration() == SD_Static;
| 1979 | // A string literal has static storage duration. |
| 1980 | case Expr::StringLiteralClass: |
| 1981 | case Expr::PredefinedExprClass: |
| 1982 | case Expr::ObjCStringLiteralClass: |
| 1983 | case Expr::ObjCEncodeExprClass: |
| 1984 | return true; |
| 1985 | case Expr::ObjCBoxedExprClass: |
return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
| 1987 | case Expr::CallExprClass: |
return IsOpaqueConstantCall(cast<CallExpr>(E));
| 1989 | // For GCC compatibility, &&label has static storage duration. |
| 1990 | case Expr::AddrLabelExprClass: |
| 1991 | return true; |
| 1992 | // A Block literal expression may be used as the initialization value for |
| 1993 | // Block variables at global or local static scope. |
| 1994 | case Expr::BlockExprClass: |
return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
| 1996 | // The APValue generated from a __builtin_source_location will be emitted as a |
| 1997 | // literal. |
| 1998 | case Expr::SourceLocExprClass: |
| 1999 | return true; |
| 2000 | case Expr::ImplicitValueInitExprClass: |
| 2001 | // FIXME: |
| 2002 | // We can never form an lvalue with an implicit value initialization as its |
| 2003 | // base through expression evaluation, so these only appear in one case: the |
| 2004 | // implicit variable declaration we invent when checking whether a constexpr |
| 2005 | // constructor can produce a constant expression. We must assume that such |
| 2006 | // an expression might be a global lvalue. |
| 2007 | return true; |
| 2008 | } |
| 2009 | } |
| 2010 | |
| 2011 | static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) { |
| 2012 | return LVal.Base.dyn_cast<const ValueDecl*>(); |
| 2013 | } |
| 2014 | |
| 2015 | // Information about an LValueBase that is some kind of string. |
| 2016 | struct LValueBaseString { |
| 2017 | std::string ObjCEncodeStorage; |
| 2018 | StringRef Bytes; |
| 2019 | int CharWidth; |
| 2020 | }; |
| 2021 | |
| 2022 | // Gets the lvalue base of LVal as a string. |
| 2023 | static bool GetLValueBaseAsString(const EvalInfo &Info, const LValue &LVal, |
| 2024 | LValueBaseString &AsString) { |
| 2025 | const auto *BaseExpr = LVal.Base.dyn_cast<const Expr *>(); |
| 2026 | if (!BaseExpr) |
| 2027 | return false; |
| 2028 | |
| 2029 | // For ObjCEncodeExpr, we need to compute and store the string. |
if (const auto *EE = dyn_cast<ObjCEncodeExpr>(BaseExpr)) {
Info.Ctx.getObjCEncodingForType(EE->getEncodedType(),
AsString.ObjCEncodeStorage);
| 2033 | AsString.Bytes = AsString.ObjCEncodeStorage; |
| 2034 | AsString.CharWidth = 1; |
| 2035 | return true; |
| 2036 | } |
| 2037 | |
| 2038 | // Otherwise, we have a StringLiteral. |
const auto *Lit = dyn_cast<StringLiteral>(BaseExpr);
if (const auto *PE = dyn_cast<PredefinedExpr>(BaseExpr))
Lit = PE->getFunctionName();
| 2042 | |
| 2043 | if (!Lit) |
| 2044 | return false; |
| 2045 | |
| 2046 | AsString.Bytes = Lit->getBytes(); |
| 2047 | AsString.CharWidth = Lit->getCharByteWidth(); |
| 2048 | return true; |
| 2049 | } |
| 2050 | |
| 2051 | // Determine whether two string literals potentially overlap. This will be the |
| 2052 | // case if they agree on the values of all the bytes on the overlapping region |
| 2053 | // between them. |
| 2054 | // |
| 2055 | // The overlapping region is the portion of the two string literals that must |
| 2056 | // overlap in memory if the pointers actually point to the same address at |
| 2057 | // runtime. For example, if LHS is "abcdef" + 3 and RHS is "cdef\0gh" + 1 then |
| 2058 | // the overlapping region is "cdef\0", which in this case does agree, so the |
| 2059 | // strings are potentially overlapping. Conversely, for "foobar" + 3 versus |
| 2060 | // "bazbar" + 3, the overlapping region contains all of both strings, so they |
| 2061 | // are not potentially overlapping, even though they agree from the given |
| 2062 | // addresses onwards. |
| 2063 | // |
| 2064 | // See open core issue CWG2765 which is discussing the desired rule here. |
| 2065 | static bool ArePotentiallyOverlappingStringLiterals(const EvalInfo &Info, |
| 2066 | const LValue &LHS, |
| 2067 | const LValue &RHS) { |
| 2068 | LValueBaseString LHSString, RHSString; |
if (!GetLValueBaseAsString(Info, LHS, LHSString) ||
!GetLValueBaseAsString(Info, RHS, RHSString))
| 2071 | return false; |
| 2072 | |
| 2073 | // This is the byte offset to the location of the first character of LHS |
| 2074 | // within RHS. We don't need to look at the characters of one string that |
| 2075 | // would appear before the start of the other string if they were merged. |
| 2076 | CharUnits Offset = RHS.Offset - LHS.Offset; |
| 2077 | if (Offset.isNegative()) { |
| 2078 | if (LHSString.Bytes.size() < (size_t)-Offset.getQuantity()) |
| 2079 | return false; |
LHSString.Bytes = LHSString.Bytes.drop_front(-Offset.getQuantity());
| 2081 | } else { |
| 2082 | if (RHSString.Bytes.size() < (size_t)Offset.getQuantity()) |
| 2083 | return false; |
RHSString.Bytes = RHSString.Bytes.drop_front(Offset.getQuantity());
| 2085 | } |
| 2086 | |
| 2087 | bool LHSIsLonger = LHSString.Bytes.size() > RHSString.Bytes.size(); |
| 2088 | StringRef Longer = LHSIsLonger ? LHSString.Bytes : RHSString.Bytes; |
| 2089 | StringRef Shorter = LHSIsLonger ? RHSString.Bytes : LHSString.Bytes; |
| 2090 | int ShorterCharWidth = (LHSIsLonger ? RHSString : LHSString).CharWidth; |
| 2091 | |
| 2092 | // The null terminator isn't included in the string data, so check for it |
| 2093 | // manually. If the longer string doesn't have a null terminator where the |
| 2094 | // shorter string ends, they aren't potentially overlapping. |
for (int NullByte : llvm::seq(ShorterCharWidth)) {
| 2096 | if (Shorter.size() + NullByte >= Longer.size()) |
| 2097 | break; |
| 2098 | if (Longer[Shorter.size() + NullByte]) |
| 2099 | return false; |
| 2100 | } |
| 2101 | |
| 2102 | // Otherwise, they're potentially overlapping if and only if the overlapping |
| 2103 | // region is the same. |
return Shorter == Longer.take_front(Shorter.size());
| 2105 | } |
| 2106 | |
| 2107 | static bool IsWeakLValue(const LValue &Value) { |
const ValueDecl *Decl = GetLValueBaseDecl(Value);
| 2109 | return Decl && Decl->isWeak(); |
| 2110 | } |
| 2111 | |
| 2112 | static bool isZeroSized(const LValue &Value) { |
const ValueDecl *Decl = GetLValueBaseDecl(Value);
if (isa_and_nonnull<VarDecl>(Decl)) {
QualType Ty = Decl->getType();
if (Ty->isArrayType())
return Ty->isIncompleteType() ||
Decl->getASTContext().getTypeSize(Ty) == 0;
| 2119 | } |
| 2120 | return false; |
| 2121 | } |
| 2122 | |
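// Do these two lvalues refer to storage with the same base object, created
// by the same call (if any) and the same version of that storage?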
| 2123 | static bool HasSameBase(const LValue &A, const LValue &B) { |
| 2124 | if (!A.getLValueBase()) |
| 2125 | return !B.getLValueBase(); |
| 2126 | if (!B.getLValueBase()) |
| 2127 | return false; |
| 2128 | |
| 2129 | if (A.getLValueBase().getOpaqueValue() != |
| 2130 | B.getLValueBase().getOpaqueValue()) |
| 2131 | return false; |
| 2132 | |
| 2133 | return A.getLValueCallIndex() == B.getLValueCallIndex() && |
| 2134 | A.getLValueVersion() == B.getLValueVersion(); |
| 2135 | } |
| 2136 | |
| 2137 | static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) { |
assert(Base && "no location for a null lvalue");
| 2139 | const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>(); |
| 2140 | |
| 2141 | // For a parameter, find the corresponding call stack frame (if it still |
| 2142 | // exists), and point at the parameter of the function definition we actually |
| 2143 | // invoked. |
if (auto *PVD = dyn_cast_or_null<ParmVarDecl>(VD)) {
| 2145 | unsigned Idx = PVD->getFunctionScopeIndex(); |
| 2146 | for (CallStackFrame *F = Info.CurrentCall; F; F = F->Caller) { |
| 2147 | if (F->Arguments.CallIndex == Base.getCallIndex() && |
| 2148 | F->Arguments.Version == Base.getVersion() && F->Callee && |
| 2149 | Idx < F->Callee->getNumParams()) { |
VD = F->Callee->getParamDecl(Idx);
| 2151 | break; |
| 2152 | } |
| 2153 | } |
| 2154 | } |
| 2155 | |
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
else if (const Expr *E = Base.dyn_cast<const Expr*>())
Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
// FIXME: Produce a note for dangling pointers too.
if (std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA))
Info.Note((*Alloc)->AllocExpr->getExprLoc(),
diag::note_constexpr_dynamic_alloc_here);
| 2165 | } |
| 2166 | |
| 2167 | // We have no information to show for a typeid(T) object. |
| 2168 | } |
| 2169 | |
| 2170 | enum class CheckEvaluationResultKind { |
| 2171 | ConstantExpression, |
| 2172 | FullyInitialized, |
| 2173 | }; |
| 2174 | |
/// Materialized temporaries that we've already checked to determine if they're
/// initialized by a constant expression.
| 2177 | using CheckedTemporaries = |
| 2178 | llvm::SmallPtrSet<const MaterializeTemporaryExpr *, 8>; |
| 2179 | |
| 2180 | static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, |
| 2181 | EvalInfo &Info, SourceLocation DiagLoc, |
| 2182 | QualType Type, const APValue &Value, |
| 2183 | ConstantExprKind Kind, |
| 2184 | const FieldDecl *SubobjectDecl, |
| 2185 | CheckedTemporaries &CheckedTemps); |
| 2186 | |
| 2187 | /// Check that this reference or pointer core constant expression is a valid |
| 2188 | /// value for an address or reference constant expression. Return true if we |
| 2189 | /// can fold this expression, whether or not it's a constant expression. |
| 2190 | static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc, |
| 2191 | QualType Type, const LValue &LVal, |
| 2192 | ConstantExprKind Kind, |
| 2193 | CheckedTemporaries &CheckedTemps) { |
| 2194 | bool IsReferenceType = Type->isReferenceType(); |
| 2195 | |
| 2196 | APValue::LValueBase Base = LVal.getLValueBase(); |
| 2197 | const SubobjectDesignator &Designator = LVal.getLValueDesignator(); |
| 2198 | |
| 2199 | const Expr *BaseE = Base.dyn_cast<const Expr *>(); |
| 2200 | const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl*>(); |
| 2201 | |
| 2202 | // Additional restrictions apply in a template argument. We only enforce the |
| 2203 | // C++20 restrictions here; additional syntactic and semantic restrictions |
| 2204 | // are applied elsewhere. |
| 2205 | if (isTemplateArgument(Kind)) { |
| 2206 | int InvalidBaseKind = -1; |
| 2207 | StringRef Ident; |
| 2208 | if (Base.is<TypeInfoLValue>()) |
| 2209 | InvalidBaseKind = 0; |
else if (isa_and_nonnull<StringLiteral>(BaseE))
InvalidBaseKind = 1;
else if (isa_and_nonnull<MaterializeTemporaryExpr>(BaseE) ||
isa_and_nonnull<LifetimeExtendedTemporaryDecl>(BaseVD))
InvalidBaseKind = 2;
else if (auto *PE = dyn_cast_or_null<PredefinedExpr>(BaseE)) {
InvalidBaseKind = 3;
Ident = PE->getIdentKindName();
| 2218 | } |
| 2219 | |
| 2220 | if (InvalidBaseKind != -1) { |
Info.FFDiag(Loc, diag::note_constexpr_invalid_template_arg)
| 2222 | << IsReferenceType << !Designator.Entries.empty() << InvalidBaseKind |
| 2223 | << Ident; |
| 2224 | return false; |
| 2225 | } |
| 2226 | } |
| 2227 | |
if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD);
FD && FD->isImmediateFunction()) {
Info.FFDiag(Loc, diag::note_consteval_address_accessible)
<< !Type->isAnyPointerType();
Info.Note(FD->getLocation(), diag::note_declared_at);
| 2233 | return false; |
| 2234 | } |
| 2235 | |
| 2236 | // Check that the object is a global. Note that the fake 'this' object we |
| 2237 | // manufacture when checking potential constant expressions is conservatively |
| 2238 | // assumed to be global here. |
if (!IsGlobalLValue(Base)) {
if (Info.getLangOpts().CPlusPlus11) {
Info.FFDiag(Loc, diag::note_constexpr_non_global, 1)
<< IsReferenceType << !Designator.Entries.empty() << !!BaseVD
<< BaseVD;
auto *VarD = dyn_cast_or_null<VarDecl>(BaseVD);
if (VarD && VarD->isConstexpr()) {
// Non-static local constexpr variables have unintuitive semantics:
//   constexpr int a = 1;
//   constexpr const int *p = &a;
// ... is invalid because the address of 'a' is not constant. Suggest
// adding a 'static' in this case.
Info.Note(VarD->getLocation(), diag::note_constexpr_not_static)
<< VarD
<< FixItHint::CreateInsertion(VarD->getBeginLoc(), "static ");
| 2254 | } else { |
| 2255 | NoteLValueLocation(Info, Base); |
| 2256 | } |
| 2257 | } else { |
| 2258 | Info.FFDiag(Loc); |
| 2259 | } |
| 2260 | // Don't allow references to temporaries to escape. |
| 2261 | return false; |
| 2262 | } |
assert((Info.checkingPotentialConstantExpression() ||
LVal.getLValueCallIndex() == 0) &&
"have call index for global lvalue");
| 2266 | |
| 2267 | if (LVal.allowConstexprUnknown()) { |
| 2268 | if (BaseVD) { |
Info.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << BaseVD;
| 2270 | NoteLValueLocation(Info, Base); |
| 2271 | } else { |
| 2272 | Info.FFDiag(Loc); |
| 2273 | } |
| 2274 | return false; |
| 2275 | } |
| 2276 | |
| 2277 | if (Base.is<DynamicAllocLValue>()) { |
Info.FFDiag(Loc, diag::note_constexpr_dynamic_alloc)
| 2279 | << IsReferenceType << !Designator.Entries.empty(); |
| 2280 | NoteLValueLocation(Info, Base); |
| 2281 | return false; |
| 2282 | } |
| 2283 | |
| 2284 | if (BaseVD) { |
if (const VarDecl *Var = dyn_cast<const VarDecl>(BaseVD)) {
| 2286 | // Check if this is a thread-local variable. |
| 2287 | if (Var->getTLSKind()) |
| 2288 | // FIXME: Diagnostic! |
| 2289 | return false; |
| 2290 | |
| 2291 | // A dllimport variable never acts like a constant, unless we're |
| 2292 | // evaluating a value for use only in name mangling, and unless it's a |
| 2293 | // static local. For the latter case, we'd still need to evaluate the |
| 2294 | // constant expression in case we're inside a (inlined) function. |
| 2295 | if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>() && |
| 2296 | !Var->isStaticLocal()) |
| 2297 | return false; |
| 2298 | |
| 2299 | // In CUDA/HIP device compilation, only device side variables have |
| 2300 | // constant addresses. |
| 2301 | if (Info.getLangOpts().CUDA && Info.getLangOpts().CUDAIsDevice && |
| 2302 | Info.Ctx.CUDAConstantEvalCtx.NoWrongSidedVars) { |
| 2303 | if ((!Var->hasAttr<CUDADeviceAttr>() && |
| 2304 | !Var->hasAttr<CUDAConstantAttr>() && |
| 2305 | !Var->getType()->isCUDADeviceBuiltinSurfaceType() && |
| 2306 | !Var->getType()->isCUDADeviceBuiltinTextureType()) || |
| 2307 | Var->hasAttr<HIPManagedAttr>()) |
| 2308 | return false; |
| 2309 | } |
| 2310 | } |
if (const auto *FD = dyn_cast<const FunctionDecl>(BaseVD)) {
| 2312 | // __declspec(dllimport) must be handled very carefully: |
| 2313 | // We must never initialize an expression with the thunk in C++. |
| 2314 | // Doing otherwise would allow the same id-expression to yield |
| 2315 | // different addresses for the same function in different translation |
| 2316 | // units. However, this means that we must dynamically initialize the |
| 2317 | // expression with the contents of the import address table at runtime. |
| 2318 | // |
| 2319 | // The C language has no notion of ODR; furthermore, it has no notion of |
| 2320 | // dynamic initialization. This means that we are permitted to |
| 2321 | // perform initialization with the address of the thunk. |
| 2322 | if (Info.getLangOpts().CPlusPlus && !isForManglingOnly(Kind) && |
| 2323 | FD->hasAttr<DLLImportAttr>()) |
| 2324 | // FIXME: Diagnostic! |
| 2325 | return false; |
| 2326 | } |
} else if (const auto *MTE =
dyn_cast_or_null<MaterializeTemporaryExpr>(BaseE)) {
if (CheckedTemps.insert(MTE).second) {
QualType TempType = getType(Base);
if (TempType.isDestructedType()) {
Info.FFDiag(MTE->getExprLoc(),
diag::note_constexpr_unsupported_temporary_nontrivial_dtor)
<< TempType;
return false;
}

APValue *V = MTE->getOrCreateValue(/*MayCreate=*/false);
assert(V && "evaluation result refers to uninitialized temporary");
if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, MTE->getExprLoc(), TempType, *V, Kind,
/*SubobjectDecl=*/nullptr, CheckedTemps))
| 2343 | return false; |
| 2344 | } |
| 2345 | } |
| 2346 | |
| 2347 | // Allow address constant expressions to be past-the-end pointers. This is |
| 2348 | // an extension: the standard requires them to point to an object. |
| 2349 | if (!IsReferenceType) |
| 2350 | return true; |
| 2351 | |
| 2352 | // A reference constant expression must refer to an object. |
| 2353 | if (!Base) { |
| 2354 | // FIXME: diagnostic |
| 2355 | Info.CCEDiag(Loc); |
| 2356 | return true; |
| 2357 | } |
| 2358 | |
| 2359 | // Does this refer one past the end of some object? |
| 2360 | if (!Designator.Invalid && Designator.isOnePastTheEnd()) { |
Info.FFDiag(Loc, diag::note_constexpr_past_end, 1)
| 2362 | << !Designator.Entries.empty() << !!BaseVD << BaseVD; |
| 2363 | NoteLValueLocation(Info, Base); |
| 2364 | } |
| 2365 | |
| 2366 | return true; |
| 2367 | } |
| 2368 | |
| 2369 | /// Member pointers are constant expressions unless they point to a |
| 2370 | /// non-virtual dllimport member function. |
| 2371 | static bool CheckMemberPointerConstantExpression(EvalInfo &Info, |
| 2372 | SourceLocation Loc, |
| 2373 | QualType Type, |
| 2374 | const APValue &Value, |
| 2375 | ConstantExprKind Kind) { |
| 2376 | const ValueDecl *Member = Value.getMemberPointerDecl(); |
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
| 2378 | if (!FD) |
| 2379 | return true; |
| 2380 | if (FD->isImmediateFunction()) { |
Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0;
Info.Note(FD->getLocation(), diag::note_declared_at);
| 2383 | return false; |
| 2384 | } |
| 2385 | return isForManglingOnly(Kind) || FD->isVirtual() || |
| 2386 | !FD->hasAttr<DLLImportAttr>(); |
| 2387 | } |
| 2388 | |
| 2389 | /// Check that this core constant expression is of literal type, and if not, |
| 2390 | /// produce an appropriate diagnostic. |
| 2391 | static bool CheckLiteralType(EvalInfo &Info, const Expr *E, |
| 2392 | const LValue *This = nullptr) { |
| 2393 | // The restriction to literal types does not exist in C++23 anymore. |
| 2394 | if (Info.getLangOpts().CPlusPlus23) |
| 2395 | return true; |
| 2396 | |
if (!E->isPRValue() || E->getType()->isLiteralType(Info.Ctx))
| 2398 | return true; |
| 2399 | |
| 2400 | // C++1y: A constant initializer for an object o [...] may also invoke |
| 2401 | // constexpr constructors for o and its subobjects even if those objects |
| 2402 | // are of non-literal class types. |
| 2403 | // |
| 2404 | // C++11 missed this detail for aggregates, so classes like this: |
| 2405 | // struct foo_t { union { int i; volatile int j; } u; }; |
| 2406 | // are not (obviously) initializable like so: |
| 2407 | // __attribute__((__require_constant_initialization__)) |
| 2408 | // static const foo_t x = {{0}}; |
| 2409 | // because "i" is a subobject with non-literal initialization (due to the |
| 2410 | // volatile member of the union). See: |
| 2411 | // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1677 |
| 2412 | // Therefore, we use the C++1y behavior. |
| 2413 | if (This && Info.EvaluatingDecl == This->getLValueBase()) |
| 2414 | return true; |
| 2415 | |
| 2416 | // Prvalue constant expressions must be of literal types. |
| 2417 | if (Info.getLangOpts().CPlusPlus11) |
Info.FFDiag(E, diag::note_constexpr_nonliteral)
| 2419 | << E->getType(); |
| 2420 | else |
Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
| 2422 | return false; |
| 2423 | } |
| 2424 | |
| 2425 | static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, |
| 2426 | EvalInfo &Info, SourceLocation DiagLoc, |
| 2427 | QualType Type, const APValue &Value, |
| 2428 | ConstantExprKind Kind, |
| 2429 | const FieldDecl *SubobjectDecl, |
| 2430 | CheckedTemporaries &CheckedTemps) { |
| 2431 | if (!Value.hasValue()) { |
| 2432 | if (SubobjectDecl) { |
Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
<< /*(name)*/ 1 << SubobjectDecl;
Info.Note(SubobjectDecl->getLocation(),
diag::note_constexpr_subobject_declared_here);
} else {
Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
| 2439 | << /*of type*/ 0 << Type; |
| 2440 | } |
| 2441 | return false; |
| 2442 | } |
| 2443 | |
| 2444 | // We allow _Atomic(T) to be initialized from anything that T can be |
| 2445 | // initialized from. |
| 2446 | if (const AtomicType *AT = Type->getAs<AtomicType>()) |
| 2447 | Type = AT->getValueType(); |
| 2448 | |
| 2449 | // Core issue 1454: For a literal constant expression of array or class type, |
| 2450 | // each subobject of its value shall have been initialized by a constant |
| 2451 | // expression. |
| 2452 | if (Value.isArray()) { |
| 2453 | QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType(); |
| 2454 | for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) { |
if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
Value.getArrayInitializedElt(I), Kind,
| 2457 | SubobjectDecl, CheckedTemps)) |
| 2458 | return false; |
| 2459 | } |
| 2460 | if (!Value.hasArrayFiller()) |
| 2461 | return true; |
return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
Value.getArrayFiller(), Kind, SubobjectDecl,
CheckedTemps);
| 2465 | } |
| 2466 | if (Value.isUnion() && Value.getUnionField()) { |
return CheckEvaluationResult(
CERK, Info, DiagLoc, Value.getUnionField()->getType(),
Value.getUnionValue(), Kind, Value.getUnionField(), CheckedTemps);
| 2470 | } |
| 2471 | if (Value.isStruct()) { |
| 2472 | auto *RD = Type->castAsRecordDecl(); |
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
| 2474 | unsigned BaseIndex = 0; |
| 2475 | for (const CXXBaseSpecifier &BS : CD->bases()) { |
const APValue &BaseValue = Value.getStructBase(BaseIndex);
| 2477 | if (!BaseValue.hasValue()) { |
| 2478 | SourceLocation TypeBeginLoc = BS.getBaseTypeLoc(); |
Info.FFDiag(TypeBeginLoc, diag::note_constexpr_uninitialized_base)
<< BS.getType() << SourceRange(TypeBeginLoc, BS.getEndLoc());
| 2481 | return false; |
| 2482 | } |
if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(), BaseValue,
| 2484 | Kind, /*SubobjectDecl=*/nullptr, |
| 2485 | CheckedTemps)) |
| 2486 | return false; |
| 2487 | ++BaseIndex; |
| 2488 | } |
| 2489 | } |
| 2490 | for (const auto *I : RD->fields()) { |
| 2491 | if (I->isUnnamedBitField()) |
| 2492 | continue; |
| 2493 | |
if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
Value.getStructField(I->getFieldIndex()), Kind,
I, CheckedTemps))
| 2497 | return false; |
| 2498 | } |
| 2499 | } |
| 2500 | |
| 2501 | if (Value.isLValue() && |
| 2502 | CERK == CheckEvaluationResultKind::ConstantExpression) { |
| 2503 | LValue LVal; |
LVal.setFrom(Info.Ctx, Value);
return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Kind,
| 2506 | CheckedTemps); |
| 2507 | } |
| 2508 | |
| 2509 | if (Value.isMemberPointer() && |
| 2510 | CERK == CheckEvaluationResultKind::ConstantExpression) |
return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Kind);
| 2512 | |
| 2513 | // Everything else is fine. |
| 2514 | return true; |
| 2515 | } |
| 2516 | |
| 2517 | /// Check that this core constant expression value is a valid value for a |
| 2518 | /// constant expression. If not, report an appropriate diagnostic. Does not |
| 2519 | /// check that the expression is of literal type. |
| 2520 | static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, |
| 2521 | QualType Type, const APValue &Value, |
| 2522 | ConstantExprKind Kind) { |
| 2523 | // Nothing to check for a constant expression of type 'cv void'. |
| 2524 | if (Type->isVoidType()) |
| 2525 | return true; |
| 2526 | |
| 2527 | CheckedTemporaries CheckedTemps; |
return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, DiagLoc, Type, Value, Kind,
| 2530 | /*SubobjectDecl=*/nullptr, CheckedTemps); |
| 2531 | } |
| 2532 | |
| 2533 | /// Check that this evaluated value is fully-initialized and can be loaded by |
| 2534 | /// an lvalue-to-rvalue conversion. |
| 2535 | static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc, |
| 2536 | QualType Type, const APValue &Value) { |
| 2537 | CheckedTemporaries CheckedTemps; |
return CheckEvaluationResult(
CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
ConstantExprKind::Normal, /*SubobjectDecl=*/nullptr, CheckedTemps);
| 2541 | } |
| 2542 | |
| 2543 | /// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless |
| 2544 | /// "the allocated storage is deallocated within the evaluation". |
| 2545 | static bool CheckMemoryLeaks(EvalInfo &Info) { |
| 2546 | if (!Info.HeapAllocs.empty()) { |
| 2547 | // We can still fold to a constant despite a compile-time memory leak, |
| 2548 | // so long as the heap allocation isn't referenced in the result (we check |
| 2549 | // that in CheckConstantExpression). |
Info.CCEDiag(Info.HeapAllocs.begin()->second.AllocExpr,
diag::note_constexpr_memory_leak)
| 2552 | << unsigned(Info.HeapAllocs.size() - 1); |
| 2553 | } |
| 2554 | return true; |
| 2555 | } |
| 2556 | |
| 2557 | static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) { |
// A null base expression indicates a null pointer. These are always
// evaluatable, and they are false unless the offset is nonzero.
| 2560 | if (!Value.getLValueBase()) { |
| 2561 | // TODO: Should a non-null pointer with an offset of zero evaluate to true? |
| 2562 | Result = !Value.getLValueOffset().isZero(); |
| 2563 | return true; |
| 2564 | } |
| 2565 | |
| 2566 | // We have a non-null base. These are generally known to be true, but if it's |
| 2567 | // a weak declaration it can be null at runtime. |
| 2568 | Result = true; |
| 2569 | const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>(); |
| 2570 | return !Decl || !Decl->isWeak(); |
| 2571 | } |
| 2572 | |
| 2573 | static bool HandleConversionToBool(const APValue &Val, bool &Result) { |
| 2574 | // TODO: This function should produce notes if it fails. |
| 2575 | switch (Val.getKind()) { |
| 2576 | case APValue::None: |
| 2577 | case APValue::Indeterminate: |
| 2578 | return false; |
| 2579 | case APValue::Int: |
| 2580 | Result = Val.getInt().getBoolValue(); |
| 2581 | return true; |
| 2582 | case APValue::FixedPoint: |
| 2583 | Result = Val.getFixedPoint().getBoolValue(); |
| 2584 | return true; |
| 2585 | case APValue::Float: |
| 2586 | Result = !Val.getFloat().isZero(); |
| 2587 | return true; |
| 2588 | case APValue::ComplexInt: |
| 2589 | Result = Val.getComplexIntReal().getBoolValue() || |
| 2590 | Val.getComplexIntImag().getBoolValue(); |
| 2591 | return true; |
| 2592 | case APValue::ComplexFloat: |
| 2593 | Result = !Val.getComplexFloatReal().isZero() || |
| 2594 | !Val.getComplexFloatImag().isZero(); |
| 2595 | return true; |
| 2596 | case APValue::LValue: |
| 2597 | return EvalPointerValueAsBool(Value: Val, Result); |
| 2598 | case APValue::MemberPointer: |
| 2599 | if (Val.getMemberPointerDecl() && Val.getMemberPointerDecl()->isWeak()) { |
| 2600 | return false; |
| 2601 | } |
| 2602 | Result = Val.getMemberPointerDecl(); |
| 2603 | return true; |
| 2604 | case APValue::Vector: |
| 2605 | case APValue::Array: |
| 2606 | case APValue::Struct: |
| 2607 | case APValue::Union: |
| 2608 | case APValue::AddrLabelDiff: |
| 2609 | return false; |
| 2610 | } |
| 2611 | |
| 2612 | llvm_unreachable("unknown APValue kind");
| 2613 | } |
| 2614 | |
| 2615 | static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result, |
| 2616 | EvalInfo &Info) { |
| 2617 | assert(!E->isValueDependent()); |
| 2618 | assert(E->isPRValue() && "missing lvalue-to-rvalue conv in bool condition");
| 2619 | APValue Val; |
| 2620 | if (!Evaluate(Result&: Val, Info, E)) |
| 2621 | return false; |
| 2622 | return HandleConversionToBool(Val, Result); |
| 2623 | } |
| 2624 | |
| 2625 | template<typename T> |
| 2626 | static bool HandleOverflow(EvalInfo &Info, const Expr *E, |
| 2627 | const T &SrcValue, QualType DestType) { |
| 2628 | Info.CCEDiag(E, DiagId: diag::note_constexpr_overflow) |
| 2629 | << SrcValue << DestType; |
| 2630 | return Info.noteUndefinedBehavior(); |
| 2631 | } |
| 2632 | |
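|      | // A hedged illustration of the float-to-integer overflow check below (a
|      | // hypothetical user-level snippet, not part of this file):
|      | //   constexpr int i = (int)1e40;  // 1e40 is outside int's range, so the
|      | //                                 // conversion is reported via HandleOverflow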
| 2633 | static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E, |
| 2634 | QualType SrcType, const APFloat &Value, |
| 2635 | QualType DestType, APSInt &Result) { |
| 2636 | unsigned DestWidth = Info.Ctx.getIntWidth(T: DestType); |
| 2637 | // Determine whether we are converting to unsigned or signed. |
| 2638 | bool DestSigned = DestType->isSignedIntegerOrEnumerationType(); |
| 2639 | |
| 2640 | Result = APSInt(DestWidth, !DestSigned); |
| 2641 | bool ignored; |
| 2642 | if (Value.convertToInteger(Result, RM: llvm::APFloat::rmTowardZero, IsExact: &ignored) |
| 2643 | & APFloat::opInvalidOp) |
| 2644 | return HandleOverflow(Info, E, SrcValue: Value, DestType); |
| 2645 | return true; |
| 2646 | } |
| 2647 | |
| 2648 | /// Get rounding mode to use in evaluation of the specified expression. |
| 2649 | /// |
| 2650 | /// If rounding mode is unknown at compile time, still try to evaluate the |
| 2651 | /// expression. If the result is exact, it does not depend on rounding mode. |
| 2652 | /// So return "tonearest" mode instead of "dynamic". |
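|      | /// For example (a hedged sketch, assuming the FENV_ROUND pragma is honored in
|      | /// the user's translation unit):
|      | /// \code
|      | ///   #pragma STDC FENV_ROUND FE_DYNAMIC
|      | ///   float f() { return 0.5f + 0.25f; }  // the sum is exact, so it can still
|      | ///                                       // be folded under "tonearest"
|      | /// \endcode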
| 2653 | static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) { |
| 2654 | llvm::RoundingMode RM = |
| 2655 | E->getFPFeaturesInEffect(LO: Info.getLangOpts()).getRoundingMode(); |
| 2656 | if (RM == llvm::RoundingMode::Dynamic) |
| 2657 | RM = llvm::RoundingMode::NearestTiesToEven; |
| 2658 | return RM; |
| 2659 | } |
| 2660 | |
| 2661 | /// Check if the given evaluation result is allowed for constant evaluation. |
| 2662 | static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E, |
| 2663 | APFloat::opStatus St) { |
| 2664 | // In a constant context, assume that any dynamic rounding mode or FP |
| 2665 | // exception state matches the default floating-point environment. |
| 2666 | if (Info.InConstantContext) |
| 2667 | return true; |
| 2668 | |
| 2669 | FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.getLangOpts()); |
| 2670 | if ((St & APFloat::opInexact) && |
| 2671 | FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) { |
| 2672 | // An inexact result depends on the rounding mode. If the requested mode is
| 2673 | // dynamic, the evaluation cannot be performed at compile time.
| 2674 | Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_rounding); |
| 2675 | return false; |
| 2676 | } |
| 2677 | |
| 2678 | if ((St != APFloat::opOK) && |
| 2679 | (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic || |
| 2680 | FPO.getExceptionMode() != LangOptions::FPE_Ignore || |
| 2681 | FPO.getAllowFEnvAccess())) { |
| 2682 | Info.FFDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict); |
| 2683 | return false; |
| 2684 | } |
| 2685 | |
| 2686 | if ((St & APFloat::opStatus::opInvalidOp) && |
| 2687 | FPO.getExceptionMode() != LangOptions::FPE_Ignore) { |
| 2688 | // There is no usefully definable result. |
| 2689 | Info.FFDiag(E); |
| 2690 | return false; |
| 2691 | } |
| 2692 | |
| 2693 | // FIXME: if: |
| 2694 | // - evaluation triggered other FP exception, and |
| 2695 | // - exception mode is not "ignore", and |
| 2696 | // - the expression being evaluated is not a part of global variable |
| 2697 | // initializer, |
| 2698 | // the evaluation probably needs to be rejected.
| 2699 | return true; |
| 2700 | } |
| 2701 | |
| 2702 | static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E, |
| 2703 | QualType SrcType, QualType DestType, |
| 2704 | APFloat &Result) { |
| 2705 | assert((isa<CastExpr>(E) || isa<CompoundAssignOperator>(E) || |
| 2706 | isa<ConvertVectorExpr>(E)) && |
| 2707 | "HandleFloatToFloatCast has been checked with only CastExpr, " |
| 2708 | "CompoundAssignOperator and ConvertVectorExpr. Please either validate " |
| 2709 | "the new expression or address the root cause of this usage.");
| 2710 | llvm::RoundingMode RM = getActiveRoundingMode(Info, E); |
| 2711 | APFloat::opStatus St; |
| 2712 | APFloat Value = Result; |
| 2713 | bool ignored; |
| 2714 | St = Result.convert(ToSemantics: Info.Ctx.getFloatTypeSemantics(T: DestType), RM, losesInfo: &ignored); |
| 2715 | return checkFloatingPointResult(Info, E, St); |
| 2716 | } |
| 2717 | |
| 2718 | static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E, |
| 2719 | QualType DestType, QualType SrcType, |
| 2720 | const APSInt &Value) { |
| 2721 | unsigned DestWidth = Info.Ctx.getIntWidth(T: DestType); |
| 2722 | // Figure out if this is a truncate, extend or noop cast. |
| 2723 | // If the input is signed, do a sign extend, noop, or truncate. |
| 2724 | APSInt Result = Value.extOrTrunc(width: DestWidth); |
| 2725 | Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType()); |
| 2726 | if (DestType->isBooleanType()) |
| 2727 | Result = Value.getBoolValue(); |
| 2728 | return Result; |
| 2729 | } |
| 2730 | |
| 2731 | static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E, |
| 2732 | const FPOptions FPO, |
| 2733 | QualType SrcType, const APSInt &Value, |
| 2734 | QualType DestType, APFloat &Result) { |
| 2735 | Result = APFloat(Info.Ctx.getFloatTypeSemantics(T: DestType), 1); |
| 2736 | llvm::RoundingMode RM = getActiveRoundingMode(Info, E); |
| 2737 | APFloat::opStatus St = Result.convertFromAPInt(Input: Value, IsSigned: Value.isSigned(), RM); |
| 2738 | return checkFloatingPointResult(Info, E, St); |
| 2739 | } |
| 2740 | |
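|      | // A hedged illustration of the truncation performed below (hypothetical
|      | // user-level snippet, not part of this file); plain int bit-fields are signed
|      | // in C++:
|      | //   struct S { int b : 4; };
|      | //   constexpr S s{0x1F};  // s.b is -1: 0x1F is truncated to 4 bits (0xF)
|      | //                         // and sign-extended back to the width of int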
| 2741 | static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E, |
| 2742 | APValue &Value, const FieldDecl *FD) { |
| 2743 | assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
| 2744 | |
| 2745 | if (!Value.isInt()) { |
| 2746 | // Trying to store a pointer-cast-to-integer into a bitfield. |
| 2747 | // FIXME: In this case, we should provide the diagnostic for casting |
| 2748 | // a pointer to an integer. |
| 2749 | assert(Value.isLValue() && "integral value neither int nor lvalue?");
| 2750 | Info.FFDiag(E); |
| 2751 | return false; |
| 2752 | } |
| 2753 | |
| 2754 | APSInt &Int = Value.getInt(); |
| 2755 | unsigned OldBitWidth = Int.getBitWidth(); |
| 2756 | unsigned NewBitWidth = FD->getBitWidthValue(); |
| 2757 | if (NewBitWidth < OldBitWidth) |
| 2758 | Int = Int.trunc(width: NewBitWidth).extend(width: OldBitWidth); |
| 2759 | return true; |
| 2760 | } |
| 2761 | |
| 2762 | /// Perform the given integer operation, which is known to need at most BitWidth |
| 2763 | /// bits, and check for overflow in the original type (if that type was not an |
| 2764 | /// unsigned type). |
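|      | /// For example (an illustrative user-level snippet, not part of this file),
|      | /// with 32-bit int:
|      | /// \code
|      | ///   constexpr int k = INT_MAX + 1;  // the 33-bit sum does not round-trip
|      | ///                                   // through 32 bits, so overflow is reported
|      | /// \endcode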
| 2765 | template<typename Operation> |
| 2766 | static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E, |
| 2767 | const APSInt &LHS, const APSInt &RHS, |
| 2768 | unsigned BitWidth, Operation Op, |
| 2769 | APSInt &Result) { |
| 2770 | if (LHS.isUnsigned()) { |
| 2771 | Result = Op(LHS, RHS); |
| 2772 | return true; |
| 2773 | } |
| 2774 | |
| 2775 | APSInt Value(Op(LHS.extend(width: BitWidth), RHS.extend(width: BitWidth)), false); |
| 2776 | Result = Value.trunc(width: LHS.getBitWidth()); |
| 2777 | if (Result.extend(width: BitWidth) != Value) { |
| 2778 | if (Info.checkingForUndefinedBehavior()) |
| 2779 | Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(), |
| 2780 | DiagID: diag::warn_integer_constant_overflow) |
| 2781 | << toString(I: Result, Radix: 10, Signed: Result.isSigned(), /*formatAsCLiteral=*/false, |
| 2782 | /*UpperCase=*/true, /*InsertSeparators=*/true) |
| 2783 | << E->getType() << E->getSourceRange(); |
| 2784 | return HandleOverflow(Info, E, SrcValue: Value, DestType: E->getType()); |
| 2785 | } |
| 2786 | return true; |
| 2787 | } |
| 2788 | |
| 2789 | /// Perform the given binary integer operation. |
| 2790 | static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E, |
| 2791 | const APSInt &LHS, BinaryOperatorKind Opcode, |
| 2792 | APSInt RHS, APSInt &Result) { |
| 2793 | bool HandleOverflowResult = true; |
| 2794 | switch (Opcode) { |
| 2795 | default: |
| 2796 | Info.FFDiag(E); |
| 2797 | return false; |
| 2798 | case BO_Mul: |
| 2799 | return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() * 2, |
| 2800 | Op: std::multiplies<APSInt>(), Result); |
| 2801 | case BO_Add: |
| 2802 | return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() + 1, |
| 2803 | Op: std::plus<APSInt>(), Result); |
| 2804 | case BO_Sub: |
| 2805 | return CheckedIntArithmetic(Info, E, LHS, RHS, BitWidth: LHS.getBitWidth() + 1, |
| 2806 | Op: std::minus<APSInt>(), Result); |
| 2807 | case BO_And: Result = LHS & RHS; return true; |
| 2808 | case BO_Xor: Result = LHS ^ RHS; return true; |
| 2809 | case BO_Or: Result = LHS | RHS; return true; |
| 2810 | case BO_Div: |
| 2811 | case BO_Rem: |
| 2812 | if (RHS == 0) { |
| 2813 | Info.FFDiag(E, DiagId: diag::note_expr_divide_by_zero) |
| 2814 | << E->getRHS()->getSourceRange(); |
| 2815 | return false; |
| 2816 | } |
| 2817 | // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports |
| 2818 | // this operation and gives the two's complement result. |
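|      | // For illustration (assuming 32-bit int): INT_MIN / -1 would be 2147483648,
|      | // which is not representable, so constant evaluation reports overflow even
|      | // though APSInt itself wraps the result back to INT_MIN.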
| 2819 | if (RHS.isNegative() && RHS.isAllOnes() && LHS.isSigned() && |
| 2820 | LHS.isMinSignedValue()) |
| 2821 | HandleOverflowResult = HandleOverflow( |
| 2822 | Info, E, SrcValue: -LHS.extend(width: LHS.getBitWidth() + 1), DestType: E->getType()); |
| 2823 | Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS); |
| 2824 | return HandleOverflowResult; |
| 2825 | case BO_Shl: { |
| 2826 | if (Info.getLangOpts().OpenCL) |
| 2827 | // OpenCL 6.3j: shift values are effectively % word size of LHS. |
| 2828 | RHS &= APSInt(llvm::APInt(RHS.getBitWidth(), |
| 2829 | static_cast<uint64_t>(LHS.getBitWidth() - 1)), |
| 2830 | RHS.isUnsigned()); |
| 2831 | else if (RHS.isSigned() && RHS.isNegative()) { |
| 2832 | // During constant-folding, a negative shift is an opposite shift. Such |
| 2833 | // a shift is not a constant expression. |
| 2834 | Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHS; |
| 2835 | if (!Info.noteUndefinedBehavior()) |
| 2836 | return false; |
| 2837 | RHS = -RHS; |
| 2838 | goto shift_right; |
| 2839 | } |
| 2840 | shift_left: |
| 2841 | // C++11 [expr.shift]p1: Shift width must be less than the bit width of |
| 2842 | // the shifted type. |
| 2843 | unsigned SA = (unsigned) RHS.getLimitedValue(Limit: LHS.getBitWidth()-1); |
| 2844 | if (SA != RHS) { |
| 2845 | Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift) |
| 2846 | << RHS << E->getType() << LHS.getBitWidth(); |
| 2847 | if (!Info.noteUndefinedBehavior()) |
| 2848 | return false; |
| 2849 | } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) { |
| 2850 | // C++11 [expr.shift]p2: A signed left shift must have a non-negative |
| 2851 | // operand, and must not overflow the corresponding unsigned type. |
| 2852 | // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to |
| 2853 | // E1 x 2^E2 modulo 2^N.
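|      | // For example (hypothetical user-level snippets, assuming 32-bit int):
|      | //   constexpr int a = -1 << 1;  // not a constant expression before C++20
|      | //   constexpr int b = 2 << 31;  // discards a bit; diagnosed before C++20
|      | // Since C++20 both are well defined modulo 2^N and fold without complaint.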
| 2854 | if (LHS.isNegative()) { |
| 2855 | Info.CCEDiag(E, DiagId: diag::note_constexpr_lshift_of_negative) << LHS; |
| 2856 | if (!Info.noteUndefinedBehavior()) |
| 2857 | return false; |
| 2858 | } else if (LHS.countl_zero() < SA) { |
| 2859 | Info.CCEDiag(E, DiagId: diag::note_constexpr_lshift_discards); |
| 2860 | if (!Info.noteUndefinedBehavior()) |
| 2861 | return false; |
| 2862 | } |
| 2863 | } |
| 2864 | Result = LHS << SA; |
| 2865 | return true; |
| 2866 | } |
| 2867 | case BO_Shr: { |
| 2868 | if (Info.getLangOpts().OpenCL) |
| 2869 | // OpenCL 6.3j: shift values are effectively % word size of LHS. |
| 2870 | RHS &= APSInt(llvm::APInt(RHS.getBitWidth(), |
| 2871 | static_cast<uint64_t>(LHS.getBitWidth() - 1)), |
| 2872 | RHS.isUnsigned()); |
| 2873 | else if (RHS.isSigned() && RHS.isNegative()) { |
| 2874 | // During constant-folding, a negative shift is an opposite shift. Such a |
| 2875 | // shift is not a constant expression. |
| 2876 | Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHS; |
| 2877 | if (!Info.noteUndefinedBehavior()) |
| 2878 | return false; |
| 2879 | RHS = -RHS; |
| 2880 | goto shift_left; |
| 2881 | } |
| 2882 | shift_right: |
| 2883 | // C++11 [expr.shift]p1: Shift width must be less than the bit width of the |
| 2884 | // shifted type. |
| 2885 | unsigned SA = (unsigned) RHS.getLimitedValue(Limit: LHS.getBitWidth()-1); |
| 2886 | if (SA != RHS) { |
| 2887 | Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift) |
| 2888 | << RHS << E->getType() << LHS.getBitWidth(); |
| 2889 | if (!Info.noteUndefinedBehavior()) |
| 2890 | return false; |
| 2891 | } |
| 2892 | |
| 2893 | Result = LHS >> SA; |
| 2894 | return true; |
| 2895 | } |
| 2896 | |
| 2897 | case BO_LT: Result = LHS < RHS; return true; |
| 2898 | case BO_GT: Result = LHS > RHS; return true; |
| 2899 | case BO_LE: Result = LHS <= RHS; return true; |
| 2900 | case BO_GE: Result = LHS >= RHS; return true; |
| 2901 | case BO_EQ: Result = LHS == RHS; return true; |
| 2902 | case BO_NE: Result = LHS != RHS; return true; |
| 2903 | case BO_Cmp: |
| 2904 | llvm_unreachable("BO_Cmp should be handled elsewhere");
| 2905 | } |
| 2906 | } |
| 2907 | |
| 2908 | /// Perform the given binary floating-point operation, in-place, on LHS. |
| 2909 | static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E, |
| 2910 | APFloat &LHS, BinaryOperatorKind Opcode, |
| 2911 | const APFloat &RHS) { |
| 2912 | llvm::RoundingMode RM = getActiveRoundingMode(Info, E); |
| 2913 | APFloat::opStatus St; |
| 2914 | switch (Opcode) { |
| 2915 | default: |
| 2916 | Info.FFDiag(E); |
| 2917 | return false; |
| 2918 | case BO_Mul: |
| 2919 | St = LHS.multiply(RHS, RM); |
| 2920 | break; |
| 2921 | case BO_Add: |
| 2922 | St = LHS.add(RHS, RM); |
| 2923 | break; |
| 2924 | case BO_Sub: |
| 2925 | St = LHS.subtract(RHS, RM); |
| 2926 | break; |
| 2927 | case BO_Div: |
| 2928 | // [expr.mul]p4: |
| 2929 | // If the second operand of / or % is zero the behavior is undefined. |
| 2930 | if (RHS.isZero()) |
| 2931 | Info.CCEDiag(E, DiagId: diag::note_expr_divide_by_zero); |
| 2932 | St = LHS.divide(RHS, RM); |
| 2933 | break; |
| 2934 | } |
| 2935 | |
| 2936 | // [expr.pre]p4: |
| 2937 | // If during the evaluation of an expression, the result is not |
| 2938 | // mathematically defined [...], the behavior is undefined. |
| 2939 | // FIXME: C++ rules require us to not conform to IEEE 754 here. |
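|      | // For example (a hypothetical user-level snippet, not part of this file):
|      | //   constexpr double d = 0.0 / 0.0;  // division by zero producing a NaN is
|      | //                                    // rejected as a constant expression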
| 2940 | if (LHS.isNaN()) { |
| 2941 | Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << LHS.isNaN(); |
| 2942 | return Info.noteUndefinedBehavior(); |
| 2943 | } |
| 2944 | |
| 2945 | return checkFloatingPointResult(Info, E, St); |
| 2946 | } |
| 2947 | |
| 2948 | static bool handleLogicalOpForVector(const APInt &LHSValue, |
| 2949 | BinaryOperatorKind Opcode, |
| 2950 | const APInt &RHSValue, APInt &Result) { |
| 2951 | bool LHS = (LHSValue != 0); |
| 2952 | bool RHS = (RHSValue != 0); |
| 2953 | |
| 2954 | if (Opcode == BO_LAnd) |
| 2955 | Result = LHS && RHS; |
| 2956 | else |
| 2957 | Result = LHS || RHS; |
| 2958 | return true; |
| 2959 | } |
| 2960 | static bool handleLogicalOpForVector(const APFloat &LHSValue, |
| 2961 | BinaryOperatorKind Opcode, |
| 2962 | const APFloat &RHSValue, APInt &Result) { |
| 2963 | bool LHS = !LHSValue.isZero(); |
| 2964 | bool RHS = !RHSValue.isZero(); |
| 2965 | |
| 2966 | if (Opcode == BO_LAnd) |
| 2967 | Result = LHS && RHS; |
| 2968 | else |
| 2969 | Result = LHS || RHS; |
| 2970 | return true; |
| 2971 | } |
| 2972 | |
| 2973 | static bool handleLogicalOpForVector(const APValue &LHSValue, |
| 2974 | BinaryOperatorKind Opcode, |
| 2975 | const APValue &RHSValue, APInt &Result) { |
| 2976 | // The result is always an int type, however operands match the first. |
| 2977 | if (LHSValue.getKind() == APValue::Int) |
| 2978 | return handleLogicalOpForVector(LHSValue: LHSValue.getInt(), Opcode, |
| 2979 | RHSValue: RHSValue.getInt(), Result); |
| 2980 | assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
| 2981 | return handleLogicalOpForVector(LHSValue: LHSValue.getFloat(), Opcode, |
| 2982 | RHSValue: RHSValue.getFloat(), Result); |
| 2983 | } |
| 2984 | |
| 2985 | template <typename APTy> |
| 2986 | static bool |
| 2987 | handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode, |
| 2988 | const APTy &RHSValue, APInt &Result) { |
| 2989 | switch (Opcode) { |
| 2990 | default: |
| 2991 | llvm_unreachable("unsupported binary operator");
| 2992 | case BO_EQ: |
| 2993 | Result = (LHSValue == RHSValue); |
| 2994 | break; |
| 2995 | case BO_NE: |
| 2996 | Result = (LHSValue != RHSValue); |
| 2997 | break; |
| 2998 | case BO_LT: |
| 2999 | Result = (LHSValue < RHSValue); |
| 3000 | break; |
| 3001 | case BO_GT: |
| 3002 | Result = (LHSValue > RHSValue); |
| 3003 | break; |
| 3004 | case BO_LE: |
| 3005 | Result = (LHSValue <= RHSValue); |
| 3006 | break; |
| 3007 | case BO_GE: |
| 3008 | Result = (LHSValue >= RHSValue); |
| 3009 | break; |
| 3010 | } |
| 3011 | |
| 3012 | // The boolean operations on these vector types use an instruction that |
| 3013 | // results in a mask of '-1' for the 'truth' value. Ensure that we negate 1 |
| 3014 | // to -1 to make sure that we produce the correct value. |
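|      | // For example (a hypothetical user-level snippet, not part of this file):
|      | //   typedef int v4 __attribute__((vector_size(16)));
|      | //   v4 m = (v4){1, 2, 3, 4} == (v4){1, 0, 3, 0};  // folds to {-1, 0, -1, 0}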
| 3015 | Result.negate(); |
| 3016 | |
| 3017 | return true; |
| 3018 | } |
| 3019 | |
| 3020 | static bool handleCompareOpForVector(const APValue &LHSValue, |
| 3021 | BinaryOperatorKind Opcode, |
| 3022 | const APValue &RHSValue, APInt &Result) { |
| 3023 | // The result is always an int type, however operands match the first. |
| 3024 | if (LHSValue.getKind() == APValue::Int) |
| 3025 | return handleCompareOpForVectorHelper(LHSValue: LHSValue.getInt(), Opcode, |
| 3026 | RHSValue: RHSValue.getInt(), Result); |
| 3027 | assert(LHSValue.getKind() == APValue::Float && "Should be no other options");
| 3028 | return handleCompareOpForVectorHelper(LHSValue: LHSValue.getFloat(), Opcode, |
| 3029 | RHSValue: RHSValue.getFloat(), Result); |
| 3030 | } |
| 3031 | |
| 3032 | // Perform binary operations for vector types, in place on the LHS. |
| 3033 | static bool handleVectorVectorBinOp(EvalInfo &Info, const BinaryOperator *E, |
| 3034 | BinaryOperatorKind Opcode, |
| 3035 | APValue &LHSValue, |
| 3036 | const APValue &RHSValue) { |
| 3037 | assert(Opcode != BO_PtrMemD && Opcode != BO_PtrMemI && |
| 3038 | "Operation not supported on vector types");
| 3039 | |
| 3040 | const auto *VT = E->getType()->castAs<VectorType>(); |
| 3041 | unsigned NumElements = VT->getNumElements(); |
| 3042 | QualType EltTy = VT->getElementType(); |
| 3043 | |
| 3044 | // In the cases (typically C as I've observed) where we aren't evaluating |
| 3045 | // constexpr but are checking for cases where the LHS isn't yet evaluatable, |
| 3046 | // just give up. |
| 3047 | if (!LHSValue.isVector()) { |
| 3048 | assert(LHSValue.isLValue() && |
| 3049 | "A vector result that isn't a vector OR uncalculated LValue");
| 3050 | Info.FFDiag(E); |
| 3051 | return false; |
| 3052 | } |
| 3053 | |
| 3054 | assert(LHSValue.getVectorLength() == NumElements && |
| 3055 | RHSValue.getVectorLength() == NumElements && "Different vector sizes");
| 3056 | |
| 3057 | SmallVector<APValue, 4> ResultElements; |
| 3058 | |
| 3059 | for (unsigned EltNum = 0; EltNum < NumElements; ++EltNum) { |
| 3060 | APValue LHSElt = LHSValue.getVectorElt(I: EltNum); |
| 3061 | APValue RHSElt = RHSValue.getVectorElt(I: EltNum); |
| 3062 | |
| 3063 | if (EltTy->isIntegerType()) { |
| 3064 | APSInt EltResult{Info.Ctx.getIntWidth(T: EltTy), |
| 3065 | EltTy->isUnsignedIntegerType()}; |
| 3066 | bool Success = true; |
| 3067 | |
| 3068 | if (BinaryOperator::isLogicalOp(Opc: Opcode)) |
| 3069 | Success = handleLogicalOpForVector(LHSValue: LHSElt, Opcode, RHSValue: RHSElt, Result&: EltResult); |
| 3070 | else if (BinaryOperator::isComparisonOp(Opc: Opcode)) |
| 3071 | Success = handleCompareOpForVector(LHSValue: LHSElt, Opcode, RHSValue: RHSElt, Result&: EltResult); |
| 3072 | else |
| 3073 | Success = handleIntIntBinOp(Info, E, LHS: LHSElt.getInt(), Opcode, |
| 3074 | RHS: RHSElt.getInt(), Result&: EltResult); |
| 3075 | |
| 3076 | if (!Success) { |
| 3077 | Info.FFDiag(E); |
| 3078 | return false; |
| 3079 | } |
| 3080 | ResultElements.emplace_back(Args&: EltResult); |
| 3081 | |
| 3082 | } else if (EltTy->isFloatingType()) { |
| 3083 | assert(LHSElt.getKind() == APValue::Float && |
| 3084 | RHSElt.getKind() == APValue::Float && |
| 3085 | "Mismatched LHS/RHS/Result Type");
| 3086 | APFloat LHSFloat = LHSElt.getFloat(); |
| 3087 | |
| 3088 | if (!handleFloatFloatBinOp(Info, E, LHS&: LHSFloat, Opcode, |
| 3089 | RHS: RHSElt.getFloat())) { |
| 3090 | Info.FFDiag(E); |
| 3091 | return false; |
| 3092 | } |
| 3093 | |
| 3094 | ResultElements.emplace_back(Args&: LHSFloat); |
| 3095 | } |
| 3096 | } |
| 3097 | |
| 3098 | LHSValue = APValue(ResultElements.data(), ResultElements.size()); |
| 3099 | return true; |
| 3100 | } |
| 3101 | |
| 3102 | /// Cast an lvalue referring to a base subobject to a derived class, by |
| 3103 | /// truncating the lvalue's path to the given length. |
| 3104 | static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result, |
| 3105 | const RecordDecl *TruncatedType, |
| 3106 | unsigned TruncatedElements) { |
| 3107 | SubobjectDesignator &D = Result.Designator; |
| 3108 | |
| 3109 | // Check we actually point to a derived class object. |
| 3110 | if (TruncatedElements == D.Entries.size()) |
| 3111 | return true; |
| 3112 | assert(TruncatedElements >= D.MostDerivedPathLength && |
| 3113 | "not casting to a derived class");
| 3114 | if (!Result.checkSubobject(Info, E, CSK: CSK_Derived)) |
| 3115 | return false; |
| 3116 | |
| 3117 | // Truncate the path to the subobject, and remove any derived-to-base offsets. |
| 3118 | const RecordDecl *RD = TruncatedType; |
| 3119 | for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) { |
| 3120 | if (RD->isInvalidDecl()) return false; |
| 3121 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 3122 | const CXXRecordDecl *Base = getAsBaseClass(E: D.Entries[I]); |
| 3123 | if (isVirtualBaseClass(E: D.Entries[I])) |
| 3124 | Result.Offset -= Layout.getVBaseClassOffset(VBase: Base); |
| 3125 | else |
| 3126 | Result.Offset -= Layout.getBaseClassOffset(Base); |
| 3127 | RD = Base; |
| 3128 | } |
| 3129 | D.Entries.resize(N: TruncatedElements); |
| 3130 | return true; |
| 3131 | } |
| 3132 | |
| 3133 | static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj, |
| 3134 | const CXXRecordDecl *Derived, |
| 3135 | const CXXRecordDecl *Base, |
| 3136 | const ASTRecordLayout *RL = nullptr) { |
| 3137 | if (!RL) { |
| 3138 | if (Derived->isInvalidDecl()) return false; |
| 3139 | RL = &Info.Ctx.getASTRecordLayout(D: Derived); |
| 3140 | } |
| 3141 | |
| 3142 | Obj.addDecl(Info, E, D: Base, /*Virtual*/ false); |
| 3143 | Obj.getLValueOffset() += RL->getBaseClassOffset(Base); |
| 3144 | return true; |
| 3145 | } |
| 3146 | |
| 3147 | static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj, |
| 3148 | const CXXRecordDecl *DerivedDecl, |
| 3149 | const CXXBaseSpecifier *Base) { |
| 3150 | const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); |
| 3151 | |
| 3152 | if (!Base->isVirtual()) |
| 3153 | return HandleLValueDirectBase(Info, E, Obj, Derived: DerivedDecl, Base: BaseDecl); |
| 3154 | |
| 3155 | SubobjectDesignator &D = Obj.Designator; |
| 3156 | if (D.Invalid) |
| 3157 | return false; |
| 3158 | |
| 3159 | // Extract most-derived object and corresponding type. |
| 3160 | // FIXME: After implementing P2280R4 it became possible to get references |
| 3161 | // here. We do MostDerivedType->getAsCXXRecordDecl() in several other |
| 3162 | // locations and if we see crashes in those locations in the future |
| 3163 | // it may make more sense to move this fix into Lvalue::set. |
| 3164 | DerivedDecl = D.MostDerivedType.getNonReferenceType()->getAsCXXRecordDecl(); |
| 3165 | if (!CastToDerivedClass(Info, E, Result&: Obj, TruncatedType: DerivedDecl, TruncatedElements: D.MostDerivedPathLength)) |
| 3166 | return false; |
| 3167 | |
| 3168 | // Find the virtual base class. |
| 3169 | if (DerivedDecl->isInvalidDecl()) return false; |
| 3170 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: DerivedDecl); |
| 3171 | Obj.addDecl(Info, E, D: BaseDecl, /*Virtual*/ true); |
| 3172 | Obj.getLValueOffset() += Layout.getVBaseClassOffset(VBase: BaseDecl); |
| 3173 | return true; |
| 3174 | } |
| 3175 | |
| 3176 | static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E, |
| 3177 | QualType Type, LValue &Result) { |
| 3178 | for (CastExpr::path_const_iterator PathI = E->path_begin(), |
| 3179 | PathE = E->path_end(); |
| 3180 | PathI != PathE; ++PathI) { |
| 3181 | if (!HandleLValueBase(Info, E, Obj&: Result, DerivedDecl: Type->getAsCXXRecordDecl(), |
| 3182 | Base: *PathI)) |
| 3183 | return false; |
| 3184 | Type = (*PathI)->getType(); |
| 3185 | } |
| 3186 | return true; |
| 3187 | } |
| 3188 | |
| 3189 | /// Cast an lvalue referring to a derived class to a known base subobject. |
| 3190 | static bool CastToBaseClass(EvalInfo &Info, const Expr *E, LValue &Result, |
| 3191 | const CXXRecordDecl *DerivedRD, |
| 3192 | const CXXRecordDecl *BaseRD) { |
| 3193 | CXXBasePaths Paths(/*FindAmbiguities=*/false, |
| 3194 | /*RecordPaths=*/true, /*DetectVirtual=*/false); |
| 3195 | if (!DerivedRD->isDerivedFrom(Base: BaseRD, Paths)) |
| 3196 | llvm_unreachable("Class must be derived from the passed in base class!");
| 3197 | |
| 3198 | for (CXXBasePathElement &Elem : Paths.front()) |
| 3199 | if (!HandleLValueBase(Info, E, Obj&: Result, DerivedDecl: Elem.Class, Base: Elem.Base)) |
| 3200 | return false; |
| 3201 | return true; |
| 3202 | } |
| 3203 | |
| 3204 | /// Update LVal to refer to the given field, which must be a member of the type |
| 3205 | /// currently described by LVal. |
| 3206 | static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal, |
| 3207 | const FieldDecl *FD, |
| 3208 | const ASTRecordLayout *RL = nullptr) { |
| 3209 | if (!RL) { |
| 3210 | if (FD->getParent()->isInvalidDecl()) return false; |
| 3211 | RL = &Info.Ctx.getASTRecordLayout(D: FD->getParent()); |
| 3212 | } |
| 3213 | |
| 3214 | unsigned I = FD->getFieldIndex(); |
| 3215 | LVal.addDecl(Info, E, D: FD); |
| 3216 | LVal.adjustOffset(N: Info.Ctx.toCharUnitsFromBits(BitSize: RL->getFieldOffset(FieldNo: I))); |
| 3217 | return true; |
| 3218 | } |
| 3219 | |
| 3220 | /// Update LVal to refer to the given indirect field. |
| 3221 | static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E, |
| 3222 | LValue &LVal, |
| 3223 | const IndirectFieldDecl *IFD) { |
| 3224 | for (const auto *C : IFD->chain()) |
| 3225 | if (!HandleLValueMember(Info, E, LVal, FD: cast<FieldDecl>(Val: C))) |
| 3226 | return false; |
| 3227 | return true; |
| 3228 | } |
| 3229 | |
| 3230 | enum class SizeOfType { |
| 3231 | SizeOf, |
| 3232 | DataSizeOf, |
| 3233 | }; |
| 3234 | |
| 3235 | /// Get the size of the given type in char units. |
| 3236 | static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type, |
| 3237 | CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf) { |
| 3238 | // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc |
| 3239 | // extension. |
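|      | // For example (illustrative, not part of this file): as a GNU extension,
|      | //   sizeof(void) == 1   and   sizeof(void(int)) == 1
|      | // both fold here instead of being rejected.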
| 3240 | if (Type->isVoidType() || Type->isFunctionType()) { |
| 3241 | Size = CharUnits::One(); |
| 3242 | return true; |
| 3243 | } |
| 3244 | |
| 3245 | if (Type->isDependentType()) { |
| 3246 | Info.FFDiag(Loc); |
| 3247 | return false; |
| 3248 | } |
| 3249 | |
| 3250 | if (!Type->isConstantSizeType()) { |
| 3251 | // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2. |
| 3252 | // FIXME: Better diagnostic. |
| 3253 | Info.FFDiag(Loc); |
| 3254 | return false; |
| 3255 | } |
| 3256 | |
| 3257 | if (SOT == SizeOfType::SizeOf) |
| 3258 | Size = Info.Ctx.getTypeSizeInChars(T: Type); |
| 3259 | else |
| 3260 | Size = Info.Ctx.getTypeInfoDataSizeInChars(T: Type).Width; |
| 3261 | return true; |
| 3262 | } |
| 3263 | |
| 3264 | /// Update a pointer value to model pointer arithmetic. |
| 3265 | /// \param Info - Information about the ongoing evaluation. |
| 3266 | /// \param E - The expression being evaluated, for diagnostic purposes. |
| 3267 | /// \param LVal - The pointer value to be updated. |
| 3268 | /// \param EltTy - The pointee type represented by LVal. |
| 3269 | /// \param Adjustment - The adjustment, in objects of type EltTy, to add. |
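|      | /// A hedged illustration (user-level snippet, not part of this file):
|      | /// \code
|      | ///   constexpr int arr[4] = {};
|      | ///   constexpr const int *p = arr + 3;  // offset grows by 3 * sizeof(int)
|      | ///   constexpr const int *q = arr + 5;  // diagnosed: beyond one-past-the-end
|      | /// \endcode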
| 3270 | static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E, |
| 3271 | LValue &LVal, QualType EltTy, |
| 3272 | APSInt Adjustment) { |
| 3273 | CharUnits SizeOfPointee; |
| 3274 | if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfPointee)) |
| 3275 | return false; |
| 3276 | |
| 3277 | LVal.adjustOffsetAndIndex(Info, E, Index: Adjustment, ElementSize: SizeOfPointee); |
| 3278 | return true; |
| 3279 | } |
| 3280 | |
| 3281 | static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E, |
| 3282 | LValue &LVal, QualType EltTy, |
| 3283 | int64_t Adjustment) { |
| 3284 | return HandleLValueArrayAdjustment(Info, E, LVal, EltTy, |
| 3285 | Adjustment: APSInt::get(X: Adjustment)); |
| 3286 | } |
| 3287 | |
| 3288 | /// Update an lvalue to refer to a component of a complex number. |
| 3289 | /// \param Info - Information about the ongoing evaluation. |
| 3290 | /// \param LVal - The lvalue to be updated. |
| 3291 | /// \param EltTy - The complex number's component type. |
| 3292 | /// \param Imag - False for the real component, true for the imaginary. |
| 3293 | static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E, |
| 3294 | LValue &LVal, QualType EltTy, |
| 3295 | bool Imag) { |
| 3296 | if (Imag) { |
| 3297 | CharUnits SizeOfComponent; |
| 3298 | if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfComponent)) |
| 3299 | return false; |
| 3300 | LVal.Offset += SizeOfComponent; |
| 3301 | } |
| 3302 | LVal.addComplex(Info, E, EltTy, Imag); |
| 3303 | return true; |
| 3304 | } |
| 3305 | |
| 3306 | static bool HandleLValueVectorElement(EvalInfo &Info, const Expr *E, |
| 3307 | LValue &LVal, QualType EltTy, |
| 3308 | uint64_t Size, uint64_t Idx) { |
| 3309 | if (Idx) { |
| 3310 | CharUnits SizeOfElement; |
| 3311 | if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: EltTy, Size&: SizeOfElement)) |
| 3312 | return false; |
| 3313 | LVal.Offset += SizeOfElement * Idx; |
| 3314 | } |
| 3315 | LVal.addVectorElement(Info, E, EltTy, Size, Idx); |
| 3316 | return true; |
| 3317 | } |
| 3318 | |
| 3319 | /// Try to evaluate the initializer for a variable declaration. |
| 3320 | /// |
| 3321 | /// \param Info Information about the ongoing evaluation. |
| 3322 | /// \param E An expression to be used when printing diagnostics. |
| 3323 | /// \param VD The variable whose initializer should be obtained. |
| 3324 | /// \param Version The version of the variable within the frame. |
| 3325 | /// \param Frame The frame in which the variable was created. Must be null |
| 3326 | /// if this variable is not local to the evaluation. |
| 3327 | /// \param Result Filled in with a pointer to the value of the variable. |
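|      | ///
|      | /// Since P2280R4 (C++23), a reference whose binding is unknown can still be
|      | /// used in a constant expression as long as its value is not read. A hedged
|      | /// illustration (user-level snippet, not part of this file):
|      | /// \code
|      | ///   template <typename T, std::size_t N>
|      | ///   constexpr std::size_t arraySize(T (&)[N]) { return N; }
|      | ///   void use(int (&param)[3]) {
|      | ///     constexpr std::size_t n = arraySize(param);  // OK since P2280R4
|      | ///   }
|      | /// \endcode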
| 3328 | static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E, |
| 3329 | const VarDecl *VD, CallStackFrame *Frame, |
| 3330 | unsigned Version, APValue *&Result) { |
| 3331 | // C++23 [expr.const]p8: if we have a reference type, allow unknown references
| 3332 | // and pointers.
| 3333 | bool AllowConstexprUnknown = |
| 3334 | Info.getLangOpts().CPlusPlus23 && VD->getType()->isReferenceType(); |
| 3335 | |
| 3336 | APValue::LValueBase Base(VD, Frame ? Frame->Index : 0, Version); |
| 3337 | |
| 3338 | auto CheckUninitReference = [&](bool IsLocalVariable) { |
| 3339 | if (!Result || (!Result->hasValue() && VD->getType()->isReferenceType())) { |
| 3340 | // C++23 [expr.const]p8 |
| 3341 | // ... For such an object that is not usable in constant expressions, the |
| 3342 | // dynamic type of the object is constexpr-unknown. For such a reference |
| 3343 | // that is not usable in constant expressions, the reference is treated |
| 3344 | // as binding to an unspecified object of the referenced type whose |
| 3345 | // lifetime and that of all subobjects includes the entire constant |
| 3346 | // evaluation and whose dynamic type is constexpr-unknown. |
| 3347 | // |
| 3348 | // Variables that are part of the current evaluation are not |
| 3349 | // constexpr-unknown. |
| 3350 | if (!AllowConstexprUnknown || IsLocalVariable) { |
| 3351 | if (!Info.checkingPotentialConstantExpression()) |
| 3352 | Info.FFDiag(E, DiagId: diag::note_constexpr_use_uninit_reference); |
| 3353 | return false; |
| 3354 | } |
| 3355 | Result = nullptr; |
| 3356 | } |
| 3357 | return true; |
| 3358 | }; |
| 3359 | |
| 3360 | // If this is a local variable, dig out its value. |
| 3361 | if (Frame) { |
| 3362 | Result = Frame->getTemporary(Key: VD, Version); |
| 3363 | if (Result) |
| 3364 | return CheckUninitReference(/*IsLocalVariable=*/true); |
| 3365 | |
| 3366 | if (!isa<ParmVarDecl>(Val: VD)) { |
| 3367 | // Assume variables referenced within a lambda's call operator that were |
| 3368 | // not declared within the call operator are captures and during checking |
| 3369 | // of a potential constant expression, assume they are unknown constant |
| 3370 | // expressions. |
| 3371 | assert(isLambdaCallOperator(Frame->Callee) && |
| 3372 | (VD->getDeclContext() != Frame->Callee || VD->isInitCapture()) && |
| 3373 | "missing value for local variable");
| 3374 | if (Info.checkingPotentialConstantExpression()) |
| 3375 | return false; |
| 3376 | // FIXME: This diagnostic is bogus; we do support captures. Is this code |
| 3377 | // still reachable at all? |
| 3378 | Info.FFDiag(Loc: E->getBeginLoc(), |
| 3379 | DiagId: diag::note_unimplemented_constexpr_lambda_feature_ast) |
| 3380 | << "captures not currently allowed";
| 3381 | return false; |
| 3382 | } |
| 3383 | } |
| 3384 | |
| 3385 | // If we're currently evaluating the initializer of this declaration, use that |
| 3386 | // in-flight value. |
| 3387 | if (Info.EvaluatingDecl == Base) { |
| 3388 | Result = Info.EvaluatingDeclValue; |
| 3389 | return CheckUninitReference(/*IsLocalVariable=*/false); |
| 3390 | } |
| 3391 | |
| 3392 | // P2280R4 struck the restriction that the lifetime of a variable of reference
| 3393 | // type should begin within the evaluation of E.
| 3394 | // Used to be C++20 [expr.const]p5.12.2: |
| 3395 | // ... its lifetime began within the evaluation of E; |
| 3396 | if (isa<ParmVarDecl>(Val: VD)) { |
| 3397 | if (AllowConstexprUnknown) { |
| 3398 | Result = nullptr; |
| 3399 | return true; |
| 3400 | } |
| 3401 | |
| 3402 | // Assume parameters of a potential constant expression are usable in |
| 3403 | // constant expressions. |
| 3404 | if (!Info.checkingPotentialConstantExpression() || |
| 3405 | !Info.CurrentCall->Callee || |
| 3406 | !Info.CurrentCall->Callee->Equals(DC: VD->getDeclContext())) { |
| 3407 | if (Info.getLangOpts().CPlusPlus11) { |
| 3408 | Info.FFDiag(E, DiagId: diag::note_constexpr_function_param_value_unknown) |
| 3409 | << VD; |
| 3410 | NoteLValueLocation(Info, Base); |
| 3411 | } else { |
| 3412 | Info.FFDiag(E); |
| 3413 | } |
| 3414 | } |
| 3415 | return false; |
| 3416 | } |
| 3417 | |
| 3418 | if (E->isValueDependent()) |
| 3419 | return false; |
| 3420 | |
| 3421 | // Dig out the initializer, and use the declaration which it's attached to. |
| 3422 | // FIXME: We should eventually check whether the variable has a reachable |
| 3423 | // initializing declaration. |
| 3424 | const Expr *Init = VD->getAnyInitializer(D&: VD); |
| 3425 | // P2280R4 struck the restriction that a variable of reference type should
| 3426 | // have a preceding initialization.
| 3427 | // Used to be C++20 [expr.const]p5.12: |
| 3428 | // ... reference has a preceding initialization and either ... |
| 3429 | if (!Init && !AllowConstexprUnknown) { |
| 3430 | // Don't diagnose during potential constant expression checking; an |
| 3431 | // initializer might be added later. |
| 3432 | if (!Info.checkingPotentialConstantExpression()) { |
| 3433 | Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_unknown, ExtraNotes: 1) |
| 3434 | << VD; |
| 3435 | NoteLValueLocation(Info, Base); |
| 3436 | } |
| 3437 | return false; |
| 3438 | } |
| 3439 | |
| 3440 | // P2280R4 struck the initialization requirement for variables of reference |
| 3441 | // type so we can no longer assume we have an Init. |
| 3442 | // Used to be C++20 [expr.const]p5.12: |
| 3443 | // ... reference has a preceding initialization and either ... |
| 3444 | if (Init && Init->isValueDependent()) { |
| 3445 | // The DeclRefExpr is not value-dependent, but the variable it refers to |
| 3446 | // has a value-dependent initializer. This should only happen in |
| 3447 | // constant-folding cases, where the variable is not actually of a suitable |
| 3448 | // type for use in a constant expression (otherwise the DeclRefExpr would |
| 3449 | // have been value-dependent too), so diagnose that. |
| 3450 | assert(!VD->mightBeUsableInConstantExpressions(Info.Ctx)); |
| 3451 | if (!Info.checkingPotentialConstantExpression()) { |
| 3452 | Info.FFDiag(E, DiagId: Info.getLangOpts().CPlusPlus11 |
| 3453 | ? diag::note_constexpr_ltor_non_constexpr |
| 3454 | : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1) |
| 3455 | << VD << VD->getType(); |
| 3456 | NoteLValueLocation(Info, Base); |
| 3457 | } |
| 3458 | return false; |
| 3459 | } |
| 3460 | |
| 3461 | // Check that we can fold the initializer. In C++, we will have already done |
| 3462 | // this in the cases where it matters for conformance. |
| 3463 | // P2280R4 struck the initialization requirement for variables of reference |
| 3464 | // type so we can no longer assume we have an Init. |
| 3465 | // Used to be C++20 [expr.const]p5.12: |
| 3466 | // ... reference has a preceding initialization and either ... |
| 3467 | if (Init && !VD->evaluateValue() && !AllowConstexprUnknown) { |
| 3468 | Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << VD; |
| 3469 | NoteLValueLocation(Info, Base); |
| 3470 | return false; |
| 3471 | } |
| 3472 | |
| 3473 | // Check that the variable is actually usable in constant expressions. For a |
| 3474 | // const integral variable or a reference, we might have a non-constant |
| 3475 | // initializer that we can nonetheless evaluate the initializer for. Such |
| 3476 | // variables are not usable in constant expressions. In C++98, the |
| 3477 | // initializer also syntactically needs to be an ICE. |
| 3478 | // |
| 3479 | // FIXME: We don't diagnose cases that aren't potentially usable in constant |
| 3480 | // expressions here; doing so would regress diagnostics for things like |
| 3481 | // reading from a volatile constexpr variable. |
| 3482 | if ((Info.getLangOpts().CPlusPlus && !VD->hasConstantInitialization() && |
| 3483 | VD->mightBeUsableInConstantExpressions(C: Info.Ctx) && |
| 3484 | !AllowConstexprUnknown) || |
| 3485 | ((Info.getLangOpts().CPlusPlus || Info.getLangOpts().OpenCL) && |
| 3486 | !Info.getLangOpts().CPlusPlus11 && !VD->hasICEInitializer(Context: Info.Ctx))) { |
| 3487 | if (Init) { |
| 3488 | Info.CCEDiag(E, DiagId: diag::note_constexpr_var_init_non_constant, ExtraNotes: 1) << VD; |
| 3489 | NoteLValueLocation(Info, Base); |
| 3490 | } else { |
| 3491 | Info.CCEDiag(E); |
| 3492 | } |
| 3493 | } |
| 3494 | |
| 3495 | // Never use the initializer of a weak variable, not even for constant |
| 3496 | // folding. We can't be sure that this is the definition that will be used. |
| 3497 | if (VD->isWeak()) { |
| 3498 | Info.FFDiag(E, DiagId: diag::note_constexpr_var_init_weak) << VD; |
| 3499 | NoteLValueLocation(Info, Base); |
| 3500 | return false; |
| 3501 | } |
| 3502 | |
| 3503 | Result = VD->getEvaluatedValue(); |
| 3504 | |
| 3505 | if (!Result && !AllowConstexprUnknown) |
| 3506 | return false; |
| 3507 | |
| 3508 | return CheckUninitReference(/*IsLocalVariable=*/false); |
| 3509 | } |
| 3510 | |
| 3511 | /// Get the base index of the given base class within an APValue representing |
| 3512 | /// the given derived class. |
| 3513 | static unsigned getBaseIndex(const CXXRecordDecl *Derived, |
| 3514 | const CXXRecordDecl *Base) { |
| 3515 | Base = Base->getCanonicalDecl(); |
| 3516 | unsigned Index = 0; |
| 3517 | for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(), |
| 3518 | E = Derived->bases_end(); I != E; ++I, ++Index) { |
| 3519 | if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base) |
| 3520 | return Index; |
| 3521 | } |
| 3522 | |
| 3523 | llvm_unreachable("base class missing from derived class's bases list");
| 3524 | } |
| 3525 | |
| 3526 | /// Extract the value of a character from a string literal. |
| 3527 | static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
| 3528 | uint64_t Index) { |
| 3529 | assert(!isa<SourceLocExpr>(Lit) && |
| 3530 | "SourceLocExpr should have already been converted to a StringLiteral");
| 3531 | |
| 3532 | // FIXME: Support MakeStringConstant |
| 3533 | if (const auto *ObjCEnc = dyn_cast<ObjCEncodeExpr>(Val: Lit)) { |
| 3534 | std::string Str; |
| 3535 | Info.Ctx.getObjCEncodingForType(T: ObjCEnc->getEncodedType(), S&: Str); |
| 3536 | assert(Index <= Str.size() && "Index too large");
| 3537 | return APSInt::getUnsigned(X: Str.c_str()[Index]); |
| 3538 | } |
| 3539 | |
| 3540 | if (auto PE = dyn_cast<PredefinedExpr>(Val: Lit)) |
| 3541 | Lit = PE->getFunctionName(); |
| 3542 | const StringLiteral *S = cast<StringLiteral>(Val: Lit); |
| 3543 | const ConstantArrayType *CAT = |
| 3544 | Info.Ctx.getAsConstantArrayType(T: S->getType()); |
| 3545 | assert(CAT && "string literal isn't an array");
| 3546 | QualType CharType = CAT->getElementType(); |
| 3547 | assert(CharType->isIntegerType() && "unexpected character type");
| 3548 | APSInt Value(Info.Ctx.getTypeSize(T: CharType), |
| 3549 | CharType->isUnsignedIntegerType()); |
| 3550 | if (Index < S->getLength()) |
| 3551 | Value = S->getCodeUnit(i: Index); |
| 3552 | return Value; |
| 3553 | } |
| 3554 | |
| 3555 | // Expand a string literal into an array of characters. |
| 3556 | // |
| 3557 | // FIXME: This is inefficient; we should probably introduce something similar |
| 3558 | // to the LLVM ConstantDataArray to make this cheaper. |
| 3559 | static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S, |
| 3560 | APValue &Result, |
| 3561 | QualType AllocType = QualType()) { |
| 3562 | const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType( |
| 3563 | T: AllocType.isNull() ? S->getType() : AllocType); |
| 3564 | assert(CAT && "string literal isn't an array");
| 3565 | QualType CharType = CAT->getElementType(); |
| 3566 | assert(CharType->isIntegerType() && "unexpected character type");
| 3567 | |
| 3568 | unsigned Elts = CAT->getZExtSize(); |
| 3569 | Result = APValue(APValue::UninitArray(), |
| 3570 | std::min(a: S->getLength(), b: Elts), Elts); |
| 3571 | APSInt Value(Info.Ctx.getTypeSize(T: CharType), |
| 3572 | CharType->isUnsignedIntegerType()); |
| 3573 | if (Result.hasArrayFiller()) |
| 3574 | Result.getArrayFiller() = APValue(Value); |
| 3575 | for (unsigned I = 0, N = Result.getArrayInitializedElts(); I != N; ++I) { |
| 3576 | Value = S->getCodeUnit(i: I); |
| 3577 | Result.getArrayInitializedElt(I) = APValue(Value); |
| 3578 | } |
| 3579 | } |
| 3580 | |
| 3581 | // Expand an array so that it has more than Index filled elements. |
| 3582 | static void expandArray(APValue &Array, unsigned Index) { |
| 3583 | unsigned Size = Array.getArraySize(); |
| 3584 | assert(Index < Size); |
| 3585 | |
| 3586 | // Always at least double the number of elements for which we store a value. |
| 3587 | unsigned OldElts = Array.getArrayInitializedElts(); |
| 3588 | unsigned NewElts = std::max(a: Index+1, b: OldElts * 2); |
| 3589 | NewElts = std::min(a: Size, b: std::max(a: NewElts, b: 8u)); |
| 3590 | |
| 3591 | // Copy the data across. |
| 3592 | APValue NewValue(APValue::UninitArray(), NewElts, Size); |
| 3593 | for (unsigned I = 0; I != OldElts; ++I) |
| 3594 | NewValue.getArrayInitializedElt(I).swap(RHS&: Array.getArrayInitializedElt(I)); |
| 3595 | for (unsigned I = OldElts; I != NewElts; ++I) |
| 3596 | NewValue.getArrayInitializedElt(I) = Array.getArrayFiller(); |
| 3597 | if (NewValue.hasArrayFiller()) |
| 3598 | NewValue.getArrayFiller() = Array.getArrayFiller(); |
| 3599 | Array.swap(RHS&: NewValue); |
| 3600 | } |
| 3601 | |
| 3602 | /// Determine whether a type would actually be read by an lvalue-to-rvalue |
| 3603 | /// conversion. If it's of class type, we may assume that the copy operation |
| 3604 | /// is trivial. Note that this is always true for a union type with fields
| 3605 | /// (because the copy always "reads" the active member) and always true for |
| 3606 | /// a non-class type. |
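|      | ///
|      | /// For example (an illustrative sketch, not part of this file):
|      | /// \code
|      | ///   struct S { mutable int m; };  // a trivial copy of S reads 'm'
|      | ///   struct E {};                  // a trivial copy of E reads nothing
|      | /// \endcode
|      | /// so constant evaluation must reject the read of the mutable member when
|      | /// copying an S, while copying an E reads no subobject at all.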
| 3607 | static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD); |
| 3608 | static bool isReadByLvalueToRvalueConversion(QualType T) { |
| 3609 | CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); |
| 3610 | return !RD || isReadByLvalueToRvalueConversion(RD); |
| 3611 | } |
| 3612 | static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) { |
| 3613 | // FIXME: A trivial copy of a union copies the object representation, even if |
| 3614 | // the union is empty. |
| 3615 | if (RD->isUnion()) |
| 3616 | return !RD->field_empty(); |
| 3617 | if (RD->isEmpty()) |
| 3618 | return false; |
| 3619 | |
| 3620 | for (auto *Field : RD->fields()) |
| 3621 | if (!Field->isUnnamedBitField() && |
| 3622 | isReadByLvalueToRvalueConversion(T: Field->getType())) |
| 3623 | return true; |
| 3624 | |
| 3625 | for (auto &BaseSpec : RD->bases()) |
| 3626 | if (isReadByLvalueToRvalueConversion(T: BaseSpec.getType())) |
| 3627 | return true; |
| 3628 | |
| 3629 | return false; |
| 3630 | } |
| 3631 | |
| 3632 | /// Diagnose an attempt to read from any unreadable field within the specified |
| 3633 | /// type, which might be a class type. |
| 3634 | static bool diagnoseMutableFields(EvalInfo &Info, const Expr *E, AccessKinds AK, |
| 3635 | QualType T) { |
| 3636 | CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); |
| 3637 | if (!RD) |
| 3638 | return false; |
| 3639 | |
| 3640 | if (!RD->hasMutableFields()) |
| 3641 | return false; |
| 3642 | |
| 3643 | for (auto *Field : RD->fields()) { |
| 3644 | // If we're actually going to read this field in some way, then it can't |
| 3645 | // be mutable. If we're in a union, then assigning to a mutable field |
| 3646 | // (even an empty one) can change the active member, so that's not OK. |
| 3647 | // FIXME: Add core issue number for the union case. |
| 3648 | if (Field->isMutable() && |
| 3649 | (RD->isUnion() || isReadByLvalueToRvalueConversion(T: Field->getType()))) { |
| 3650 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_mutable, ExtraNotes: 1) << AK << Field; |
| 3651 | Info.Note(Loc: Field->getLocation(), DiagId: diag::note_declared_at); |
| 3652 | return true; |
| 3653 | } |
| 3654 | |
| 3655 | if (diagnoseMutableFields(Info, E, AK, T: Field->getType())) |
| 3656 | return true; |
| 3657 | } |
| 3658 | |
| 3659 | for (auto &BaseSpec : RD->bases()) |
| 3660 | if (diagnoseMutableFields(Info, E, AK, T: BaseSpec.getType())) |
| 3661 | return true; |
| 3662 | |
| 3663 | // All mutable fields were empty, and thus not actually read. |
| 3664 | return false; |
| 3665 | } |
| 3666 | |
| 3667 | static bool lifetimeStartedInEvaluation(EvalInfo &Info, |
| 3668 | APValue::LValueBase Base, |
| 3669 | bool MutableSubobject = false) { |
| 3670 | // A temporary or transient heap allocation we created. |
| 3671 | if (Base.getCallIndex() || Base.is<DynamicAllocLValue>()) |
| 3672 | return true; |
| 3673 | |
| 3674 | switch (Info.IsEvaluatingDecl) { |
| 3675 | case EvalInfo::EvaluatingDeclKind::None: |
| 3676 | return false; |
| 3677 | |
| 3678 | case EvalInfo::EvaluatingDeclKind::Ctor: |
| 3679 | // The variable whose initializer we're evaluating. |
| 3680 | if (Info.EvaluatingDecl == Base) |
| 3681 | return true; |
| 3682 | |
| 3683 | // A temporary lifetime-extended by the variable whose initializer we're |
| 3684 | // evaluating. |
| 3685 | if (auto *BaseE = Base.dyn_cast<const Expr *>()) |
| 3686 | if (auto *BaseMTE = dyn_cast<MaterializeTemporaryExpr>(Val: BaseE)) |
| 3687 | return Info.EvaluatingDecl == BaseMTE->getExtendingDecl(); |
| 3688 | return false; |
| 3689 | |
| 3690 | case EvalInfo::EvaluatingDeclKind::Dtor: |
| 3691 | // C++2a [expr.const]p6: |
| 3692 | // [during constant destruction] the lifetime of a and its non-mutable |
| 3693 | // subobjects (but not its mutable subobjects) [are] considered to start |
| 3694 | // within e. |
| 3695 | if (MutableSubobject || Base != Info.EvaluatingDecl) |
| 3696 | return false; |
| 3697 | // FIXME: We can meaningfully extend this to cover non-const objects, but |
| 3698 | // we will need special handling: we should be able to access only |
| 3699 | // subobjects of such objects that are themselves declared const. |
| 3700 | QualType T = getType(B: Base); |
| 3701 | return T.isConstQualified() || T->isReferenceType(); |
| 3702 | } |
| 3703 | |
| 3704 | llvm_unreachable("unknown evaluating decl kind");
| 3705 | } |
| 3706 | |
| 3707 | static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT, |
| 3708 | SourceLocation CallLoc = {}) { |
| 3709 | return Info.CheckArraySize( |
| 3710 | Loc: CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc, |
| 3711 | BitWidth: CAT->getNumAddressingBits(Context: Info.Ctx), ElemCount: CAT->getZExtSize(), |
| 3712 | /*Diag=*/true); |
| 3713 | } |
| 3714 | |
| 3715 | static bool handleScalarCast(EvalInfo &Info, const FPOptions FPO, const Expr *E, |
| 3716 | QualType SourceTy, QualType DestTy, |
| 3717 | APValue const &Original, APValue &Result) { |
| 3718 | // Booleans must be checked before integers,
| 3719 | // since isIntegerType() is true for bool.
| 3720 | if (SourceTy->isBooleanType()) { |
| 3721 | if (DestTy->isBooleanType()) { |
| 3722 | Result = Original; |
| 3723 | return true; |
| 3724 | } |
| 3725 | if (DestTy->isIntegerType() || DestTy->isRealFloatingType()) { |
| 3726 | bool BoolResult; |
| 3727 | if (!HandleConversionToBool(Val: Original, Result&: BoolResult)) |
| 3728 | return false; |
| 3729 | uint64_t IntResult = BoolResult; |
| 3730 | QualType IntType = DestTy->isIntegerType() |
| 3731 | ? DestTy |
| 3732 | : Info.Ctx.getIntTypeForBitwidth(DestWidth: 64, Signed: false); |
| 3733 | Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: IntType)); |
| 3734 | } |
| 3735 | if (DestTy->isRealFloatingType()) { |
| 3736 | APValue Result2 = APValue(APFloat(0.0)); |
| 3737 | if (!HandleIntToFloatCast(Info, E, FPO, |
| 3738 | SrcType: Info.Ctx.getIntTypeForBitwidth(DestWidth: 64, Signed: false), |
| 3739 | Value: Result.getInt(), DestType: DestTy, Result&: Result2.getFloat())) |
| 3740 | return false; |
| 3741 | Result = Result2; |
| 3742 | } |
| 3743 | return true; |
| 3744 | } |
| 3745 | if (SourceTy->isIntegerType()) { |
| 3746 | if (DestTy->isRealFloatingType()) { |
| 3747 | Result = APValue(APFloat(0.0)); |
| 3748 | return HandleIntToFloatCast(Info, E, FPO, SrcType: SourceTy, Value: Original.getInt(), |
| 3749 | DestType: DestTy, Result&: Result.getFloat()); |
| 3750 | } |
| 3751 | if (DestTy->isBooleanType()) { |
| 3752 | bool BoolResult; |
| 3753 | if (!HandleConversionToBool(Val: Original, Result&: BoolResult)) |
| 3754 | return false; |
| 3755 | uint64_t IntResult = BoolResult; |
| 3756 | Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: DestTy)); |
| 3757 | return true; |
| 3758 | } |
| 3759 | if (DestTy->isIntegerType()) { |
| 3760 | Result = APValue( |
| 3761 | HandleIntToIntCast(Info, E, DestType: DestTy, SrcType: SourceTy, Value: Original.getInt())); |
| 3762 | return true; |
| 3763 | } |
| 3764 | } else if (SourceTy->isRealFloatingType()) { |
| 3765 | if (DestTy->isRealFloatingType()) { |
| 3766 | Result = Original; |
| 3767 | return HandleFloatToFloatCast(Info, E, SrcType: SourceTy, DestType: DestTy, |
| 3768 | Result&: Result.getFloat()); |
| 3769 | } |
| 3770 | if (DestTy->isBooleanType()) { |
| 3771 | bool BoolResult; |
| 3772 | if (!HandleConversionToBool(Val: Original, Result&: BoolResult)) |
| 3773 | return false; |
| 3774 | uint64_t IntResult = BoolResult; |
| 3775 | Result = APValue(Info.Ctx.MakeIntValue(Value: IntResult, Type: DestTy)); |
| 3776 | return true; |
| 3777 | } |
| 3778 | if (DestTy->isIntegerType()) { |
| 3779 | Result = APValue(APSInt()); |
| 3780 | return HandleFloatToIntCast(Info, E, SrcType: SourceTy, Value: Original.getFloat(), |
| 3781 | DestType: DestTy, Result&: Result.getInt()); |
| 3782 | } |
| 3783 | } |
| 3784 | |
| 3785 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 3786 | return false; |
| 3787 | } |
| 3788 | |
| 3789 | // do the heavy lifting for casting to aggregate types |
| 3790 | // because we have to deal with bitfields specially |
| 3791 | static bool constructAggregate(EvalInfo &Info, const FPOptions FPO, |
| 3792 | const Expr *E, APValue &Result, |
| 3793 | QualType ResultType, |
| 3794 | SmallVectorImpl<APValue> &Elements, |
| 3795 | SmallVectorImpl<QualType> &ElTypes) { |
| 3796 | |
| 3797 | SmallVector<std::tuple<APValue *, QualType, unsigned>> WorkList = { |
| 3798 | {&Result, ResultType, 0}}; |
| 3799 | |
| 3800 | unsigned ElI = 0; |
| 3801 | while (!WorkList.empty() && ElI < Elements.size()) { |
| 3802 | auto [Res, Type, BitWidth] = WorkList.pop_back_val(); |
| 3803 | |
| 3804 | if (Type->isRealFloatingType()) { |
| 3805 | if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: Type, Original: Elements[ElI], |
| 3806 | Result&: *Res)) |
| 3807 | return false; |
| 3808 | ElI++; |
| 3809 | continue; |
| 3810 | } |
| 3811 | if (Type->isIntegerType()) { |
| 3812 | if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: Type, Original: Elements[ElI], |
| 3813 | Result&: *Res)) |
| 3814 | return false; |
| 3815 | if (BitWidth > 0) { |
| 3816 | if (!Res->isInt()) |
| 3817 | return false; |
| 3818 | APSInt &Int = Res->getInt(); |
| 3819 | unsigned OldBitWidth = Int.getBitWidth(); |
| 3820 | unsigned NewBitWidth = BitWidth; |
| 3821 | if (NewBitWidth < OldBitWidth) |
| 3822 | Int = Int.trunc(width: NewBitWidth).extend(width: OldBitWidth); |
| 3823 | } |
| 3824 | ElI++; |
| 3825 | continue; |
| 3826 | } |
| 3827 | if (Type->isVectorType()) { |
| 3828 | QualType ElTy = Type->castAs<VectorType>()->getElementType(); |
| 3829 | unsigned NumEl = Type->castAs<VectorType>()->getNumElements(); |
| 3830 | SmallVector<APValue> Vals(NumEl); |
| 3831 | for (unsigned I = 0; I < NumEl; ++I) { |
| 3832 | if (!handleScalarCast(Info, FPO, E, SourceTy: ElTypes[ElI], DestTy: ElTy, Original: Elements[ElI], |
| 3833 | Result&: Vals[I])) |
| 3834 | return false; |
| 3835 | ElI++; |
| 3836 | } |
| 3837 | *Res = APValue(Vals.data(), NumEl); |
| 3838 | continue; |
| 3839 | } |
| 3840 | if (Type->isConstantArrayType()) { |
| 3841 | QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type)) |
| 3842 | ->getElementType(); |
| 3843 | uint64_t Size = |
| 3844 | cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))->getZExtSize(); |
| 3845 | *Res = APValue(APValue::UninitArray(), Size, Size); |
| 3846 | for (int64_t I = Size - 1; I > -1; --I) |
| 3847 | WorkList.emplace_back(Args: &Res->getArrayInitializedElt(I), Args&: ElTy, Args: 0u); |
| 3848 | continue; |
| 3849 | } |
| 3850 | if (Type->isRecordType()) { |
| 3851 | const RecordDecl *RD = Type->getAsRecordDecl(); |
| 3852 | |
| 3853 | unsigned NumBases = 0; |
| 3854 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) |
| 3855 | NumBases = CXXRD->getNumBases(); |
| 3856 | |
| 3857 | *Res = APValue(APValue::UninitStruct(), NumBases, RD->getNumFields()); |
| 3858 | |
| 3859 | SmallVector<std::tuple<APValue *, QualType, unsigned>> ReverseList; |
| 3860 | // Collect entries in declaration order, then reverse them so the LIFO |
| 3861 | // worklist pops them in the same order. Visit the base classes. |
| 3862 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
| 3863 | if (CXXRD->getNumBases() > 0) { |
| 3864 | assert(CXXRD->getNumBases() == 1); |
| 3865 | const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0]; |
| 3866 | ReverseList.emplace_back(Args: &Res->getStructBase(i: 0), Args: BS.getType(), Args: 0u); |
| 3867 | } |
| 3868 | } |
| 3869 | |
| 3870 | // Visit the fields. |
| 3871 | for (FieldDecl *FD : RD->fields()) { |
| 3872 | unsigned FDBW = 0; |
| 3873 | if (FD->isUnnamedBitField()) |
| 3874 | continue; |
| 3875 | if (FD->isBitField()) { |
| 3876 | FDBW = FD->getBitWidthValue(); |
| 3877 | } |
| 3878 | |
| 3879 | ReverseList.emplace_back(Args: &Res->getStructField(i: FD->getFieldIndex()), |
| 3880 | Args: FD->getType(), Args&: FDBW); |
| 3881 | } |
| 3882 | |
| 3883 | std::reverse(first: ReverseList.begin(), last: ReverseList.end()); |
| 3884 | llvm::append_range(C&: WorkList, R&: ReverseList); |
| 3885 | continue; |
| 3886 | } |
| 3887 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 3888 | return false; |
| 3889 | } |
| 3890 | return true; |
| 3891 | } |
| 3892 | |
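|  | // Cast each flattened source element to its corresponding destination type. |
|  | // Elements, SrcTypes, and DestTypes are parallel lists, as asserted below. |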
| 3893 | static bool handleElementwiseCast(EvalInfo &Info, const Expr *E, |
| 3894 | const FPOptions FPO, |
| 3895 | SmallVectorImpl<APValue> &Elements, |
| 3896 | SmallVectorImpl<QualType> &SrcTypes, |
| 3897 | SmallVectorImpl<QualType> &DestTypes, |
| 3898 | SmallVectorImpl<APValue> &Results) { |
| 3899 | |
| 3900 | assert((Elements.size() == SrcTypes.size()) && |
| 3901 | (Elements.size() == DestTypes.size())); |
| 3902 | |
| 3903 | for (unsigned I = 0, ESz = Elements.size(); I < ESz; ++I) { |
| 3904 | APValue Original = Elements[I]; |
| 3905 | QualType SourceTy = SrcTypes[I]; |
| 3906 | QualType DestTy = DestTypes[I]; |
| 3907 | |
| 3908 | if (!handleScalarCast(Info, FPO, E, SourceTy, DestTy, Original, Result&: Results[I])) |
| 3909 | return false; |
| 3910 | } |
| 3911 | return true; |
| 3912 | } |
| 3913 | |
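|  | // Count the scalar elements that BaseTy flattens to: one per scalar, the |
|  | // element count for a vector, and the sum over elements/fields for arrays |
|  | // and records (unnamed bit-fields are skipped). For example, a struct |
|  | // holding an int and a two-element vector flattens to three elements. |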
| 3914 | static unsigned elementwiseSize(EvalInfo &Info, QualType BaseTy) { |
| 3915 | |
| 3916 | SmallVector<QualType> WorkList = {BaseTy}; |
| 3917 | |
| 3918 | unsigned Size = 0; |
| 3919 | while (!WorkList.empty()) { |
| 3920 | QualType Type = WorkList.pop_back_val(); |
| 3921 | if (Type->isRealFloatingType() || Type->isIntegerType() || |
| 3922 | Type->isBooleanType()) { |
| 3923 | ++Size; |
| 3924 | continue; |
| 3925 | } |
| 3926 | if (Type->isVectorType()) { |
| 3927 | unsigned NumEl = Type->castAs<VectorType>()->getNumElements(); |
| 3928 | Size += NumEl; |
| 3929 | continue; |
| 3930 | } |
| 3931 | if (Type->isConstantArrayType()) { |
| 3932 | QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type)) |
| 3933 | ->getElementType(); |
| 3934 | uint64_t ArrSize = |
| 3935 | cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type))->getZExtSize(); |
| 3936 | for (uint64_t I = 0; I < ArrSize; ++I) { |
| 3937 | WorkList.push_back(Elt: ElTy); |
| 3938 | } |
| 3939 | continue; |
| 3940 | } |
| 3941 | if (Type->isRecordType()) { |
| 3942 | const RecordDecl *RD = Type->getAsRecordDecl(); |
| 3943 | |
| 3944 | // Visit the base classes. |
| 3945 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
| 3946 | if (CXXRD->getNumBases() > 0) { |
| 3947 | assert(CXXRD->getNumBases() == 1); |
| 3948 | const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0]; |
| 3949 | WorkList.push_back(Elt: BS.getType()); |
| 3950 | } |
| 3951 | } |
| 3952 | |
| 3953 | // Visit the fields. |
| 3954 | for (FieldDecl *FD : RD->fields()) { |
| 3955 | if (FD->isUnnamedBitField()) |
| 3956 | continue; |
| 3957 | WorkList.push_back(Elt: FD->getType()); |
| 3958 | } |
| 3959 | continue; |
| 3960 | } |
| 3961 | } |
| 3962 | return Size; |
| 3963 | } |
| 3964 | |
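|  | // Evaluate the source of an HLSLAggregateSplatCast and reduce it to a single |
|  | // scalar value plus its type; a one-element vector source is unwrapped to |
|  | // its sole element. |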
| 3965 | static bool hlslAggSplatHelper(EvalInfo &Info, const Expr *E, APValue &SrcVal, |
| 3966 | QualType &SrcTy) { |
| 3967 | SrcTy = E->getType(); |
| 3968 | |
| 3969 | if (!Evaluate(Result&: SrcVal, Info, E)) |
| 3970 | return false; |
| 3971 | |
| 3972 | assert((SrcVal.isFloat() || SrcVal.isInt() || |
| 3973 | (SrcVal.isVector() && SrcVal.getVectorLength() == 1)) && |
| 3974 | "Not a valid HLSLAggregateSplatCast." ); |
| 3975 | |
| 3976 | if (SrcVal.isVector()) { |
| 3977 | assert(SrcTy->isVectorType() && "Type mismatch." ); |
| 3978 | SrcTy = SrcTy->castAs<VectorType>()->getElementType(); |
| 3979 | SrcVal = SrcVal.getVectorElt(I: 0); |
| 3980 | } |
| 3981 | return true; |
| 3982 | } |
| 3983 | |
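|  | // Flatten an already-evaluated APValue into parallel lists of scalar |
|  | // elements and their types, visiting a record's (single) base class before |
|  | // its fields in declaration order. At most Size elements are produced. |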
| 3984 | static bool flattenAPValue(EvalInfo &Info, const Expr *E, APValue Value, |
| 3985 | QualType BaseTy, SmallVectorImpl<APValue> &Elements, |
| 3986 | SmallVectorImpl<QualType> &Types, unsigned Size) { |
| 3987 | |
| 3988 | SmallVector<std::pair<APValue, QualType>> WorkList = {{Value, BaseTy}}; |
| 3989 | unsigned Populated = 0; |
| 3990 | while (!WorkList.empty() && Populated < Size) { |
| 3991 | auto [Work, Type] = WorkList.pop_back_val(); |
| 3992 | |
| 3993 | if (Work.isFloat() || Work.isInt()) { |
| 3994 | Elements.push_back(Elt: Work); |
| 3995 | Types.push_back(Elt: Type); |
| 3996 | Populated++; |
| 3997 | continue; |
| 3998 | } |
| 3999 | if (Work.isVector()) { |
| 4000 | assert(Type->isVectorType() && "Type mismatch." ); |
| 4001 | QualType ElTy = Type->castAs<VectorType>()->getElementType(); |
| 4002 | for (unsigned I = 0; I < Work.getVectorLength() && Populated < Size; |
| 4003 | I++) { |
| 4004 | Elements.push_back(Elt: Work.getVectorElt(I)); |
| 4005 | Types.push_back(Elt: ElTy); |
| 4006 | Populated++; |
| 4007 | } |
| 4008 | continue; |
| 4009 | } |
| 4010 | if (Work.isArray()) { |
| 4011 | assert(Type->isConstantArrayType() && "Type mismatch." ); |
| 4012 | QualType ElTy = cast<ConstantArrayType>(Val: Info.Ctx.getAsArrayType(T: Type)) |
| 4013 | ->getElementType(); |
| 4014 | for (int64_t I = Work.getArraySize() - 1; I > -1; --I) { |
| 4015 | WorkList.emplace_back(Args&: Work.getArrayInitializedElt(I), Args&: ElTy); |
| 4016 | } |
| 4017 | continue; |
| 4018 | } |
| 4019 | |
| 4020 | if (Work.isStruct()) { |
| 4021 | assert(Type->isRecordType() && "Type mismatch." ); |
| 4022 | |
| 4023 | const RecordDecl *RD = Type->getAsRecordDecl(); |
| 4024 | |
| 4025 | SmallVector<std::pair<APValue, QualType>> ReverseList; |
| 4026 | // Visit the fields. |
| 4027 | for (FieldDecl *FD : RD->fields()) { |
| 4028 | if (FD->isUnnamedBitField()) |
| 4029 | continue; |
| 4030 | ReverseList.emplace_back(Args&: Work.getStructField(i: FD->getFieldIndex()), |
| 4031 | Args: FD->getType()); |
| 4032 | } |
| 4033 | |
| 4034 | std::reverse(first: ReverseList.begin(), last: ReverseList.end()); |
| 4035 | llvm::append_range(C&: WorkList, R&: ReverseList); |
| 4036 | |
| 4037 | // Visit the base classes. |
| 4038 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
| 4039 | if (CXXRD->getNumBases() > 0) { |
| 4040 | assert(CXXRD->getNumBases() == 1); |
| 4041 | const CXXBaseSpecifier &BS = CXXRD->bases_begin()[0]; |
| 4042 | const APValue &Base = Work.getStructBase(i: 0); |
| 4043 | |
| 4044 | // Can happen in error cases. |
| 4045 | if (!Base.isStruct()) |
| 4046 | return false; |
| 4047 | |
| 4048 | WorkList.emplace_back(Args: Base, Args: BS.getType()); |
| 4049 | } |
| 4050 | } |
| 4051 | continue; |
| 4052 | } |
| 4053 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 4054 | return false; |
| 4055 | } |
| 4056 | return true; |
| 4057 | } |
| 4058 | |
| 4059 | namespace { |
| 4060 | /// A handle to a complete object (an object that is not a subobject of |
| 4061 | /// another object). |
| 4062 | struct CompleteObject { |
| 4063 | /// The identity of the object. |
| 4064 | APValue::LValueBase Base; |
| 4065 | /// The value of the complete object. |
| 4066 | APValue *Value; |
| 4067 | /// The type of the complete object. |
| 4068 | QualType Type; |
| 4069 | |
| 4070 | CompleteObject() : Value(nullptr) {} |
| 4071 | CompleteObject(APValue::LValueBase Base, APValue *Value, QualType Type) |
| 4072 | : Base(Base), Value(Value), Type(Type) {} |
| 4073 | |
| 4074 | bool mayAccessMutableMembers(EvalInfo &Info, AccessKinds AK) const { |
| 4075 | // If this isn't a "real" access (e.g., if it's just accessing the type |
| 4076 | // info), allow it. We assume the type doesn't change dynamically for |
| 4077 | // subobjects of constexpr objects (even though we'd hit UB here if it |
| 4078 | // did). FIXME: Is this right? |
| 4079 | if (!isAnyAccess(AK)) |
| 4080 | return true; |
| 4081 | |
| 4082 | // In C++14 onwards, it is permitted to read a mutable member whose |
| 4083 | // lifetime began within the evaluation. |
| 4084 | // FIXME: Should we also allow this in C++11? |
| 4085 | if (!Info.getLangOpts().CPlusPlus14 && |
| 4086 | AK != AccessKinds::AK_IsWithinLifetime) |
| 4087 | return false; |
| 4088 | return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/true); |
| 4089 | } |
| 4090 | |
| 4091 | explicit operator bool() const { return !Type.isNull(); } |
| 4092 | }; |
| 4093 | } // end anonymous namespace |
| 4094 | |
| 4095 | static QualType getSubobjectType(QualType ObjType, QualType SubobjType, |
| 4096 | bool IsMutable = false) { |
| 4097 | // C++ [basic.type.qualifier]p1: |
| 4098 | // - A const object is an object of type const T or a non-mutable subobject |
| 4099 | // of a const object. |
| 4100 | if (ObjType.isConstQualified() && !IsMutable) |
| 4101 | SubobjType.addConst(); |
| 4102 | // - A volatile object is an object of type volatile T or a subobject of a |
| 4103 | // volatile object. |
| 4104 | if (ObjType.isVolatileQualified()) |
| 4105 | SubobjType.addVolatile(); |
| 4106 | return SubobjType; |
| 4107 | } |
| 4108 | |
| 4109 | /// Find the designated sub-object of an rvalue. |
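|  | /// For example, for a glvalue designating obj.field[2], the designator path |
|  | /// holds an entry for 'field' followed by the array index 2, and the walk |
|  | /// below descends through the complete object's APValue accordingly. |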
| 4110 | template <typename SubobjectHandler> |
| 4111 | static typename SubobjectHandler::result_type |
| 4112 | findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj, |
| 4113 | const SubobjectDesignator &Sub, SubobjectHandler &handler) { |
| 4114 | if (Sub.Invalid) |
| 4115 | // A diagnostic will have already been produced. |
| 4116 | return handler.failed(); |
| 4117 | if (Sub.isOnePastTheEnd() || Sub.isMostDerivedAnUnsizedArray()) { |
| 4118 | if (Info.getLangOpts().CPlusPlus11) |
| 4119 | Info.FFDiag(E, DiagId: Sub.isOnePastTheEnd() |
| 4120 | ? diag::note_constexpr_access_past_end |
| 4121 | : diag::note_constexpr_access_unsized_array) |
| 4122 | << handler.AccessKind; |
| 4123 | else |
| 4124 | Info.FFDiag(E); |
| 4125 | return handler.failed(); |
| 4126 | } |
| 4127 | |
| 4128 | APValue *O = Obj.Value; |
| 4129 | QualType ObjType = Obj.Type; |
| 4130 | const FieldDecl *LastField = nullptr; |
| 4131 | const FieldDecl *VolatileField = nullptr; |
| 4132 | |
| 4133 | // Walk the designator's path to find the subobject. |
| 4134 | for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) { |
| 4135 | // Reading an indeterminate value is undefined, but assigning over one is OK. |
| 4136 | if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) || |
| 4137 | (O->isIndeterminate() && |
| 4138 | !isValidIndeterminateAccess(handler.AccessKind))) { |
| 4139 | // Object has ended lifetime. |
| 4140 | // If I is non-zero, some subobject (member or array element) of a |
| 4141 | // complete object has ended its lifetime, so this is valid for |
| 4142 | // IsWithinLifetime, resulting in false. |
| 4143 | if (I != 0 && handler.AccessKind == AK_IsWithinLifetime) |
| 4144 | return false; |
| 4145 | if (!Info.checkingPotentialConstantExpression()) |
| 4146 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit) |
| 4147 | << handler.AccessKind << O->isIndeterminate() |
| 4148 | << E->getSourceRange(); |
| 4149 | return handler.failed(); |
| 4150 | } |
| 4151 | |
| 4152 | // C++ [class.ctor]p5, C++ [class.dtor]p5: |
| 4153 | // const and volatile semantics are not applied on an object under |
| 4154 | // {con,de}struction. |
| 4155 | if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) && |
| 4156 | ObjType->isRecordType() && |
| 4157 | Info.isEvaluatingCtorDtor( |
| 4158 | Base: Obj.Base, Path: ArrayRef(Sub.Entries.begin(), Sub.Entries.begin() + I)) != |
| 4159 | ConstructionPhase::None) { |
| 4160 | ObjType = Info.Ctx.getCanonicalType(T: ObjType); |
| 4161 | ObjType.removeLocalConst(); |
| 4162 | ObjType.removeLocalVolatile(); |
| 4163 | } |
| 4164 | |
| 4165 | // If this is our last pass, check that the final object type is OK. |
| 4166 | if (I == N || (I == N - 1 && ObjType->isAnyComplexType())) { |
| 4167 | // Accesses to volatile objects are prohibited. |
| 4168 | if (ObjType.isVolatileQualified() && isFormalAccess(handler.AccessKind)) { |
| 4169 | if (Info.getLangOpts().CPlusPlus) { |
| 4170 | int DiagKind; |
| 4171 | SourceLocation Loc; |
| 4172 | const NamedDecl *Decl = nullptr; |
| 4173 | if (VolatileField) { |
| 4174 | DiagKind = 2; |
| 4175 | Loc = VolatileField->getLocation(); |
| 4176 | Decl = VolatileField; |
| 4177 | } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) { |
| 4178 | DiagKind = 1; |
| 4179 | Loc = VD->getLocation(); |
| 4180 | Decl = VD; |
| 4181 | } else { |
| 4182 | DiagKind = 0; |
| 4183 | if (auto *E = Obj.Base.dyn_cast<const Expr *>()) |
| 4184 | Loc = E->getExprLoc(); |
| 4185 | } |
| 4186 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_volatile_obj, ExtraNotes: 1) |
| 4187 | << handler.AccessKind << DiagKind << Decl; |
| 4188 | Info.Note(Loc, DiagId: diag::note_constexpr_volatile_here) << DiagKind; |
| 4189 | } else { |
| 4190 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 4191 | } |
| 4192 | return handler.failed(); |
| 4193 | } |
| 4194 | |
| 4195 | // If we are reading an object of class type, there may still be more |
| 4196 | // things we need to check: if there are any mutable subobjects, we |
| 4197 | // cannot perform this read. (This only happens when performing a trivial |
| 4198 | // copy or assignment.) |
| 4199 | if (ObjType->isRecordType() && |
| 4200 | !Obj.mayAccessMutableMembers(Info, AK: handler.AccessKind) && |
| 4201 | diagnoseMutableFields(Info, E, handler.AccessKind, ObjType)) |
| 4202 | return handler.failed(); |
| 4203 | } |
| 4204 | |
| 4205 | if (I == N) { |
| 4206 | if (!handler.found(*O, ObjType)) |
| 4207 | return false; |
| 4208 | |
| 4209 | // If we modified a bit-field, truncate it to the right width. |
| 4210 | if (isModification(handler.AccessKind) && |
| 4211 | LastField && LastField->isBitField() && |
| 4212 | !truncateBitfieldValue(Info, E, Value&: *O, FD: LastField)) |
| 4213 | return false; |
| 4214 | |
| 4215 | return true; |
| 4216 | } |
| 4217 | |
| 4218 | LastField = nullptr; |
| 4219 | if (ObjType->isArrayType()) { |
| 4220 | // Next subobject is an array element. |
| 4221 | const ArrayType *AT = Info.Ctx.getAsArrayType(T: ObjType); |
| 4222 | assert((isa<ConstantArrayType>(AT) || isa<IncompleteArrayType>(AT)) && |
| 4223 | "vla in literal type?" ); |
| 4224 | uint64_t Index = Sub.Entries[I].getAsArrayIndex(); |
| 4225 | if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT); |
| 4226 | CAT && CAT->getSize().ule(RHS: Index)) { |
| 4227 | // Note, it should not be possible to form a pointer with a valid |
| 4228 | // designator which points more than one past the end of the array. |
| 4229 | if (Info.getLangOpts().CPlusPlus11) |
| 4230 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end) |
| 4231 | << handler.AccessKind; |
| 4232 | else |
| 4233 | Info.FFDiag(E); |
| 4234 | return handler.failed(); |
| 4235 | } |
| 4236 | |
| 4237 | ObjType = AT->getElementType(); |
| 4238 | |
| 4239 | if (O->getArrayInitializedElts() > Index) |
| 4240 | O = &O->getArrayInitializedElt(I: Index); |
| 4241 | else if (!isRead(handler.AccessKind)) { |
| 4242 | if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT); |
| 4243 | CAT && !CheckArraySize(Info, CAT, CallLoc: E->getExprLoc())) |
| 4244 | return handler.failed(); |
| 4245 | |
| 4246 | expandArray(Array&: *O, Index); |
| 4247 | O = &O->getArrayInitializedElt(I: Index); |
| 4248 | } else |
| 4249 | O = &O->getArrayFiller(); |
| 4250 | } else if (ObjType->isAnyComplexType()) { |
| 4251 | // Next subobject is a complex number. |
| 4252 | uint64_t Index = Sub.Entries[I].getAsArrayIndex(); |
| 4253 | if (Index > 1) { |
| 4254 | if (Info.getLangOpts().CPlusPlus11) |
| 4255 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end) |
| 4256 | << handler.AccessKind; |
| 4257 | else |
| 4258 | Info.FFDiag(E); |
| 4259 | return handler.failed(); |
| 4260 | } |
| 4261 | |
| 4262 | ObjType = getSubobjectType( |
| 4263 | ObjType, SubobjType: ObjType->castAs<ComplexType>()->getElementType()); |
| 4264 | |
| 4265 | assert(I == N - 1 && "extracting subobject of scalar?" ); |
| 4266 | if (O->isComplexInt()) { |
| 4267 | return handler.found(Index ? O->getComplexIntImag() |
| 4268 | : O->getComplexIntReal(), ObjType); |
| 4269 | } else { |
| 4270 | assert(O->isComplexFloat()); |
| 4271 | return handler.found(Index ? O->getComplexFloatImag() |
| 4272 | : O->getComplexFloatReal(), ObjType); |
| 4273 | } |
| 4274 | } else if (const auto *VT = ObjType->getAs<VectorType>()) { |
| 4275 | uint64_t Index = Sub.Entries[I].getAsArrayIndex(); |
| 4276 | unsigned NumElements = VT->getNumElements(); |
| 4277 | if (Index == NumElements) { |
| 4278 | if (Info.getLangOpts().CPlusPlus11) |
| 4279 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_past_end) |
| 4280 | << handler.AccessKind; |
| 4281 | else |
| 4282 | Info.FFDiag(E); |
| 4283 | return handler.failed(); |
| 4284 | } |
| 4285 | |
| 4286 | if (Index > NumElements) { |
| 4287 | Info.CCEDiag(E, DiagId: diag::note_constexpr_array_index) |
| 4288 | << Index << /*array*/ 0 << NumElements; |
| 4289 | return handler.failed(); |
| 4290 | } |
| 4291 | |
| 4292 | ObjType = VT->getElementType(); |
| 4293 | assert(I == N - 1 && "extracting subobject of scalar?" ); |
| 4294 | return handler.found(O->getVectorElt(I: Index), ObjType); |
| 4295 | } else if (const FieldDecl *Field = getAsField(E: Sub.Entries[I])) { |
| 4296 | if (Field->isMutable() && |
| 4297 | !Obj.mayAccessMutableMembers(Info, AK: handler.AccessKind)) { |
| 4298 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_mutable, ExtraNotes: 1) |
| 4299 | << handler.AccessKind << Field; |
| 4300 | Info.Note(Loc: Field->getLocation(), DiagId: diag::note_declared_at); |
| 4301 | return handler.failed(); |
| 4302 | } |
| 4303 | |
| 4304 | // Next subobject is a class, struct or union field. |
| 4305 | RecordDecl *RD = ObjType->castAsCanonical<RecordType>()->getDecl(); |
| 4306 | if (RD->isUnion()) { |
| 4307 | const FieldDecl *UnionField = O->getUnionField(); |
| 4308 | if (!UnionField || |
| 4309 | UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) { |
| 4310 | if (I == N - 1 && handler.AccessKind == AK_Construct) { |
| 4311 | // Placement new onto an inactive union member makes it active. |
| 4312 | O->setUnion(Field, Value: APValue()); |
| 4313 | } else { |
| 4314 | // Pointer to/into inactive union member: Not within lifetime |
| 4315 | if (handler.AccessKind == AK_IsWithinLifetime) |
| 4316 | return false; |
| 4317 | // FIXME: If O->getUnionValue() is absent, report that there's no |
| 4318 | // active union member rather than reporting the prior active union |
| 4319 | // member. We'll need to fix nullptr_t to not use APValue() as its |
| 4320 | // representation first. |
| 4321 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_inactive_union_member) |
| 4322 | << handler.AccessKind << Field << !UnionField << UnionField; |
| 4323 | return handler.failed(); |
| 4324 | } |
| 4325 | } |
| 4326 | O = &O->getUnionValue(); |
| 4327 | } else |
| 4328 | O = &O->getStructField(i: Field->getFieldIndex()); |
| 4329 | |
| 4330 | ObjType = getSubobjectType(ObjType, SubobjType: Field->getType(), IsMutable: Field->isMutable()); |
| 4331 | LastField = Field; |
| 4332 | if (Field->getType().isVolatileQualified()) |
| 4333 | VolatileField = Field; |
| 4334 | } else { |
| 4335 | // Next subobject is a base class. |
| 4336 | const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl(); |
| 4337 | const CXXRecordDecl *Base = getAsBaseClass(E: Sub.Entries[I]); |
| 4338 | O = &O->getStructBase(i: getBaseIndex(Derived, Base)); |
| 4339 | |
| 4340 | ObjType = getSubobjectType(ObjType, SubobjType: Info.Ctx.getCanonicalTagType(TD: Base)); |
| 4341 | } |
| 4342 | } |
| 4343 | } |
| 4344 | |
| 4345 | namespace { |
| 4346 | struct ExtractSubobjectHandler { |
| 4347 | EvalInfo &Info; |
| 4348 | const Expr *E; |
| 4349 | APValue &Result; |
| 4350 | const AccessKinds AccessKind; |
| 4351 | |
| 4352 | typedef bool result_type; |
| 4353 | bool failed() { return false; } |
| 4354 | bool found(APValue &Subobj, QualType SubobjType) { |
| 4355 | Result = Subobj; |
| 4356 | if (AccessKind == AK_ReadObjectRepresentation) |
| 4357 | return true; |
| 4358 | return CheckFullyInitialized(Info, DiagLoc: E->getExprLoc(), Type: SubobjType, Value: Result); |
| 4359 | } |
| 4360 | bool found(APSInt &Value, QualType SubobjType) { |
| 4361 | Result = APValue(Value); |
| 4362 | return true; |
| 4363 | } |
| 4364 | bool found(APFloat &Value, QualType SubobjType) { |
| 4365 | Result = APValue(Value); |
| 4366 | return true; |
| 4367 | } |
| 4368 | }; |
| 4369 | } // end anonymous namespace |
| 4370 | |
| 4371 | /// Extract the designated sub-object of an rvalue. |
| 4372 | static bool extractSubobject(EvalInfo &Info, const Expr *E, |
| 4373 | const CompleteObject &Obj, |
| 4374 | const SubobjectDesignator &Sub, APValue &Result, |
| 4375 | AccessKinds AK = AK_Read) { |
| 4376 | assert(AK == AK_Read || AK == AK_ReadObjectRepresentation); |
| 4377 | ExtractSubobjectHandler Handler = {.Info: Info, .E: E, .Result: Result, .AccessKind: AK}; |
| 4378 | return findSubobject(Info, E, Obj, Sub, handler&: Handler); |
| 4379 | } |
| 4380 | |
| 4381 | namespace { |
| 4382 | struct ModifySubobjectHandler { |
| 4383 | EvalInfo &Info; |
| 4384 | APValue &NewVal; |
| 4385 | const Expr *E; |
| 4386 | |
| 4387 | typedef bool result_type; |
| 4388 | static const AccessKinds AccessKind = AK_Assign; |
| 4389 | |
| 4390 | bool checkConst(QualType QT) { |
| 4391 | // Assigning to a const object has undefined behavior. |
| 4392 | if (QT.isConstQualified()) { |
| 4393 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT; |
| 4394 | return false; |
| 4395 | } |
| 4396 | return true; |
| 4397 | } |
| 4398 | |
| 4399 | bool failed() { return false; } |
| 4400 | bool found(APValue &Subobj, QualType SubobjType) { |
| 4401 | if (!checkConst(QT: SubobjType)) |
| 4402 | return false; |
| 4403 | // We've been given ownership of NewVal, so just swap it in. |
| 4404 | Subobj.swap(RHS&: NewVal); |
| 4405 | return true; |
| 4406 | } |
| 4407 | bool found(APSInt &Value, QualType SubobjType) { |
| 4408 | if (!checkConst(QT: SubobjType)) |
| 4409 | return false; |
| 4410 | if (!NewVal.isInt()) { |
| 4411 | // Maybe trying to write a cast pointer value into a complex? |
| 4412 | Info.FFDiag(E); |
| 4413 | return false; |
| 4414 | } |
| 4415 | Value = NewVal.getInt(); |
| 4416 | return true; |
| 4417 | } |
| 4418 | bool found(APFloat &Value, QualType SubobjType) { |
| 4419 | if (!checkConst(QT: SubobjType)) |
| 4420 | return false; |
| 4421 | Value = NewVal.getFloat(); |
| 4422 | return true; |
| 4423 | } |
| 4424 | }; |
| 4425 | } // end anonymous namespace |
| 4426 | |
| 4427 | const AccessKinds ModifySubobjectHandler::AccessKind; |
| 4428 | |
| 4429 | /// Update the designated sub-object of an rvalue to the given value. |
| 4430 | static bool modifySubobject(EvalInfo &Info, const Expr *E, |
| 4431 | const CompleteObject &Obj, |
| 4432 | const SubobjectDesignator &Sub, |
| 4433 | APValue &NewVal) { |
| 4434 | ModifySubobjectHandler Handler = { .Info: Info, .NewVal: NewVal, .E: E }; |
| 4435 | return findSubobject(Info, E, Obj, Sub, handler&: Handler); |
| 4436 | } |
| 4437 | |
| 4438 | /// Find the position where two subobject designators diverge, or equivalently |
| 4439 | /// the length of the common initial subsequence. |
| 4440 | static unsigned FindDesignatorMismatch(QualType ObjType, |
| 4441 | const SubobjectDesignator &A, |
| 4442 | const SubobjectDesignator &B, |
| 4443 | bool &WasArrayIndex) { |
| 4444 | unsigned I = 0, N = std::min(a: A.Entries.size(), b: B.Entries.size()); |
| 4445 | for (/**/; I != N; ++I) { |
| 4446 | if (!ObjType.isNull() && |
| 4447 | (ObjType->isArrayType() || ObjType->isAnyComplexType())) { |
| 4448 | // Next subobject is an array element. |
| 4449 | if (A.Entries[I].getAsArrayIndex() != B.Entries[I].getAsArrayIndex()) { |
| 4450 | WasArrayIndex = true; |
| 4451 | return I; |
| 4452 | } |
| 4453 | if (ObjType->isAnyComplexType()) |
| 4454 | ObjType = ObjType->castAs<ComplexType>()->getElementType(); |
| 4455 | else |
| 4456 | ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType(); |
| 4457 | } else { |
| 4458 | if (A.Entries[I].getAsBaseOrMember() != |
| 4459 | B.Entries[I].getAsBaseOrMember()) { |
| 4460 | WasArrayIndex = false; |
| 4461 | return I; |
| 4462 | } |
| 4463 | if (const FieldDecl *FD = getAsField(E: A.Entries[I])) |
| 4464 | // Next subobject is a field. |
| 4465 | ObjType = FD->getType(); |
| 4466 | else |
| 4467 | // Next subobject is a base class. |
| 4468 | ObjType = QualType(); |
| 4469 | } |
| 4470 | } |
| 4471 | WasArrayIndex = false; |
| 4472 | return I; |
| 4473 | } |
| 4474 | |
| 4475 | /// Determine whether the given subobject designators refer to elements of the |
| 4476 | /// same array object. |
| 4477 | static bool AreElementsOfSameArray(QualType ObjType, |
| 4478 | const SubobjectDesignator &A, |
| 4479 | const SubobjectDesignator &B) { |
| 4480 | if (A.Entries.size() != B.Entries.size()) |
| 4481 | return false; |
| 4482 | |
| 4483 | bool IsArray = A.MostDerivedIsArrayElement; |
| 4484 | if (IsArray && A.MostDerivedPathLength != A.Entries.size()) |
| 4485 | // A is a subobject of the array element. |
| 4486 | return false; |
| 4487 | |
| 4488 | // If A (and B) designates an array element, the last entry will be the array |
| 4489 | // index. That doesn't have to match. Otherwise, we're in the 'implicit array |
| 4490 | // of length 1' case, and the entire path must match. |
| 4491 | bool WasArrayIndex; |
| 4492 | unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex); |
| 4493 | return CommonLength >= A.Entries.size() - IsArray; |
| 4494 | } |
| 4495 | |
| 4496 | /// Find the complete object to which an LValue refers. |
| 4497 | static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, |
| 4498 | AccessKinds AK, const LValue &LVal, |
| 4499 | QualType LValType) { |
| 4500 | if (LVal.InvalidBase) { |
| 4501 | Info.FFDiag(E); |
| 4502 | return CompleteObject(); |
| 4503 | } |
| 4504 | |
| 4505 | if (!LVal.Base) { |
| 4506 | if (AK == AccessKinds::AK_Dereference) |
| 4507 | Info.FFDiag(E, DiagId: diag::note_constexpr_dereferencing_null); |
| 4508 | else |
| 4509 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_null) << AK; |
| 4510 | return CompleteObject(); |
| 4511 | } |
| 4512 | |
| 4513 | CallStackFrame *Frame = nullptr; |
| 4514 | unsigned Depth = 0; |
| 4515 | if (LVal.getLValueCallIndex()) { |
| 4516 | std::tie(args&: Frame, args&: Depth) = |
| 4517 | Info.getCallFrameAndDepth(CallIndex: LVal.getLValueCallIndex()); |
| 4518 | if (!Frame) { |
| 4519 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit, ExtraNotes: 1) |
| 4520 | << AK << /*Indeterminate=*/false << E->getSourceRange(); |
| 4521 | NoteLValueLocation(Info, Base: LVal.Base); |
| 4522 | return CompleteObject(); |
| 4523 | } |
| 4524 | } |
| 4525 | |
| 4526 | bool IsAccess = isAnyAccess(AK); |
| 4527 | |
| 4528 | // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type |
| 4529 | // is not a constant expression (even if the object is non-volatile). We also |
| 4530 | // apply this rule to C++98, in order to conform to the expected 'volatile' |
| 4531 | // semantics. |
| 4532 | if (isFormalAccess(AK) && LValType.isVolatileQualified()) { |
| 4533 | if (Info.getLangOpts().CPlusPlus) |
| 4534 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_volatile_type) |
| 4535 | << AK << LValType; |
| 4536 | else |
| 4537 | Info.FFDiag(E); |
| 4538 | return CompleteObject(); |
| 4539 | } |
| 4540 | |
| 4541 | // Compute value storage location and type of base object. |
| 4542 | APValue *BaseVal = nullptr; |
| 4543 | QualType BaseType = getType(B: LVal.Base); |
| 4544 | |
| 4545 | if (Info.getLangOpts().CPlusPlus14 && LVal.Base == Info.EvaluatingDecl && |
| 4546 | lifetimeStartedInEvaluation(Info, Base: LVal.Base)) { |
| 4547 | // This is the object whose initializer we're evaluating, so its lifetime |
| 4548 | // started in the current evaluation. |
| 4549 | BaseVal = Info.EvaluatingDeclValue; |
| 4550 | } else if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl *>()) { |
| 4551 | // Allow reading from a GUID declaration. |
| 4552 | if (auto *GD = dyn_cast<MSGuidDecl>(Val: D)) { |
| 4553 | if (isModification(AK)) { |
| 4554 | // All the remaining cases do not permit modification of the object. |
| 4555 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global); |
| 4556 | return CompleteObject(); |
| 4557 | } |
| 4558 | APValue &V = GD->getAsAPValue(); |
| 4559 | if (V.isAbsent()) { |
| 4560 | Info.FFDiag(E, DiagId: diag::note_constexpr_unsupported_layout) |
| 4561 | << GD->getType(); |
| 4562 | return CompleteObject(); |
| 4563 | } |
| 4564 | return CompleteObject(LVal.Base, &V, GD->getType()); |
| 4565 | } |
| 4566 | |
| 4567 | // Allow reading the APValue from an UnnamedGlobalConstantDecl. |
| 4568 | if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(Val: D)) { |
| 4569 | if (isModification(AK)) { |
| 4570 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global); |
| 4571 | return CompleteObject(); |
| 4572 | } |
| 4573 | return CompleteObject(LVal.Base, const_cast<APValue *>(&GCD->getValue()), |
| 4574 | GCD->getType()); |
| 4575 | } |
| 4576 | |
| 4577 | // Allow reading from template parameter objects. |
| 4578 | if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(Val: D)) { |
| 4579 | if (isModification(AK)) { |
| 4580 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global); |
| 4581 | return CompleteObject(); |
| 4582 | } |
| 4583 | return CompleteObject(LVal.Base, const_cast<APValue *>(&TPO->getValue()), |
| 4584 | TPO->getType()); |
| 4585 | } |
| 4586 | |
| 4587 | // In C++98, const, non-volatile integers initialized with ICEs are ICEs. |
| 4588 | // In C++11, constexpr, non-volatile variables initialized with constant |
| 4589 | // expressions are constant expressions too. Inside constexpr functions, |
| 4590 | // parameters are constant expressions even if they're non-const. |
| 4591 | // In C++1y, objects local to a constant expression (those with a Frame) are |
| 4592 | // both readable and writable inside constant expressions. |
| 4593 | // In C, such things can also be folded, although they are not ICEs. |
| 4594 | const VarDecl *VD = dyn_cast<VarDecl>(Val: D); |
| 4595 | if (VD) { |
| 4596 | if (const VarDecl *VDef = VD->getDefinition(C&: Info.Ctx)) |
| 4597 | VD = VDef; |
| 4598 | } |
| 4599 | if (!VD || VD->isInvalidDecl()) { |
| 4600 | Info.FFDiag(E); |
| 4601 | return CompleteObject(); |
| 4602 | } |
| 4603 | |
| 4604 | bool IsConstant = BaseType.isConstant(Ctx: Info.Ctx); |
| 4605 | bool ConstexprVar = false; |
| 4606 | if (const auto *VD = dyn_cast_if_present<VarDecl>( |
| 4607 | Val: Info.EvaluatingDecl.dyn_cast<const ValueDecl *>())) |
| 4608 | ConstexprVar = VD->isConstexpr(); |
| 4609 | |
| 4610 | // Unless we're looking at a local variable or argument in a constexpr call, |
| 4611 | // the variable we're reading must be const (unless we are binding to a |
| 4612 | // reference). |
| 4613 | if (AK != clang::AK_Dereference && !Frame) { |
| 4614 | if (IsAccess && isa<ParmVarDecl>(Val: VD)) { |
| 4615 | // Access of a parameter that's not associated with a frame isn't going |
| 4616 | // to work out, but we can leave it to evaluateVarDeclInit to provide a |
| 4617 | // suitable diagnostic. |
| 4618 | } else if (Info.getLangOpts().CPlusPlus14 && |
| 4619 | lifetimeStartedInEvaluation(Info, Base: LVal.Base)) { |
| 4620 | // OK, we can read and modify an object if we're in the process of |
| 4621 | // evaluating its initializer, because its lifetime began in this |
| 4622 | // evaluation. |
| 4623 | } else if (isModification(AK)) { |
| 4624 | // All the remaining cases do not permit modification of the object. |
| 4625 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_global); |
| 4626 | return CompleteObject(); |
| 4627 | } else if (VD->isConstexpr()) { |
| 4628 | // OK, we can read this variable. |
| 4629 | } else if (Info.getLangOpts().C23 && ConstexprVar) { |
| 4630 | Info.FFDiag(E); |
| 4631 | return CompleteObject(); |
| 4632 | } else if (BaseType->isIntegralOrEnumerationType()) { |
| 4633 | if (!IsConstant) { |
| 4634 | if (!IsAccess) |
| 4635 | return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); |
| 4636 | if (Info.getLangOpts().CPlusPlus) { |
| 4637 | Info.FFDiag(E, DiagId: diag::note_constexpr_ltor_non_const_int, ExtraNotes: 1) << VD; |
| 4638 | Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at); |
| 4639 | } else { |
| 4640 | Info.FFDiag(E); |
| 4641 | } |
| 4642 | return CompleteObject(); |
| 4643 | } |
| 4644 | } else if (!IsAccess) { |
| 4645 | return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); |
| 4646 | } else if ((IsConstant || BaseType->isReferenceType()) && |
| 4647 | Info.checkingPotentialConstantExpression() && |
| 4648 | BaseType->isLiteralType(Ctx: Info.Ctx) && !VD->hasDefinition()) { |
| 4649 | // This variable might end up being constexpr. Don't diagnose it yet. |
| 4650 | } else if (IsConstant) { |
| 4651 | // Keep evaluating to see what we can do. In particular, we support |
| 4652 | // folding of const floating-point types, in order to make static const |
| 4653 | // data members of such types (supported as an extension) more useful. |
| 4654 | if (Info.getLangOpts().CPlusPlus) { |
| 4655 | Info.CCEDiag(E, DiagId: Info.getLangOpts().CPlusPlus11 |
| 4656 | ? diag::note_constexpr_ltor_non_constexpr |
| 4657 | : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1) |
| 4658 | << VD << BaseType; |
| 4659 | Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at); |
| 4660 | } else { |
| 4661 | Info.CCEDiag(E); |
| 4662 | } |
| 4663 | } else { |
| 4664 | // Never allow reading a non-const value. |
| 4665 | if (Info.getLangOpts().CPlusPlus) { |
| 4666 | Info.FFDiag(E, DiagId: Info.getLangOpts().CPlusPlus11 |
| 4667 | ? diag::note_constexpr_ltor_non_constexpr |
| 4668 | : diag::note_constexpr_ltor_non_integral, ExtraNotes: 1) |
| 4669 | << VD << BaseType; |
| 4670 | Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at); |
| 4671 | } else { |
| 4672 | Info.FFDiag(E); |
| 4673 | } |
| 4674 | return CompleteObject(); |
| 4675 | } |
| 4676 | } |
| 4677 | |
| 4678 | // When binding to a reference, the variable does not need to be constexpr |
| 4679 | // or have constant initialization. |
| 4680 | if (AK != clang::AK_Dereference && |
| 4681 | !evaluateVarDeclInit(Info, E, VD, Frame, Version: LVal.getLValueVersion(), |
| 4682 | Result&: BaseVal)) |
| 4683 | return CompleteObject(); |
| 4684 | // If evaluateVarDeclInit sees a constexpr-unknown variable, it returns |
| 4685 | // a null BaseVal. Any constexpr-unknown variable seen here is an error: |
| 4686 | // we can't access a constexpr-unknown object. |
| 4687 | if (AK != clang::AK_Dereference && !BaseVal) { |
| 4688 | if (!Info.checkingPotentialConstantExpression()) { |
| 4689 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_unknown_variable, ExtraNotes: 1) |
| 4690 | << AK << VD; |
| 4691 | Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at); |
| 4692 | } |
| 4693 | return CompleteObject(); |
| 4694 | } |
| 4695 | } else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) { |
| 4696 | std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA); |
| 4697 | if (!Alloc) { |
| 4698 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_deleted_object) << AK; |
| 4699 | return CompleteObject(); |
| 4700 | } |
| 4701 | return CompleteObject(LVal.Base, &(*Alloc)->Value, |
| 4702 | LVal.Base.getDynamicAllocType()); |
| 4703 | } |
| 4704 | // When binding to a reference, the variable does not need to be |
| 4705 | // within its lifetime. |
| 4706 | else if (AK != clang::AK_Dereference) { |
| 4707 | const Expr *Base = LVal.Base.dyn_cast<const Expr*>(); |
| 4708 | |
| 4709 | if (!Frame) { |
| 4710 | if (const MaterializeTemporaryExpr *MTE = |
| 4711 | dyn_cast_or_null<MaterializeTemporaryExpr>(Val: Base)) { |
| 4712 | assert(MTE->getStorageDuration() == SD_Static && |
| 4713 | "should have a frame for a non-global materialized temporary" ); |
| 4714 | |
| 4715 | // C++20 [expr.const]p4: [DR2126] |
| 4716 | // An object or reference is usable in constant expressions if it is |
| 4717 | // - a temporary object of non-volatile const-qualified literal type |
| 4718 | // whose lifetime is extended to that of a variable that is usable |
| 4719 | // in constant expressions |
| 4720 | // |
| 4721 | // C++20 [expr.const]p5: |
| 4722 | // an lvalue-to-rvalue conversion [is not allowed unless it applies to] |
| 4723 | // - a non-volatile glvalue that refers to an object that is usable |
| 4724 | // in constant expressions, or |
| 4725 | // - a non-volatile glvalue of literal type that refers to a |
| 4726 | // non-volatile object whose lifetime began within the evaluation |
| 4727 | // of E; |
| 4728 | // |
| 4729 | // C++11 misses the 'began within the evaluation of e' check and |
| 4730 | // instead allows all temporaries, including things like: |
| 4731 | // int &&r = 1; |
| 4732 | // int x = ++r; |
| 4733 | // constexpr int k = r; |
| 4734 | // Therefore we use the C++14-onwards rules in C++11 too. |
| 4735 | // |
| 4736 | // Note that temporaries whose lifetimes began while evaluating a |
| 4737 | // variable's constructor are not usable while evaluating the |
| 4738 | // corresponding destructor, not even if they're of const-qualified |
| 4739 | // types. |
| 4740 | if (!MTE->isUsableInConstantExpressions(Context: Info.Ctx) && |
| 4741 | !lifetimeStartedInEvaluation(Info, Base: LVal.Base)) { |
| 4742 | if (!IsAccess) |
| 4743 | return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); |
| 4744 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_static_temporary, ExtraNotes: 1) << AK; |
| 4745 | Info.Note(Loc: MTE->getExprLoc(), DiagId: diag::note_constexpr_temporary_here); |
| 4746 | return CompleteObject(); |
| 4747 | } |
| 4748 | |
| 4749 | BaseVal = MTE->getOrCreateValue(MayCreate: false); |
| 4750 | assert(BaseVal && "got reference to unevaluated temporary" ); |
| 4751 | } else if (const CompoundLiteralExpr *CLE = |
| 4752 | dyn_cast_or_null<CompoundLiteralExpr>(Val: Base)) { |
| 4753 | // According to the GCC info page: |
| 4754 | // |
| 4755 | // 6.28 Compound Literals |
| 4756 | // |
| 4757 | // As an optimization, G++ sometimes gives array compound literals |
| 4758 | // longer lifetimes: when the array either appears outside a function or |
| 4759 | // has a const-qualified type. If foo and its initializer had elements |
| 4760 | // of type char *const rather than char *, or if foo were a global |
| 4761 | // variable, the array would have static storage duration. But it is |
| 4762 | // probably safest just to avoid the use of array compound literals in |
| 4763 | // C++ code. |
| 4764 | // |
| 4765 | // Obey that rule by checking constness for converted array types. |
| 4766 | if (QualType CLETy = CLE->getType(); CLETy->isArrayType() && |
| 4767 | !LValType->isArrayType() && |
| 4768 | !CLETy.isConstant(Ctx: Info.Ctx)) { |
| 4769 | Info.FFDiag(E); |
| 4770 | Info.Note(Loc: CLE->getExprLoc(), DiagId: diag::note_declared_at); |
| 4771 | return CompleteObject(); |
| 4772 | } |
| 4773 | |
| 4774 | BaseVal = &CLE->getStaticValue(); |
| 4775 | } else { |
| 4776 | if (!IsAccess) |
| 4777 | return CompleteObject(LVal.getLValueBase(), nullptr, BaseType); |
| 4778 | APValue Val; |
| 4779 | LVal.moveInto(V&: Val); |
| 4780 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_unreadable_object) |
| 4781 | << AK |
| 4782 | << Val.getAsString(Ctx: Info.Ctx, |
| 4783 | Ty: Info.Ctx.getLValueReferenceType(T: LValType)); |
| 4784 | NoteLValueLocation(Info, Base: LVal.Base); |
| 4785 | return CompleteObject(); |
| 4786 | } |
| 4787 | } else if (AK != clang::AK_Dereference) { |
| 4788 | BaseVal = Frame->getTemporary(Key: Base, Version: LVal.Base.getVersion()); |
| 4789 | assert(BaseVal && "missing value for temporary" ); |
| 4790 | } |
| 4791 | } |
| 4792 | |
| 4793 | // In C++14, we can't safely access any mutable state when we might be |
| 4794 | // evaluating after an unmodeled side effect. Parameters are modeled as state |
| 4795 | // in the caller, but aren't visible once the call returns, so they can be |
| 4796 | // modified in a speculatively-evaluated call. |
| 4797 | // |
| 4798 | // FIXME: Not all local state is mutable. Allow local constant subobjects |
| 4799 | // to be read here (but take care with 'mutable' fields). |
| 4800 | unsigned VisibleDepth = Depth; |
| 4801 | if (llvm::isa_and_nonnull<ParmVarDecl>( |
| 4802 | Val: LVal.Base.dyn_cast<const ValueDecl *>())) |
| 4803 | ++VisibleDepth; |
| 4804 | if ((Frame && Info.getLangOpts().CPlusPlus14 && |
| 4805 | Info.EvalStatus.HasSideEffects) || |
| 4806 | (isModification(AK) && VisibleDepth < Info.SpeculativeEvaluationDepth)) |
| 4807 | return CompleteObject(); |
| 4808 | |
| 4809 | return CompleteObject(LVal.getLValueBase(), BaseVal, BaseType); |
| 4810 | } |
| 4811 | |
| 4812 | /// Perform an lvalue-to-rvalue conversion on the given glvalue. This |
| 4813 | /// can also be used for 'lvalue-to-lvalue' conversions for looking up the |
| 4814 | /// glvalue referred to by an entity of reference type. |
| 4815 | /// |
| 4816 | /// \param Info - Information about the ongoing evaluation. |
| 4817 | /// \param Conv - The expression for which we are performing the conversion. |
| 4818 | /// Used for diagnostics. |
| 4819 | /// \param Type - The type of the glvalue (before stripping cv-qualifiers in the |
| 4820 | /// case of a non-class type). |
| 4821 | /// \param LVal - The glvalue on which we are attempting to perform this action. |
| 4822 | /// \param RVal - The produced value will be placed here. |
| 4823 | /// \param WantObjectRepresentation - If true, we're looking for the object |
| 4824 | /// representation rather than the value, and in particular, |
| 4825 | /// there is no requirement that the result be fully initialized. |
| 4826 | static bool |
| 4827 | handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type, |
| 4828 | const LValue &LVal, APValue &RVal, |
| 4829 | bool WantObjectRepresentation = false) { |
| 4830 | if (LVal.Designator.Invalid) |
| 4831 | return false; |
| 4832 | |
| 4833 | // Check for special cases where there is no existing APValue to look at. |
| 4834 | const Expr *Base = LVal.Base.dyn_cast<const Expr*>(); |
| 4835 | |
| 4836 | AccessKinds AK = |
| 4837 | WantObjectRepresentation ? AK_ReadObjectRepresentation : AK_Read; |
| 4838 | |
| 4839 | if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) { |
| 4840 | if (isa<StringLiteral>(Val: Base) || isa<PredefinedExpr>(Val: Base)) { |
| 4841 | // Special-case character extraction so we don't have to construct an |
| 4842 | // APValue for the whole string. |
| 4843 | assert(LVal.Designator.Entries.size() <= 1 && |
| 4844 | "Can only read characters from string literals" ); |
| 4845 | if (LVal.Designator.Entries.empty()) { |
| 4846 | // Fail for now for LValue to RValue conversion of an array. |
| 4847 | // (This shouldn't show up in C/C++, but it could be triggered by a |
| 4848 | // weird EvaluateAsRValue call from a tool.) |
| 4849 | Info.FFDiag(E: Conv); |
| 4850 | return false; |
| 4851 | } |
| 4852 | if (LVal.Designator.isOnePastTheEnd()) { |
| 4853 | if (Info.getLangOpts().CPlusPlus11) |
| 4854 | Info.FFDiag(E: Conv, DiagId: diag::note_constexpr_access_past_end) << AK; |
| 4855 | else |
| 4856 | Info.FFDiag(E: Conv); |
| 4857 | return false; |
| 4858 | } |
| 4859 | uint64_t CharIndex = LVal.Designator.Entries[0].getAsArrayIndex(); |
| 4860 | RVal = APValue(extractStringLiteralCharacter(Info, Lit: Base, Index: CharIndex)); |
| 4861 | return true; |
| 4862 | } |
| 4863 | } |
| 4864 | |
| 4865 | CompleteObject Obj = findCompleteObject(Info, E: Conv, AK, LVal, LValType: Type); |
| 4866 | return Obj && extractSubobject(Info, E: Conv, Obj, Sub: LVal.Designator, Result&: RVal, AK); |
| 4867 | } |
| 4868 | |
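|  | // Evaluate the source of an HLSL elementwise cast and flatten it into scalar |
|  | // values and types, first loading through the lvalue if evaluation produced |
|  | // one (as it does for record-typed sources). |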
| 4869 | static bool hlslElementwiseCastHelper(EvalInfo &Info, const Expr *E, |
| 4870 | QualType DestTy, |
| 4871 | SmallVectorImpl<APValue> &SrcVals, |
| 4872 | SmallVectorImpl<QualType> &SrcTypes) { |
| 4873 | APValue Val; |
| 4874 | if (!Evaluate(Result&: Val, Info, E)) |
| 4875 | return false; |
| 4876 | |
| 4877 | // An LValue result means we evaluated a record; load its value. |
| 4878 | if (Val.isLValue()) { |
| 4879 | LValue LVal; |
| 4880 | LVal.setFrom(Ctx: Info.Ctx, V: Val); |
| 4881 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal, RVal&: Val)) |
| 4882 | return false; |
| 4883 | } |
| 4884 | |
| 4885 | unsigned NEls = elementwiseSize(Info, BaseTy: DestTy); |
| 4886 | // Flatten the source value into scalar elements. |
| 4887 | if (!flattenAPValue(Info, E, Value: Val, BaseTy: E->getType(), Elements&: SrcVals, Types&: SrcTypes, Size: NEls)) |
| 4888 | return false; |
| 4889 | |
| 4890 | return true; |
| 4891 | } |
| 4892 | |
| 4893 | /// Perform an assignment of Val to LVal. Takes ownership of Val. |
| 4894 | static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal, |
| 4895 | QualType LValType, APValue &Val) { |
| 4896 | if (LVal.Designator.Invalid) |
| 4897 | return false; |
| 4898 | |
| 4899 | if (!Info.getLangOpts().CPlusPlus14) { |
| 4900 | Info.FFDiag(E); |
| 4901 | return false; |
| 4902 | } |
| 4903 | |
| 4904 | CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Assign, LVal, LValType); |
| 4905 | return Obj && modifySubobject(Info, E, Obj, Sub: LVal.Designator, NewVal&: Val); |
| 4906 | } |
| 4907 | |
| 4908 | namespace { |
| 4909 | struct CompoundAssignSubobjectHandler { |
| 4910 | EvalInfo &Info; |
| 4911 | const CompoundAssignOperator *E; |
| 4912 | QualType PromotedLHSType; |
| 4913 | BinaryOperatorKind Opcode; |
| 4914 | const APValue &RHS; |
| 4915 | |
| 4916 | static const AccessKinds AccessKind = AK_Assign; |
| 4917 | |
| 4918 | typedef bool result_type; |
| 4919 | |
| 4920 | bool checkConst(QualType QT) { |
| 4921 | // Assigning to a const object has undefined behavior. |
| 4922 | if (QT.isConstQualified()) { |
| 4923 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT; |
| 4924 | return false; |
| 4925 | } |
| 4926 | return true; |
| 4927 | } |
| 4928 | |
| 4929 | bool failed() { return false; } |
| 4930 | bool found(APValue &Subobj, QualType SubobjType) { |
| 4931 | switch (Subobj.getKind()) { |
| 4932 | case APValue::Int: |
| 4933 | return found(Value&: Subobj.getInt(), SubobjType); |
| 4934 | case APValue::Float: |
| 4935 | return found(Value&: Subobj.getFloat(), SubobjType); |
| 4936 | case APValue::ComplexInt: |
| 4937 | case APValue::ComplexFloat: |
| 4938 | // FIXME: Implement complex compound assignment. |
| 4939 | Info.FFDiag(E); |
| 4940 | return false; |
| 4941 | case APValue::LValue: |
| 4942 | return foundPointer(Subobj, SubobjType); |
| 4943 | case APValue::Vector: |
| 4944 | return foundVector(Value&: Subobj, SubobjType); |
| 4945 | case APValue::Indeterminate: |
| 4946 | Info.FFDiag(E, DiagId: diag::note_constexpr_access_uninit) |
| 4947 | << /*read of=*/0 << /*uninitialized object=*/1 |
| 4948 | << E->getLHS()->getSourceRange(); |
| 4949 | return false; |
| 4950 | default: |
| 4951 | // FIXME: can this happen? |
| 4952 | Info.FFDiag(E); |
| 4953 | return false; |
| 4954 | } |
| 4955 | } |
| 4956 | |
| 4957 | bool foundVector(APValue &Value, QualType SubobjType) { |
| 4958 | if (!checkConst(QT: SubobjType)) |
| 4959 | return false; |
| 4960 | |
| 4961 | if (!SubobjType->isVectorType()) { |
| 4962 | Info.FFDiag(E); |
| 4963 | return false; |
| 4964 | } |
| 4965 | return handleVectorVectorBinOp(Info, E, Opcode, LHSValue&: Value, RHSValue: RHS); |
| 4966 | } |
| 4967 | |
| 4968 | bool found(APSInt &Value, QualType SubobjType) { |
| 4969 | if (!checkConst(QT: SubobjType)) |
| 4970 | return false; |
| 4971 | |
| 4972 | if (!SubobjType->isIntegerType()) { |
| 4973 | // We don't support compound assignment on integer-cast-to-pointer |
| 4974 | // values. |
| 4975 | Info.FFDiag(E); |
| 4976 | return false; |
| 4977 | } |
| 4978 | |
| 4979 | if (RHS.isInt()) { |
| 4980 | APSInt LHS = |
| 4981 | HandleIntToIntCast(Info, E, DestType: PromotedLHSType, SrcType: SubobjType, Value); |
| 4982 | if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS: RHS.getInt(), Result&: LHS)) |
| 4983 | return false; |
| 4984 | Value = HandleIntToIntCast(Info, E, DestType: SubobjType, SrcType: PromotedLHSType, Value: LHS); |
| 4985 | return true; |
| 4986 | } else if (RHS.isFloat()) { |
| 4987 | const FPOptions FPO = E->getFPFeaturesInEffect( |
| 4988 | LO: Info.Ctx.getLangOpts()); |
| 4989 | APFloat FValue(0.0); |
| 4990 | return HandleIntToFloatCast(Info, E, FPO, SrcType: SubobjType, Value, |
| 4991 | DestType: PromotedLHSType, Result&: FValue) && |
| 4992 | handleFloatFloatBinOp(Info, E, LHS&: FValue, Opcode, RHS: RHS.getFloat()) && |
| 4993 | HandleFloatToIntCast(Info, E, SrcType: PromotedLHSType, Value: FValue, DestType: SubobjType, |
| 4994 | Result&: Value); |
| 4995 | } |
| 4996 | |
| 4997 | Info.FFDiag(E); |
| 4998 | return false; |
| 4999 | } |
| 5000 | bool found(APFloat &Value, QualType SubobjType) { |
| 5001 | return checkConst(QT: SubobjType) && |
| 5002 | HandleFloatToFloatCast(Info, E, SrcType: SubobjType, DestType: PromotedLHSType, |
| 5003 | Result&: Value) && |
| 5004 | handleFloatFloatBinOp(Info, E, LHS&: Value, Opcode, RHS: RHS.getFloat()) && |
| 5005 | HandleFloatToFloatCast(Info, E, SrcType: PromotedLHSType, DestType: SubobjType, Result&: Value); |
| 5006 | } |
| 5007 | bool foundPointer(APValue &Subobj, QualType SubobjType) { |
| 5008 | if (!checkConst(QT: SubobjType)) |
| 5009 | return false; |
| 5010 | |
| 5011 | QualType PointeeType; |
| 5012 | if (const PointerType *PT = SubobjType->getAs<PointerType>()) |
| 5013 | PointeeType = PT->getPointeeType(); |
| 5014 | |
| 5015 | if (PointeeType.isNull() || !RHS.isInt() || |
| 5016 | (Opcode != BO_Add && Opcode != BO_Sub)) { |
| 5017 | Info.FFDiag(E); |
| 5018 | return false; |
| 5019 | } |
| 5020 | |
| 5021 | APSInt Offset = RHS.getInt(); |
| 5022 | if (Opcode == BO_Sub) |
| 5023 | negateAsSigned(Int&: Offset); |
| 5024 | |
| 5025 | LValue LVal; |
| 5026 | LVal.setFrom(Ctx: Info.Ctx, V: Subobj); |
| 5027 | if (!HandleLValueArrayAdjustment(Info, E, LVal, EltTy: PointeeType, Adjustment: Offset)) |
| 5028 | return false; |
| 5029 | LVal.moveInto(V&: Subobj); |
| 5030 | return true; |
| 5031 | } |
| 5032 | }; |
| 5033 | } // end anonymous namespace |
| 5034 | |
| 5035 | const AccessKinds CompoundAssignSubobjectHandler::AccessKind; |
| 5036 | |
| 5037 | /// Perform a compound assignment of LVal <op>= RVal. |
| 5038 | static bool handleCompoundAssignment(EvalInfo &Info, |
| 5039 | const CompoundAssignOperator *E, |
| 5040 | const LValue &LVal, QualType LValType, |
| 5041 | QualType PromotedLValType, |
| 5042 | BinaryOperatorKind Opcode, |
| 5043 | const APValue &RVal) { |
| 5044 | if (LVal.Designator.Invalid) |
| 5045 | return false; |
| 5046 | |
| 5047 | if (!Info.getLangOpts().CPlusPlus14) { |
| 5048 | Info.FFDiag(E); |
| 5049 | return false; |
| 5050 | } |
| 5051 | |
| 5052 | CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Assign, LVal, LValType); |
| 5053 | CompoundAssignSubobjectHandler Handler = { .Info: Info, .E: E, .PromotedLHSType: PromotedLValType, .Opcode: Opcode, |
| 5054 | .RHS: RVal }; |
| 5055 | return Obj && findSubobject(Info, E, Obj, Sub: LVal.Designator, handler&: Handler); |
| 5056 | } |
| 5057 | |
| 5058 | namespace { |
| 5059 | struct IncDecSubobjectHandler { |
| 5060 | EvalInfo &Info; |
| 5061 | const UnaryOperator *E; |
| 5062 | AccessKinds AccessKind; |
| 5063 | APValue *Old; |
| 5064 | |
| 5065 | typedef bool result_type; |
| 5066 | |
| 5067 | bool checkConst(QualType QT) { |
| 5068 | // Assigning to a const object has undefined behavior. |
| 5069 | if (QT.isConstQualified()) { |
| 5070 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT; |
| 5071 | return false; |
| 5072 | } |
| 5073 | return true; |
| 5074 | } |
| 5075 | |
| 5076 | bool failed() { return false; } |
| 5077 | bool found(APValue &Subobj, QualType SubobjType) { |
| 5078 | // Stash the old value. Also clear Old, so we don't clobber it later |
| 5079 | // if we're post-incrementing a complex. |
| 5080 | if (Old) { |
| 5081 | *Old = Subobj; |
| 5082 | Old = nullptr; |
| 5083 | } |
| 5084 | |
| 5085 | switch (Subobj.getKind()) { |
| 5086 | case APValue::Int: |
| 5087 | return found(Value&: Subobj.getInt(), SubobjType); |
| 5088 | case APValue::Float: |
| 5089 | return found(Value&: Subobj.getFloat(), SubobjType); |
| 5090 | case APValue::ComplexInt: |
| 5091 | return found(Value&: Subobj.getComplexIntReal(), |
| 5092 | SubobjType: SubobjType->castAs<ComplexType>()->getElementType() |
| 5093 | .withCVRQualifiers(CVR: SubobjType.getCVRQualifiers())); |
| 5094 | case APValue::ComplexFloat: |
| 5095 | return found(Value&: Subobj.getComplexFloatReal(), |
| 5096 | SubobjType: SubobjType->castAs<ComplexType>()->getElementType() |
| 5097 | .withCVRQualifiers(CVR: SubobjType.getCVRQualifiers())); |
| 5098 | case APValue::LValue: |
| 5099 | return foundPointer(Subobj, SubobjType); |
| 5100 | default: |
| 5101 | // FIXME: can this happen? |
| 5102 | Info.FFDiag(E); |
| 5103 | return false; |
| 5104 | } |
| 5105 | } |
| 5106 | bool found(APSInt &Value, QualType SubobjType) { |
| 5107 | if (!checkConst(QT: SubobjType)) |
| 5108 | return false; |
| 5109 | |
| 5110 | if (!SubobjType->isIntegerType()) { |
| 5111 | // We don't support increment / decrement on integer-cast-to-pointer |
| 5112 | // values. |
| 5113 | Info.FFDiag(E); |
| 5114 | return false; |
| 5115 | } |
| 5116 | |
| 5117 | if (Old) *Old = APValue(Value); |
| 5118 | |
| 5119 | // bool arithmetic promotes to int, and the conversion back to bool |
| 5120 | // doesn't reduce mod 2^n, so special-case it. |
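|  | // For example, ++b on a bool always yields true, while --b yields the |
|  | // logical negation of the previous value. |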
| 5121 | if (SubobjType->isBooleanType()) { |
| 5122 | if (AccessKind == AK_Increment) |
| 5123 | Value = 1; |
| 5124 | else |
| 5125 | Value = !Value; |
| 5126 | return true; |
| 5127 | } |
| 5128 | |
| 5129 | bool WasNegative = Value.isNegative(); |
| 5130 | if (AccessKind == AK_Increment) { |
| 5131 | ++Value; |
| 5132 | |
| 5133 | if (!WasNegative && Value.isNegative() && E->canOverflow()) { |
| 5134 | APSInt ActualValue(Value, /*IsUnsigned*/true); |
| 5135 | return HandleOverflow(Info, E, SrcValue: ActualValue, DestType: SubobjType); |
| 5136 | } |
| 5137 | } else { |
| 5138 | --Value; |
| 5139 | |
| 5140 | if (WasNegative && !Value.isNegative() && E->canOverflow()) { |
| 5141 | unsigned BitWidth = Value.getBitWidth(); |
| 5142 | APSInt ActualValue(Value.sext(width: BitWidth + 1), /*IsUnsigned*/false); |
| 5143 | ActualValue.setBit(BitWidth); |
| 5144 | return HandleOverflow(Info, E, SrcValue: ActualValue, DestType: SubobjType); |
| 5145 | } |
| 5146 | } |
| 5147 | return true; |
| 5148 | } |
| 5149 | bool found(APFloat &Value, QualType SubobjType) { |
| 5150 | if (!checkConst(QT: SubobjType)) |
| 5151 | return false; |
| 5152 | |
| 5153 | if (Old) *Old = APValue(Value); |
| 5154 | |
| 5155 | APFloat One(Value.getSemantics(), 1); |
| 5156 | llvm::RoundingMode RM = getActiveRoundingMode(Info, E); |
| 5157 | APFloat::opStatus St; |
| 5158 | if (AccessKind == AK_Increment) |
| 5159 | St = Value.add(RHS: One, RM); |
| 5160 | else |
| 5161 | St = Value.subtract(RHS: One, RM); |
| 5162 | return checkFloatingPointResult(Info, E, St); |
| 5163 | } |
| 5164 | bool foundPointer(APValue &Subobj, QualType SubobjType) { |
| 5165 | if (!checkConst(QT: SubobjType)) |
| 5166 | return false; |
| 5167 | |
| 5168 | QualType PointeeType; |
| 5169 | if (const PointerType *PT = SubobjType->getAs<PointerType>()) |
| 5170 | PointeeType = PT->getPointeeType(); |
| 5171 | else { |
| 5172 | Info.FFDiag(E); |
| 5173 | return false; |
| 5174 | } |
| 5175 | |
| 5176 | LValue LVal; |
| 5177 | LVal.setFrom(Ctx: Info.Ctx, V: Subobj); |
| 5178 | if (!HandleLValueArrayAdjustment(Info, E, LVal, EltTy: PointeeType, |
| 5179 | Adjustment: AccessKind == AK_Increment ? 1 : -1)) |
| 5180 | return false; |
| 5181 | LVal.moveInto(V&: Subobj); |
| 5182 | return true; |
| 5183 | } |
| 5184 | }; |
| 5185 | } // end anonymous namespace |
| 5186 | |
| 5187 | /// Perform an increment or decrement on LVal. |
| 5188 | static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal, |
| 5189 | QualType LValType, bool IsIncrement, APValue *Old) { |
| 5190 | if (LVal.Designator.Invalid) |
| 5191 | return false; |
| 5192 | |
| 5193 | if (!Info.getLangOpts().CPlusPlus14) { |
| 5194 | Info.FFDiag(E); |
| 5195 | return false; |
| 5196 | } |
| 5197 | |
| 5198 | AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement; |
| 5199 | CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType); |
| 5200 | IncDecSubobjectHandler Handler = {.Info: Info, .E: cast<UnaryOperator>(Val: E), .AccessKind: AK, .Old: Old}; |
| 5201 | return Obj && findSubobject(Info, E, Obj, Sub: LVal.Designator, handler&: Handler); |
| 5202 | } |
| 5203 | |
| 5204 | /// Build an lvalue for the object argument of a member function call. |
| 5205 | static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object, |
| 5206 | LValue &This) { |
| 5207 | if (Object->getType()->isPointerType() && Object->isPRValue()) |
| 5208 | return EvaluatePointer(E: Object, Result&: This, Info); |
| 5209 | |
| 5210 | if (Object->isGLValue()) |
| 5211 | return EvaluateLValue(E: Object, Result&: This, Info); |
| 5212 | |
| 5213 | if (Object->getType()->isLiteralType(Ctx: Info.Ctx)) |
| 5214 | return EvaluateTemporary(E: Object, Result&: This, Info); |
| 5215 | |
| 5216 | if (Object->getType()->isRecordType() && Object->isPRValue()) |
| 5217 | return EvaluateTemporary(E: Object, Result&: This, Info); |
| 5218 | |
| 5219 | Info.FFDiag(E: Object, DiagId: diag::note_constexpr_nonliteral) << Object->getType(); |
| 5220 | return false; |
| 5221 | } |
| 5222 | |
| 5223 | /// HandleMemberPointerAccess - Evaluate a member access operation and build an |
| 5224 | /// lvalue referring to the result. |
| 5225 | /// |
| 5226 | /// \param Info - Information about the ongoing evaluation. |
| 5227 | /// \param LV - An lvalue referring to the base of the member pointer. |
| 5228 | /// \param RHS - The member pointer expression. |
| 5229 | /// \param IncludeMember - Specifies whether the member itself is included in |
| 5230 | /// the resulting LValue subobject designator. This is not possible when |
| 5231 | /// creating a bound member function. |
| 5232 | /// \return The field or method declaration to which the member pointer refers, |
| 5233 | /// or 0 if evaluation fails. |
| 5234 | static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info, |
| 5235 | QualType LVType, |
| 5236 | LValue &LV, |
| 5237 | const Expr *RHS, |
| 5238 | bool IncludeMember = true) { |
| 5239 | MemberPtr MemPtr; |
| 5240 | if (!EvaluateMemberPointer(E: RHS, Result&: MemPtr, Info)) |
| 5241 | return nullptr; |
| 5242 | |
| 5243 | // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to |
| 5244 | // member value, the behavior is undefined. |
| 5245 | if (!MemPtr.getDecl()) { |
| 5246 | // FIXME: Specific diagnostic. |
| 5247 | Info.FFDiag(E: RHS); |
| 5248 | return nullptr; |
| 5249 | } |
| 5250 | |
| 5251 | if (MemPtr.isDerivedMember()) { |
| 5252 | // This is a member of some derived class. Truncate LV appropriately. |
| 5253 | // The end of the derived-to-base path for the base object must match the |
| 5254 | // derived-to-base path for the member pointer. |
| 5255 | // C++23 [expr.mptr.oper]p4: |
| 5256 | // If the result of E1 is an object [...] whose most derived object does |
| 5257 | // not contain the member to which E2 refers, the behavior is undefined. |
| 5258 | if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() > |
| 5259 | LV.Designator.Entries.size()) { |
| 5260 | Info.FFDiag(E: RHS); |
| 5261 | return nullptr; |
| 5262 | } |
| 5263 | unsigned PathLengthToMember = |
| 5264 | LV.Designator.Entries.size() - MemPtr.Path.size(); |
| 5265 | for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) { |
| 5266 | const CXXRecordDecl *LVDecl = getAsBaseClass( |
| 5267 | E: LV.Designator.Entries[PathLengthToMember + I]); |
| 5268 | const CXXRecordDecl *MPDecl = MemPtr.Path[I]; |
| 5269 | if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) { |
| 5270 | Info.FFDiag(E: RHS); |
| 5271 | return nullptr; |
| 5272 | } |
| 5273 | } |
| 5274 | // MemPtr.Path only contains the base classes of the class directly |
| 5275 | // containing the member E2. It is still necessary to check that the class |
| 5276 | // directly containing the member E2 lies on the derived-to-base path of E1 |
| 5277 | // to avoid incorrectly permitting member pointer access into a sibling |
| 5278 | // class of the class containing the member E2. If this class would |
| 5279 | // correspond to the most-derived class of E1, it either isn't contained in |
| 5280 | // LV.Designator.Entries or the corresponding entry refers to an array |
| 5281 | // element instead. Therefore get the most derived class directly in this |
// case. Otherwise the previous entry should correspond to this class.
| 5283 | const CXXRecordDecl *LastLVDecl = |
| 5284 | (PathLengthToMember > LV.Designator.MostDerivedPathLength) |
| 5285 | ? getAsBaseClass(E: LV.Designator.Entries[PathLengthToMember - 1]) |
| 5286 | : LV.Designator.MostDerivedType->getAsCXXRecordDecl(); |
| 5287 | const CXXRecordDecl *LastMPDecl = MemPtr.getContainingRecord(); |
| 5288 | if (LastLVDecl->getCanonicalDecl() != LastMPDecl->getCanonicalDecl()) { |
| 5289 | Info.FFDiag(E: RHS); |
| 5290 | return nullptr; |
| 5291 | } |
| 5292 | |
| 5293 | // Truncate the lvalue to the appropriate derived class. |
| 5294 | if (!CastToDerivedClass(Info, E: RHS, Result&: LV, TruncatedType: MemPtr.getContainingRecord(), |
| 5295 | TruncatedElements: PathLengthToMember)) |
| 5296 | return nullptr; |
| 5297 | } else if (!MemPtr.Path.empty()) { |
| 5298 | // Extend the LValue path with the member pointer's path. |
| 5299 | LV.Designator.Entries.reserve(N: LV.Designator.Entries.size() + |
| 5300 | MemPtr.Path.size() + IncludeMember); |
| 5301 | |
| 5302 | // Walk down to the appropriate base class. |
| 5303 | if (const PointerType *PT = LVType->getAs<PointerType>()) |
| 5304 | LVType = PT->getPointeeType(); |
| 5305 | const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl(); |
assert(RD && "member pointer access on non-class-type expression");
| 5307 | // The first class in the path is that of the lvalue. |
| 5308 | for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) { |
| 5309 | const CXXRecordDecl *Base = MemPtr.Path[N - I - 1]; |
| 5310 | if (!HandleLValueDirectBase(Info, E: RHS, Obj&: LV, Derived: RD, Base)) |
| 5311 | return nullptr; |
| 5312 | RD = Base; |
| 5313 | } |
| 5314 | // Finally cast to the class containing the member. |
| 5315 | if (!HandleLValueDirectBase(Info, E: RHS, Obj&: LV, Derived: RD, |
| 5316 | Base: MemPtr.getContainingRecord())) |
| 5317 | return nullptr; |
| 5318 | } |
| 5319 | |
| 5320 | // Add the member. Note that we cannot build bound member functions here. |
| 5321 | if (IncludeMember) { |
| 5322 | if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: MemPtr.getDecl())) { |
| 5323 | if (!HandleLValueMember(Info, E: RHS, LVal&: LV, FD)) |
| 5324 | return nullptr; |
| 5325 | } else if (const IndirectFieldDecl *IFD = |
| 5326 | dyn_cast<IndirectFieldDecl>(Val: MemPtr.getDecl())) { |
| 5327 | if (!HandleLValueIndirectMember(Info, E: RHS, LVal&: LV, IFD)) |
| 5328 | return nullptr; |
| 5329 | } else { |
| 5330 | llvm_unreachable("can't construct reference to bound member function" ); |
| 5331 | } |
| 5332 | } |
| 5333 | |
| 5334 | return MemPtr.getDecl(); |
| 5335 | } |
| 5336 | |
| 5337 | static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info, |
| 5338 | const BinaryOperator *BO, |
| 5339 | LValue &LV, |
| 5340 | bool IncludeMember = true) { |
| 5341 | assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI); |
| 5342 | |
| 5343 | if (!EvaluateObjectArgument(Info, Object: BO->getLHS(), This&: LV)) { |
| 5344 | if (Info.noteFailure()) { |
| 5345 | MemberPtr MemPtr; |
| 5346 | EvaluateMemberPointer(E: BO->getRHS(), Result&: MemPtr, Info); |
| 5347 | } |
| 5348 | return nullptr; |
| 5349 | } |
| 5350 | |
| 5351 | return HandleMemberPointerAccess(Info, LVType: BO->getLHS()->getType(), LV, |
| 5352 | RHS: BO->getRHS(), IncludeMember); |
| 5353 | } |
| 5354 | |
| 5355 | /// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on |
| 5356 | /// the provided lvalue, which currently refers to the base object. |
| 5357 | static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E, |
| 5358 | LValue &Result) { |
| 5359 | SubobjectDesignator &D = Result.Designator; |
| 5360 | if (D.Invalid || !Result.checkNullPointer(Info, E, CSK: CSK_Derived)) |
| 5361 | return false; |
| 5362 | |
| 5363 | QualType TargetQT = E->getType(); |
| 5364 | if (const PointerType *PT = TargetQT->getAs<PointerType>()) |
| 5365 | TargetQT = PT->getPointeeType(); |
| 5366 | |
| 5367 | auto InvalidCast = [&]() { |
| 5368 | if (!Info.checkingPotentialConstantExpression() || |
| 5369 | !Result.AllowConstexprUnknown) { |
| 5370 | Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_downcast) |
| 5371 | << D.MostDerivedType << TargetQT; |
| 5372 | } |
| 5373 | return false; |
| 5374 | }; |
| 5375 | |
| 5376 | // Check this cast lands within the final derived-to-base subobject path. |
| 5377 | if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) |
| 5378 | return InvalidCast(); |
| 5379 | |
| 5380 | // Check the type of the final cast. We don't need to check the path, |
| 5381 | // since a cast can only be formed if the path is unique. |
| 5382 | unsigned NewEntriesSize = D.Entries.size() - E->path_size(); |
| 5383 | const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl(); |
| 5384 | const CXXRecordDecl *FinalType; |
| 5385 | if (NewEntriesSize == D.MostDerivedPathLength) |
| 5386 | FinalType = D.MostDerivedType->getAsCXXRecordDecl(); |
| 5387 | else |
| 5388 | FinalType = getAsBaseClass(E: D.Entries[NewEntriesSize - 1]); |
| 5389 | if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) |
| 5390 | return InvalidCast(); |
| 5391 | |
| 5392 | // Truncate the lvalue to the appropriate derived class. |
| 5393 | return CastToDerivedClass(Info, E, Result, TruncatedType: TargetType, TruncatedElements: NewEntriesSize); |
| 5394 | } |
| 5395 | |
| 5396 | /// Get the value to use for a default-initialized object of type T. |
| 5397 | /// Return false if it encounters something invalid. |
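/// For a union this is 'no active member'; for other classes, bases and
/// fields (other than unnamed bit-fields) are handled recursively; for arrays,
/// the filler is handled recursively; everything else gets an indeterminate
/// value.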
| 5398 | static bool handleDefaultInitValue(QualType T, APValue &Result) { |
| 5399 | bool Success = true; |
| 5400 | |
// If there is already a value present, don't overwrite it.
| 5402 | if (!Result.isAbsent()) |
| 5403 | return true; |
| 5404 | |
| 5405 | if (auto *RD = T->getAsCXXRecordDecl()) { |
| 5406 | if (RD->isInvalidDecl()) { |
| 5407 | Result = APValue(); |
| 5408 | return false; |
| 5409 | } |
| 5410 | if (RD->isUnion()) { |
| 5411 | Result = APValue((const FieldDecl *)nullptr); |
| 5412 | return true; |
| 5413 | } |
| 5414 | Result = |
| 5415 | APValue(APValue::UninitStruct(), RD->getNumBases(), RD->getNumFields()); |
| 5416 | |
| 5417 | unsigned Index = 0; |
| 5418 | for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), |
| 5419 | End = RD->bases_end(); |
| 5420 | I != End; ++I, ++Index) |
| 5421 | Success &= |
| 5422 | handleDefaultInitValue(T: I->getType(), Result&: Result.getStructBase(i: Index)); |
| 5423 | |
| 5424 | for (const auto *I : RD->fields()) { |
| 5425 | if (I->isUnnamedBitField()) |
| 5426 | continue; |
| 5427 | Success &= handleDefaultInitValue( |
| 5428 | T: I->getType(), Result&: Result.getStructField(i: I->getFieldIndex())); |
| 5429 | } |
| 5430 | return Success; |
| 5431 | } |
| 5432 | |
| 5433 | if (auto *AT = |
| 5434 | dyn_cast_or_null<ConstantArrayType>(Val: T->getAsArrayTypeUnsafe())) { |
| 5435 | Result = APValue(APValue::UninitArray(), 0, AT->getZExtSize()); |
| 5436 | if (Result.hasArrayFiller()) |
| 5437 | Success &= |
| 5438 | handleDefaultInitValue(T: AT->getElementType(), Result&: Result.getArrayFiller()); |
| 5439 | |
| 5440 | return Success; |
| 5441 | } |
| 5442 | |
| 5443 | Result = APValue::IndeterminateValue(); |
| 5444 | return true; |
| 5445 | } |
| 5446 | |
| 5447 | namespace { |
| 5448 | enum EvalStmtResult { |
| 5449 | /// Evaluation failed. |
| 5450 | ESR_Failed, |
| 5451 | /// Hit a 'return' statement. |
| 5452 | ESR_Returned, |
| 5453 | /// Evaluation succeeded. |
| 5454 | ESR_Succeeded, |
| 5455 | /// Hit a 'continue' statement. |
| 5456 | ESR_Continue, |
| 5457 | /// Hit a 'break' statement. |
| 5458 | ESR_Break, |
| 5459 | /// Still scanning for 'case' or 'default' statement. |
| 5460 | ESR_CaseNotFound |
| 5461 | }; |
| 5462 | } |
| 5463 | /// Evaluates the initializer of a reference. |
| 5464 | static bool EvaluateInitForDeclOfReferenceType(EvalInfo &Info, |
| 5465 | const ValueDecl *D, |
| 5466 | const Expr *Init, LValue &Result, |
| 5467 | APValue &Val) { |
| 5468 | assert(Init->isGLValue() && D->getType()->isReferenceType()); |
| 5469 | // A reference is an lvalue. |
| 5470 | if (!EvaluateLValue(E: Init, Result, Info)) |
| 5471 | return false; |
// [C++26][decl.ref]
// The object designated by such a glvalue can be outside its lifetime.
// Because a null pointer value or a pointer past the end of an object
// does not point to an object, a reference in a well-defined program cannot
// refer to such things.
| 5477 | if (!Result.Designator.Invalid && Result.Designator.isOnePastTheEnd()) { |
| 5478 | Info.FFDiag(E: Init, DiagId: diag::note_constexpr_access_past_end) << AK_Dereference; |
| 5479 | return false; |
| 5480 | } |
| 5481 | |
| 5482 | // Save the result. |
| 5483 | Result.moveInto(V&: Val); |
| 5484 | return true; |
| 5485 | } |
| 5486 | |
| 5487 | static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) { |
| 5488 | if (VD->isInvalidDecl()) |
| 5489 | return false; |
| 5490 | // We don't need to evaluate the initializer for a static local. |
| 5491 | if (!VD->hasLocalStorage()) |
| 5492 | return true; |
| 5493 | |
| 5494 | LValue Result; |
| 5495 | APValue &Val = Info.CurrentCall->createTemporary(Key: VD, T: VD->getType(), |
| 5496 | Scope: ScopeKind::Block, LV&: Result); |
| 5497 | |
| 5498 | const Expr *InitE = VD->getInit(); |
| 5499 | if (!InitE) { |
| 5500 | if (VD->getType()->isDependentType()) |
| 5501 | return Info.noteSideEffect(); |
| 5502 | return handleDefaultInitValue(T: VD->getType(), Result&: Val); |
| 5503 | } |
| 5504 | if (InitE->isValueDependent()) |
| 5505 | return false; |
| 5506 | |
| 5507 | // For references to objects, check they do not designate a one-past-the-end |
| 5508 | // object. |
| 5509 | if (VD->getType()->isReferenceType()) { |
| 5510 | return EvaluateInitForDeclOfReferenceType(Info, D: VD, Init: InitE, Result, Val); |
| 5511 | } else if (!EvaluateInPlace(Result&: Val, Info, This: Result, E: InitE)) { |
| 5512 | // Wipe out any partially-computed value, to allow tracking that this |
| 5513 | // evaluation failed. |
| 5514 | Val = APValue(); |
| 5515 | return false; |
| 5516 | } |
| 5517 | |
| 5518 | return true; |
| 5519 | } |
| 5520 | |
| 5521 | static bool EvaluateDecompositionDeclInit(EvalInfo &Info, |
| 5522 | const DecompositionDecl *DD); |
| 5523 | |
| 5524 | static bool EvaluateDecl(EvalInfo &Info, const Decl *D, |
| 5525 | bool EvaluateConditionDecl = false) { |
| 5526 | bool OK = true; |
| 5527 | if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) |
| 5528 | OK &= EvaluateVarDecl(Info, VD); |
| 5529 | |
| 5530 | if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(Val: D); |
| 5531 | EvaluateConditionDecl && DD) |
| 5532 | OK &= EvaluateDecompositionDeclInit(Info, DD); |
| 5533 | |
| 5534 | return OK; |
| 5535 | } |
| 5536 | |
| 5537 | static bool EvaluateDecompositionDeclInit(EvalInfo &Info, |
| 5538 | const DecompositionDecl *DD) { |
| 5539 | bool OK = true; |
| 5540 | for (auto *BD : DD->flat_bindings()) |
| 5541 | if (auto *VD = BD->getHoldingVar()) |
| 5542 | OK &= EvaluateDecl(Info, D: VD, /*EvaluateConditionDecl=*/true); |
| 5543 | |
| 5544 | return OK; |
| 5545 | } |
| 5546 | |
| 5547 | static bool MaybeEvaluateDeferredVarDeclInit(EvalInfo &Info, |
| 5548 | const VarDecl *VD) { |
| 5549 | if (auto *DD = dyn_cast_if_present<DecompositionDecl>(Val: VD)) { |
| 5550 | if (!EvaluateDecompositionDeclInit(Info, DD)) |
| 5551 | return false; |
| 5552 | } |
| 5553 | return true; |
| 5554 | } |
| 5555 | |
| 5556 | static bool EvaluateDependentExpr(const Expr *E, EvalInfo &Info) { |
| 5557 | assert(E->isValueDependent()); |
| 5558 | if (Info.noteSideEffect()) |
| 5559 | return true; |
| 5560 | assert(E->containsErrors() && "valid value-dependent expression should never " |
| 5561 | "reach invalid code path." ); |
| 5562 | return false; |
| 5563 | } |
| 5564 | |
| 5565 | /// Evaluate a condition (either a variable declaration or an expression). |
| 5566 | static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl, |
| 5567 | const Expr *Cond, bool &Result) { |
| 5568 | if (Cond->isValueDependent()) |
| 5569 | return false; |
| 5570 | FullExpressionRAII Scope(Info); |
| 5571 | if (CondDecl && !EvaluateDecl(Info, D: CondDecl)) |
| 5572 | return false; |
| 5573 | if (!EvaluateAsBooleanCondition(E: Cond, Result, Info)) |
| 5574 | return false; |
| 5575 | if (!MaybeEvaluateDeferredVarDeclInit(Info, VD: CondDecl)) |
| 5576 | return false; |
| 5577 | return Scope.destroy(); |
| 5578 | } |
| 5579 | |
| 5580 | namespace { |
| 5581 | /// A location where the result (returned value) of evaluating a |
| 5582 | /// statement should be stored. |
| 5583 | struct StmtResult { |
| 5584 | /// The APValue that should be filled in with the returned value. |
| 5585 | APValue &Value; |
| 5586 | /// The location containing the result, if any (used to support RVO). |
| 5587 | const LValue *Slot; |
| 5588 | }; |
| 5589 | |
| 5590 | struct TempVersionRAII { |
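// Pushes a fresh temporary-object version on construction and pops it on
// destruction, so that repeated evaluations of the same expression (e.g. a
// default argument) create distinct temporaries.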
| 5591 | CallStackFrame &Frame; |
| 5592 | |
| 5593 | TempVersionRAII(CallStackFrame &Frame) : Frame(Frame) { |
| 5594 | Frame.pushTempVersion(); |
| 5595 | } |
| 5596 | |
| 5597 | ~TempVersionRAII() { |
| 5598 | Frame.popTempVersion(); |
| 5599 | } |
| 5600 | }; |
| 5601 | |
| 5602 | } |
| 5603 | |
| 5604 | static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, |
| 5605 | const Stmt *S, |
| 5606 | const SwitchCase *SC = nullptr); |
| 5607 | |
| 5608 | /// Helper to implement named break/continue. Returns 'true' if the evaluation |
| 5609 | /// result should be propagated up. Otherwise, it sets the evaluation result |
| 5610 | /// to either Continue to continue the current loop, or Succeeded to break it. |
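/// For example, a 'break' that targets an enclosing named loop must be
/// propagated past any inner loops (destroying their scopes) until it reaches
/// that loop, where it is consumed and mapped to Succeeded.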
| 5611 | static bool ShouldPropagateBreakContinue(EvalInfo &Info, |
| 5612 | const Stmt *LoopOrSwitch, |
| 5613 | ArrayRef<BlockScopeRAII *> Scopes, |
| 5614 | EvalStmtResult &ESR) { |
| 5615 | bool IsSwitch = isa<SwitchStmt>(Val: LoopOrSwitch); |
| 5616 | |
| 5617 | // For loops, map Succeeded to Continue so we don't have to check for both. |
| 5618 | if (!IsSwitch && ESR == ESR_Succeeded) { |
| 5619 | ESR = ESR_Continue; |
| 5620 | return false; |
| 5621 | } |
| 5622 | |
| 5623 | if (ESR != ESR_Break && ESR != ESR_Continue) |
| 5624 | return false; |
| 5625 | |
| 5626 | // Are we breaking out of or continuing this statement? |
| 5627 | bool CanBreakOrContinue = !IsSwitch || ESR == ESR_Break; |
| 5628 | const Stmt *StackTop = Info.BreakContinueStack.back(); |
| 5629 | if (CanBreakOrContinue && (StackTop == nullptr || StackTop == LoopOrSwitch)) { |
| 5630 | Info.BreakContinueStack.pop_back(); |
| 5631 | if (ESR == ESR_Break) |
| 5632 | ESR = ESR_Succeeded; |
| 5633 | return false; |
| 5634 | } |
| 5635 | |
| 5636 | // We're not. Propagate the result up. |
| 5637 | for (BlockScopeRAII *S : Scopes) { |
| 5638 | if (!S->destroy()) { |
| 5639 | ESR = ESR_Failed; |
| 5640 | break; |
| 5641 | } |
| 5642 | } |
| 5643 | return true; |
| 5644 | } |
| 5645 | |
| 5646 | /// Evaluate the body of a loop, and translate the result as appropriate. |
| 5647 | static EvalStmtResult EvaluateLoopBody(StmtResult &Result, EvalInfo &Info, |
| 5648 | const Stmt *Body, |
| 5649 | const SwitchCase *Case = nullptr) { |
| 5650 | BlockScopeRAII Scope(Info); |
| 5651 | |
| 5652 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Body, SC: Case); |
| 5653 | if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy()) |
| 5654 | ESR = ESR_Failed; |
| 5655 | |
| 5656 | return ESR; |
| 5657 | } |
| 5658 | |
| 5659 | /// Evaluate a switch statement. |
| 5660 | static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info, |
| 5661 | const SwitchStmt *SS) { |
| 5662 | BlockScopeRAII Scope(Info); |
| 5663 | |
| 5664 | // Evaluate the switch condition. |
| 5665 | APSInt Value; |
| 5666 | { |
| 5667 | if (const Stmt *Init = SS->getInit()) { |
| 5668 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init); |
| 5669 | if (ESR != ESR_Succeeded) { |
| 5670 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 5671 | ESR = ESR_Failed; |
| 5672 | return ESR; |
| 5673 | } |
| 5674 | } |
| 5675 | |
| 5676 | FullExpressionRAII CondScope(Info); |
| 5677 | if (SS->getConditionVariable() && |
| 5678 | !EvaluateDecl(Info, D: SS->getConditionVariable())) |
| 5679 | return ESR_Failed; |
| 5680 | if (SS->getCond()->isValueDependent()) { |
// We don't know the value, so we don't know which branch to jump to.
| 5682 | EvaluateDependentExpr(E: SS->getCond(), Info); |
| 5683 | return ESR_Failed; |
| 5684 | } |
| 5685 | if (!EvaluateInteger(E: SS->getCond(), Result&: Value, Info)) |
| 5686 | return ESR_Failed; |
| 5687 | |
| 5688 | if (!MaybeEvaluateDeferredVarDeclInit(Info, VD: SS->getConditionVariable())) |
| 5689 | return ESR_Failed; |
| 5690 | |
| 5691 | if (!CondScope.destroy()) |
| 5692 | return ESR_Failed; |
| 5693 | } |
| 5694 | |
| 5695 | // Find the switch case corresponding to the value of the condition. |
| 5696 | // FIXME: Cache this lookup. |
| 5697 | const SwitchCase *Found = nullptr; |
| 5698 | for (const SwitchCase *SC = SS->getSwitchCaseList(); SC; |
| 5699 | SC = SC->getNextSwitchCase()) { |
| 5700 | if (isa<DefaultStmt>(Val: SC)) { |
| 5701 | Found = SC; |
| 5702 | continue; |
| 5703 | } |
| 5704 | |
| 5705 | const CaseStmt *CS = cast<CaseStmt>(Val: SC); |
| 5706 | const Expr *LHS = CS->getLHS(); |
| 5707 | const Expr *RHS = CS->getRHS(); |
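// A non-null RHS means this is a GNU case range ('case lo ... hi:').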
| 5708 | if (LHS->isValueDependent() || (RHS && RHS->isValueDependent())) |
| 5709 | return ESR_Failed; |
| 5710 | APSInt LHSValue = LHS->EvaluateKnownConstInt(Ctx: Info.Ctx); |
| 5711 | APSInt RHSValue = RHS ? RHS->EvaluateKnownConstInt(Ctx: Info.Ctx) : LHSValue; |
| 5712 | if (LHSValue <= Value && Value <= RHSValue) { |
| 5713 | Found = SC; |
| 5714 | break; |
| 5715 | } |
| 5716 | } |
| 5717 | |
| 5718 | if (!Found) |
| 5719 | return Scope.destroy() ? ESR_Succeeded : ESR_Failed; |
| 5720 | |
| 5721 | // Search the switch body for the switch case and evaluate it from there. |
| 5722 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: SS->getBody(), SC: Found); |
| 5723 | if (ESR != ESR_Failed && ESR != ESR_CaseNotFound && !Scope.destroy()) |
| 5724 | return ESR_Failed; |
| 5725 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: SS, /*Scopes=*/{}, ESR)) |
| 5726 | return ESR; |
| 5727 | |
| 5728 | switch (ESR) { |
| 5729 | case ESR_Break: |
| 5730 | llvm_unreachable("Should have been converted to Succeeded" ); |
| 5731 | case ESR_Succeeded: |
| 5732 | case ESR_Continue: |
| 5733 | case ESR_Failed: |
| 5734 | case ESR_Returned: |
| 5735 | return ESR; |
| 5736 | case ESR_CaseNotFound: |
| 5737 | // This can only happen if the switch case is nested within a statement |
| 5738 | // expression. We have no intention of supporting that. |
| 5739 | Info.FFDiag(Loc: Found->getBeginLoc(), |
| 5740 | DiagId: diag::note_constexpr_stmt_expr_unsupported); |
| 5741 | return ESR_Failed; |
| 5742 | } |
| 5743 | llvm_unreachable("Invalid EvalStmtResult!" ); |
| 5744 | } |
| 5745 | |
| 5746 | static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) { |
// An expression E is a core constant expression unless the evaluation of E
// would evaluate one of the following [C++23]:
//  - a control flow that passes through a declaration of a variable with
//    static or thread storage duration unless that variable is usable in
//    constant expressions.
| 5751 | if (VD->isLocalVarDecl() && VD->isStaticLocal() && |
| 5752 | !VD->isUsableInConstantExpressions(C: Info.Ctx)) { |
| 5753 | Info.CCEDiag(Loc: VD->getLocation(), DiagId: diag::note_constexpr_static_local) |
| 5754 | << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD; |
| 5755 | return false; |
| 5756 | } |
| 5757 | return true; |
| 5758 | } |
| 5759 | |
| 5760 | // Evaluate a statement. |
| 5761 | static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, |
| 5762 | const Stmt *S, const SwitchCase *Case) { |
| 5763 | if (!Info.nextStep(S)) |
| 5764 | return ESR_Failed; |
| 5765 | |
| 5766 | // If we're hunting down a 'case' or 'default' label, recurse through |
| 5767 | // substatements until we hit the label. |
| 5768 | if (Case) { |
| 5769 | switch (S->getStmtClass()) { |
| 5770 | case Stmt::CompoundStmtClass: |
| 5771 | // FIXME: Precompute which substatement of a compound statement we |
| 5772 | // would jump to, and go straight there rather than performing a |
| 5773 | // linear scan each time. |
| 5774 | case Stmt::LabelStmtClass: |
| 5775 | case Stmt::AttributedStmtClass: |
| 5776 | case Stmt::DoStmtClass: |
| 5777 | break; |
| 5778 | |
| 5779 | case Stmt::CaseStmtClass: |
| 5780 | case Stmt::DefaultStmtClass: |
| 5781 | if (Case == S) |
| 5782 | Case = nullptr; |
| 5783 | break; |
| 5784 | |
| 5785 | case Stmt::IfStmtClass: { |
| 5786 | // FIXME: Precompute which side of an 'if' we would jump to, and go |
| 5787 | // straight there rather than scanning both sides. |
| 5788 | const IfStmt *IS = cast<IfStmt>(Val: S); |
| 5789 | |
| 5790 | // Wrap the evaluation in a block scope, in case it's a DeclStmt |
| 5791 | // preceded by our switch label. |
| 5792 | BlockScopeRAII Scope(Info); |
| 5793 | |
| 5794 | // Step into the init statement in case it brings an (uninitialized) |
| 5795 | // variable into scope. |
| 5796 | if (const Stmt *Init = IS->getInit()) { |
| 5797 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init, Case); |
| 5798 | if (ESR != ESR_CaseNotFound) { |
| 5799 | assert(ESR != ESR_Succeeded); |
| 5800 | return ESR; |
| 5801 | } |
| 5802 | } |
| 5803 | |
| 5804 | // Condition variable must be initialized if it exists. |
| 5805 | // FIXME: We can skip evaluating the body if there's a condition |
| 5806 | // variable, as there can't be any case labels within it. |
| 5807 | // (The same is true for 'for' statements.) |
| 5808 | |
| 5809 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: IS->getThen(), Case); |
| 5810 | if (ESR == ESR_Failed) |
| 5811 | return ESR; |
| 5812 | if (ESR != ESR_CaseNotFound) |
| 5813 | return Scope.destroy() ? ESR : ESR_Failed; |
| 5814 | if (!IS->getElse()) |
| 5815 | return ESR_CaseNotFound; |
| 5816 | |
| 5817 | ESR = EvaluateStmt(Result, Info, S: IS->getElse(), Case); |
| 5818 | if (ESR == ESR_Failed) |
| 5819 | return ESR; |
| 5820 | if (ESR != ESR_CaseNotFound) |
| 5821 | return Scope.destroy() ? ESR : ESR_Failed; |
| 5822 | return ESR_CaseNotFound; |
| 5823 | } |
| 5824 | |
| 5825 | case Stmt::WhileStmtClass: { |
| 5826 | EvalStmtResult ESR = |
| 5827 | EvaluateLoopBody(Result, Info, Body: cast<WhileStmt>(Val: S)->getBody(), Case); |
| 5828 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: S, /*Scopes=*/{}, ESR)) |
| 5829 | return ESR; |
| 5830 | if (ESR != ESR_Continue) |
| 5831 | return ESR; |
| 5832 | break; |
| 5833 | } |
| 5834 | |
| 5835 | case Stmt::ForStmtClass: { |
| 5836 | const ForStmt *FS = cast<ForStmt>(Val: S); |
| 5837 | BlockScopeRAII Scope(Info); |
| 5838 | |
| 5839 | // Step into the init statement in case it brings an (uninitialized) |
| 5840 | // variable into scope. |
| 5841 | if (const Stmt *Init = FS->getInit()) { |
| 5842 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init, Case); |
| 5843 | if (ESR != ESR_CaseNotFound) { |
| 5844 | assert(ESR != ESR_Succeeded); |
| 5845 | return ESR; |
| 5846 | } |
| 5847 | } |
| 5848 | |
| 5849 | EvalStmtResult ESR = |
| 5850 | EvaluateLoopBody(Result, Info, Body: FS->getBody(), Case); |
| 5851 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, /*Scopes=*/{}, ESR)) |
| 5852 | return ESR; |
| 5853 | if (ESR != ESR_Continue) |
| 5854 | return ESR; |
| 5855 | if (const auto *Inc = FS->getInc()) { |
| 5856 | if (Inc->isValueDependent()) { |
| 5857 | if (!EvaluateDependentExpr(E: Inc, Info)) |
| 5858 | return ESR_Failed; |
| 5859 | } else { |
| 5860 | FullExpressionRAII IncScope(Info); |
| 5861 | if (!EvaluateIgnoredValue(Info, E: Inc) || !IncScope.destroy()) |
| 5862 | return ESR_Failed; |
| 5863 | } |
| 5864 | } |
| 5865 | break; |
| 5866 | } |
| 5867 | |
| 5868 | case Stmt::DeclStmtClass: { |
| 5869 | // Start the lifetime of any uninitialized variables we encounter. They |
| 5870 | // might be used by the selected branch of the switch. |
| 5871 | const DeclStmt *DS = cast<DeclStmt>(Val: S); |
| 5872 | for (const auto *D : DS->decls()) { |
| 5873 | if (const auto *VD = dyn_cast<VarDecl>(Val: D)) { |
| 5874 | if (!CheckLocalVariableDeclaration(Info, VD)) |
| 5875 | return ESR_Failed; |
| 5876 | if (VD->hasLocalStorage() && !VD->getInit()) |
| 5877 | if (!EvaluateVarDecl(Info, VD)) |
| 5878 | return ESR_Failed; |
| 5879 | // FIXME: If the variable has initialization that can't be jumped |
| 5880 | // over, bail out of any immediately-surrounding compound-statement |
| 5881 | // too. There can't be any case labels here. |
| 5882 | } |
| 5883 | } |
| 5884 | return ESR_CaseNotFound; |
| 5885 | } |
| 5886 | |
| 5887 | default: |
| 5888 | return ESR_CaseNotFound; |
| 5889 | } |
| 5890 | } |
| 5891 | |
| 5892 | switch (S->getStmtClass()) { |
| 5893 | default: |
| 5894 | if (const Expr *E = dyn_cast<Expr>(Val: S)) { |
| 5895 | if (E->isValueDependent()) { |
| 5896 | if (!EvaluateDependentExpr(E, Info)) |
| 5897 | return ESR_Failed; |
| 5898 | } else { |
| 5899 | // Don't bother evaluating beyond an expression-statement which couldn't |
| 5900 | // be evaluated. |
| 5901 | // FIXME: Do we need the FullExpressionRAII object here? |
| 5902 | // VisitExprWithCleanups should create one when necessary. |
| 5903 | FullExpressionRAII Scope(Info); |
| 5904 | if (!EvaluateIgnoredValue(Info, E) || !Scope.destroy()) |
| 5905 | return ESR_Failed; |
| 5906 | } |
| 5907 | return ESR_Succeeded; |
| 5908 | } |
| 5909 | |
| 5910 | Info.FFDiag(Loc: S->getBeginLoc()) << S->getSourceRange(); |
| 5911 | return ESR_Failed; |
| 5912 | |
| 5913 | case Stmt::NullStmtClass: |
| 5914 | return ESR_Succeeded; |
| 5915 | |
| 5916 | case Stmt::DeclStmtClass: { |
| 5917 | const DeclStmt *DS = cast<DeclStmt>(Val: S); |
| 5918 | for (const auto *D : DS->decls()) { |
| 5919 | const VarDecl *VD = dyn_cast_or_null<VarDecl>(Val: D); |
| 5920 | if (VD && !CheckLocalVariableDeclaration(Info, VD)) |
| 5921 | return ESR_Failed; |
| 5922 | // Each declaration initialization is its own full-expression. |
| 5923 | FullExpressionRAII Scope(Info); |
| 5924 | if (!EvaluateDecl(Info, D, /*EvaluateConditionDecl=*/true) && |
| 5925 | !Info.noteFailure()) |
| 5926 | return ESR_Failed; |
| 5927 | if (!Scope.destroy()) |
| 5928 | return ESR_Failed; |
| 5929 | } |
| 5930 | return ESR_Succeeded; |
| 5931 | } |
| 5932 | |
| 5933 | case Stmt::ReturnStmtClass: { |
| 5934 | const Expr *RetExpr = cast<ReturnStmt>(Val: S)->getRetValue(); |
| 5935 | FullExpressionRAII Scope(Info); |
| 5936 | if (RetExpr && RetExpr->isValueDependent()) { |
| 5937 | EvaluateDependentExpr(E: RetExpr, Info); |
| 5938 | // We know we returned, but we don't know what the value is. |
| 5939 | return ESR_Failed; |
| 5940 | } |
| 5941 | if (RetExpr && |
| 5942 | !(Result.Slot |
| 5943 | ? EvaluateInPlace(Result&: Result.Value, Info, This: *Result.Slot, E: RetExpr) |
| 5944 | : Evaluate(Result&: Result.Value, Info, E: RetExpr))) |
| 5945 | return ESR_Failed; |
| 5946 | return Scope.destroy() ? ESR_Returned : ESR_Failed; |
| 5947 | } |
| 5948 | |
| 5949 | case Stmt::CompoundStmtClass: { |
| 5950 | BlockScopeRAII Scope(Info); |
| 5951 | |
| 5952 | const CompoundStmt *CS = cast<CompoundStmt>(Val: S); |
| 5953 | for (const auto *BI : CS->body()) { |
| 5954 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: BI, Case); |
| 5955 | if (ESR == ESR_Succeeded) |
| 5956 | Case = nullptr; |
| 5957 | else if (ESR != ESR_CaseNotFound) { |
| 5958 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 5959 | return ESR_Failed; |
| 5960 | return ESR; |
| 5961 | } |
| 5962 | } |
| 5963 | if (Case) |
| 5964 | return ESR_CaseNotFound; |
| 5965 | return Scope.destroy() ? ESR_Succeeded : ESR_Failed; |
| 5966 | } |
| 5967 | |
| 5968 | case Stmt::IfStmtClass: { |
| 5969 | const IfStmt *IS = cast<IfStmt>(Val: S); |
| 5970 | |
| 5971 | // Evaluate the condition, as either a var decl or as an expression. |
| 5972 | BlockScopeRAII Scope(Info); |
| 5973 | if (const Stmt *Init = IS->getInit()) { |
| 5974 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: Init); |
| 5975 | if (ESR != ESR_Succeeded) { |
| 5976 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 5977 | return ESR_Failed; |
| 5978 | return ESR; |
| 5979 | } |
| 5980 | } |
| 5981 | bool Cond; |
| 5982 | if (IS->isConsteval()) { |
| 5983 | Cond = IS->isNonNegatedConsteval(); |
// If we are not in a constant context, 'if consteval' should not evaluate
// to true.
| 5986 | if (!Info.InConstantContext) |
| 5987 | Cond = !Cond; |
| 5988 | } else if (!EvaluateCond(Info, CondDecl: IS->getConditionVariable(), Cond: IS->getCond(), |
| 5989 | Result&: Cond)) |
| 5990 | return ESR_Failed; |
| 5991 | |
| 5992 | if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) { |
| 5993 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: SubStmt); |
| 5994 | if (ESR != ESR_Succeeded) { |
| 5995 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 5996 | return ESR_Failed; |
| 5997 | return ESR; |
| 5998 | } |
| 5999 | } |
| 6000 | return Scope.destroy() ? ESR_Succeeded : ESR_Failed; |
| 6001 | } |
| 6002 | |
| 6003 | case Stmt::WhileStmtClass: { |
| 6004 | const WhileStmt *WS = cast<WhileStmt>(Val: S); |
| 6005 | while (true) { |
| 6006 | BlockScopeRAII Scope(Info); |
| 6007 | bool Continue; |
| 6008 | if (!EvaluateCond(Info, CondDecl: WS->getConditionVariable(), Cond: WS->getCond(), |
| 6009 | Result&: Continue)) |
| 6010 | return ESR_Failed; |
| 6011 | if (!Continue) |
| 6012 | break; |
| 6013 | |
| 6014 | EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: WS->getBody()); |
| 6015 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: WS, Scopes: &Scope, ESR)) |
| 6016 | return ESR; |
| 6017 | |
| 6018 | if (ESR != ESR_Continue) { |
| 6019 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 6020 | return ESR_Failed; |
| 6021 | return ESR; |
| 6022 | } |
| 6023 | if (!Scope.destroy()) |
| 6024 | return ESR_Failed; |
| 6025 | } |
| 6026 | return ESR_Succeeded; |
| 6027 | } |
| 6028 | |
| 6029 | case Stmt::DoStmtClass: { |
| 6030 | const DoStmt *DS = cast<DoStmt>(Val: S); |
| 6031 | bool Continue; |
| 6032 | do { |
| 6033 | EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: DS->getBody(), Case); |
| 6034 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: DS, /*Scopes=*/{}, ESR)) |
| 6035 | return ESR; |
| 6036 | if (ESR != ESR_Continue) |
| 6037 | return ESR; |
| 6038 | Case = nullptr; |
| 6039 | |
| 6040 | if (DS->getCond()->isValueDependent()) { |
| 6041 | EvaluateDependentExpr(E: DS->getCond(), Info); |
// Bail out, as we don't know whether to keep going or terminate the loop.
| 6043 | return ESR_Failed; |
| 6044 | } |
| 6045 | FullExpressionRAII CondScope(Info); |
| 6046 | if (!EvaluateAsBooleanCondition(E: DS->getCond(), Result&: Continue, Info) || |
| 6047 | !CondScope.destroy()) |
| 6048 | return ESR_Failed; |
| 6049 | } while (Continue); |
| 6050 | return ESR_Succeeded; |
| 6051 | } |
| 6052 | |
| 6053 | case Stmt::ForStmtClass: { |
| 6054 | const ForStmt *FS = cast<ForStmt>(Val: S); |
| 6055 | BlockScopeRAII ForScope(Info); |
| 6056 | if (FS->getInit()) { |
| 6057 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getInit()); |
| 6058 | if (ESR != ESR_Succeeded) { |
| 6059 | if (ESR != ESR_Failed && !ForScope.destroy()) |
| 6060 | return ESR_Failed; |
| 6061 | return ESR; |
| 6062 | } |
| 6063 | } |
| 6064 | while (true) { |
| 6065 | BlockScopeRAII IterScope(Info); |
| 6066 | bool Continue = true; |
| 6067 | if (FS->getCond() && !EvaluateCond(Info, CondDecl: FS->getConditionVariable(), |
| 6068 | Cond: FS->getCond(), Result&: Continue)) |
| 6069 | return ESR_Failed; |
| 6070 | |
| 6071 | if (!Continue) { |
| 6072 | if (!IterScope.destroy()) |
| 6073 | return ESR_Failed; |
| 6074 | break; |
| 6075 | } |
| 6076 | |
| 6077 | EvalStmtResult ESR = EvaluateLoopBody(Result, Info, Body: FS->getBody()); |
| 6078 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, Scopes: {&IterScope, &ForScope}, ESR)) |
| 6079 | return ESR; |
| 6080 | if (ESR != ESR_Continue) { |
| 6081 | if (ESR != ESR_Failed && (!IterScope.destroy() || !ForScope.destroy())) |
| 6082 | return ESR_Failed; |
| 6083 | return ESR; |
| 6084 | } |
| 6085 | |
| 6086 | if (const auto *Inc = FS->getInc()) { |
| 6087 | if (Inc->isValueDependent()) { |
| 6088 | if (!EvaluateDependentExpr(E: Inc, Info)) |
| 6089 | return ESR_Failed; |
| 6090 | } else { |
| 6091 | FullExpressionRAII IncScope(Info); |
| 6092 | if (!EvaluateIgnoredValue(Info, E: Inc) || !IncScope.destroy()) |
| 6093 | return ESR_Failed; |
| 6094 | } |
| 6095 | } |
| 6096 | |
| 6097 | if (!IterScope.destroy()) |
| 6098 | return ESR_Failed; |
| 6099 | } |
| 6100 | return ForScope.destroy() ? ESR_Succeeded : ESR_Failed; |
| 6101 | } |
| 6102 | |
| 6103 | case Stmt::CXXForRangeStmtClass: { |
| 6104 | const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(Val: S); |
| 6105 | BlockScopeRAII Scope(Info); |
| 6106 | |
| 6107 | // Evaluate the init-statement if present. |
| 6108 | if (FS->getInit()) { |
| 6109 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getInit()); |
| 6110 | if (ESR != ESR_Succeeded) { |
| 6111 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 6112 | return ESR_Failed; |
| 6113 | return ESR; |
| 6114 | } |
| 6115 | } |
| 6116 | |
| 6117 | // Initialize the __range variable. |
| 6118 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: FS->getRangeStmt()); |
| 6119 | if (ESR != ESR_Succeeded) { |
| 6120 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 6121 | return ESR_Failed; |
| 6122 | return ESR; |
| 6123 | } |
| 6124 | |
| 6125 | // In error-recovery cases it's possible to get here even if we failed to |
| 6126 | // synthesize the __begin and __end variables. |
| 6127 | if (!FS->getBeginStmt() || !FS->getEndStmt() || !FS->getCond()) |
| 6128 | return ESR_Failed; |
| 6129 | |
| 6130 | // Create the __begin and __end iterators. |
| 6131 | ESR = EvaluateStmt(Result, Info, S: FS->getBeginStmt()); |
| 6132 | if (ESR != ESR_Succeeded) { |
| 6133 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 6134 | return ESR_Failed; |
| 6135 | return ESR; |
| 6136 | } |
| 6137 | ESR = EvaluateStmt(Result, Info, S: FS->getEndStmt()); |
| 6138 | if (ESR != ESR_Succeeded) { |
| 6139 | if (ESR != ESR_Failed && !Scope.destroy()) |
| 6140 | return ESR_Failed; |
| 6141 | return ESR; |
| 6142 | } |
| 6143 | |
| 6144 | while (true) { |
| 6145 | // Condition: __begin != __end. |
| 6146 | { |
| 6147 | if (FS->getCond()->isValueDependent()) { |
| 6148 | EvaluateDependentExpr(E: FS->getCond(), Info); |
| 6149 | // We don't know whether to keep going or terminate the loop. |
| 6150 | return ESR_Failed; |
| 6151 | } |
| 6152 | bool Continue = true; |
| 6153 | FullExpressionRAII CondExpr(Info); |
| 6154 | if (!EvaluateAsBooleanCondition(E: FS->getCond(), Result&: Continue, Info)) |
| 6155 | return ESR_Failed; |
| 6156 | if (!Continue) |
| 6157 | break; |
| 6158 | } |
| 6159 | |
| 6160 | // User's variable declaration, initialized by *__begin. |
| 6161 | BlockScopeRAII InnerScope(Info); |
| 6162 | ESR = EvaluateStmt(Result, Info, S: FS->getLoopVarStmt()); |
| 6163 | if (ESR != ESR_Succeeded) { |
| 6164 | if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy())) |
| 6165 | return ESR_Failed; |
| 6166 | return ESR; |
| 6167 | } |
| 6168 | |
| 6169 | // Loop body. |
| 6170 | ESR = EvaluateLoopBody(Result, Info, Body: FS->getBody()); |
| 6171 | if (ShouldPropagateBreakContinue(Info, LoopOrSwitch: FS, Scopes: {&InnerScope, &Scope}, ESR)) |
| 6172 | return ESR; |
| 6173 | if (ESR != ESR_Continue) { |
| 6174 | if (ESR != ESR_Failed && (!InnerScope.destroy() || !Scope.destroy())) |
| 6175 | return ESR_Failed; |
| 6176 | return ESR; |
| 6177 | } |
| 6178 | if (FS->getInc()->isValueDependent()) { |
| 6179 | if (!EvaluateDependentExpr(E: FS->getInc(), Info)) |
| 6180 | return ESR_Failed; |
| 6181 | } else { |
| 6182 | // Increment: ++__begin |
| 6183 | if (!EvaluateIgnoredValue(Info, E: FS->getInc())) |
| 6184 | return ESR_Failed; |
| 6185 | } |
| 6186 | |
| 6187 | if (!InnerScope.destroy()) |
| 6188 | return ESR_Failed; |
| 6189 | } |
| 6190 | |
| 6191 | return Scope.destroy() ? ESR_Succeeded : ESR_Failed; |
| 6192 | } |
| 6193 | |
| 6194 | case Stmt::SwitchStmtClass: |
| 6195 | return EvaluateSwitch(Result, Info, SS: cast<SwitchStmt>(Val: S)); |
| 6196 | |
| 6197 | case Stmt::ContinueStmtClass: |
| 6198 | case Stmt::BreakStmtClass: { |
| 6199 | auto *B = cast<LoopControlStmt>(Val: S); |
| 6200 | Info.BreakContinueStack.push_back(Elt: B->getNamedLoopOrSwitch()); |
| 6201 | return isa<ContinueStmt>(Val: S) ? ESR_Continue : ESR_Break; |
| 6202 | } |
| 6203 | |
| 6204 | case Stmt::LabelStmtClass: |
| 6205 | return EvaluateStmt(Result, Info, S: cast<LabelStmt>(Val: S)->getSubStmt(), Case); |
| 6206 | |
| 6207 | case Stmt::AttributedStmtClass: { |
| 6208 | const auto *AS = cast<AttributedStmt>(Val: S); |
| 6209 | const auto *SS = AS->getSubStmt(); |
| 6210 | MSConstexprContextRAII ConstexprContext( |
| 6211 | *Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(container: AS->getAttrs()) && |
| 6212 | isa<ReturnStmt>(Val: SS)); |
| 6213 | |
| 6214 | auto LO = Info.Ctx.getLangOpts(); |
| 6215 | if (LO.CXXAssumptions && !LO.MSVCCompat) { |
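// Evaluate each [[assume]] attribute: a side-effect-free assumption that
// evaluates to false makes the behavior undefined, so diagnose and fail.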
| 6216 | for (auto *Attr : AS->getAttrs()) { |
| 6217 | auto *AA = dyn_cast<CXXAssumeAttr>(Val: Attr); |
| 6218 | if (!AA) |
| 6219 | continue; |
| 6220 | |
| 6221 | auto *Assumption = AA->getAssumption(); |
| 6222 | if (Assumption->isValueDependent()) |
| 6223 | return ESR_Failed; |
| 6224 | |
| 6225 | if (Assumption->HasSideEffects(Ctx: Info.Ctx)) |
| 6226 | continue; |
| 6227 | |
| 6228 | bool Value; |
| 6229 | if (!EvaluateAsBooleanCondition(E: Assumption, Result&: Value, Info)) |
| 6230 | return ESR_Failed; |
| 6231 | if (!Value) { |
| 6232 | Info.CCEDiag(Loc: Assumption->getExprLoc(), |
| 6233 | DiagId: diag::note_constexpr_assumption_failed); |
| 6234 | return ESR_Failed; |
| 6235 | } |
| 6236 | } |
| 6237 | } |
| 6238 | |
| 6239 | return EvaluateStmt(Result, Info, S: SS, Case); |
| 6240 | } |
| 6241 | |
| 6242 | case Stmt::CaseStmtClass: |
| 6243 | case Stmt::DefaultStmtClass: |
| 6244 | return EvaluateStmt(Result, Info, S: cast<SwitchCase>(Val: S)->getSubStmt(), Case); |
| 6245 | case Stmt::CXXTryStmtClass: |
| 6246 | // Evaluate try blocks by evaluating all sub statements. |
| 6247 | return EvaluateStmt(Result, Info, S: cast<CXXTryStmt>(Val: S)->getTryBlock(), Case); |
| 6248 | } |
| 6249 | } |
| 6250 | |
| 6251 | /// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial |
| 6252 | /// default constructor. If so, we'll fold it whether or not it's marked as |
| 6253 | /// constexpr. If it is marked as constexpr, we will never implicitly define it, |
| 6254 | /// so we need special handling. |
| 6255 | static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc, |
| 6256 | const CXXConstructorDecl *CD, |
| 6257 | bool IsValueInitialization) { |
| 6258 | if (!CD->isTrivial() || !CD->isDefaultConstructor()) |
| 6259 | return false; |
| 6260 | |
| 6261 | // Value-initialization does not call a trivial default constructor, so such a |
| 6262 | // call is a core constant expression whether or not the constructor is |
| 6263 | // constexpr. |
| 6264 | if (!CD->isConstexpr() && !IsValueInitialization) { |
| 6265 | if (Info.getLangOpts().CPlusPlus11) { |
| 6266 | // FIXME: If DiagDecl is an implicitly-declared special member function, |
| 6267 | // we should be much more explicit about why it's not constexpr. |
| 6268 | Info.CCEDiag(Loc, DiagId: diag::note_constexpr_invalid_function, ExtraNotes: 1) |
| 6269 | << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD; |
| 6270 | Info.Note(Loc: CD->getLocation(), DiagId: diag::note_declared_at); |
| 6271 | } else { |
| 6272 | Info.CCEDiag(Loc, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 6273 | } |
| 6274 | } |
| 6275 | return true; |
| 6276 | } |
| 6277 | |
| 6278 | /// CheckConstexprFunction - Check that a function can be called in a constant |
| 6279 | /// expression. |
| 6280 | static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc, |
| 6281 | const FunctionDecl *Declaration, |
| 6282 | const FunctionDecl *Definition, |
| 6283 | const Stmt *Body) { |
| 6284 | // Potential constant expressions can contain calls to declared, but not yet |
| 6285 | // defined, constexpr functions. |
| 6286 | if (Info.checkingPotentialConstantExpression() && !Definition && |
| 6287 | Declaration->isConstexpr()) |
| 6288 | return false; |
| 6289 | |
| 6290 | // Bail out if the function declaration itself is invalid. We will |
| 6291 | // have produced a relevant diagnostic while parsing it, so just |
| 6292 | // note the problematic sub-expression. |
| 6293 | if (Declaration->isInvalidDecl()) { |
| 6294 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 6295 | return false; |
| 6296 | } |
| 6297 | |
| 6298 | // DR1872: An instantiated virtual constexpr function can't be called in a |
| 6299 | // constant expression (prior to C++20). We can still constant-fold such a |
| 6300 | // call. |
| 6301 | if (!Info.Ctx.getLangOpts().CPlusPlus20 && isa<CXXMethodDecl>(Val: Declaration) && |
| 6302 | cast<CXXMethodDecl>(Val: Declaration)->isVirtual()) |
| 6303 | Info.CCEDiag(Loc: CallLoc, DiagId: diag::note_constexpr_virtual_call); |
| 6304 | |
| 6305 | if (Definition && Definition->isInvalidDecl()) { |
| 6306 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 6307 | return false; |
| 6308 | } |
| 6309 | |
| 6310 | // Can we evaluate this function call? |
| 6311 | if (Definition && Body && |
| 6312 | (Definition->isConstexpr() || (Info.CurrentCall->CanEvalMSConstexpr && |
| 6313 | Definition->hasAttr<MSConstexprAttr>()))) |
| 6314 | return true; |
| 6315 | |
| 6316 | const FunctionDecl *DiagDecl = Definition ? Definition : Declaration; |
| 6317 | // Special note for the assert() macro, as the normal error message falsely |
| 6318 | // implies we cannot use an assertion during constant evaluation. |
| 6319 | if (CallLoc.isMacroID() && DiagDecl->getIdentifier()) { |
| 6320 | // FIXME: Instead of checking for an implementation-defined function, |
| 6321 | // check and evaluate the assert() macro. |
| 6322 | StringRef Name = DiagDecl->getName(); |
| 6323 | bool AssertFailed = |
| 6324 | Name == "__assert_rtn" || Name == "__assert_fail" || Name == "_wassert" ; |
| 6325 | if (AssertFailed) { |
| 6326 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_assert_failed); |
| 6327 | return false; |
| 6328 | } |
| 6329 | } |
| 6330 | |
| 6331 | if (Info.getLangOpts().CPlusPlus11) { |
| 6332 | // If this function is not constexpr because it is an inherited |
| 6333 | // non-constexpr constructor, diagnose that directly. |
| 6334 | auto *CD = dyn_cast<CXXConstructorDecl>(Val: DiagDecl); |
| 6335 | if (CD && CD->isInheritingConstructor()) { |
| 6336 | auto *Inherited = CD->getInheritedConstructor().getConstructor(); |
| 6337 | if (!Inherited->isConstexpr()) |
| 6338 | DiagDecl = CD = Inherited; |
| 6339 | } |
| 6340 | |
| 6341 | // FIXME: If DiagDecl is an implicitly-declared special member function |
| 6342 | // or an inheriting constructor, we should be much more explicit about why |
| 6343 | // it's not constexpr. |
| 6344 | if (CD && CD->isInheritingConstructor()) |
| 6345 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_invalid_inhctor, ExtraNotes: 1) |
| 6346 | << CD->getInheritedConstructor().getConstructor()->getParent(); |
| 6347 | else |
| 6348 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_invalid_function, ExtraNotes: 1) |
| 6349 | << DiagDecl->isConstexpr() << (bool)CD << DiagDecl; |
| 6350 | Info.Note(Loc: DiagDecl->getLocation(), DiagId: diag::note_declared_at); |
| 6351 | } else { |
| 6352 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 6353 | } |
| 6354 | return false; |
| 6355 | } |
| 6356 | |
| 6357 | namespace { |
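/// Subobject handler that accepts whatever subobject it finds; it exists only
/// so that findSubobject performs its usual access checks when we want to
/// inspect the notional vptr of an object.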
| 6358 | struct CheckDynamicTypeHandler { |
| 6359 | AccessKinds AccessKind; |
| 6360 | typedef bool result_type; |
| 6361 | bool failed() { return false; } |
| 6362 | bool found(APValue &Subobj, QualType SubobjType) { return true; } |
| 6363 | bool found(APSInt &Value, QualType SubobjType) { return true; } |
| 6364 | bool found(APFloat &Value, QualType SubobjType) { return true; } |
| 6365 | }; |
| 6366 | } // end anonymous namespace |
| 6367 | |
| 6368 | /// Check that we can access the notional vptr of an object / determine its |
| 6369 | /// dynamic type. |
| 6370 | static bool checkDynamicType(EvalInfo &Info, const Expr *E, const LValue &This, |
| 6371 | AccessKinds AK, bool Polymorphic) { |
| 6372 | if (This.Designator.Invalid) |
| 6373 | return false; |
| 6374 | |
| 6375 | CompleteObject Obj = findCompleteObject(Info, E, AK, LVal: This, LValType: QualType()); |
| 6376 | |
| 6377 | if (!Obj) |
| 6378 | return false; |
| 6379 | |
| 6380 | if (!Obj.Value) { |
| 6381 | // The object is not usable in constant expressions, so we can't inspect |
| 6382 | // its value to see if it's in-lifetime or what the active union members |
| 6383 | // are. We can still check for a one-past-the-end lvalue. |
| 6384 | if (This.Designator.isOnePastTheEnd() || |
| 6385 | This.Designator.isMostDerivedAnUnsizedArray()) { |
| 6386 | Info.FFDiag(E, DiagId: This.Designator.isOnePastTheEnd() |
| 6387 | ? diag::note_constexpr_access_past_end |
| 6388 | : diag::note_constexpr_access_unsized_array) |
| 6389 | << AK; |
| 6390 | return false; |
| 6391 | } else if (Polymorphic) { |
| 6392 | // Conservatively refuse to perform a polymorphic operation if we would |
| 6393 | // not be able to read a notional 'vptr' value. |
| 6394 | if (!Info.checkingPotentialConstantExpression() || |
| 6395 | !This.AllowConstexprUnknown) { |
| 6396 | APValue Val; |
| 6397 | This.moveInto(V&: Val); |
| 6398 | QualType StarThisType = |
| 6399 | Info.Ctx.getLValueReferenceType(T: This.Designator.getType(Ctx&: Info.Ctx)); |
| 6400 | Info.FFDiag(E, DiagId: diag::note_constexpr_polymorphic_unknown_dynamic_type) |
| 6401 | << AK << Val.getAsString(Ctx: Info.Ctx, Ty: StarThisType); |
| 6402 | } |
| 6403 | return false; |
| 6404 | } |
| 6405 | return true; |
| 6406 | } |
| 6407 | |
| 6408 | CheckDynamicTypeHandler Handler{.AccessKind: AK}; |
| 6409 | return Obj && findSubobject(Info, E, Obj, Sub: This.Designator, handler&: Handler); |
| 6410 | } |
| 6411 | |
| 6412 | /// Check that the pointee of the 'this' pointer in a member function call is |
| 6413 | /// either within its lifetime or in its period of construction or destruction. |
| 6414 | static bool |
| 6415 | checkNonVirtualMemberCallThisPointer(EvalInfo &Info, const Expr *E, |
| 6416 | const LValue &This, |
| 6417 | const CXXMethodDecl *NamedMember) { |
| 6418 | return checkDynamicType( |
| 6419 | Info, E, This, |
| 6420 | AK: isa<CXXDestructorDecl>(Val: NamedMember) ? AK_Destroy : AK_MemberCall, Polymorphic: false); |
| 6421 | } |
| 6422 | |
| 6423 | struct DynamicType { |
| 6424 | /// The dynamic class type of the object. |
| 6425 | const CXXRecordDecl *Type; |
| 6426 | /// The corresponding path length in the lvalue. |
| 6427 | unsigned PathLength; |
| 6428 | }; |
| 6429 | |
| 6430 | static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator, |
| 6431 | unsigned PathLength) { |
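// A PathLength equal to the most-derived path length designates the
// most-derived class itself; otherwise the preceding designator entry names
// the base class.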
| 6432 | assert(PathLength >= Designator.MostDerivedPathLength && PathLength <= |
Designator.Entries.size() && "invalid path length");
| 6434 | return (PathLength == Designator.MostDerivedPathLength) |
| 6435 | ? Designator.MostDerivedType->getAsCXXRecordDecl() |
| 6436 | : getAsBaseClass(E: Designator.Entries[PathLength - 1]); |
| 6437 | } |
| 6438 | |
| 6439 | /// Determine the dynamic type of an object. |
| 6440 | static std::optional<DynamicType> ComputeDynamicType(EvalInfo &Info, |
| 6441 | const Expr *E, |
| 6442 | LValue &This, |
| 6443 | AccessKinds AK) { |
| 6444 | // If we don't have an lvalue denoting an object of class type, there is no |
| 6445 | // meaningful dynamic type. (We consider objects of non-class type to have no |
| 6446 | // dynamic type.) |
| 6447 | if (!checkDynamicType(Info, E, This, AK, |
| 6448 | Polymorphic: AK != AK_TypeId || This.AllowConstexprUnknown)) |
| 6449 | return std::nullopt; |
| 6450 | |
| 6451 | if (This.Designator.Invalid) |
| 6452 | return std::nullopt; |
| 6453 | |
| 6454 | // Refuse to compute a dynamic type in the presence of virtual bases. This |
| 6455 | // shouldn't happen other than in constant-folding situations, since literal |
| 6456 | // types can't have virtual bases. |
| 6457 | // |
| 6458 | // Note that consumers of DynamicType assume that the type has no virtual |
| 6459 | // bases, and will need modifications if this restriction is relaxed. |
| 6460 | const CXXRecordDecl *Class = |
| 6461 | This.Designator.MostDerivedType->getAsCXXRecordDecl(); |
| 6462 | if (!Class || Class->getNumVBases()) { |
| 6463 | Info.FFDiag(E); |
| 6464 | return std::nullopt; |
| 6465 | } |
| 6466 | |
| 6467 | // FIXME: For very deep class hierarchies, it might be beneficial to use a |
| 6468 | // binary search here instead. But the overwhelmingly common case is that |
| 6469 | // we're not in the middle of a constructor, so it probably doesn't matter |
| 6470 | // in practice. |
| 6471 | ArrayRef<APValue::LValuePathEntry> Path = This.Designator.Entries; |
| 6472 | for (unsigned PathLength = This.Designator.MostDerivedPathLength; |
| 6473 | PathLength <= Path.size(); ++PathLength) { |
| 6474 | switch (Info.isEvaluatingCtorDtor(Base: This.getLValueBase(), |
| 6475 | Path: Path.slice(N: 0, M: PathLength))) { |
| 6476 | case ConstructionPhase::Bases: |
| 6477 | case ConstructionPhase::DestroyingBases: |
| 6478 | // We're constructing or destroying a base class. This is not the dynamic |
| 6479 | // type. |
| 6480 | break; |
| 6481 | |
| 6482 | case ConstructionPhase::None: |
| 6483 | case ConstructionPhase::AfterBases: |
| 6484 | case ConstructionPhase::AfterFields: |
| 6485 | case ConstructionPhase::Destroying: |
| 6486 | // We've finished constructing the base classes and not yet started |
| 6487 | // destroying them again, so this is the dynamic type. |
| 6488 | return DynamicType{.Type: getBaseClassType(Designator&: This.Designator, PathLength), |
| 6489 | .PathLength: PathLength}; |
| 6490 | } |
| 6491 | } |
| 6492 | |
| 6493 | // CWG issue 1517: we're constructing a base class of the object described by |
| 6494 | // 'This', so that object has not yet begun its period of construction and |
| 6495 | // any polymorphic operation on it results in undefined behavior. |
| 6496 | Info.FFDiag(E); |
| 6497 | return std::nullopt; |
| 6498 | } |
| 6499 | |
| 6500 | /// Perform virtual dispatch. |
| 6501 | static const CXXMethodDecl *HandleVirtualDispatch( |
| 6502 | EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found, |
| 6503 | llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) { |
| 6504 | std::optional<DynamicType> DynType = ComputeDynamicType( |
| 6505 | Info, E, This, |
| 6506 | AK: isa<CXXDestructorDecl>(Val: Found) ? AK_Destroy : AK_MemberCall); |
| 6507 | if (!DynType) |
| 6508 | return nullptr; |
| 6509 | |
| 6510 | // Find the final overrider. It must be declared in one of the classes on the |
| 6511 | // path from the dynamic type to the static type. |
| 6512 | // FIXME: If we ever allow literal types to have virtual base classes, that |
| 6513 | // won't be true. |
| 6514 | const CXXMethodDecl *Callee = Found; |
| 6515 | unsigned PathLength = DynType->PathLength; |
| 6516 | for (/**/; PathLength <= This.Designator.Entries.size(); ++PathLength) { |
| 6517 | const CXXRecordDecl *Class = getBaseClassType(Designator&: This.Designator, PathLength); |
| 6518 | const CXXMethodDecl *Overrider = |
| 6519 | Found->getCorrespondingMethodDeclaredInClass(RD: Class, MayBeBase: false); |
| 6520 | if (Overrider) { |
| 6521 | Callee = Overrider; |
| 6522 | break; |
| 6523 | } |
| 6524 | } |
| 6525 | |
| 6526 | // C++2a [class.abstract]p6: |
| 6527 | // the effect of making a virtual call to a pure virtual function [...] is |
| 6528 | // undefined |
| 6529 | if (Callee->isPureVirtual()) { |
| 6530 | Info.FFDiag(E, DiagId: diag::note_constexpr_pure_virtual_call, ExtraNotes: 1) << Callee; |
| 6531 | Info.Note(Loc: Callee->getLocation(), DiagId: diag::note_declared_at); |
| 6532 | return nullptr; |
| 6533 | } |
| 6534 | |
| 6535 | // If necessary, walk the rest of the path to determine the sequence of |
| 6536 | // covariant adjustment steps to apply. |
| 6537 | if (!Info.Ctx.hasSameUnqualifiedType(T1: Callee->getReturnType(), |
| 6538 | T2: Found->getReturnType())) { |
| 6539 | CovariantAdjustmentPath.push_back(Elt: Callee->getReturnType()); |
| 6540 | for (unsigned CovariantPathLength = PathLength + 1; |
| 6541 | CovariantPathLength != This.Designator.Entries.size(); |
| 6542 | ++CovariantPathLength) { |
| 6543 | const CXXRecordDecl *NextClass = |
| 6544 | getBaseClassType(Designator&: This.Designator, PathLength: CovariantPathLength); |
| 6545 | const CXXMethodDecl *Next = |
| 6546 | Found->getCorrespondingMethodDeclaredInClass(RD: NextClass, MayBeBase: false); |
| 6547 | if (Next && !Info.Ctx.hasSameUnqualifiedType( |
| 6548 | T1: Next->getReturnType(), T2: CovariantAdjustmentPath.back())) |
| 6549 | CovariantAdjustmentPath.push_back(Elt: Next->getReturnType()); |
| 6550 | } |
| 6551 | if (!Info.Ctx.hasSameUnqualifiedType(T1: Found->getReturnType(), |
| 6552 | T2: CovariantAdjustmentPath.back())) |
| 6553 | CovariantAdjustmentPath.push_back(Elt: Found->getReturnType()); |
| 6554 | } |
| 6555 | |
| 6556 | // Perform 'this' adjustment. |
| 6557 | if (!CastToDerivedClass(Info, E, Result&: This, TruncatedType: Callee->getParent(), TruncatedElements: PathLength)) |
| 6558 | return nullptr; |
| 6559 | |
| 6560 | return Callee; |
| 6561 | } |
| 6562 | |
| 6563 | /// Perform the adjustment from a value returned by a virtual function to |
| 6564 | /// a value of the statically expected type, which may be a pointer or |
| 6565 | /// reference to a base class of the returned type. |
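/// For example (illustrative):
///   struct B { virtual B *clone() const; };
///   struct D : B { D *clone() const override; };
/// A virtual call to clone() through a B* whose dynamic type is D invokes
/// D::clone, and the D* it returns is then adjusted to the statically
/// expected B*.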
| 6566 | static bool HandleCovariantReturnAdjustment(EvalInfo &Info, const Expr *E, |
| 6567 | APValue &Result, |
| 6568 | ArrayRef<QualType> Path) { |
| 6569 | assert(Result.isLValue() && |
| 6570 | "unexpected kind of APValue for covariant return" ); |
| 6571 | if (Result.isNullPointer()) |
| 6572 | return true; |
| 6573 | |
| 6574 | LValue LVal; |
| 6575 | LVal.setFrom(Ctx: Info.Ctx, V: Result); |
| 6576 | |
| 6577 | const CXXRecordDecl *OldClass = Path[0]->getPointeeCXXRecordDecl(); |
| 6578 | for (unsigned I = 1; I != Path.size(); ++I) { |
| 6579 | const CXXRecordDecl *NewClass = Path[I]->getPointeeCXXRecordDecl(); |
| 6580 | assert(OldClass && NewClass && "unexpected kind of covariant return" ); |
| 6581 | if (OldClass != NewClass && |
| 6582 | !CastToBaseClass(Info, E, Result&: LVal, DerivedRD: OldClass, BaseRD: NewClass)) |
| 6583 | return false; |
| 6584 | OldClass = NewClass; |
| 6585 | } |
| 6586 | |
| 6587 | LVal.moveInto(V&: Result); |
| 6588 | return true; |
| 6589 | } |
| 6590 | |
| 6591 | /// Determine whether \p Base, which is known to be a direct base class of |
| 6592 | /// \p Derived, is a public base class. |
| 6593 | static bool isBaseClassPublic(const CXXRecordDecl *Derived, |
| 6594 | const CXXRecordDecl *Base) { |
| 6595 | for (const CXXBaseSpecifier &BaseSpec : Derived->bases()) { |
| 6596 | auto *BaseClass = BaseSpec.getType()->getAsCXXRecordDecl(); |
| 6597 | if (BaseClass && declaresSameEntity(D1: BaseClass, D2: Base)) |
| 6598 | return BaseSpec.getAccessSpecifier() == AS_public; |
| 6599 | } |
| 6600 | llvm_unreachable("Base is not a direct base of Derived" ); |
| 6601 | } |
| 6602 | |
| 6603 | /// Apply the given dynamic cast operation on the provided lvalue. |
| 6604 | /// |
| 6605 | /// This implements the hard case of dynamic_cast, requiring a "runtime check" |
| 6606 | /// to find a suitable target subobject. |
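/// For example (illustrative, C++20):
///   struct B { virtual ~B() = default; };
///   struct D : B {};
///   constexpr B b;
///   constexpr const D *p = dynamic_cast<const D *>(&b); // yields nullptr
/// A failed dynamic_cast to a reference type instead makes the evaluation
/// non-constant, mirroring the std::bad_cast thrown at runtime.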
| 6607 | static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E, |
| 6608 | LValue &Ptr) { |
| 6609 | // We can't do anything with a non-symbolic pointer value. |
| 6610 | SubobjectDesignator &D = Ptr.Designator; |
| 6611 | if (D.Invalid) |
| 6612 | return false; |
| 6613 | |
| 6614 | // C++ [expr.dynamic.cast]p6: |
| 6615 | // If v is a null pointer value, the result is a null pointer value. |
| 6616 | if (Ptr.isNullPointer() && !E->isGLValue()) |
| 6617 | return true; |
| 6618 | |
| 6619 | // For all the other cases, we need the pointer to point to an object within |
| 6620 | // its lifetime / period of construction / destruction, and we need to know |
| 6621 | // its dynamic type. |
| 6622 | std::optional<DynamicType> DynType = |
| 6623 | ComputeDynamicType(Info, E, This&: Ptr, AK: AK_DynamicCast); |
| 6624 | if (!DynType) |
| 6625 | return false; |
| 6626 | |
| 6627 | // C++ [expr.dynamic.cast]p7: |
| 6628 | // If T is "pointer to cv void", then the result is a pointer to the most |
| 6629 | // derived object |
| 6630 | if (E->getType()->isVoidPointerType()) |
| 6631 | return CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: DynType->Type, TruncatedElements: DynType->PathLength); |
| 6632 | |
| 6633 | const CXXRecordDecl *C = E->getTypeAsWritten()->getPointeeCXXRecordDecl(); |
| 6634 | assert(C && "dynamic_cast target is not void pointer nor class" ); |
| 6635 | CanQualType CQT = Info.Ctx.getCanonicalTagType(TD: C); |
| 6636 | |
| 6637 | auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) { |
| 6638 | // C++ [expr.dynamic.cast]p9: |
| 6639 | if (!E->isGLValue()) { |
| 6640 | // The value of a failed cast to pointer type is the null pointer value |
| 6641 | // of the required result type. |
| 6642 | Ptr.setNull(Ctx&: Info.Ctx, PointerTy: E->getType()); |
| 6643 | return true; |
| 6644 | } |
| 6645 | |
| 6646 | // A failed cast to reference type throws [...] std::bad_cast. |
| 6647 | unsigned DiagKind; |
| 6648 | if (!Paths && (declaresSameEntity(D1: DynType->Type, D2: C) || |
| 6649 | DynType->Type->isDerivedFrom(Base: C))) |
| 6650 | DiagKind = 0; |
| 6651 | else if (!Paths || Paths->begin() == Paths->end()) |
| 6652 | DiagKind = 1; |
| 6653 | else if (Paths->isAmbiguous(BaseType: CQT)) |
| 6654 | DiagKind = 2; |
| 6655 | else { |
| 6656 | assert(Paths->front().Access != AS_public && "why did the cast fail?" ); |
| 6657 | DiagKind = 3; |
| 6658 | } |
| 6659 | Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_cast_to_reference_failed) |
| 6660 | << DiagKind << Ptr.Designator.getType(Ctx&: Info.Ctx) |
| 6661 | << Info.Ctx.getCanonicalTagType(TD: DynType->Type) |
| 6662 | << E->getType().getUnqualifiedType(); |
| 6663 | return false; |
| 6664 | }; |
| 6665 | |
| 6666 | // Runtime check, phase 1: |
| 6667 | // Walk from the base subobject towards the derived object looking for the |
| 6668 | // target type. |
| 6669 | for (int PathLength = Ptr.Designator.Entries.size(); |
| 6670 | PathLength >= (int)DynType->PathLength; --PathLength) { |
| 6671 | const CXXRecordDecl *Class = getBaseClassType(Designator&: Ptr.Designator, PathLength); |
| 6672 | if (declaresSameEntity(D1: Class, D2: C)) |
| 6673 | return CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: Class, TruncatedElements: PathLength); |
| 6674 | // We can only walk across public inheritance edges. |
| 6675 | if (PathLength > (int)DynType->PathLength && |
| 6676 | !isBaseClassPublic(Derived: getBaseClassType(Designator&: Ptr.Designator, PathLength: PathLength - 1), |
| 6677 | Base: Class)) |
| 6678 | return RuntimeCheckFailed(nullptr); |
| 6679 | } |
| 6680 | |
| 6681 | // Runtime check, phase 2: |
| 6682 | // Search the dynamic type for an unambiguous public base of type C. |
| 6683 | CXXBasePaths Paths(/*FindAmbiguities=*/true, |
| 6684 | /*RecordPaths=*/true, /*DetectVirtual=*/false); |
| 6685 | if (DynType->Type->isDerivedFrom(Base: C, Paths) && !Paths.isAmbiguous(BaseType: CQT) && |
| 6686 | Paths.front().Access == AS_public) { |
| 6687 | // Downcast to the dynamic type... |
| 6688 | if (!CastToDerivedClass(Info, E, Result&: Ptr, TruncatedType: DynType->Type, TruncatedElements: DynType->PathLength)) |
| 6689 | return false; |
| 6690 | // ... then upcast to the chosen base class subobject. |
| 6691 | for (CXXBasePathElement &Elem : Paths.front()) |
| 6692 | if (!HandleLValueBase(Info, E, Obj&: Ptr, DerivedDecl: Elem.Class, Base: Elem.Base)) |
| 6693 | return false; |
| 6694 | return true; |
| 6695 | } |
| 6696 | |
| 6697 | // Otherwise, the runtime check fails. |
| 6698 | return RuntimeCheckFailed(&Paths); |
| 6699 | } |
| 6700 | |
| 6701 | namespace { |
| 6702 | struct StartLifetimeOfUnionMemberHandler { |
| 6703 | EvalInfo &Info; |
| 6704 | const Expr *LHSExpr; |
| 6705 | const FieldDecl *Field; |
| 6706 | bool DuringInit; |
| 6707 | bool Failed = false; |
| 6708 | static const AccessKinds AccessKind = AK_Assign; |
| 6709 | |
| 6710 | typedef bool result_type; |
| 6711 | bool failed() { return Failed; } |
| 6712 | bool found(APValue &Subobj, QualType SubobjType) { |
| 6713 | // We are supposed to perform no initialization but begin the lifetime of |
| 6714 | // the object. We interpret that as meaning to do what default |
| 6715 | // initialization of the object would do if all constructors involved were |
| 6716 | // trivial: |
| 6717 | // * All base, non-variant member, and array element subobjects' lifetimes |
| 6718 | // begin |
| 6719 | // * No variant members' lifetimes begin |
| 6720 | // * All scalar subobjects whose lifetimes begin have indeterminate values |
| 6721 | assert(SubobjType->isUnionType()); |
| 6722 | if (declaresSameEntity(D1: Subobj.getUnionField(), D2: Field)) { |
| 6723 | // This union member is already active. If it's also in-lifetime, there's |
| 6724 | // nothing to do. |
| 6725 | if (Subobj.getUnionValue().hasValue()) |
| 6726 | return true; |
| 6727 | } else if (DuringInit) { |
| 6728 | // We're currently in the process of initializing a different union |
| 6729 | // member. If we carried on, that initialization would attempt to |
| 6730 | // store to an inactive union member, resulting in undefined behavior. |
| 6731 | Info.FFDiag(E: LHSExpr, |
| 6732 | DiagId: diag::note_constexpr_union_member_change_during_init); |
| 6733 | return false; |
| 6734 | } |
| 6735 | APValue Result; |
| 6736 | Failed = !handleDefaultInitValue(T: Field->getType(), Result); |
| 6737 | Subobj.setUnion(Field, Value: Result); |
| 6738 | return true; |
| 6739 | } |
| 6740 | bool found(APSInt &Value, QualType SubobjType) { |
| 6741 | llvm_unreachable("wrong value kind for union object" ); |
| 6742 | } |
| 6743 | bool found(APFloat &Value, QualType SubobjType) { |
| 6744 | llvm_unreachable("wrong value kind for union object" ); |
| 6745 | } |
| 6746 | }; |
| 6747 | } // end anonymous namespace |
| 6748 | |
| 6749 | const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind; |
| 6750 | |
| 6751 | /// Handle a builtin simple-assignment or a call to a trivial assignment |
| 6752 | /// operator whose left-hand side might involve a union member access. If it |
| 6753 | /// does, implicitly start the lifetime of any accessed union elements per |
/// C++20 [class.union]p5.
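/// For example (illustrative, C++20):
///   union U { int a; float f; };
///   constexpr float g() {
///     U u = {.a = 1};
///     u.f = 3.0f;  // implicitly ends the lifetime of 'a' and begins 'f'
///     return u.f;  // OK, yields 3.0f
///   }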
| 6755 | static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info, |
| 6756 | const Expr *LHSExpr, |
| 6757 | const LValue &LHS) { |
| 6758 | if (LHS.InvalidBase || LHS.Designator.Invalid) |
| 6759 | return false; |
| 6760 | |
| 6761 | llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths; |
| 6762 | // C++ [class.union]p5: |
| 6763 | // define the set S(E) of subexpressions of E as follows: |
| 6764 | unsigned PathLength = LHS.Designator.Entries.size(); |
| 6765 | for (const Expr *E = LHSExpr; E != nullptr;) { |
| 6766 | // -- If E is of the form A.B, S(E) contains the elements of S(A)... |
| 6767 | if (auto *ME = dyn_cast<MemberExpr>(Val: E)) { |
| 6768 | auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl()); |
| 6769 | // Note that we can't implicitly start the lifetime of a reference, |
| 6770 | // so we don't need to proceed any further if we reach one. |
| 6771 | if (!FD || FD->getType()->isReferenceType()) |
| 6772 | break; |
| 6773 | |
| 6774 | // ... and also contains A.B if B names a union member ... |
| 6775 | if (FD->getParent()->isUnion()) { |
| 6776 | // ... of a non-class, non-array type, or of a class type with a |
| 6777 | // trivial default constructor that is not deleted, or an array of |
| 6778 | // such types. |
| 6779 | auto *RD = |
| 6780 | FD->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); |
| 6781 | if (!RD || RD->hasTrivialDefaultConstructor()) |
| 6782 | UnionPathLengths.push_back(Elt: {PathLength - 1, FD}); |
| 6783 | } |
| 6784 | |
| 6785 | E = ME->getBase(); |
| 6786 | --PathLength; |
| 6787 | assert(declaresSameEntity(FD, |
| 6788 | LHS.Designator.Entries[PathLength] |
| 6789 | .getAsBaseOrMember().getPointer())); |
| 6790 | |
| 6791 | // -- If E is of the form A[B] and is interpreted as a built-in array |
| 6792 | // subscripting operator, S(E) is [S(the array operand, if any)]. |
| 6793 | } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: E)) { |
| 6794 | // Step over an ArrayToPointerDecay implicit cast. |
| 6795 | auto *Base = ASE->getBase()->IgnoreImplicit(); |
| 6796 | if (!Base->getType()->isArrayType()) |
| 6797 | break; |
| 6798 | |
| 6799 | E = Base; |
| 6800 | --PathLength; |
| 6801 | |
| 6802 | } else if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: E)) { |
| 6803 | // Step over a derived-to-base conversion. |
| 6804 | E = ICE->getSubExpr(); |
| 6805 | if (ICE->getCastKind() == CK_NoOp) |
| 6806 | continue; |
| 6807 | if (ICE->getCastKind() != CK_DerivedToBase && |
| 6808 | ICE->getCastKind() != CK_UncheckedDerivedToBase) |
| 6809 | break; |
| 6810 | // Walk path backwards as we walk up from the base to the derived class. |
| 6811 | for (const CXXBaseSpecifier *Elt : llvm::reverse(C: ICE->path())) { |
| 6812 | if (Elt->isVirtual()) { |
| 6813 | // A class with virtual base classes never has a trivial default |
| 6814 | // constructor, so S(E) is empty in this case. |
| 6815 | E = nullptr; |
| 6816 | break; |
| 6817 | } |
| 6818 | |
| 6819 | --PathLength; |
| 6820 | assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(), |
| 6821 | LHS.Designator.Entries[PathLength] |
| 6822 | .getAsBaseOrMember().getPointer())); |
| 6823 | } |
| 6824 | |
| 6825 | // -- Otherwise, S(E) is empty. |
| 6826 | } else { |
| 6827 | break; |
| 6828 | } |
| 6829 | } |
| 6830 | |
| 6831 | // Common case: no unions' lifetimes are started. |
| 6832 | if (UnionPathLengths.empty()) |
| 6833 | return true; |
| 6834 | |
| 6835 | // if modification of X [would access an inactive union member], an object |
| 6836 | // of the type of X is implicitly created |
| 6837 | CompleteObject Obj = |
| 6838 | findCompleteObject(Info, E: LHSExpr, AK: AK_Assign, LVal: LHS, LValType: LHSExpr->getType()); |
| 6839 | if (!Obj) |
| 6840 | return false; |
| 6841 | for (std::pair<unsigned, const FieldDecl *> LengthAndField : |
| 6842 | llvm::reverse(C&: UnionPathLengths)) { |
| 6843 | // Form a designator for the union object. |
| 6844 | SubobjectDesignator D = LHS.Designator; |
| 6845 | D.truncate(Ctx&: Info.Ctx, Base: LHS.Base, NewLength: LengthAndField.first); |
| 6846 | |
| 6847 | bool DuringInit = Info.isEvaluatingCtorDtor(Base: LHS.Base, Path: D.Entries) == |
| 6848 | ConstructionPhase::AfterBases; |
| 6849 | StartLifetimeOfUnionMemberHandler StartLifetime{ |
| 6850 | .Info: Info, .LHSExpr: LHSExpr, .Field: LengthAndField.second, .DuringInit: DuringInit}; |
| 6851 | if (!findSubobject(Info, E: LHSExpr, Obj, Sub: D, handler&: StartLifetime)) |
| 6852 | return false; |
| 6853 | } |
| 6854 | |
| 6855 | return true; |
| 6856 | } |
| 6857 | |
| 6858 | static bool EvaluateCallArg(const ParmVarDecl *PVD, const Expr *Arg, |
| 6859 | CallRef Call, EvalInfo &Info, bool NonNull = false, |
| 6860 | APValue **EvaluatedArg = nullptr) { |
| 6861 | LValue LV; |
| 6862 | // Create the parameter slot and register its destruction. For a vararg |
| 6863 | // argument, create a temporary. |
| 6864 | // FIXME: For calling conventions that destroy parameters in the callee, |
| 6865 | // should we consider performing destruction when the function returns |
| 6866 | // instead? |
| 6867 | APValue &V = PVD ? Info.CurrentCall->createParam(Args: Call, PVD, LV) |
| 6868 | : Info.CurrentCall->createTemporary(Key: Arg, T: Arg->getType(), |
| 6869 | Scope: ScopeKind::Call, LV); |
| 6870 | if (!EvaluateInPlace(Result&: V, Info, This: LV, E: Arg)) |
| 6871 | return false; |
| 6872 | |
| 6873 | // Passing a null pointer to an __attribute__((nonnull)) parameter results in |
| 6874 | // undefined behavior, so is non-constant. |
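  // For example (illustrative):
  //   [[gnu::nonnull]] constexpr int deref(int *p) { return *p; }
  //   constexpr int k = deref(nullptr); // rejected: null argument passed to
  //                                     // a nonnull parameter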
| 6875 | if (NonNull && V.isLValue() && V.isNullPointer()) { |
| 6876 | Info.CCEDiag(E: Arg, DiagId: diag::note_non_null_attribute_failed); |
| 6877 | return false; |
| 6878 | } |
| 6879 | |
| 6880 | if (EvaluatedArg) |
| 6881 | *EvaluatedArg = &V; |
| 6882 | |
| 6883 | return true; |
| 6884 | } |
| 6885 | |
| 6886 | /// Evaluate the arguments to a function call. |
| 6887 | static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call, |
| 6888 | EvalInfo &Info, const FunctionDecl *Callee, |
| 6889 | bool RightToLeft = false, |
| 6890 | LValue *ObjectArg = nullptr) { |
| 6891 | bool Success = true; |
| 6892 | llvm::SmallBitVector ForbiddenNullArgs; |
| 6893 | if (Callee->hasAttr<NonNullAttr>()) { |
| 6894 | ForbiddenNullArgs.resize(N: Args.size()); |
| 6895 | for (const auto *Attr : Callee->specific_attrs<NonNullAttr>()) { |
| 6896 | if (!Attr->args_size()) { |
| 6897 | ForbiddenNullArgs.set(); |
| 6898 | break; |
| 6899 | } else |
| 6900 | for (auto Idx : Attr->args()) { |
| 6901 | unsigned ASTIdx = Idx.getASTIndex(); |
| 6902 | if (ASTIdx >= Args.size()) |
| 6903 | continue; |
| 6904 | ForbiddenNullArgs[ASTIdx] = true; |
| 6905 | } |
| 6906 | } |
| 6907 | } |
| 6908 | for (unsigned I = 0; I < Args.size(); I++) { |
| 6909 | unsigned Idx = RightToLeft ? Args.size() - I - 1 : I; |
| 6910 | const ParmVarDecl *PVD = |
| 6911 | Idx < Callee->getNumParams() ? Callee->getParamDecl(i: Idx) : nullptr; |
| 6912 | bool NonNull = !ForbiddenNullArgs.empty() && ForbiddenNullArgs[Idx]; |
| 6913 | APValue *That = nullptr; |
| 6914 | if (!EvaluateCallArg(PVD, Arg: Args[Idx], Call, Info, NonNull, EvaluatedArg: &That)) { |
| 6915 | // If we're checking for a potential constant expression, evaluate all |
| 6916 | // initializers even if some of them fail. |
| 6917 | if (!Info.noteFailure()) |
| 6918 | return false; |
| 6919 | Success = false; |
| 6920 | } |
| 6921 | if (PVD && PVD->isExplicitObjectParameter() && That && That->isLValue()) |
| 6922 | ObjectArg->setFrom(Ctx: Info.Ctx, V: *That); |
| 6923 | } |
| 6924 | return Success; |
| 6925 | } |
| 6926 | |
| 6927 | /// Perform a trivial copy from Param, which is the parameter of a copy or move |
| 6928 | /// constructor or assignment operator. |
| 6929 | static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param, |
| 6930 | const Expr *E, APValue &Result, |
| 6931 | bool CopyObjectRepresentation) { |
| 6932 | // Find the reference argument. |
| 6933 | CallStackFrame *Frame = Info.CurrentCall; |
| 6934 | APValue *RefValue = Info.getParamSlot(Call: Frame->Arguments, PVD: Param); |
| 6935 | if (!RefValue) { |
| 6936 | Info.FFDiag(E); |
| 6937 | return false; |
| 6938 | } |
| 6939 | |
| 6940 | // Copy out the contents of the RHS object. |
| 6941 | LValue RefLValue; |
| 6942 | RefLValue.setFrom(Ctx: Info.Ctx, V: *RefValue); |
| 6943 | return handleLValueToRValueConversion( |
| 6944 | Info, Conv: E, Type: Param->getType().getNonReferenceType(), LVal: RefLValue, RVal&: Result, |
| 6945 | WantObjectRepresentation: CopyObjectRepresentation); |
| 6946 | } |
| 6947 | |
| 6948 | /// Evaluate a function call. |
| 6949 | static bool HandleFunctionCall(SourceLocation CallLoc, |
| 6950 | const FunctionDecl *Callee, |
| 6951 | const LValue *ObjectArg, const Expr *E, |
| 6952 | ArrayRef<const Expr *> Args, CallRef Call, |
| 6953 | const Stmt *Body, EvalInfo &Info, |
| 6954 | APValue &Result, const LValue *ResultSlot) { |
| 6955 | if (!Info.CheckCallLimit(Loc: CallLoc)) |
| 6956 | return false; |
| 6957 | |
| 6958 | CallStackFrame Frame(Info, E->getSourceRange(), Callee, ObjectArg, E, Call); |
| 6959 | |
| 6960 | // For a trivial copy or move assignment, perform an APValue copy. This is |
| 6961 | // essential for unions, where the operations performed by the assignment |
| 6962 | // operator cannot be represented as statements. |
| 6963 | // |
| 6964 | // Skip this for non-union classes with no fields; in that case, the defaulted |
| 6965 | // copy/move does not actually read the object. |
| 6966 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: Callee); |
| 6967 | if (MD && MD->isDefaulted() && |
| 6968 | (MD->getParent()->isUnion() || |
| 6969 | (MD->isTrivial() && |
| 6970 | isReadByLvalueToRvalueConversion(RD: MD->getParent())))) { |
| 6971 | unsigned ExplicitOffset = MD->isExplicitObjectMemberFunction() ? 1 : 0; |
| 6972 | assert(ObjectArg && |
| 6973 | (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())); |
| 6974 | APValue RHSValue; |
| 6975 | if (!handleTrivialCopy(Info, Param: MD->getParamDecl(i: 0), E: Args[0], Result&: RHSValue, |
| 6976 | CopyObjectRepresentation: MD->getParent()->isUnion())) |
| 6977 | return false; |
| 6978 | |
| 6979 | LValue Obj; |
| 6980 | if (!handleAssignment(Info, E: Args[ExplicitOffset], LVal: *ObjectArg, |
| 6981 | LValType: MD->getFunctionObjectParameterReferenceType(), |
| 6982 | Val&: RHSValue)) |
| 6983 | return false; |
| 6984 | ObjectArg->moveInto(V&: Result); |
| 6985 | return true; |
| 6986 | } else if (MD && isLambdaCallOperator(MD)) { |
| 6987 | // We're in a lambda; determine the lambda capture field maps unless we're |
| 6988 | // just constexpr checking a lambda's call operator. constexpr checking is |
| 6989 | // done before the captures have been added to the closure object (unless |
| 6990 | // we're inferring constexpr-ness), so we don't have access to them in this |
| 6991 | // case. But since we don't need the captures to constexpr check, we can |
| 6992 | // just ignore them. |
| 6993 | if (!Info.checkingPotentialConstantExpression()) |
| 6994 | MD->getParent()->getCaptureFields(Captures&: Frame.LambdaCaptureFields, |
| 6995 | ThisCapture&: Frame.LambdaThisCaptureField); |
| 6996 | } |
| 6997 | |
| 6998 | StmtResult Ret = {.Value: Result, .Slot: ResultSlot}; |
| 6999 | EvalStmtResult ESR = EvaluateStmt(Result&: Ret, Info, S: Body); |
| 7000 | if (ESR == ESR_Succeeded) { |
| 7001 | if (Callee->getReturnType()->isVoidType()) |
| 7002 | return true; |
| 7003 | Info.FFDiag(Loc: Callee->getEndLoc(), DiagId: diag::note_constexpr_no_return); |
| 7004 | } |
| 7005 | return ESR == ESR_Returned; |
| 7006 | } |
| 7007 | |
| 7008 | /// Evaluate a constructor call. |
| 7009 | static bool HandleConstructorCall(const Expr *E, const LValue &This, |
| 7010 | CallRef Call, |
| 7011 | const CXXConstructorDecl *Definition, |
| 7012 | EvalInfo &Info, APValue &Result) { |
| 7013 | SourceLocation CallLoc = E->getExprLoc(); |
| 7014 | if (!Info.CheckCallLimit(Loc: CallLoc)) |
| 7015 | return false; |
| 7016 | |
| 7017 | const CXXRecordDecl *RD = Definition->getParent(); |
| 7018 | if (RD->getNumVBases()) { |
| 7019 | Info.FFDiag(Loc: CallLoc, DiagId: diag::note_constexpr_virtual_base) << RD; |
| 7020 | return false; |
| 7021 | } |
| 7022 | |
| 7023 | EvalInfo::EvaluatingConstructorRAII EvalObj( |
| 7024 | Info, |
| 7025 | ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries}, |
| 7026 | RD->getNumBases()); |
| 7027 | CallStackFrame Frame(Info, E->getSourceRange(), Definition, &This, E, Call); |
| 7028 | |
| 7029 | // FIXME: Creating an APValue just to hold a nonexistent return value is |
| 7030 | // wasteful. |
| 7031 | APValue RetVal; |
| 7032 | StmtResult Ret = {.Value: RetVal, .Slot: nullptr}; |
| 7033 | |
| 7034 | // If it's a delegating constructor, delegate. |
| 7035 | if (Definition->isDelegatingConstructor()) { |
| 7036 | CXXConstructorDecl::init_const_iterator I = Definition->init_begin(); |
| 7037 | if ((*I)->getInit()->isValueDependent()) { |
| 7038 | if (!EvaluateDependentExpr(E: (*I)->getInit(), Info)) |
| 7039 | return false; |
| 7040 | } else { |
| 7041 | FullExpressionRAII InitScope(Info); |
| 7042 | if (!EvaluateInPlace(Result, Info, This, E: (*I)->getInit()) || |
| 7043 | !InitScope.destroy()) |
| 7044 | return false; |
| 7045 | } |
| 7046 | return EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) != ESR_Failed; |
| 7047 | } |
| 7048 | |
| 7049 | // For a trivial copy or move constructor, perform an APValue copy. This is |
| 7050 | // essential for unions (or classes with anonymous union members), where the |
| 7051 | // operations performed by the constructor cannot be represented by |
| 7052 | // ctor-initializers. |
| 7053 | // |
| 7054 | // Skip this for empty non-union classes; we should not perform an |
| 7055 | // lvalue-to-rvalue conversion on them because their copy constructor does not |
| 7056 | // actually read them. |
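  // For example (illustrative):
  //   union U { int a; float f; };
  //   constexpr U u1 = {.f = 1.0f};
  //   constexpr U u2 = u1; // copies the value as a whole, keeping 'f' active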
| 7057 | if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() && |
| 7058 | (Definition->getParent()->isUnion() || |
| 7059 | (Definition->isTrivial() && |
| 7060 | isReadByLvalueToRvalueConversion(RD: Definition->getParent())))) { |
| 7061 | return handleTrivialCopy(Info, Param: Definition->getParamDecl(i: 0), E, Result, |
| 7062 | CopyObjectRepresentation: Definition->getParent()->isUnion()); |
| 7063 | } |
| 7064 | |
| 7065 | // Reserve space for the struct members. |
| 7066 | if (!Result.hasValue()) { |
| 7067 | if (!RD->isUnion()) |
| 7068 | Result = APValue(APValue::UninitStruct(), RD->getNumBases(), |
| 7069 | RD->getNumFields()); |
| 7070 | else |
| 7071 | // A union starts with no active member. |
| 7072 | Result = APValue((const FieldDecl*)nullptr); |
| 7073 | } |
| 7074 | |
| 7075 | if (RD->isInvalidDecl()) return false; |
| 7076 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 7077 | |
| 7078 | // A scope for temporaries lifetime-extended by reference members. |
| 7079 | BlockScopeRAII LifetimeExtendedScope(Info); |
| 7080 | |
| 7081 | bool Success = true; |
| 7082 | unsigned BasesSeen = 0; |
| 7083 | #ifndef NDEBUG |
| 7084 | CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin(); |
| 7085 | #endif |
| 7086 | CXXRecordDecl::field_iterator FieldIt = RD->field_begin(); |
| 7087 | auto SkipToField = [&](FieldDecl *FD, bool Indirect) { |
| 7088 | // We might be initializing the same field again if this is an indirect |
| 7089 | // field initialization. |
| 7090 | if (FieldIt == RD->field_end() || |
| 7091 | FieldIt->getFieldIndex() > FD->getFieldIndex()) { |
| 7092 | assert(Indirect && "fields out of order?" ); |
| 7093 | return; |
| 7094 | } |
| 7095 | |
| 7096 | // Default-initialize any fields with no explicit initializer. |
| 7097 | for (; !declaresSameEntity(D1: *FieldIt, D2: FD); ++FieldIt) { |
| 7098 | assert(FieldIt != RD->field_end() && "missing field?" ); |
| 7099 | if (!FieldIt->isUnnamedBitField()) |
| 7100 | Success &= handleDefaultInitValue( |
| 7101 | T: FieldIt->getType(), |
| 7102 | Result&: Result.getStructField(i: FieldIt->getFieldIndex())); |
| 7103 | } |
| 7104 | ++FieldIt; |
| 7105 | }; |
| 7106 | for (const auto *I : Definition->inits()) { |
| 7107 | LValue Subobject = This; |
| 7108 | LValue SubobjectParent = This; |
| 7109 | APValue *Value = &Result; |
| 7110 | |
| 7111 | // Determine the subobject to initialize. |
| 7112 | FieldDecl *FD = nullptr; |
| 7113 | if (I->isBaseInitializer()) { |
| 7114 | QualType BaseType(I->getBaseClass(), 0); |
| 7115 | #ifndef NDEBUG |
| 7116 | // Non-virtual base classes are initialized in the order in the class |
| 7117 | // definition. We have already checked for virtual base classes. |
| 7118 | assert(!BaseIt->isVirtual() && "virtual base for literal type" ); |
| 7119 | assert(Info.Ctx.hasSameUnqualifiedType(BaseIt->getType(), BaseType) && |
| 7120 | "base class initializers not in expected order" ); |
| 7121 | ++BaseIt; |
| 7122 | #endif |
| 7123 | if (!HandleLValueDirectBase(Info, E: I->getInit(), Obj&: Subobject, Derived: RD, |
| 7124 | Base: BaseType->getAsCXXRecordDecl(), RL: &Layout)) |
| 7125 | return false; |
| 7126 | Value = &Result.getStructBase(i: BasesSeen++); |
| 7127 | } else if ((FD = I->getMember())) { |
| 7128 | if (!HandleLValueMember(Info, E: I->getInit(), LVal&: Subobject, FD, RL: &Layout)) |
| 7129 | return false; |
| 7130 | if (RD->isUnion()) { |
| 7131 | Result = APValue(FD); |
| 7132 | Value = &Result.getUnionValue(); |
| 7133 | } else { |
| 7134 | SkipToField(FD, false); |
| 7135 | Value = &Result.getStructField(i: FD->getFieldIndex()); |
| 7136 | } |
| 7137 | } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) { |
| 7138 | // Walk the indirect field decl's chain to find the object to initialize, |
| 7139 | // and make sure we've initialized every step along it. |
| 7140 | auto IndirectFieldChain = IFD->chain(); |
| 7141 | for (auto *C : IndirectFieldChain) { |
| 7142 | FD = cast<FieldDecl>(Val: C); |
| 7143 | CXXRecordDecl *CD = cast<CXXRecordDecl>(Val: FD->getParent()); |
| 7144 | // Switch the union field if it differs. This happens if we had |
| 7145 | // preceding zero-initialization, and we're now initializing a union |
| 7146 | // subobject other than the first. |
| 7147 | // FIXME: In this case, the values of the other subobjects are |
| 7148 | // specified, since zero-initialization sets all padding bits to zero. |
| 7149 | if (!Value->hasValue() || |
| 7150 | (Value->isUnion() && |
| 7151 | !declaresSameEntity(D1: Value->getUnionField(), D2: FD))) { |
| 7152 | if (CD->isUnion()) |
| 7153 | *Value = APValue(FD); |
| 7154 | else |
| 7155 | // FIXME: This immediately starts the lifetime of all members of |
| 7156 | // an anonymous struct. It would be preferable to strictly start |
| 7157 | // member lifetime in initialization order. |
| 7158 | Success &= handleDefaultInitValue(T: Info.Ctx.getCanonicalTagType(TD: CD), |
| 7159 | Result&: *Value); |
| 7160 | } |
| 7161 | // Store Subobject as its parent before updating it for the last element |
| 7162 | // in the chain. |
| 7163 | if (C == IndirectFieldChain.back()) |
| 7164 | SubobjectParent = Subobject; |
| 7165 | if (!HandleLValueMember(Info, E: I->getInit(), LVal&: Subobject, FD)) |
| 7166 | return false; |
| 7167 | if (CD->isUnion()) |
| 7168 | Value = &Value->getUnionValue(); |
| 7169 | else { |
| 7170 | if (C == IndirectFieldChain.front() && !RD->isUnion()) |
| 7171 | SkipToField(FD, true); |
| 7172 | Value = &Value->getStructField(i: FD->getFieldIndex()); |
| 7173 | } |
| 7174 | } |
| 7175 | } else { |
| 7176 | llvm_unreachable("unknown base initializer kind" ); |
| 7177 | } |
| 7178 | |
    // We need to override This for implicit field initializers because in that
    // case This refers to the innermost anonymous struct/union containing the
    // initializer, not to the class currently being constructed.
| 7182 | const Expr *Init = I->getInit(); |
| 7183 | if (Init->isValueDependent()) { |
| 7184 | if (!EvaluateDependentExpr(E: Init, Info)) |
| 7185 | return false; |
| 7186 | } else { |
| 7187 | ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent, |
| 7188 | isa<CXXDefaultInitExpr>(Val: Init)); |
| 7189 | FullExpressionRAII InitScope(Info); |
| 7190 | if (FD && FD->getType()->isReferenceType() && |
| 7191 | !FD->getType()->isFunctionReferenceType()) { |
| 7192 | LValue Result; |
| 7193 | if (!EvaluateInitForDeclOfReferenceType(Info, D: FD, Init, Result, |
| 7194 | Val&: *Value)) { |
| 7195 | if (!Info.noteFailure()) |
| 7196 | return false; |
| 7197 | Success = false; |
| 7198 | } |
| 7199 | } else if (!EvaluateInPlace(Result&: *Value, Info, This: Subobject, E: Init) || |
| 7200 | (FD && FD->isBitField() && |
| 7201 | !truncateBitfieldValue(Info, E: Init, Value&: *Value, FD))) { |
| 7202 | // If we're checking for a potential constant expression, evaluate all |
| 7203 | // initializers even if some of them fail. |
| 7204 | if (!Info.noteFailure()) |
| 7205 | return false; |
| 7206 | Success = false; |
| 7207 | } |
| 7208 | } |
| 7209 | |
| 7210 | // This is the point at which the dynamic type of the object becomes this |
| 7211 | // class type. |
| 7212 | if (I->isBaseInitializer() && BasesSeen == RD->getNumBases()) |
| 7213 | EvalObj.finishedConstructingBases(); |
| 7214 | } |
| 7215 | |
| 7216 | // Default-initialize any remaining fields. |
| 7217 | if (!RD->isUnion()) { |
| 7218 | for (; FieldIt != RD->field_end(); ++FieldIt) { |
| 7219 | if (!FieldIt->isUnnamedBitField()) |
| 7220 | Success &= handleDefaultInitValue( |
| 7221 | T: FieldIt->getType(), |
| 7222 | Result&: Result.getStructField(i: FieldIt->getFieldIndex())); |
| 7223 | } |
| 7224 | } |
| 7225 | |
| 7226 | EvalObj.finishedConstructingFields(); |
| 7227 | |
| 7228 | return Success && |
| 7229 | EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) != ESR_Failed && |
| 7230 | LifetimeExtendedScope.destroy(); |
| 7231 | } |
| 7232 | |
| 7233 | static bool HandleConstructorCall(const Expr *E, const LValue &This, |
| 7234 | ArrayRef<const Expr*> Args, |
| 7235 | const CXXConstructorDecl *Definition, |
| 7236 | EvalInfo &Info, APValue &Result) { |
| 7237 | CallScopeRAII CallScope(Info); |
| 7238 | CallRef Call = Info.CurrentCall->createCall(Callee: Definition); |
| 7239 | if (!EvaluateArgs(Args, Call, Info, Callee: Definition)) |
| 7240 | return false; |
| 7241 | |
| 7242 | return HandleConstructorCall(E, This, Call, Definition, Info, Result) && |
| 7243 | CallScope.destroy(); |
| 7244 | } |
| 7245 | |
| 7246 | static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange, |
| 7247 | const LValue &This, APValue &Value, |
| 7248 | QualType T) { |
| 7249 | // Objects can only be destroyed while they're within their lifetimes. |
| 7250 | // FIXME: We have no representation for whether an object of type nullptr_t |
| 7251 | // is in its lifetime; it usually doesn't matter. Perhaps we should model it |
| 7252 | // as indeterminate instead? |
| 7253 | if (Value.isAbsent() && !T->isNullPtrType()) { |
| 7254 | APValue Printable; |
| 7255 | This.moveInto(V&: Printable); |
| 7256 | Info.FFDiag(Loc: CallRange.getBegin(), |
| 7257 | DiagId: diag::note_constexpr_destroy_out_of_lifetime) |
| 7258 | << Printable.getAsString(Ctx: Info.Ctx, Ty: Info.Ctx.getLValueReferenceType(T)); |
| 7259 | return false; |
| 7260 | } |
| 7261 | |
| 7262 | // Invent an expression for location purposes. |
| 7263 | // FIXME: We shouldn't need to do this. |
| 7264 | OpaqueValueExpr LocE(CallRange.getBegin(), Info.Ctx.IntTy, VK_PRValue); |
| 7265 | |
| 7266 | // For arrays, destroy elements right-to-left. |
| 7267 | if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) { |
| 7268 | uint64_t Size = CAT->getZExtSize(); |
| 7269 | QualType ElemT = CAT->getElementType(); |
| 7270 | |
| 7271 | if (!CheckArraySize(Info, CAT, CallLoc: CallRange.getBegin())) |
| 7272 | return false; |
| 7273 | |
| 7274 | LValue ElemLV = This; |
| 7275 | ElemLV.addArray(Info, E: &LocE, CAT); |
| 7276 | if (!HandleLValueArrayAdjustment(Info, E: &LocE, LVal&: ElemLV, EltTy: ElemT, Adjustment: Size)) |
| 7277 | return false; |
| 7278 | |
| 7279 | // Ensure that we have actual array elements available to destroy; the |
| 7280 | // destructors might mutate the value, so we can't run them on the array |
| 7281 | // filler. |
| 7282 | if (Size && Size > Value.getArrayInitializedElts()) |
| 7283 | expandArray(Array&: Value, Index: Value.getArraySize() - 1); |
| 7284 | |
| 7285 | // The size of the array might have been reduced by |
| 7286 | // a placement new. |
| 7287 | for (Size = Value.getArraySize(); Size != 0; --Size) { |
| 7288 | APValue &Elem = Value.getArrayInitializedElt(I: Size - 1); |
| 7289 | if (!HandleLValueArrayAdjustment(Info, E: &LocE, LVal&: ElemLV, EltTy: ElemT, Adjustment: -1) || |
| 7290 | !HandleDestructionImpl(Info, CallRange, This: ElemLV, Value&: Elem, T: ElemT)) |
| 7291 | return false; |
| 7292 | } |
| 7293 | |
| 7294 | // End the lifetime of this array now. |
| 7295 | Value = APValue(); |
| 7296 | return true; |
| 7297 | } |
| 7298 | |
| 7299 | const CXXRecordDecl *RD = T->getAsCXXRecordDecl(); |
| 7300 | if (!RD) { |
| 7301 | if (T.isDestructedType()) { |
| 7302 | Info.FFDiag(Loc: CallRange.getBegin(), |
| 7303 | DiagId: diag::note_constexpr_unsupported_destruction) |
| 7304 | << T; |
| 7305 | return false; |
| 7306 | } |
| 7307 | |
| 7308 | Value = APValue(); |
| 7309 | return true; |
| 7310 | } |
| 7311 | |
| 7312 | if (RD->getNumVBases()) { |
| 7313 | Info.FFDiag(Loc: CallRange.getBegin(), DiagId: diag::note_constexpr_virtual_base) << RD; |
| 7314 | return false; |
| 7315 | } |
| 7316 | |
| 7317 | const CXXDestructorDecl *DD = RD->getDestructor(); |
| 7318 | if (!DD && !RD->hasTrivialDestructor()) { |
| 7319 | Info.FFDiag(Loc: CallRange.getBegin()); |
| 7320 | return false; |
| 7321 | } |
| 7322 | |
| 7323 | if (!DD || DD->isTrivial() || |
| 7324 | (RD->isAnonymousStructOrUnion() && RD->isUnion())) { |
| 7325 | // A trivial destructor just ends the lifetime of the object. Check for |
| 7326 | // this case before checking for a body, because we might not bother |
| 7327 | // building a body for a trivial destructor. Note that it doesn't matter |
| 7328 | // whether the destructor is constexpr in this case; all trivial |
| 7329 | // destructors are constexpr. |
| 7330 | // |
| 7331 | // If an anonymous union would be destroyed, some enclosing destructor must |
| 7332 | // have been explicitly defined, and the anonymous union destruction should |
| 7333 | // have no effect. |
| 7334 | Value = APValue(); |
| 7335 | return true; |
| 7336 | } |
| 7337 | |
| 7338 | if (!Info.CheckCallLimit(Loc: CallRange.getBegin())) |
| 7339 | return false; |
| 7340 | |
| 7341 | const FunctionDecl *Definition = nullptr; |
| 7342 | const Stmt *Body = DD->getBody(Definition); |
| 7343 | |
| 7344 | if (!CheckConstexprFunction(Info, CallLoc: CallRange.getBegin(), Declaration: DD, Definition, Body)) |
| 7345 | return false; |
| 7346 | |
| 7347 | CallStackFrame Frame(Info, CallRange, Definition, &This, /*CallExpr=*/nullptr, |
| 7348 | CallRef()); |
| 7349 | |
| 7350 | // We're now in the period of destruction of this object. |
| 7351 | unsigned BasesLeft = RD->getNumBases(); |
| 7352 | EvalInfo::EvaluatingDestructorRAII EvalObj( |
| 7353 | Info, |
| 7354 | ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries}); |
| 7355 | if (!EvalObj.DidInsert) { |
| 7356 | // C++2a [class.dtor]p19: |
| 7357 | // the behavior is undefined if the destructor is invoked for an object |
| 7358 | // whose lifetime has ended |
| 7359 | // (Note that formally the lifetime ends when the period of destruction |
| 7360 | // begins, even though certain uses of the object remain valid until the |
| 7361 | // period of destruction ends.) |
| 7362 | Info.FFDiag(Loc: CallRange.getBegin(), DiagId: diag::note_constexpr_double_destroy); |
| 7363 | return false; |
| 7364 | } |
| 7365 | |
| 7366 | // FIXME: Creating an APValue just to hold a nonexistent return value is |
| 7367 | // wasteful. |
| 7368 | APValue RetVal; |
| 7369 | StmtResult Ret = {.Value: RetVal, .Slot: nullptr}; |
| 7370 | if (EvaluateStmt(Result&: Ret, Info, S: Definition->getBody()) == ESR_Failed) |
| 7371 | return false; |
| 7372 | |
| 7373 | // A union destructor does not implicitly destroy its members. |
| 7374 | if (RD->isUnion()) |
| 7375 | return true; |
| 7376 | |
| 7377 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 7378 | |
| 7379 | // We don't have a good way to iterate fields in reverse, so collect all the |
| 7380 | // fields first and then walk them backwards. |
| 7381 | SmallVector<FieldDecl*, 16> Fields(RD->fields()); |
| 7382 | for (const FieldDecl *FD : llvm::reverse(C&: Fields)) { |
| 7383 | if (FD->isUnnamedBitField()) |
| 7384 | continue; |
| 7385 | |
| 7386 | LValue Subobject = This; |
| 7387 | if (!HandleLValueMember(Info, E: &LocE, LVal&: Subobject, FD, RL: &Layout)) |
| 7388 | return false; |
| 7389 | |
| 7390 | APValue *SubobjectValue = &Value.getStructField(i: FD->getFieldIndex()); |
| 7391 | if (!HandleDestructionImpl(Info, CallRange, This: Subobject, Value&: *SubobjectValue, |
| 7392 | T: FD->getType())) |
| 7393 | return false; |
| 7394 | } |
| 7395 | |
| 7396 | if (BasesLeft != 0) |
| 7397 | EvalObj.startedDestroyingBases(); |
| 7398 | |
| 7399 | // Destroy base classes in reverse order. |
| 7400 | for (const CXXBaseSpecifier &Base : llvm::reverse(C: RD->bases())) { |
| 7401 | --BasesLeft; |
| 7402 | |
| 7403 | QualType BaseType = Base.getType(); |
| 7404 | LValue Subobject = This; |
| 7405 | if (!HandleLValueDirectBase(Info, E: &LocE, Obj&: Subobject, Derived: RD, |
| 7406 | Base: BaseType->getAsCXXRecordDecl(), RL: &Layout)) |
| 7407 | return false; |
| 7408 | |
| 7409 | APValue *SubobjectValue = &Value.getStructBase(i: BasesLeft); |
| 7410 | if (!HandleDestructionImpl(Info, CallRange, This: Subobject, Value&: *SubobjectValue, |
| 7411 | T: BaseType)) |
| 7412 | return false; |
| 7413 | } |
| 7414 | assert(BasesLeft == 0 && "NumBases was wrong?" ); |
| 7415 | |
| 7416 | // The period of destruction ends now. The object is gone. |
| 7417 | Value = APValue(); |
| 7418 | return true; |
| 7419 | } |
| 7420 | |
| 7421 | namespace { |
| 7422 | struct DestroyObjectHandler { |
| 7423 | EvalInfo &Info; |
| 7424 | const Expr *E; |
| 7425 | const LValue &This; |
| 7426 | const AccessKinds AccessKind; |
| 7427 | |
| 7428 | typedef bool result_type; |
| 7429 | bool failed() { return false; } |
| 7430 | bool found(APValue &Subobj, QualType SubobjType) { |
| 7431 | return HandleDestructionImpl(Info, CallRange: E->getSourceRange(), This, Value&: Subobj, |
| 7432 | T: SubobjType); |
| 7433 | } |
| 7434 | bool found(APSInt &Value, QualType SubobjType) { |
| 7435 | Info.FFDiag(E, DiagId: diag::note_constexpr_destroy_complex_elem); |
| 7436 | return false; |
| 7437 | } |
| 7438 | bool found(APFloat &Value, QualType SubobjType) { |
| 7439 | Info.FFDiag(E, DiagId: diag::note_constexpr_destroy_complex_elem); |
| 7440 | return false; |
| 7441 | } |
| 7442 | }; |
| 7443 | } |
| 7444 | |
| 7445 | /// Perform a destructor or pseudo-destructor call on the given object, which |
| 7446 | /// might in general not be a complete object. |
| 7447 | static bool HandleDestruction(EvalInfo &Info, const Expr *E, |
| 7448 | const LValue &This, QualType ThisType) { |
| 7449 | CompleteObject Obj = findCompleteObject(Info, E, AK: AK_Destroy, LVal: This, LValType: ThisType); |
| 7450 | DestroyObjectHandler Handler = {.Info: Info, .E: E, .This: This, .AccessKind: AK_Destroy}; |
| 7451 | return Obj && findSubobject(Info, E, Obj, Sub: This.Designator, handler&: Handler); |
| 7452 | } |
| 7453 | |
| 7454 | /// Destroy and end the lifetime of the given complete object. |
| 7455 | static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc, |
| 7456 | APValue::LValueBase LVBase, APValue &Value, |
| 7457 | QualType T) { |
| 7458 | // If we've had an unmodeled side-effect, we can't rely on mutable state |
| 7459 | // (such as the object we're about to destroy) being correct. |
| 7460 | if (Info.EvalStatus.HasSideEffects) |
| 7461 | return false; |
| 7462 | |
| 7463 | LValue LV; |
| 7464 | LV.set(B: {LVBase}); |
| 7465 | return HandleDestructionImpl(Info, CallRange: Loc, This: LV, Value, T); |
| 7466 | } |
| 7467 | |
/// Perform a call to 'operator new' or to '__builtin_operator_new'.
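/// In C++20 this enables constexpr dynamic allocation, e.g. (illustrative):
///   constexpr int f() {
///     std::allocator<int> a;
///     int *p = a.allocate(1);
///     std::construct_at(p, 42);
///     int r = *p;
///     std::destroy_at(p);
///     a.deallocate(p, 1);
///     return r;
///   }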
| 7469 | static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E, |
| 7470 | LValue &Result) { |
| 7471 | if (Info.checkingPotentialConstantExpression() || |
| 7472 | Info.SpeculativeEvaluationDepth) |
| 7473 | return false; |
| 7474 | |
| 7475 | // This is permitted only within a call to std::allocator<T>::allocate. |
| 7476 | auto Caller = Info.getStdAllocatorCaller(FnName: "allocate" ); |
| 7477 | if (!Caller) { |
| 7478 | Info.FFDiag(Loc: E->getExprLoc(), DiagId: Info.getLangOpts().CPlusPlus20 |
| 7479 | ? diag::note_constexpr_new_untyped |
| 7480 | : diag::note_constexpr_new); |
| 7481 | return false; |
| 7482 | } |
| 7483 | |
| 7484 | QualType ElemType = Caller.ElemType; |
| 7485 | if (ElemType->isIncompleteType() || ElemType->isFunctionType()) { |
| 7486 | Info.FFDiag(Loc: E->getExprLoc(), |
| 7487 | DiagId: diag::note_constexpr_new_not_complete_object_type) |
| 7488 | << (ElemType->isIncompleteType() ? 0 : 1) << ElemType; |
| 7489 | return false; |
| 7490 | } |
| 7491 | |
| 7492 | APSInt ByteSize; |
| 7493 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: ByteSize, Info)) |
| 7494 | return false; |
| 7495 | bool IsNothrow = false; |
| 7496 | for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) { |
| 7497 | EvaluateIgnoredValue(Info, E: E->getArg(Arg: I)); |
    IsNothrow |= E->getArg(Arg: I)->getType()->isNothrowT();
| 7499 | } |
| 7500 | |
| 7501 | CharUnits ElemSize; |
| 7502 | if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: ElemType, Size&: ElemSize)) |
| 7503 | return false; |
| 7504 | APInt Size, Remainder; |
| 7505 | APInt ElemSizeAP(ByteSize.getBitWidth(), ElemSize.getQuantity()); |
| 7506 | APInt::udivrem(LHS: ByteSize, RHS: ElemSizeAP, Quotient&: Size, Remainder); |
| 7507 | if (Remainder != 0) { |
| 7508 | // This likely indicates a bug in the implementation of 'std::allocator'. |
| 7509 | Info.FFDiag(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_operator_new_bad_size) |
| 7510 | << ByteSize << APSInt(ElemSizeAP, true) << ElemType; |
| 7511 | return false; |
| 7512 | } |
| 7513 | |
| 7514 | if (!Info.CheckArraySize(Loc: E->getBeginLoc(), BitWidth: ByteSize.getActiveBits(), |
| 7515 | ElemCount: Size.getZExtValue(), /*Diag=*/!IsNothrow)) { |
| 7516 | if (IsNothrow) { |
| 7517 | Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType()); |
| 7518 | return true; |
| 7519 | } |
| 7520 | return false; |
| 7521 | } |
| 7522 | |
| 7523 | QualType AllocType = Info.Ctx.getConstantArrayType( |
| 7524 | EltTy: ElemType, ArySize: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0); |
| 7525 | APValue *Val = Info.createHeapAlloc(E: Caller.Call, T: AllocType, LV&: Result); |
| 7526 | *Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue()); |
| 7527 | Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val&: AllocType)); |
| 7528 | return true; |
| 7529 | } |
| 7530 | |
| 7531 | static bool hasVirtualDestructor(QualType T) { |
| 7532 | if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) |
| 7533 | if (CXXDestructorDecl *DD = RD->getDestructor()) |
| 7534 | return DD->isVirtual(); |
| 7535 | return false; |
| 7536 | } |
| 7537 | |
| 7538 | static const FunctionDecl *getVirtualOperatorDelete(QualType T) { |
| 7539 | if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) |
| 7540 | if (CXXDestructorDecl *DD = RD->getDestructor()) |
| 7541 | return DD->isVirtual() ? DD->getOperatorDelete() : nullptr; |
| 7542 | return nullptr; |
| 7543 | } |
| 7544 | |
| 7545 | /// Check that the given object is a suitable pointer to a heap allocation that |
| 7546 | /// still exists and is of the right kind for the purpose of a deletion. |
| 7547 | /// |
| 7548 | /// On success, returns the heap allocation to deallocate. On failure, produces |
| 7549 | /// a diagnostic and returns std::nullopt. |
| 7550 | static std::optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E, |
| 7551 | const LValue &Pointer, |
| 7552 | DynAlloc::Kind DeallocKind) { |
| 7553 | auto PointerAsString = [&] { |
| 7554 | return Pointer.toString(Ctx&: Info.Ctx, T: Info.Ctx.VoidPtrTy); |
| 7555 | }; |
| 7556 | |
| 7557 | DynamicAllocLValue DA = Pointer.Base.dyn_cast<DynamicAllocLValue>(); |
| 7558 | if (!DA) { |
| 7559 | Info.FFDiag(E, DiagId: diag::note_constexpr_delete_not_heap_alloc) |
| 7560 | << PointerAsString(); |
| 7561 | if (Pointer.Base) |
| 7562 | NoteLValueLocation(Info, Base: Pointer.Base); |
| 7563 | return std::nullopt; |
| 7564 | } |
| 7565 | |
| 7566 | std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA); |
| 7567 | if (!Alloc) { |
| 7568 | Info.FFDiag(E, DiagId: diag::note_constexpr_double_delete); |
| 7569 | return std::nullopt; |
| 7570 | } |
| 7571 | |
| 7572 | if (DeallocKind != (*Alloc)->getKind()) { |
| 7573 | QualType AllocType = Pointer.Base.getDynamicAllocType(); |
| 7574 | Info.FFDiag(E, DiagId: diag::note_constexpr_new_delete_mismatch) |
| 7575 | << DeallocKind << (*Alloc)->getKind() << AllocType; |
| 7576 | NoteLValueLocation(Info, Base: Pointer.Base); |
| 7577 | return std::nullopt; |
| 7578 | } |
| 7579 | |
| 7580 | bool Subobject = false; |
| 7581 | if (DeallocKind == DynAlloc::New) { |
| 7582 | Subobject = Pointer.Designator.MostDerivedPathLength != 0 || |
| 7583 | Pointer.Designator.isOnePastTheEnd(); |
| 7584 | } else { |
| 7585 | Subobject = Pointer.Designator.Entries.size() != 1 || |
| 7586 | Pointer.Designator.Entries[0].getAsArrayIndex() != 0; |
| 7587 | } |
| 7588 | if (Subobject) { |
| 7589 | Info.FFDiag(E, DiagId: diag::note_constexpr_delete_subobject) |
| 7590 | << PointerAsString() << Pointer.Designator.isOnePastTheEnd(); |
| 7591 | return std::nullopt; |
| 7592 | } |
| 7593 | |
| 7594 | return Alloc; |
| 7595 | } |
| 7596 | |
| 7597 | // Perform a call to 'operator delete' or '__builtin_operator_delete'. |
| 7598 | static bool HandleOperatorDeleteCall(EvalInfo &Info, const CallExpr *E) { |
| 7599 | if (Info.checkingPotentialConstantExpression() || |
| 7600 | Info.SpeculativeEvaluationDepth) |
| 7601 | return false; |
| 7602 | |
| 7603 | // This is permitted only within a call to std::allocator<T>::deallocate. |
| 7604 | if (!Info.getStdAllocatorCaller(FnName: "deallocate" )) { |
| 7605 | Info.FFDiag(Loc: E->getExprLoc()); |
| 7606 | return true; |
| 7607 | } |
| 7608 | |
| 7609 | LValue Pointer; |
| 7610 | if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: Pointer, Info)) |
| 7611 | return false; |
| 7612 | for (unsigned I = 1, N = E->getNumArgs(); I != N; ++I) |
| 7613 | EvaluateIgnoredValue(Info, E: E->getArg(Arg: I)); |
| 7614 | |
| 7615 | if (Pointer.Designator.Invalid) |
| 7616 | return false; |
| 7617 | |
| 7618 | // Deleting a null pointer would have no effect, but it's not permitted by |
| 7619 | // std::allocator<T>::deallocate's contract. |
| 7620 | if (Pointer.isNullPointer()) { |
| 7621 | Info.CCEDiag(Loc: E->getExprLoc(), DiagId: diag::note_constexpr_deallocate_null); |
| 7622 | return true; |
| 7623 | } |
| 7624 | |
| 7625 | if (!CheckDeleteKind(Info, E, Pointer, DeallocKind: DynAlloc::StdAllocator)) |
| 7626 | return false; |
| 7627 | |
| 7628 | Info.HeapAllocs.erase(x: Pointer.Base.get<DynamicAllocLValue>()); |
| 7629 | return true; |
| 7630 | } |
| 7631 | |
| 7632 | //===----------------------------------------------------------------------===// |
| 7633 | // Generic Evaluation |
| 7634 | //===----------------------------------------------------------------------===// |
| 7635 | namespace { |
| 7636 | |
| 7637 | class BitCastBuffer { |
| 7638 | // FIXME: We're going to need bit-level granularity when we support |
| 7639 | // bit-fields. |
  // FIXME: It's possible under the C++ standard for 'char' to not be 8 bits, but
| 7641 | // we don't support a host or target where that is the case. Still, we should |
| 7642 | // use a more generic type in case we ever do. |
| 7643 | SmallVector<std::optional<unsigned char>, 32> Bytes; |
| 7644 | |
| 7645 | static_assert(std::numeric_limits<unsigned char>::digits >= 8, |
| 7646 | "Need at least 8 bit unsigned char" ); |
| 7647 | |
| 7648 | bool TargetIsLittleEndian; |
| 7649 | |
| 7650 | public: |
| 7651 | BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian) |
| 7652 | : Bytes(Width.getQuantity()), |
| 7653 | TargetIsLittleEndian(TargetIsLittleEndian) {} |
| 7654 | |
| 7655 | [[nodiscard]] bool readObject(CharUnits Offset, CharUnits Width, |
| 7656 | SmallVectorImpl<unsigned char> &Output) const { |
| 7657 | for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) { |
| 7658 | // If a byte of an integer is uninitialized, then the whole integer is |
| 7659 | // uninitialized. |
| 7660 | if (!Bytes[I.getQuantity()]) |
| 7661 | return false; |
| 7662 | Output.push_back(Elt: *Bytes[I.getQuantity()]); |
| 7663 | } |
| 7664 | if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian) |
| 7665 | std::reverse(first: Output.begin(), last: Output.end()); |
| 7666 | return true; |
| 7667 | } |
| 7668 | |
| 7669 | void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) { |
| 7670 | if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian) |
| 7671 | std::reverse(first: Input.begin(), last: Input.end()); |
| 7672 | |
| 7673 | size_t Index = 0; |
| 7674 | for (unsigned char Byte : Input) { |
| 7675 | assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?" ); |
| 7676 | Bytes[Offset.getQuantity() + Index] = Byte; |
| 7677 | ++Index; |
| 7678 | } |
| 7679 | } |
| 7680 | |
| 7681 | size_t size() { return Bytes.size(); } |
| 7682 | }; |
| 7683 | |
/// Traverse an APValue to produce a BitCastBuffer, emulating how the current
| 7685 | /// target would represent the value at runtime. |
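/// For example (illustrative, assuming 32-bit int and IEEE-754 float):
///   constexpr float One = __builtin_bit_cast(float, 0x3f800000); // 1.0f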
| 7686 | class APValueToBufferConverter { |
| 7687 | EvalInfo &Info; |
| 7688 | BitCastBuffer Buffer; |
| 7689 | const CastExpr *BCE; |
| 7690 | |
| 7691 | APValueToBufferConverter(EvalInfo &Info, CharUnits ObjectWidth, |
| 7692 | const CastExpr *BCE) |
| 7693 | : Info(Info), |
| 7694 | Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()), |
| 7695 | BCE(BCE) {} |
| 7696 | |
| 7697 | bool visit(const APValue &Val, QualType Ty) { |
| 7698 | return visit(Val, Ty, Offset: CharUnits::fromQuantity(Quantity: 0)); |
| 7699 | } |
| 7700 | |
| 7701 | // Write out Val with type Ty into Buffer starting at Offset. |
| 7702 | bool visit(const APValue &Val, QualType Ty, CharUnits Offset) { |
| 7703 | assert((size_t)Offset.getQuantity() <= Buffer.size()); |
| 7704 | |
| 7705 | // As a special case, nullptr_t has an indeterminate value. |
| 7706 | if (Ty->isNullPtrType()) |
| 7707 | return true; |
| 7708 | |
    // Dispatch on the value's kind and write its representation at Offset.
| 7710 | switch (Val.getKind()) { |
| 7711 | case APValue::Indeterminate: |
| 7712 | case APValue::None: |
| 7713 | return true; |
| 7714 | |
| 7715 | case APValue::Int: |
| 7716 | return visitInt(Val: Val.getInt(), Ty, Offset); |
| 7717 | case APValue::Float: |
| 7718 | return visitFloat(Val: Val.getFloat(), Ty, Offset); |
| 7719 | case APValue::Array: |
| 7720 | return visitArray(Val, Ty, Offset); |
| 7721 | case APValue::Struct: |
| 7722 | return visitRecord(Val, Ty, Offset); |
| 7723 | case APValue::Vector: |
| 7724 | return visitVector(Val, Ty, Offset); |
| 7725 | |
| 7726 | case APValue::ComplexInt: |
| 7727 | case APValue::ComplexFloat: |
| 7728 | return visitComplex(Val, Ty, Offset); |
| 7729 | case APValue::FixedPoint: |
| 7730 | // FIXME: We should support these. |
| 7731 | |
| 7732 | case APValue::Union: |
| 7733 | case APValue::MemberPointer: |
| 7734 | case APValue::AddrLabelDiff: { |
| 7735 | Info.FFDiag(Loc: BCE->getBeginLoc(), |
| 7736 | DiagId: diag::note_constexpr_bit_cast_unsupported_type) |
| 7737 | << Ty; |
| 7738 | return false; |
| 7739 | } |
| 7740 | |
| 7741 | case APValue::LValue: |
| 7742 | llvm_unreachable("LValue subobject in bit_cast?");
| 7743 | }
| 7744 | llvm_unreachable("Unhandled APValue::ValueKind");
| 7745 | } |
| 7746 | |
| 7747 | bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) { |
| 7748 | const RecordDecl *RD = Ty->getAsRecordDecl(); |
| 7749 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 7750 | |
| 7751 | // Visit the base classes. |
| 7752 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
| 7753 | for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) { |
| 7754 | const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I]; |
| 7755 | CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); |
| 7756 | const APValue &Base = Val.getStructBase(i: I); |
| 7757 | |
| 7758 | // Can happen in error cases. |
| 7759 | if (!Base.isStruct()) |
| 7760 | return false; |
| 7761 | |
| 7762 | if (!visitRecord(Val: Base, Ty: BS.getType(), |
| 7763 | Offset: Layout.getBaseClassOffset(Base: BaseDecl) + Offset)) |
| 7764 | return false; |
| 7765 | } |
| 7766 | } |
| 7767 | |
| 7768 | // Visit the fields. |
| 7769 | unsigned FieldIdx = 0; |
| 7770 | for (FieldDecl *FD : RD->fields()) { |
| 7771 | if (FD->isBitField()) { |
| 7772 | Info.FFDiag(Loc: BCE->getBeginLoc(), |
| 7773 | DiagId: diag::note_constexpr_bit_cast_unsupported_bitfield); |
| 7774 | return false; |
| 7775 | } |
| 7776 | |
| 7777 | uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldNo: FieldIdx); |
| 7778 | |
| 7779 | assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 && |
| 7780 | "only bit-fields can have sub-char alignment" ); |
| 7781 | CharUnits FieldOffset = |
| 7782 | Info.Ctx.toCharUnitsFromBits(BitSize: FieldOffsetBits) + Offset; |
| 7783 | QualType FieldTy = FD->getType(); |
| 7784 | if (!visit(Val: Val.getStructField(i: FieldIdx), Ty: FieldTy, Offset: FieldOffset)) |
| 7785 | return false; |
| 7786 | ++FieldIdx; |
| 7787 | } |
| 7788 | |
| 7789 | return true; |
| 7790 | } |
| 7791 | |
| 7792 | bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) { |
| 7793 | const auto *CAT = |
| 7794 | dyn_cast_or_null<ConstantArrayType>(Val: Ty->getAsArrayTypeUnsafe()); |
| 7795 | if (!CAT) |
| 7796 | return false; |
| 7797 | |
| 7798 | CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(T: CAT->getElementType()); |
| 7799 | unsigned NumInitializedElts = Val.getArrayInitializedElts(); |
| 7800 | unsigned ArraySize = Val.getArraySize(); |
| 7801 | // First, initialize the initialized elements. |
| 7802 | for (unsigned I = 0; I != NumInitializedElts; ++I) { |
| 7803 | const APValue &SubObj = Val.getArrayInitializedElt(I); |
| 7804 | if (!visit(Val: SubObj, Ty: CAT->getElementType(), Offset: Offset + I * ElemWidth)) |
| 7805 | return false; |
| 7806 | } |
| 7807 | |
| 7808 | // Next, initialize the rest of the array using the filler. |
| 7809 | if (Val.hasArrayFiller()) { |
| 7810 | const APValue &Filler = Val.getArrayFiller(); |
| 7811 | for (unsigned I = NumInitializedElts; I != ArraySize; ++I) { |
| 7812 | if (!visit(Val: Filler, Ty: CAT->getElementType(), Offset: Offset + I * ElemWidth)) |
| 7813 | return false; |
| 7814 | } |
| 7815 | } |
| 7816 | |
| 7817 | return true; |
| 7818 | } |
| 7819 | |
| 7820 | bool visitComplex(const APValue &Val, QualType Ty, CharUnits Offset) { |
| 7821 | const ComplexType *ComplexTy = Ty->castAs<ComplexType>(); |
| 7822 | QualType EltTy = ComplexTy->getElementType(); |
| 7823 | CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy); |
| 7824 | bool IsInt = Val.isComplexInt(); |
| 7825 | |
| 7826 | if (IsInt) { |
| 7827 | if (!visitInt(Val: Val.getComplexIntReal(), Ty: EltTy, |
| 7828 | Offset: Offset + (0 * EltSizeChars))) |
| 7829 | return false; |
| 7830 | if (!visitInt(Val: Val.getComplexIntImag(), Ty: EltTy, |
| 7831 | Offset: Offset + (1 * EltSizeChars))) |
| 7832 | return false; |
| 7833 | } else { |
| 7834 | if (!visitFloat(Val: Val.getComplexFloatReal(), Ty: EltTy, |
| 7835 | Offset: Offset + (0 * EltSizeChars))) |
| 7836 | return false; |
| 7837 | if (!visitFloat(Val: Val.getComplexFloatImag(), Ty: EltTy, |
| 7838 | Offset: Offset + (1 * EltSizeChars))) |
| 7839 | return false; |
| 7840 | } |
| 7841 | |
| 7842 | return true; |
| 7843 | } |
| 7844 | |
| 7845 | bool visitVector(const APValue &Val, QualType Ty, CharUnits Offset) { |
| 7846 | const VectorType *VTy = Ty->castAs<VectorType>(); |
| 7847 | QualType EltTy = VTy->getElementType(); |
| 7848 | unsigned NElts = VTy->getNumElements(); |
| 7849 | |
| 7850 | if (VTy->isPackedVectorBoolType(ctx: Info.Ctx)) { |
| 7851 | // Special handling for OpenCL bool vectors: |
| 7852 | // Since these vectors are stored as packed bits, but we can't write |
| 7853 | // individual bits to the BitCastBuffer, we'll buffer all of the elements |
| 7854 | // together into an appropriately sized APInt and write them all out at |
| 7855 | // once. Because we don't accept vectors where NElts * EltSize isn't a |
| 7856 | // multiple of the char size, there will be no padding space, so we don't |
| 7857 | // have to worry about writing data which should have been left |
| 7858 | // uninitialized. |
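|      | // For example, an 8-element bool vector occupies a single byte; on a
|      | // little-endian target, element 0 ends up in the least significant bit.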
| 7859 | bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); |
| 7860 | |
| 7861 | llvm::APInt Res = llvm::APInt::getZero(numBits: NElts); |
| 7862 | for (unsigned I = 0; I < NElts; ++I) { |
| 7863 | const llvm::APSInt &EltAsInt = Val.getVectorElt(I).getInt(); |
| 7864 | assert(EltAsInt.isUnsigned() && EltAsInt.getBitWidth() == 1 && |
| 7865 | "bool vector element must be 1-bit unsigned integer!" ); |
| 7866 | |
| 7867 | Res.insertBits(SubBits: EltAsInt, bitPosition: BigEndian ? (NElts - I - 1) : I); |
| 7868 | } |
| 7869 | |
| 7870 | SmallVector<uint8_t, 8> Bytes(NElts / 8); |
| 7871 | llvm::StoreIntToMemory(Res, &*Bytes.begin(), NElts / 8);
| 7872 | Buffer.writeObject(Offset, Bytes);
| 7873 | } else { |
| 7874 | // Iterate over each of the elements and write them out to the buffer at |
| 7875 | // the appropriate offset. |
| 7876 | CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy); |
| 7877 | for (unsigned I = 0; I < NElts; ++I) { |
| 7878 | if (!visit(Val: Val.getVectorElt(I), Ty: EltTy, Offset: Offset + I * EltSizeChars)) |
| 7879 | return false; |
| 7880 | } |
| 7881 | } |
| 7882 | |
| 7883 | return true; |
| 7884 | } |
| 7885 | |
| 7886 | bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) { |
| 7887 | APSInt AdjustedVal = Val; |
| 7888 | unsigned Width = AdjustedVal.getBitWidth(); |
| 7889 | if (Ty->isBooleanType()) { |
| 7890 | Width = Info.Ctx.getTypeSize(T: Ty); |
| 7891 | AdjustedVal = AdjustedVal.extend(width: Width); |
| 7892 | } |
| 7893 | |
| 7894 | SmallVector<uint8_t, 8> Bytes(Width / 8); |
| 7895 | llvm::StoreIntToMemory(AdjustedVal, &*Bytes.begin(), Width / 8);
| 7896 | Buffer.writeObject(Offset, Bytes);
| 7897 | return true; |
| 7898 | } |
| 7899 | |
| 7900 | bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) { |
| 7901 | APSInt AsInt(Val.bitcastToAPInt()); |
| 7902 | return visitInt(Val: AsInt, Ty, Offset); |
| 7903 | } |
| 7904 | |
| 7905 | public: |
| 7906 | static std::optional<BitCastBuffer> |
| 7907 | convert(EvalInfo &Info, const APValue &Src, const CastExpr *BCE) { |
| 7908 | CharUnits DstSize = Info.Ctx.getTypeSizeInChars(T: BCE->getType()); |
| 7909 | APValueToBufferConverter Converter(Info, DstSize, BCE); |
| 7910 | if (!Converter.visit(Val: Src, Ty: BCE->getSubExpr()->getType())) |
| 7911 | return std::nullopt; |
| 7912 | return Converter.Buffer; |
| 7913 | } |
| 7914 | }; |
| 7915 | |
| 7916 | /// Write a BitCastBuffer into an APValue.
| 7917 | class BufferToAPValueConverter { |
| 7918 | EvalInfo &Info; |
| 7919 | const BitCastBuffer &Buffer; |
| 7920 | const CastExpr *BCE; |
| 7921 | |
| 7922 | BufferToAPValueConverter(EvalInfo &Info, const BitCastBuffer &Buffer, |
| 7923 | const CastExpr *BCE) |
| 7924 | : Info(Info), Buffer(Buffer), BCE(BCE) {} |
| 7925 | |
| 7926 | // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast |
| 7927 | // with an invalid type, so anything left is a deficiency on our part (FIXME). |
| 7928 | // Ideally this will be unreachable. |
| 7929 | std::nullopt_t unsupportedType(QualType Ty) { |
| 7930 | Info.FFDiag(Loc: BCE->getBeginLoc(), |
| 7931 | DiagId: diag::note_constexpr_bit_cast_unsupported_type) |
| 7932 | << Ty; |
| 7933 | return std::nullopt; |
| 7934 | } |
| 7935 | |
| 7936 | std::nullopt_t unrepresentableValue(QualType Ty, const APSInt &Val) { |
| 7937 | Info.FFDiag(Loc: BCE->getBeginLoc(), |
| 7938 | DiagId: diag::note_constexpr_bit_cast_unrepresentable_value) |
| 7939 | << Ty << toString(I: Val, /*Radix=*/10); |
| 7940 | return std::nullopt; |
| 7941 | } |
| 7942 | |
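|      | // Read a scalar of builtin type T back out of the buffer at Offset.
|      | // EnumSugar, if non-null, is the enum type that desugared to T; it lets
|      | // std::byte (like unsigned char) hold indeterminate bytes.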
| 7943 | std::optional<APValue> visit(const BuiltinType *T, CharUnits Offset, |
| 7944 | const EnumType *EnumSugar = nullptr) { |
| 7945 | if (T->isNullPtrType()) { |
| 7946 | uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QT: QualType(T, 0)); |
| 7947 | return APValue((Expr *)nullptr, |
| 7948 | /*Offset=*/CharUnits::fromQuantity(Quantity: NullValue), |
| 7949 | APValue::NoLValuePath{}, /*IsNullPtr=*/true); |
| 7950 | } |
| 7951 | |
| 7952 | CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T); |
| 7953 | |
| 7954 | // Work around floating point types that contain unused padding bytes. This |
| 7955 | // is really just `long double` on x86, which is the only fundamental type |
| 7956 | // with padding bytes. |
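|      | // For example, x86 long double is an 80-bit value that the ABI typically
|      | // stores in 12 or 16 bytes; only the 10 value bytes are read here.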
| 7957 | if (T->isRealFloatingType()) { |
| 7958 | const llvm::fltSemantics &Semantics = |
| 7959 | Info.Ctx.getFloatTypeSemantics(T: QualType(T, 0)); |
| 7960 | unsigned NumBits = llvm::APFloatBase::getSizeInBits(Sem: Semantics); |
| 7961 | assert(NumBits % 8 == 0); |
| 7962 | CharUnits NumBytes = CharUnits::fromQuantity(Quantity: NumBits / 8); |
| 7963 | if (NumBytes != SizeOf) |
| 7964 | SizeOf = NumBytes; |
| 7965 | } |
| 7966 | |
| 7967 | SmallVector<uint8_t, 8> Bytes; |
| 7968 | if (!Buffer.readObject(Offset, SizeOf, Bytes)) {
| 7969 | // If this is std::byte or unsigned char, then it's okay to store an
| 7970 | // indeterminate value. |
| 7971 | bool IsStdByte = EnumSugar && EnumSugar->isStdByteType(); |
| 7972 | bool IsUChar = |
| 7973 | !EnumSugar && (T->isSpecificBuiltinType(K: BuiltinType::UChar) || |
| 7974 | T->isSpecificBuiltinType(K: BuiltinType::Char_U)); |
| 7975 | if (!IsStdByte && !IsUChar) { |
| 7976 | QualType DisplayType(EnumSugar ? (const Type *)EnumSugar : T, 0); |
| 7977 | Info.FFDiag(Loc: BCE->getExprLoc(), |
| 7978 | DiagId: diag::note_constexpr_bit_cast_indet_dest) |
| 7979 | << DisplayType << Info.Ctx.getLangOpts().CharIsSigned; |
| 7980 | return std::nullopt; |
| 7981 | } |
| 7982 | |
| 7983 | return APValue::IndeterminateValue(); |
| 7984 | } |
| 7985 | |
| 7986 | APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true); |
| 7987 | llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size());
| 7988 | |
| 7989 | if (T->isIntegralOrEnumerationType()) { |
| 7990 | Val.setIsSigned(T->isSignedIntegerOrEnumerationType()); |
| 7991 | |
| 7992 | unsigned IntWidth = Info.Ctx.getIntWidth(T: QualType(T, 0)); |
| 7993 | if (IntWidth != Val.getBitWidth()) { |
| 7994 | APSInt Truncated = Val.trunc(width: IntWidth); |
| 7995 | if (Truncated.extend(width: Val.getBitWidth()) != Val) |
| 7996 | return unrepresentableValue(Ty: QualType(T, 0), Val); |
| 7997 | Val = Truncated; |
| 7998 | } |
| 7999 | |
| 8000 | return APValue(Val); |
| 8001 | } |
| 8002 | |
| 8003 | if (T->isRealFloatingType()) { |
| 8004 | const llvm::fltSemantics &Semantics = |
| 8005 | Info.Ctx.getFloatTypeSemantics(T: QualType(T, 0)); |
| 8006 | return APValue(APFloat(Semantics, Val)); |
| 8007 | } |
| 8008 | |
| 8009 | return unsupportedType(Ty: QualType(T, 0)); |
| 8010 | } |
| 8011 | |
| 8012 | std::optional<APValue> visit(const RecordType *RTy, CharUnits Offset) { |
| 8013 | const RecordDecl *RD = RTy->getAsRecordDecl(); |
| 8014 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 8015 | |
| 8016 | unsigned NumBases = 0; |
| 8017 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) |
| 8018 | NumBases = CXXRD->getNumBases(); |
| 8019 | |
| 8020 | APValue ResultVal(APValue::UninitStruct(), NumBases, RD->getNumFields()); |
| 8021 | |
| 8022 | // Visit the base classes. |
| 8023 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) { |
| 8024 | for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) { |
| 8025 | const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I]; |
| 8026 | CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); |
| 8027 | |
| 8028 | std::optional<APValue> SubObj = visitType( |
| 8029 | Ty: BS.getType(), Offset: Layout.getBaseClassOffset(Base: BaseDecl) + Offset); |
| 8030 | if (!SubObj) |
| 8031 | return std::nullopt; |
| 8032 | ResultVal.getStructBase(i: I) = *SubObj; |
| 8033 | } |
| 8034 | } |
| 8035 | |
| 8036 | // Visit the fields. |
| 8037 | unsigned FieldIdx = 0; |
| 8038 | for (FieldDecl *FD : RD->fields()) { |
| 8039 | // FIXME: We don't currently support bit-fields. A lot of the logic for |
| 8040 | // this is in CodeGen, so we would need to factor it out to reuse it here.
| 8041 | if (FD->isBitField()) { |
| 8042 | Info.FFDiag(Loc: BCE->getBeginLoc(), |
| 8043 | DiagId: diag::note_constexpr_bit_cast_unsupported_bitfield); |
| 8044 | return std::nullopt; |
| 8045 | } |
| 8046 | |
| 8047 | uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldNo: FieldIdx); |
| 8048 | assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0); |
| 8049 | |
| 8050 | CharUnits FieldOffset = |
| 8051 | CharUnits::fromQuantity(Quantity: FieldOffsetBits / Info.Ctx.getCharWidth()) + |
| 8052 | Offset; |
| 8053 | QualType FieldTy = FD->getType(); |
| 8054 | std::optional<APValue> SubObj = visitType(Ty: FieldTy, Offset: FieldOffset); |
| 8055 | if (!SubObj) |
| 8056 | return std::nullopt; |
| 8057 | ResultVal.getStructField(i: FieldIdx) = *SubObj; |
| 8058 | ++FieldIdx; |
| 8059 | } |
| 8060 | |
| 8061 | return ResultVal; |
| 8062 | } |
| 8063 | |
| 8064 | std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) { |
| 8065 | QualType RepresentationType = |
| 8066 | Ty->getDecl()->getDefinitionOrSelf()->getIntegerType(); |
| 8067 | assert(!RepresentationType.isNull() && |
| 8068 | "enum forward decl should be caught by Sema" ); |
| 8069 | const auto *AsBuiltin = |
| 8070 | RepresentationType.getCanonicalType()->castAs<BuiltinType>(); |
| 8071 | // Recurse into the underlying type. Treat std::byte transparently as |
| 8072 | // unsigned char. |
| 8073 | return visit(T: AsBuiltin, Offset, /*EnumTy=*/EnumSugar: Ty); |
| 8074 | } |
| 8075 | |
| 8076 | std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) { |
| 8077 | size_t Size = Ty->getLimitedSize(); |
| 8078 | CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(T: Ty->getElementType()); |
| 8079 | |
| 8080 | APValue ArrayValue(APValue::UninitArray(), Size, Size); |
| 8081 | for (size_t I = 0; I != Size; ++I) { |
| 8082 | std::optional<APValue> ElementValue = |
| 8083 | visitType(Ty: Ty->getElementType(), Offset: Offset + I * ElementWidth); |
| 8084 | if (!ElementValue) |
| 8085 | return std::nullopt; |
| 8086 | ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue); |
| 8087 | } |
| 8088 | |
| 8089 | return ArrayValue; |
| 8090 | } |
| 8091 | |
| 8092 | std::optional<APValue> visit(const ComplexType *Ty, CharUnits Offset) { |
| 8093 | QualType ElementType = Ty->getElementType(); |
| 8094 | CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(T: ElementType); |
| 8095 | bool IsInt = ElementType->isIntegerType(); |
| 8096 | |
| 8097 | std::optional<APValue> Values[2]; |
| 8098 | for (unsigned I = 0; I != 2; ++I) { |
| 8099 | Values[I] = visitType(Ty: Ty->getElementType(), Offset: Offset + I * ElementWidth); |
| 8100 | if (!Values[I]) |
| 8101 | return std::nullopt; |
| 8102 | } |
| 8103 | |
| 8104 | if (IsInt) |
| 8105 | return APValue(Values[0]->getInt(), Values[1]->getInt()); |
| 8106 | return APValue(Values[0]->getFloat(), Values[1]->getFloat()); |
| 8107 | } |
| 8108 | |
| 8109 | std::optional<APValue> visit(const VectorType *VTy, CharUnits Offset) { |
| 8110 | QualType EltTy = VTy->getElementType(); |
| 8111 | unsigned NElts = VTy->getNumElements(); |
| 8112 | unsigned EltSize = |
| 8113 | VTy->isPackedVectorBoolType(ctx: Info.Ctx) ? 1 : Info.Ctx.getTypeSize(T: EltTy); |
| 8114 | |
| 8115 | SmallVector<APValue, 4> Elts; |
| 8116 | Elts.reserve(N: NElts); |
| 8117 | if (VTy->isPackedVectorBoolType(ctx: Info.Ctx)) { |
| 8118 | // Special handling for OpenCL bool vectors: |
| 8119 | // Since these vectors are stored as packed bits, but we can't read |
| 8120 | // individual bits from the BitCastBuffer, we'll buffer all of the |
| 8121 | // elements together into an appropriately sized APInt and write them all |
| 8122 | // out at once. Because we don't accept vectors where NElts * EltSize |
| 8123 | // isn't a multiple of the char size, there will be no padding space, so |
| 8124 | // we don't have to worry about reading any padding data which didn't |
| 8125 | // actually need to be accessed. |
| 8126 | bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); |
| 8127 | |
| 8128 | SmallVector<uint8_t, 8> Bytes; |
| 8129 | Bytes.reserve(N: NElts / 8); |
| 8130 | if (!Buffer.readObject(Offset, CharUnits::fromQuantity(NElts / 8), Bytes))
| 8131 | return std::nullopt; |
| 8132 | |
| 8133 | APSInt SValInt(NElts, true); |
| 8134 | llvm::LoadIntFromMemory(SValInt, &*Bytes.begin(), Bytes.size());
| 8135 | |
| 8136 | for (unsigned I = 0; I < NElts; ++I) { |
| 8137 | llvm::APInt Elt = |
| 8138 | SValInt.extractBits(numBits: 1, bitPosition: (BigEndian ? NElts - I - 1 : I) * EltSize); |
| 8139 | Elts.emplace_back( |
| 8140 | Args: APSInt(std::move(Elt), !EltTy->isSignedIntegerType())); |
| 8141 | } |
| 8142 | } else { |
| 8143 | // Iterate over each of the elements and read them from the buffer at |
| 8144 | // the appropriate offset. |
| 8145 | CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(T: EltTy); |
| 8146 | for (unsigned I = 0; I < NElts; ++I) { |
| 8147 | std::optional<APValue> EltValue = |
| 8148 | visitType(Ty: EltTy, Offset: Offset + I * EltSizeChars); |
| 8149 | if (!EltValue) |
| 8150 | return std::nullopt; |
| 8151 | Elts.push_back(Elt: std::move(*EltValue)); |
| 8152 | } |
| 8153 | } |
| 8154 | |
| 8155 | return APValue(Elts.data(), Elts.size()); |
| 8156 | } |
| 8157 | |
| 8158 | std::optional<APValue> visit(const Type *Ty, CharUnits Offset) { |
| 8159 | return unsupportedType(Ty: QualType(Ty, 0)); |
| 8160 | } |
| 8161 | |
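|      | // Dispatch on the canonical type class. Only canonical, non-dependent
|      | // types can reach this point; anything without a dedicated overload falls
|      | // through to the unsupported-type case above.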
| 8162 | std::optional<APValue> visitType(QualType Ty, CharUnits Offset) { |
| 8163 | QualType Can = Ty.getCanonicalType(); |
| 8164 | |
| 8165 | switch (Can->getTypeClass()) { |
| 8166 | #define TYPE(Class, Base) \ |
| 8167 | case Type::Class: \ |
| 8168 | return visit(cast<Class##Type>(Can.getTypePtr()), Offset); |
| 8169 | #define ABSTRACT_TYPE(Class, Base) |
| 8170 | #define NON_CANONICAL_TYPE(Class, Base) \ |
| 8171 | case Type::Class: \ |
| 8172 | llvm_unreachable("non-canonical type should be impossible!"); |
| 8173 | #define DEPENDENT_TYPE(Class, Base) \ |
| 8174 | case Type::Class: \ |
| 8175 | llvm_unreachable( \ |
| 8176 | "dependent types aren't supported in the constant evaluator!"); |
| 8177 | #define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base) \ |
| 8178 | case Type::Class: \ |
| 8179 | llvm_unreachable("either dependent or not canonical!"); |
| 8180 | #include "clang/AST/TypeNodes.inc" |
| 8181 | } |
| 8182 | llvm_unreachable("Unhandled Type::TypeClass" ); |
| 8183 | } |
| 8184 | |
| 8185 | public: |
| 8186 | // Pull out a full value of type DstType. |
| 8187 | static std::optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer, |
| 8188 | const CastExpr *BCE) { |
| 8189 | BufferToAPValueConverter Converter(Info, Buffer, BCE); |
| 8190 | return Converter.visitType(Ty: BCE->getType(), Offset: CharUnits::fromQuantity(Quantity: 0)); |
| 8191 | } |
| 8192 | }; |
| 8193 | |
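|      | // Check that Ty may appear on either side of a constexpr bit_cast: no
|      | // unions, pointers, member pointers, volatile-qualified subobjects, or
|      | // reference members, recursing through bases, fields, array elements, and
|      | // vector element types.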
| 8194 | static bool checkBitCastConstexprEligibilityType(SourceLocation Loc, |
| 8195 | QualType Ty, EvalInfo *Info, |
| 8196 | const ASTContext &Ctx, |
| 8197 | bool CheckingDest) { |
| 8198 | Ty = Ty.getCanonicalType(); |
| 8199 | |
| 8200 | auto diag = [&](int Reason) { |
| 8201 | if (Info) |
| 8202 | Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_invalid_type) |
| 8203 | << CheckingDest << (Reason == 4) << Reason; |
| 8204 | return false; |
| 8205 | }; |
| 8206 | auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) { |
| 8207 | if (Info) |
| 8208 | Info->Note(Loc: NoteLoc, DiagId: diag::note_constexpr_bit_cast_invalid_subtype) |
| 8209 | << NoteTy << Construct << Ty; |
| 8210 | return false; |
| 8211 | }; |
| 8212 | |
| 8213 | if (Ty->isUnionType()) |
| 8214 | return diag(0); |
| 8215 | if (Ty->isPointerType()) |
| 8216 | return diag(1); |
| 8217 | if (Ty->isMemberPointerType()) |
| 8218 | return diag(2); |
| 8219 | if (Ty.isVolatileQualified()) |
| 8220 | return diag(3); |
| 8221 | |
| 8222 | if (RecordDecl *Record = Ty->getAsRecordDecl()) { |
| 8223 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: Record)) { |
| 8224 | for (CXXBaseSpecifier &BS : CXXRD->bases()) |
| 8225 | if (!checkBitCastConstexprEligibilityType(Loc, Ty: BS.getType(), Info, Ctx, |
| 8226 | CheckingDest)) |
| 8227 | return note(1, BS.getType(), BS.getBeginLoc()); |
| 8228 | } |
| 8229 | for (FieldDecl *FD : Record->fields()) { |
| 8230 | if (FD->getType()->isReferenceType()) |
| 8231 | return diag(4); |
| 8232 | if (!checkBitCastConstexprEligibilityType(Loc, Ty: FD->getType(), Info, Ctx, |
| 8233 | CheckingDest)) |
| 8234 | return note(0, FD->getType(), FD->getBeginLoc()); |
| 8235 | } |
| 8236 | } |
| 8237 | |
| 8238 | if (Ty->isArrayType() && |
| 8239 | !checkBitCastConstexprEligibilityType(Loc, Ty: Ctx.getBaseElementType(QT: Ty), |
| 8240 | Info, Ctx, CheckingDest)) |
| 8241 | return false; |
| 8242 | |
| 8243 | if (const auto *VTy = Ty->getAs<VectorType>()) { |
| 8244 | QualType EltTy = VTy->getElementType(); |
| 8245 | unsigned NElts = VTy->getNumElements(); |
| 8246 | unsigned EltSize = |
| 8247 | VTy->isPackedVectorBoolType(ctx: Ctx) ? 1 : Ctx.getTypeSize(T: EltTy); |
| 8248 | |
| 8249 | if ((NElts * EltSize) % Ctx.getCharWidth() != 0) { |
| 8250 | // The vector's size in bits is not a multiple of the target's byte size, |
| 8251 | // so its layout is unspecified. For now, we'll simply treat these cases |
| 8252 | // as unsupported (this should only be possible with OpenCL bool vectors |
| 8253 | // whose element count isn't a multiple of the byte size). |
| 8254 | if (Info) |
| 8255 | Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_invalid_vector) |
| 8256 | << QualType(VTy, 0) << EltSize << NElts << Ctx.getCharWidth(); |
| 8257 | return false; |
| 8258 | } |
| 8259 | |
| 8260 | if (EltTy->isRealFloatingType() && |
| 8261 | &Ctx.getFloatTypeSemantics(T: EltTy) == &APFloat::x87DoubleExtended()) { |
| 8262 | // The layout for x86_fp80 vectors seems to be handled very inconsistently |
| 8263 | // by both clang and LLVM, so for now we won't allow bit_casts involving |
| 8264 | // it in a constexpr context. |
| 8265 | if (Info) |
| 8266 | Info->FFDiag(Loc, DiagId: diag::note_constexpr_bit_cast_unsupported_type) |
| 8267 | << EltTy; |
| 8268 | return false; |
| 8269 | } |
| 8270 | } |
| 8271 | |
| 8272 | return true; |
| 8273 | } |
| 8274 | |
| 8275 | static bool checkBitCastConstexprEligibility(EvalInfo *Info, |
| 8276 | const ASTContext &Ctx, |
| 8277 | const CastExpr *BCE) { |
| 8278 | bool DestOK = checkBitCastConstexprEligibilityType( |
| 8279 | Loc: BCE->getBeginLoc(), Ty: BCE->getType(), Info, Ctx, CheckingDest: true); |
| 8280 | bool SourceOK = DestOK && checkBitCastConstexprEligibilityType( |
| 8281 | Loc: BCE->getBeginLoc(), |
| 8282 | Ty: BCE->getSubExpr()->getType(), Info, Ctx, CheckingDest: false); |
| 8283 | return SourceOK; |
| 8284 | } |
| 8285 | |
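|      | // Perform a bit_cast on an already-evaluated rvalue: serialize SourceRValue
|      | // into a byte buffer using the source type's object representation, then
|      | // rebuild an APValue of the destination type from those bytes.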
| 8286 | static bool handleRValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, |
| 8287 | const APValue &SourceRValue, |
| 8288 | const CastExpr *BCE) { |
| 8289 | assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 && |
| 8290 | "no host or target supports non 8-bit chars" ); |
| 8291 | |
| 8292 | if (!checkBitCastConstexprEligibility(Info: &Info, Ctx: Info.Ctx, BCE)) |
| 8293 | return false; |
| 8294 | |
| 8295 | // Read out SourceValue into a char buffer. |
| 8296 | std::optional<BitCastBuffer> Buffer = |
| 8297 | APValueToBufferConverter::convert(Info, Src: SourceRValue, BCE); |
| 8298 | if (!Buffer) |
| 8299 | return false; |
| 8300 | |
| 8301 | // Write out the buffer into a new APValue. |
| 8302 | std::optional<APValue> MaybeDestValue = |
| 8303 | BufferToAPValueConverter::convert(Info, Buffer&: *Buffer, BCE); |
| 8304 | if (!MaybeDestValue) |
| 8305 | return false; |
| 8306 | |
| 8307 | DestValue = std::move(*MaybeDestValue); |
| 8308 | return true; |
| 8309 | } |
| 8310 | |
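|      | // As above, but the source is an lvalue: it is first read with an
|      | // lvalue-to-rvalue conversion that requests the full object representation.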
| 8311 | static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, |
| 8312 | APValue &SourceValue, |
| 8313 | const CastExpr *BCE) { |
| 8314 | assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 && |
| 8315 | "no host or target supports non 8-bit chars" ); |
| 8316 | assert(SourceValue.isLValue() && |
| 8317 | "LValueToRValueBitcast requires an lvalue operand!" ); |
| 8318 | |
| 8319 | LValue SourceLValue; |
| 8320 | APValue SourceRValue; |
| 8321 | SourceLValue.setFrom(Ctx: Info.Ctx, V: SourceValue); |
| 8322 | if (!handleLValueToRValueConversion( |
| 8323 | Info, Conv: BCE, Type: BCE->getSubExpr()->getType().withConst(), LVal: SourceLValue, |
| 8324 | RVal&: SourceRValue, /*WantObjectRepresentation=*/true)) |
| 8325 | return false; |
| 8326 | |
| 8327 | return handleRValueToRValueBitCast(Info, DestValue, SourceRValue, BCE); |
| 8328 | } |
| 8329 | |
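|      | /// Common base class for the expression evaluators. It handles expression
|      | /// forms that are evaluated the same way regardless of the result kind
|      | /// (calls, casts, conditionals, comma, ...), and defers construction of the
|      | /// result to the Derived class via Success() and ZeroInitialization().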
| 8330 | template <class Derived> |
| 8331 | class ExprEvaluatorBase |
| 8332 | : public ConstStmtVisitor<Derived, bool> { |
| 8333 | private: |
| 8334 | Derived &getDerived() { return static_cast<Derived&>(*this); } |
| 8335 | bool DerivedSuccess(const APValue &V, const Expr *E) { |
| 8336 | return getDerived().Success(V, E); |
| 8337 | } |
| 8338 | bool DerivedZeroInitialization(const Expr *E) { |
| 8339 | return getDerived().ZeroInitialization(E); |
| 8340 | } |
| 8341 | |
| 8342 | // Check whether a conditional operator with a non-constant condition is a |
| 8343 | // potential constant expression. If neither arm is a potential constant |
| 8344 | // expression, then the conditional operator is not either. |
| 8345 | template<typename ConditionalOperator> |
| 8346 | void CheckPotentialConstantConditional(const ConditionalOperator *E) { |
| 8347 | assert(Info.checkingPotentialConstantExpression()); |
| 8348 | |
| 8349 | // Speculatively evaluate both arms. |
| 8350 | SmallVector<PartialDiagnosticAt, 8> Diag; |
| 8351 | { |
| 8352 | SpeculativeEvaluationRAII Speculate(Info, &Diag); |
| 8353 | StmtVisitorTy::Visit(E->getFalseExpr()); |
| 8354 | if (Diag.empty()) |
| 8355 | return; |
| 8356 | } |
| 8357 | |
| 8358 | { |
| 8359 | SpeculativeEvaluationRAII Speculate(Info, &Diag); |
| 8360 | Diag.clear(); |
| 8361 | StmtVisitorTy::Visit(E->getTrueExpr()); |
| 8362 | if (Diag.empty()) |
| 8363 | return; |
| 8364 | } |
| 8365 | |
| 8366 | Error(E, diag::note_constexpr_conditional_never_const); |
| 8367 | } |
| 8368 | |
| 8369 | |
| 8370 | template<typename ConditionalOperator> |
| 8371 | bool HandleConditionalOperator(const ConditionalOperator *E) { |
| 8372 | bool BoolResult; |
| 8373 | if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) { |
| 8374 | if (Info.checkingPotentialConstantExpression() && Info.noteFailure()) { |
| 8375 | CheckPotentialConstantConditional(E); |
| 8376 | return false; |
| 8377 | } |
| 8378 | if (Info.noteFailure()) { |
| 8379 | StmtVisitorTy::Visit(E->getTrueExpr()); |
| 8380 | StmtVisitorTy::Visit(E->getFalseExpr()); |
| 8381 | } |
| 8382 | return false; |
| 8383 | } |
| 8384 | |
| 8385 | Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr(); |
| 8386 | return StmtVisitorTy::Visit(EvalExpr); |
| 8387 | } |
| 8388 | |
| 8389 | protected: |
| 8390 | EvalInfo &Info; |
| 8391 | typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy; |
| 8392 | typedef ExprEvaluatorBase ExprEvaluatorBaseTy; |
| 8393 | |
| 8394 | OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) { |
| 8395 | return Info.CCEDiag(E, DiagId: D); |
| 8396 | } |
| 8397 | |
| 8398 | bool ZeroInitialization(const Expr *E) { return Error(E); } |
| 8399 | |
| 8400 | bool IsConstantEvaluatedBuiltinCall(const CallExpr *E) { |
| 8401 | unsigned BuiltinOp = E->getBuiltinCallee(); |
| 8402 | return BuiltinOp != 0 && |
| 8403 | Info.Ctx.BuiltinInfo.isConstantEvaluated(ID: BuiltinOp); |
| 8404 | } |
| 8405 | |
| 8406 | public: |
| 8407 | ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {} |
| 8408 | |
| 8409 | EvalInfo &getEvalInfo() { return Info; } |
| 8410 | |
| 8411 | /// Report an evaluation error. This should only be called when an error is |
| 8412 | /// first discovered. When propagating an error, just return false. |
| 8413 | bool Error(const Expr *E, diag::kind D) { |
| 8414 | Info.FFDiag(E, DiagId: D) << E->getSourceRange(); |
| 8415 | return false; |
| 8416 | } |
| 8417 | bool Error(const Expr *E) { |
| 8418 | return Error(E, diag::note_invalid_subexpr_in_const_expr); |
| 8419 | } |
| 8420 | |
| 8421 | bool VisitStmt(const Stmt *) { |
| 8422 | llvm_unreachable("Expression evaluator should not be called on stmts" ); |
| 8423 | } |
| 8424 | bool VisitExpr(const Expr *E) { |
| 8425 | return Error(E); |
| 8426 | } |
| 8427 | |
| 8428 | bool VisitEmbedExpr(const EmbedExpr *E) { |
| 8429 | const auto It = E->begin(); |
| 8430 | return StmtVisitorTy::Visit(*It); |
| 8431 | } |
| 8432 | |
| 8433 | bool VisitPredefinedExpr(const PredefinedExpr *E) { |
| 8434 | return StmtVisitorTy::Visit(E->getFunctionName()); |
| 8435 | } |
| 8436 | bool VisitConstantExpr(const ConstantExpr *E) { |
| 8437 | if (E->hasAPValueResult()) |
| 8438 | return DerivedSuccess(V: E->getAPValueResult(), E); |
| 8439 | |
| 8440 | return StmtVisitorTy::Visit(E->getSubExpr()); |
| 8441 | } |
| 8442 | |
| 8443 | bool VisitParenExpr(const ParenExpr *E) |
| 8444 | { return StmtVisitorTy::Visit(E->getSubExpr()); } |
| 8445 | bool VisitUnaryExtension(const UnaryOperator *E) |
| 8446 | { return StmtVisitorTy::Visit(E->getSubExpr()); } |
| 8447 | bool VisitUnaryPlus(const UnaryOperator *E) |
| 8448 | { return StmtVisitorTy::Visit(E->getSubExpr()); } |
| 8449 | bool VisitChooseExpr(const ChooseExpr *E) |
| 8450 | { return StmtVisitorTy::Visit(E->getChosenSubExpr()); } |
| 8451 | bool VisitGenericSelectionExpr(const GenericSelectionExpr *E) |
| 8452 | { return StmtVisitorTy::Visit(E->getResultExpr()); } |
| 8453 | bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) |
| 8454 | { return StmtVisitorTy::Visit(E->getReplacement()); } |
| 8455 | bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) { |
| 8456 | TempVersionRAII RAII(*Info.CurrentCall); |
| 8457 | SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope); |
| 8458 | return StmtVisitorTy::Visit(E->getExpr()); |
| 8459 | } |
| 8460 | bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) { |
| 8461 | TempVersionRAII RAII(*Info.CurrentCall); |
| 8462 | // The initializer may not have been parsed yet, or might be erroneous. |
| 8463 | if (!E->getExpr()) |
| 8464 | return Error(E); |
| 8465 | SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope); |
| 8466 | return StmtVisitorTy::Visit(E->getExpr()); |
| 8467 | } |
| 8468 | |
| 8469 | bool VisitExprWithCleanups(const ExprWithCleanups *E) { |
| 8470 | FullExpressionRAII Scope(Info); |
| 8471 | return StmtVisitorTy::Visit(E->getSubExpr()) && Scope.destroy(); |
| 8472 | } |
| 8473 | |
| 8474 | // Temporaries are registered when created, so we don't care about |
| 8475 | // CXXBindTemporaryExpr. |
| 8476 | bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) { |
| 8477 | return StmtVisitorTy::Visit(E->getSubExpr()); |
| 8478 | } |
| 8479 | |
| 8480 | bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) { |
| 8481 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 8482 | << diag::ConstexprInvalidCastKind::Reinterpret; |
| 8483 | return static_cast<Derived*>(this)->VisitCastExpr(E); |
| 8484 | } |
| 8485 | bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) { |
| 8486 | if (!Info.Ctx.getLangOpts().CPlusPlus20) |
| 8487 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 8488 | << diag::ConstexprInvalidCastKind::Dynamic; |
| 8489 | return static_cast<Derived*>(this)->VisitCastExpr(E); |
| 8490 | } |
| 8491 | bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) { |
| 8492 | return static_cast<Derived*>(this)->VisitCastExpr(E); |
| 8493 | } |
| 8494 | |
| 8495 | bool VisitBinaryOperator(const BinaryOperator *E) { |
| 8496 | switch (E->getOpcode()) { |
| 8497 | default: |
| 8498 | return Error(E); |
| 8499 | |
| 8500 | case BO_Comma: |
| 8501 | VisitIgnoredValue(E: E->getLHS()); |
| 8502 | return StmtVisitorTy::Visit(E->getRHS()); |
| 8503 | |
| 8504 | case BO_PtrMemD: |
| 8505 | case BO_PtrMemI: { |
| 8506 | LValue Obj; |
| 8507 | if (!HandleMemberPointerAccess(Info, BO: E, LV&: Obj)) |
| 8508 | return false; |
| 8509 | APValue Result; |
| 8510 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: Obj, RVal&: Result)) |
| 8511 | return false; |
| 8512 | return DerivedSuccess(V: Result, E); |
| 8513 | } |
| 8514 | } |
| 8515 | } |
| 8516 | |
| 8517 | bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E) { |
| 8518 | return StmtVisitorTy::Visit(E->getSemanticForm()); |
| 8519 | } |
| 8520 | |
| 8521 | bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) { |
| 8522 | // Evaluate and cache the common expression. We treat it as a temporary, |
| 8523 | // even though it's not quite the same thing. |
| 8524 | LValue CommonLV; |
| 8525 | if (!Evaluate(Result&: Info.CurrentCall->createTemporary( |
| 8526 | Key: E->getOpaqueValue(), |
| 8527 | T: getStorageType(Ctx: Info.Ctx, E: E->getOpaqueValue()), |
| 8528 | Scope: ScopeKind::FullExpression, LV&: CommonLV), |
| 8529 | Info, E: E->getCommon())) |
| 8530 | return false; |
| 8531 | |
| 8532 | return HandleConditionalOperator(E); |
| 8533 | } |
| 8534 | |
| 8535 | bool VisitConditionalOperator(const ConditionalOperator *E) { |
| 8536 | bool IsBcpCall = false; |
| 8537 | // If the condition (ignoring parens) is a __builtin_constant_p call, |
| 8538 | // the result is a constant expression if it can be folded without |
| 8539 | // side-effects. This is an important GNU extension. See GCC PR38377 |
| 8540 | // for discussion. |
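|      | // Such conditionals typically come from macros of the shape
|      | //   __builtin_constant_p(x) ? <constant path> : <runtime path>
|      | // which should be constant expressions whenever they can be folded without
|      | // side effects, even if the non-selected arm is not a constant expression.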
| 8541 | if (const CallExpr *CallCE = |
| 8542 | dyn_cast<CallExpr>(Val: E->getCond()->IgnoreParenCasts())) |
| 8543 | if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p) |
| 8544 | IsBcpCall = true; |
| 8545 | |
| 8546 | // Always assume __builtin_constant_p(...) ? ... : ... is a potential |
| 8547 | // constant expression; we can't check whether it's potentially foldable. |
| 8548 | // FIXME: We should instead treat __builtin_constant_p as non-constant if |
| 8549 | // it would return 'false' in this mode. |
| 8550 | if (Info.checkingPotentialConstantExpression() && IsBcpCall) |
| 8551 | return false; |
| 8552 | |
| 8553 | FoldConstant Fold(Info, IsBcpCall); |
| 8554 | if (!HandleConditionalOperator(E)) { |
| 8555 | Fold.keepDiagnostics(); |
| 8556 | return false; |
| 8557 | } |
| 8558 | |
| 8559 | return true; |
| 8560 | } |
| 8561 | |
| 8562 | bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) { |
| 8563 | if (APValue *Value = Info.CurrentCall->getCurrentTemporary(Key: E); |
| 8564 | Value && !Value->isAbsent()) |
| 8565 | return DerivedSuccess(V: *Value, E); |
| 8566 | |
| 8567 | const Expr *Source = E->getSourceExpr(); |
| 8568 | if (!Source) |
| 8569 | return Error(E); |
| 8570 | if (Source == E) { |
| 8571 | assert(0 && "OpaqueValueExpr recursively refers to itself" ); |
| 8572 | return Error(E); |
| 8573 | } |
| 8574 | return StmtVisitorTy::Visit(Source); |
| 8575 | } |
| 8576 | |
| 8577 | bool VisitPseudoObjectExpr(const PseudoObjectExpr *E) { |
| 8578 | for (const Expr *SemE : E->semantics()) { |
| 8579 | if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: SemE)) { |
| 8580 | // FIXME: We can't handle the case where an OpaqueValueExpr is also the |
| 8581 | // result expression: there could be two different LValues that would |
| 8582 | // refer to the same object in that case, and we can't model that. |
| 8583 | if (SemE == E->getResultExpr()) |
| 8584 | return Error(E); |
| 8585 | |
| 8586 | // Unique OVEs get evaluated if and when we encounter them when |
| 8587 | // emitting the rest of the semantic form, rather than eagerly. |
| 8588 | if (OVE->isUnique()) |
| 8589 | continue; |
| 8590 | |
| 8591 | LValue LV; |
| 8592 | if (!Evaluate(Result&: Info.CurrentCall->createTemporary( |
| 8593 | Key: OVE, T: getStorageType(Ctx: Info.Ctx, E: OVE), |
| 8594 | Scope: ScopeKind::FullExpression, LV), |
| 8595 | Info, E: OVE->getSourceExpr())) |
| 8596 | return false; |
| 8597 | } else if (SemE == E->getResultExpr()) { |
| 8598 | if (!StmtVisitorTy::Visit(SemE)) |
| 8599 | return false; |
| 8600 | } else { |
| 8601 | if (!EvaluateIgnoredValue(Info, E: SemE)) |
| 8602 | return false; |
| 8603 | } |
| 8604 | } |
| 8605 | return true; |
| 8606 | } |
| 8607 | |
| 8608 | bool VisitCallExpr(const CallExpr *E) { |
| 8609 | APValue Result; |
| 8610 | if (!handleCallExpr(E, Result, ResultSlot: nullptr)) |
| 8611 | return false; |
| 8612 | return DerivedSuccess(V: Result, E); |
| 8613 | } |
| 8614 | |
| 8615 | bool handleCallExpr(const CallExpr *E, APValue &Result, |
| 8616 | const LValue *ResultSlot) { |
| 8617 | CallScopeRAII CallScope(Info); |
| 8618 | |
| 8619 | const Expr *Callee = E->getCallee()->IgnoreParens(); |
| 8620 | QualType CalleeType = Callee->getType(); |
| 8621 | |
| 8622 | const FunctionDecl *FD = nullptr; |
| 8623 | LValue *This = nullptr, ObjectArg; |
| 8624 | auto Args = ArrayRef(E->getArgs(), E->getNumArgs()); |
| 8625 | bool HasQualifier = false; |
| 8626 | |
| 8627 | CallRef Call; |
| 8628 | |
| 8629 | // Extract function decl and 'this' pointer from the callee. |
| 8630 | if (CalleeType->isSpecificBuiltinType(K: BuiltinType::BoundMember)) { |
| 8631 | const CXXMethodDecl *Member = nullptr; |
| 8632 | if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: Callee)) { |
| 8633 | // Explicit bound member calls, such as x.f() or p->g(); |
| 8634 | if (!EvaluateObjectArgument(Info, Object: ME->getBase(), This&: ObjectArg)) |
| 8635 | return false; |
| 8636 | Member = dyn_cast<CXXMethodDecl>(Val: ME->getMemberDecl()); |
| 8637 | if (!Member) |
| 8638 | return Error(Callee); |
| 8639 | This = &ObjectArg; |
| 8640 | HasQualifier = ME->hasQualifier(); |
| 8641 | } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Val: Callee)) { |
| 8642 | // Indirect bound member calls ('.*' or '->*'). |
| 8643 | const ValueDecl *D = |
| 8644 | HandleMemberPointerAccess(Info, BO: BE, LV&: ObjectArg, IncludeMember: false); |
| 8645 | if (!D) |
| 8646 | return false; |
| 8647 | Member = dyn_cast<CXXMethodDecl>(Val: D); |
| 8648 | if (!Member) |
| 8649 | return Error(Callee); |
| 8650 | This = &ObjectArg; |
| 8651 | } else if (const auto *PDE = dyn_cast<CXXPseudoDestructorExpr>(Val: Callee)) { |
| 8652 | if (!Info.getLangOpts().CPlusPlus20) |
| 8653 | Info.CCEDiag(E: PDE, DiagId: diag::note_constexpr_pseudo_destructor); |
| 8654 | return EvaluateObjectArgument(Info, Object: PDE->getBase(), This&: ObjectArg) && |
| 8655 | HandleDestruction(Info, E: PDE, This: ObjectArg, ThisType: PDE->getDestroyedType()); |
| 8656 | } else |
| 8657 | return Error(Callee); |
| 8658 | FD = Member; |
| 8659 | } else if (CalleeType->isFunctionPointerType()) { |
| 8660 | LValue CalleeLV; |
| 8661 | if (!EvaluatePointer(E: Callee, Result&: CalleeLV, Info)) |
| 8662 | return false; |
| 8663 | |
| 8664 | if (!CalleeLV.getLValueOffset().isZero()) |
| 8665 | return Error(Callee); |
| 8666 | if (CalleeLV.isNullPointer()) { |
| 8667 | Info.FFDiag(E: Callee, DiagId: diag::note_constexpr_null_callee) |
| 8668 | << const_cast<Expr *>(Callee); |
| 8669 | return false; |
| 8670 | } |
| 8671 | FD = dyn_cast_or_null<FunctionDecl>( |
| 8672 | Val: CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>()); |
| 8673 | if (!FD) |
| 8674 | return Error(Callee); |
| 8675 | // Don't call function pointers which have been cast to some other type. |
| 8676 | // Per DR (no number yet), the caller and callee can differ in noexcept. |
| 8677 | if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec( |
| 8678 | T: CalleeType->getPointeeType(), U: FD->getType())) { |
| 8679 | return Error(E); |
| 8680 | } |
| 8681 | |
| 8682 | // For an (overloaded) assignment expression, evaluate the RHS before the |
| 8683 | // LHS. |
| 8684 | auto *OCE = dyn_cast<CXXOperatorCallExpr>(Val: E); |
| 8685 | if (OCE && OCE->isAssignmentOp()) { |
| 8686 | assert(Args.size() == 2 && "wrong number of arguments in assignment" ); |
| 8687 | Call = Info.CurrentCall->createCall(Callee: FD); |
| 8688 | bool HasThis = false; |
| 8689 | if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: FD)) |
| 8690 | HasThis = MD->isImplicitObjectMemberFunction(); |
| 8691 | if (!EvaluateArgs(Args: HasThis ? Args.slice(N: 1) : Args, Call, Info, Callee: FD, |
| 8692 | /*RightToLeft=*/true, ObjectArg: &ObjectArg)) |
| 8693 | return false; |
| 8694 | } |
| 8695 | |
| 8696 | // Overloaded operator calls to member functions are represented as normal |
| 8697 | // calls with '*this' as the first argument. |
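|      | // For example, a call to a member operator+= arrives here with arguments
|      | // (a, b); 'a' is evaluated as the object argument and then sliced off Args.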
| 8698 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD); |
| 8699 | if (MD && |
| 8700 | (MD->isImplicitObjectMemberFunction() || (OCE && MD->isStatic()))) { |
| 8701 | // FIXME: When selecting an implicit conversion for an overloaded |
| 8702 | // operator delete, we sometimes try to evaluate calls to conversion |
| 8703 | // operators without a 'this' parameter! |
| 8704 | if (Args.empty()) |
| 8705 | return Error(E); |
| 8706 | |
| 8707 | if (!EvaluateObjectArgument(Info, Object: Args[0], This&: ObjectArg)) |
| 8708 | return false; |
| 8709 | |
| 8710 | // If we are calling a static operator, the 'this' argument needs to be |
| 8711 | // ignored after being evaluated. |
| 8712 | if (MD->isInstance()) |
| 8713 | This = &ObjectArg; |
| 8714 | |
| 8715 | // If this is syntactically a simple assignment using a trivial |
| 8716 | // assignment operator, start the lifetimes of union members as needed, |
| 8717 | // per C++20 [class.union]5. |
| 8718 | if (Info.getLangOpts().CPlusPlus20 && OCE && |
| 8719 | OCE->getOperator() == OO_Equal && MD->isTrivial() && |
| 8720 | !MaybeHandleUnionActiveMemberChange(Info, LHSExpr: Args[0], LHS: ObjectArg)) |
| 8721 | return false; |
| 8722 | |
| 8723 | Args = Args.slice(N: 1); |
| 8724 | } else if (MD && MD->isLambdaStaticInvoker()) { |
| 8725 | // Map the static invoker for the lambda back to the call operator. |
| 8726 | // Conveniently, we don't have to slice out the 'this' argument (as is |
| 8727 | // being done for the non-static case), since a static member function |
| 8728 | // doesn't have an implicit argument passed in. |
| 8729 | const CXXRecordDecl *ClosureClass = MD->getParent(); |
| 8730 | assert( |
| 8731 | ClosureClass->captures().empty() && |
| 8732 | "Number of captures must be zero for conversion to function-ptr" ); |
| 8733 | |
| 8734 | const CXXMethodDecl *LambdaCallOp = |
| 8735 | ClosureClass->getLambdaCallOperator(); |
| 8736 | |
| 8737 | // Set 'FD', the function that will be called below, to the call |
| 8738 | // operator. If the closure object represents a generic lambda, find |
| 8739 | // the corresponding specialization of the call operator. |
| 8740 | |
| 8741 | if (ClosureClass->isGenericLambda()) { |
| 8742 | assert(MD->isFunctionTemplateSpecialization() && |
| 8743 | "A generic lambda's static-invoker function must be a " |
| 8744 | "template specialization" ); |
| 8745 | const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); |
| 8746 | FunctionTemplateDecl *CallOpTemplate = |
| 8747 | LambdaCallOp->getDescribedFunctionTemplate(); |
| 8748 | void *InsertPos = nullptr; |
| 8749 | FunctionDecl *CorrespondingCallOpSpecialization = |
| 8750 | CallOpTemplate->findSpecialization(Args: TAL->asArray(), InsertPos); |
| 8751 | assert(CorrespondingCallOpSpecialization && |
| 8752 | "We must always have a function call operator specialization " |
| 8753 | "that corresponds to our static invoker specialization" ); |
| 8754 | assert(isa<CXXMethodDecl>(CorrespondingCallOpSpecialization)); |
| 8755 | FD = CorrespondingCallOpSpecialization; |
| 8756 | } else |
| 8757 | FD = LambdaCallOp; |
| 8758 | } else if (FD->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) { |
| 8759 | if (FD->getDeclName().isAnyOperatorNew()) { |
| 8760 | LValue Ptr; |
| 8761 | if (!HandleOperatorNewCall(Info, E, Result&: Ptr)) |
| 8762 | return false; |
| 8763 | Ptr.moveInto(V&: Result); |
| 8764 | return CallScope.destroy(); |
| 8765 | } else { |
| 8766 | return HandleOperatorDeleteCall(Info, E) && CallScope.destroy(); |
| 8767 | } |
| 8768 | } |
| 8769 | } else |
| 8770 | return Error(E); |
| 8771 | |
| 8772 | // Evaluate the arguments now if we've not already done so. |
| 8773 | if (!Call) { |
| 8774 | Call = Info.CurrentCall->createCall(Callee: FD); |
| 8775 | if (!EvaluateArgs(Args, Call, Info, Callee: FD, /*RightToLeft*/ false, |
| 8776 | ObjectArg: &ObjectArg)) |
| 8777 | return false; |
| 8778 | } |
| 8779 | |
| 8780 | SmallVector<QualType, 4> CovariantAdjustmentPath; |
| 8781 | if (This) { |
| 8782 | auto *NamedMember = dyn_cast<CXXMethodDecl>(Val: FD); |
| 8783 | if (NamedMember && NamedMember->isVirtual() && !HasQualifier) { |
| 8784 | // Perform virtual dispatch, if necessary. |
| 8785 | FD = HandleVirtualDispatch(Info, E, This&: *This, Found: NamedMember, |
| 8786 | CovariantAdjustmentPath); |
| 8787 | if (!FD) |
| 8788 | return false; |
| 8789 | } else if (NamedMember && NamedMember->isImplicitObjectMemberFunction()) { |
| 8790 | // Check that the 'this' pointer points to an object of the right type. |
| 8791 | // FIXME: If this is an assignment operator call, we may need to change |
| 8792 | // the active union member before we check this. |
| 8793 | if (!checkNonVirtualMemberCallThisPointer(Info, E, This: *This, NamedMember)) |
| 8794 | return false; |
| 8795 | } |
| 8796 | } |
| 8797 | |
| 8798 | // Destructor calls are different enough that they have their own codepath. |
| 8799 | if (auto *DD = dyn_cast<CXXDestructorDecl>(Val: FD)) { |
| 8800 | assert(This && "no 'this' pointer for destructor call" ); |
| 8801 | return HandleDestruction(Info, E, This: *This, |
| 8802 | ThisType: Info.Ctx.getCanonicalTagType(TD: DD->getParent())) && |
| 8803 | CallScope.destroy(); |
| 8804 | } |
| 8805 | |
| 8806 | const FunctionDecl *Definition = nullptr; |
| 8807 | Stmt *Body = FD->getBody(Definition); |
| 8808 | SourceLocation Loc = E->getExprLoc(); |
| 8809 | |
| 8810 | // Treat the object argument as `this` when evaluating defaulted |
| 8811 | // special member functions.
| 8812 | if (FD->hasCXXExplicitFunctionObjectParameter()) |
| 8813 | This = &ObjectArg; |
| 8814 | |
| 8815 | if (!CheckConstexprFunction(Info, CallLoc: Loc, Declaration: FD, Definition, Body) || |
| 8816 | !HandleFunctionCall(CallLoc: Loc, Callee: Definition, ObjectArg: This, E, Args, Call, Body, Info, |
| 8817 | Result, ResultSlot)) |
| 8818 | return false; |
| 8819 | |
| 8820 | if (!CovariantAdjustmentPath.empty() && |
| 8821 | !HandleCovariantReturnAdjustment(Info, E, Result, |
| 8822 | Path: CovariantAdjustmentPath)) |
| 8823 | return false; |
| 8824 | |
| 8825 | return CallScope.destroy(); |
| 8826 | } |
| 8827 | |
| 8828 | bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { |
| 8829 | return StmtVisitorTy::Visit(E->getInitializer()); |
| 8830 | } |
| 8831 | bool VisitInitListExpr(const InitListExpr *E) { |
| 8832 | if (E->getNumInits() == 0) |
| 8833 | return DerivedZeroInitialization(E); |
| 8834 | if (E->getNumInits() == 1) |
| 8835 | return StmtVisitorTy::Visit(E->getInit(Init: 0)); |
| 8836 | return Error(E); |
| 8837 | } |
| 8838 | bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { |
| 8839 | return DerivedZeroInitialization(E); |
| 8840 | } |
| 8841 | bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { |
| 8842 | return DerivedZeroInitialization(E); |
| 8843 | } |
| 8844 | bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) { |
| 8845 | return DerivedZeroInitialization(E); |
| 8846 | } |
| 8847 | |
| 8848 | /// A member expression where the object is a prvalue is itself a prvalue. |
| 8849 | bool VisitMemberExpr(const MemberExpr *E) { |
| 8850 | assert(!Info.Ctx.getLangOpts().CPlusPlus11 && |
| 8851 | "missing temporary materialization conversion" ); |
| 8852 | assert(!E->isArrow() && "missing call to bound member function?" ); |
| 8853 | |
| 8854 | APValue Val; |
| 8855 | if (!Evaluate(Result&: Val, Info, E: E->getBase())) |
| 8856 | return false; |
| 8857 | |
| 8858 | QualType BaseTy = E->getBase()->getType(); |
| 8859 | |
| 8860 | const FieldDecl *FD = dyn_cast<FieldDecl>(Val: E->getMemberDecl()); |
| 8861 | if (!FD) return Error(E); |
| 8862 | assert(!FD->getType()->isReferenceType() && "prvalue reference?" ); |
| 8863 | assert(BaseTy->castAsCanonical<RecordType>()->getDecl() == |
| 8864 | FD->getParent()->getCanonicalDecl() && |
| 8865 | "record / field mismatch" ); |
| 8866 | |
| 8867 | // Note: there is no lvalue base here. But this case should only ever |
| 8868 | // happen in C or in C++98, where we cannot be evaluating a constexpr |
| 8869 | // constructor, which is the only case in which the base matters.
| 8870 | CompleteObject Obj(APValue::LValueBase(), &Val, BaseTy); |
| 8871 | SubobjectDesignator Designator(BaseTy); |
| 8872 | Designator.addDeclUnchecked(D: FD); |
| 8873 | |
| 8874 | APValue Result; |
| 8875 | return extractSubobject(Info, E, Obj, Sub: Designator, Result) && |
| 8876 | DerivedSuccess(V: Result, E); |
| 8877 | } |
| 8878 | |
| 8879 | bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E) { |
| 8880 | APValue Val; |
| 8881 | if (!Evaluate(Result&: Val, Info, E: E->getBase())) |
| 8882 | return false; |
| 8883 | |
| 8884 | if (Val.isVector()) { |
| 8885 | SmallVector<uint32_t, 4> Indices; |
| 8886 | E->getEncodedElementAccess(Elts&: Indices); |
| 8887 | if (Indices.size() == 1) { |
| 8888 | // Return scalar. |
| 8889 | return DerivedSuccess(V: Val.getVectorElt(I: Indices[0]), E); |
| 8890 | } else { |
| 8891 | // Construct new APValue vector. |
| 8892 | SmallVector<APValue, 4> Elts; |
| 8893 | for (unsigned I = 0; I < Indices.size(); ++I) { |
| 8894 | Elts.push_back(Elt: Val.getVectorElt(I: Indices[I])); |
| 8895 | } |
| 8896 | APValue VecResult(Elts.data(), Indices.size()); |
| 8897 | return DerivedSuccess(V: VecResult, E); |
| 8898 | } |
| 8899 | } |
| 8900 | |
| 8901 | return false; |
| 8902 | } |
| 8903 | |
| 8904 | bool VisitCastExpr(const CastExpr *E) { |
| 8905 | switch (E->getCastKind()) { |
| 8906 | default: |
| 8907 | break; |
| 8908 | |
| 8909 | case CK_AtomicToNonAtomic: { |
| 8910 | APValue AtomicVal; |
| 8911 | // This does not need to be done in place even for class/array types: |
| 8912 | // atomic-to-non-atomic conversion implies copying the object |
| 8913 | // representation. |
| 8914 | if (!Evaluate(Result&: AtomicVal, Info, E: E->getSubExpr())) |
| 8915 | return false; |
| 8916 | return DerivedSuccess(V: AtomicVal, E); |
| 8917 | } |
| 8918 | |
| 8919 | case CK_NoOp: |
| 8920 | case CK_UserDefinedConversion: |
| 8921 | return StmtVisitorTy::Visit(E->getSubExpr()); |
| 8922 | |
| 8923 | case CK_HLSLArrayRValue: { |
| 8924 | const Expr *SubExpr = E->getSubExpr(); |
| 8925 | if (!SubExpr->isGLValue()) { |
| 8926 | APValue Val; |
| 8927 | if (!Evaluate(Result&: Val, Info, E: SubExpr)) |
| 8928 | return false; |
| 8929 | return DerivedSuccess(V: Val, E); |
| 8930 | } |
| 8931 | |
| 8932 | LValue LVal; |
| 8933 | if (!EvaluateLValue(E: SubExpr, Result&: LVal, Info)) |
| 8934 | return false; |
| 8935 | APValue RVal; |
| 8936 | // Note, we use the subexpression's type in order to retain cv-qualifiers. |
| 8937 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: SubExpr->getType(), LVal, |
| 8938 | RVal)) |
| 8939 | return false; |
| 8940 | return DerivedSuccess(V: RVal, E); |
| 8941 | } |
| 8942 | case CK_LValueToRValue: { |
| 8943 | LValue LVal; |
| 8944 | if (!EvaluateLValue(E: E->getSubExpr(), Result&: LVal, Info)) |
| 8945 | return false; |
| 8946 | APValue RVal; |
| 8947 | // Note, we use the subexpression's type in order to retain cv-qualifiers. |
| 8948 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getSubExpr()->getType(), |
| 8949 | LVal, RVal)) |
| 8950 | return false; |
| 8951 | return DerivedSuccess(V: RVal, E); |
| 8952 | } |
| 8953 | case CK_LValueToRValueBitCast: { |
| 8954 | APValue DestValue, SourceValue; |
| 8955 | if (!Evaluate(Result&: SourceValue, Info, E: E->getSubExpr())) |
| 8956 | return false; |
| 8957 | if (!handleLValueToRValueBitCast(Info, DestValue, SourceValue, BCE: E)) |
| 8958 | return false; |
| 8959 | return DerivedSuccess(V: DestValue, E); |
| 8960 | } |
| 8961 | |
| 8962 | case CK_AddressSpaceConversion: { |
| 8963 | APValue Value; |
| 8964 | if (!Evaluate(Result&: Value, Info, E: E->getSubExpr())) |
| 8965 | return false; |
| 8966 | return DerivedSuccess(V: Value, E); |
| 8967 | } |
| 8968 | } |
| 8969 | |
| 8970 | return Error(E); |
| 8971 | } |
| 8972 | |
| 8973 | bool VisitUnaryPostInc(const UnaryOperator *UO) { |
| 8974 | return VisitUnaryPostIncDec(UO); |
| 8975 | } |
| 8976 | bool VisitUnaryPostDec(const UnaryOperator *UO) { |
| 8977 | return VisitUnaryPostIncDec(UO); |
| 8978 | } |
| 8979 | bool VisitUnaryPostIncDec(const UnaryOperator *UO) { |
| 8980 | if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) |
| 8981 | return Error(UO); |
| 8982 | |
| 8983 | LValue LVal; |
| 8984 | if (!EvaluateLValue(E: UO->getSubExpr(), Result&: LVal, Info)) |
| 8985 | return false; |
| 8986 | APValue RVal; |
| 8987 | if (!handleIncDec(this->Info, UO, LVal, UO->getSubExpr()->getType(), |
| 8988 | UO->isIncrementOp(), &RVal)) |
| 8989 | return false; |
| 8990 | return DerivedSuccess(V: RVal, E: UO); |
| 8991 | } |
| 8992 | |
| 8993 | bool VisitStmtExpr(const StmtExpr *E) { |
| 8994 | // We will have checked the full-expressions inside the statement expression |
| 8995 | // when they were completed, and don't need to check them again now. |
| 8996 | llvm::SaveAndRestore NotCheckingForUB(Info.CheckingForUndefinedBehavior, |
| 8997 | false); |
| 8998 | |
| 8999 | const CompoundStmt *CS = E->getSubStmt(); |
| 9000 | if (CS->body_empty()) |
| 9001 | return true; |
| 9002 | |
| 9003 | BlockScopeRAII Scope(Info); |
| 9004 | for (CompoundStmt::const_body_iterator BI = CS->body_begin(), |
| 9005 | BE = CS->body_end(); |
| 9006 | /**/; ++BI) { |
| 9007 | if (BI + 1 == BE) { |
| 9008 | const Expr *FinalExpr = dyn_cast<Expr>(Val: *BI); |
| 9009 | if (!FinalExpr) { |
| 9010 | Info.FFDiag(Loc: (*BI)->getBeginLoc(), |
| 9011 | DiagId: diag::note_constexpr_stmt_expr_unsupported); |
| 9012 | return false; |
| 9013 | } |
| 9014 | return this->Visit(FinalExpr) && Scope.destroy(); |
| 9015 | } |
| 9016 | |
| 9017 | APValue ReturnValue; |
| 9018 | StmtResult Result = { ReturnValue, nullptr };
| 9019 | EvalStmtResult ESR = EvaluateStmt(Result, Info, S: *BI); |
| 9020 | if (ESR != ESR_Succeeded) { |
| 9021 | // FIXME: If the statement-expression terminated due to 'return', |
| 9022 | // 'break', or 'continue', it would be nice to propagate that to |
| 9023 | // the outer statement evaluation rather than bailing out. |
| 9024 | if (ESR != ESR_Failed) |
| 9025 | Info.FFDiag(Loc: (*BI)->getBeginLoc(), |
| 9026 | DiagId: diag::note_constexpr_stmt_expr_unsupported); |
| 9027 | return false; |
| 9028 | } |
| 9029 | } |
| 9030 | |
| 9031 | llvm_unreachable("Return from function from the loop above." ); |
| 9032 | } |
| 9033 | |
| 9034 | bool VisitPackIndexingExpr(const PackIndexingExpr *E) { |
| 9035 | return StmtVisitorTy::Visit(E->getSelectedExpr()); |
| 9036 | } |
| 9037 | |
| 9038 | /// Visit a value which is evaluated, but whose value is ignored. |
| 9039 | void VisitIgnoredValue(const Expr *E) { |
| 9040 | EvaluateIgnoredValue(Info, E); |
| 9041 | } |
| 9042 | |
| 9043 | /// Potentially visit a MemberExpr's base expression. |
| 9044 | void VisitIgnoredBaseExpression(const Expr *E) { |
| 9045 | // While MSVC doesn't evaluate the base expression, it does diagnose the |
| 9046 | // presence of side-effecting behavior. |
| 9047 | if (Info.getLangOpts().MSVCCompat && !E->HasSideEffects(Ctx: Info.Ctx)) |
| 9048 | return; |
| 9049 | VisitIgnoredValue(E); |
| 9050 | } |
| 9051 | }; |
| 9052 | |
| 9053 | } // namespace |
| 9054 | |
| 9055 | //===----------------------------------------------------------------------===// |
| 9056 | // Common base class for lvalue and temporary evaluation. |
| 9057 | //===----------------------------------------------------------------------===// |
| 9058 | namespace { |
| 9059 | template<class Derived> |
| 9060 | class LValueExprEvaluatorBase |
| 9061 | : public ExprEvaluatorBase<Derived> { |
| 9062 | protected: |
| 9063 | LValue &Result; |
| 9064 | bool InvalidBaseOK; |
| 9065 | typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy; |
| 9066 | typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy; |
| 9067 | |
| 9068 | bool Success(APValue::LValueBase B) { |
| 9069 | Result.set(B); |
| 9070 | return true; |
| 9071 | } |
| 9072 | |
| 9073 | bool evaluatePointer(const Expr *E, LValue &Result) { |
| 9074 | return EvaluatePointer(E, Result, this->Info, InvalidBaseOK); |
| 9075 | } |
| 9076 | |
| 9077 | public: |
| 9078 | LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) |
| 9079 | : ExprEvaluatorBaseTy(Info), Result(Result), |
| 9080 | InvalidBaseOK(InvalidBaseOK) {} |
| 9081 | |
| 9082 | bool Success(const APValue &V, const Expr *E) { |
| 9083 | Result.setFrom(Ctx: this->Info.Ctx, V); |
| 9084 | return true; |
| 9085 | } |
| 9086 | |
| 9087 | bool VisitMemberExpr(const MemberExpr *E) { |
| 9088 | // Handle non-static data members. |
| 9089 | QualType BaseTy; |
| 9090 | bool EvalOK; |
| 9091 | if (E->isArrow()) { |
| 9092 | EvalOK = evaluatePointer(E: E->getBase(), Result); |
| 9093 | BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType(); |
| 9094 | } else if (E->getBase()->isPRValue()) { |
| 9095 | assert(E->getBase()->getType()->isRecordType()); |
| 9096 | EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info); |
| 9097 | BaseTy = E->getBase()->getType(); |
| 9098 | } else { |
| 9099 | EvalOK = this->Visit(E->getBase()); |
| 9100 | BaseTy = E->getBase()->getType(); |
| 9101 | } |
| 9102 | if (!EvalOK) { |
| 9103 | if (!InvalidBaseOK) |
| 9104 | return false; |
| 9105 | Result.setInvalid(B: E); |
| 9106 | return true; |
| 9107 | } |
| 9108 | |
| 9109 | const ValueDecl *MD = E->getMemberDecl(); |
| 9110 | if (const FieldDecl *FD = dyn_cast<FieldDecl>(Val: E->getMemberDecl())) { |
| 9111 | assert(BaseTy->castAsCanonical<RecordType>()->getDecl() == |
| 9112 | FD->getParent()->getCanonicalDecl() && |
| 9113 | "record / field mismatch" ); |
| 9114 | (void)BaseTy; |
| 9115 | if (!HandleLValueMember(this->Info, E, Result, FD)) |
| 9116 | return false; |
| 9117 | } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(Val: MD)) { |
| 9118 | if (!HandleLValueIndirectMember(this->Info, E, Result, IFD)) |
| 9119 | return false; |
| 9120 | } else |
| 9121 | return this->Error(E); |
| 9122 | |
| 9123 | if (MD->getType()->isReferenceType()) { |
| 9124 | APValue RefValue; |
| 9125 | if (!handleLValueToRValueConversion(this->Info, E, MD->getType(), Result, |
| 9126 | RefValue)) |
| 9127 | return false; |
| 9128 | return Success(RefValue, E); |
| 9129 | } |
| 9130 | return true; |
| 9131 | } |
| 9132 | |
| 9133 | bool VisitBinaryOperator(const BinaryOperator *E) { |
| 9134 | switch (E->getOpcode()) { |
| 9135 | default: |
| 9136 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 9137 | |
| 9138 | case BO_PtrMemD: |
| 9139 | case BO_PtrMemI: |
| 9140 | return HandleMemberPointerAccess(this->Info, E, Result); |
| 9141 | } |
| 9142 | } |
| 9143 | |
| 9144 | bool VisitCastExpr(const CastExpr *E) { |
| 9145 | switch (E->getCastKind()) { |
| 9146 | default: |
| 9147 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 9148 | |
| 9149 | case CK_DerivedToBase: |
| 9150 | case CK_UncheckedDerivedToBase: |
| 9151 | if (!this->Visit(E->getSubExpr())) |
| 9152 | return false; |
| 9153 | |
| 9154 | // Now figure out the necessary offset to add to the base LV to get from |
| 9155 | // the derived class to the base class. |
| 9156 | return HandleLValueBasePath(this->Info, E, E->getSubExpr()->getType(), |
| 9157 | Result); |
| 9158 | } |
| 9159 | } |
| 9160 | }; |
| 9161 | } |
| 9162 | |
| 9163 | //===----------------------------------------------------------------------===// |
| 9164 | // LValue Evaluation |
| 9165 | // |
| 9166 | // This is used for evaluating lvalues (in C and C++), xvalues (in C++11), |
| 9167 | // function designators (in C), decl references to void objects (in C), and |
| 9168 | // temporaries (if building with -Wno-address-of-temporary). |
| 9169 | // |
| 9170 | // LValue evaluation produces values comprising a base expression of one of the |
| 9171 | // following types: |
| 9172 | // - Declarations |
| 9173 | // * VarDecl |
| 9174 | // * FunctionDecl |
| 9175 | // - Literals |
| 9176 | // * CompoundLiteralExpr in C (and in global scope in C++) |
| 9177 | // * StringLiteral |
| 9178 | // * PredefinedExpr |
| 9179 | // * ObjCStringLiteralExpr |
| 9180 | // * ObjCEncodeExpr |
| 9181 | // * AddrLabelExpr |
| 9182 | // * BlockExpr |
| 9183 | // * CallExpr for a MakeStringConstant builtin |
| 9184 | // - typeid(T) expressions, as TypeInfoLValues |
| 9185 | // - Locals and temporaries |
| 9186 | // * MaterializeTemporaryExpr |
| 9187 | // * Any Expr, with a CallIndex indicating the function in which the temporary |
| 9188 | // was evaluated, for cases where the MaterializeTemporaryExpr is missing |
| 9189 | // from the AST (FIXME). |
| 9190 | // * A MaterializeTemporaryExpr that has static storage duration, with no |
| 9191 | // CallIndex, for a lifetime-extended temporary. |
| 9192 | // * The ConstantExpr that is currently being evaluated during evaluation of an |
| 9193 | // immediate invocation. |
| 9194 | // plus an offset in bytes. |
| 9195 | //===----------------------------------------------------------------------===// |
| 9196 | namespace { |
| 9197 | class LValueExprEvaluator |
| 9198 | : public LValueExprEvaluatorBase<LValueExprEvaluator> { |
| 9199 | public: |
| 9200 | LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) : |
| 9201 | LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {} |
| 9202 | |
| 9203 | bool VisitVarDecl(const Expr *E, const VarDecl *VD); |
| 9204 | bool VisitUnaryPreIncDec(const UnaryOperator *UO); |
| 9205 | |
| 9206 | bool VisitCallExpr(const CallExpr *E); |
| 9207 | bool VisitDeclRefExpr(const DeclRefExpr *E); |
| 9208 | bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(B: E); } |
| 9209 | bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); |
| 9210 | bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E); |
| 9211 | bool VisitMemberExpr(const MemberExpr *E); |
| 9212 | bool VisitStringLiteral(const StringLiteral *E) { |
| 9213 | return Success( |
| 9214 | B: APValue::LValueBase(E, 0, Info.Ctx.getNextStringLiteralVersion())); |
| 9215 | } |
| 9216 | bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(B: E); } |
| 9217 | bool VisitCXXTypeidExpr(const CXXTypeidExpr *E); |
| 9218 | bool VisitCXXUuidofExpr(const CXXUuidofExpr *E); |
| 9219 | bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E); |
| 9220 | bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E); |
| 9221 | bool VisitUnaryDeref(const UnaryOperator *E); |
| 9222 | bool VisitUnaryReal(const UnaryOperator *E); |
| 9223 | bool VisitUnaryImag(const UnaryOperator *E); |
| 9224 | bool VisitUnaryPreInc(const UnaryOperator *UO) { |
| 9225 | return VisitUnaryPreIncDec(UO); |
| 9226 | } |
| 9227 | bool VisitUnaryPreDec(const UnaryOperator *UO) { |
| 9228 | return VisitUnaryPreIncDec(UO); |
| 9229 | } |
| 9230 | bool VisitBinAssign(const BinaryOperator *BO); |
| 9231 | bool VisitCompoundAssignOperator(const CompoundAssignOperator *CAO); |
| 9232 | |
| 9233 | bool VisitCastExpr(const CastExpr *E) { |
| 9234 | switch (E->getCastKind()) { |
| 9235 | default: |
| 9236 | return LValueExprEvaluatorBaseTy::VisitCastExpr(E); |
| 9237 | |
| 9238 | case CK_LValueBitCast: |
| 9239 | this->CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 9240 | << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret |
| 9241 | << Info.Ctx.getLangOpts().CPlusPlus; |
| 9242 | if (!Visit(S: E->getSubExpr())) |
| 9243 | return false; |
| 9244 | Result.Designator.setInvalid(); |
| 9245 | return true; |
| 9246 | |
| 9247 | case CK_BaseToDerived: |
| 9248 | if (!Visit(S: E->getSubExpr())) |
| 9249 | return false; |
| 9250 | return HandleBaseToDerivedCast(Info, E, Result); |
| 9251 | |
| 9252 | case CK_Dynamic: |
| 9253 | if (!Visit(S: E->getSubExpr())) |
| 9254 | return false; |
| 9255 | return HandleDynamicCast(Info, E: cast<ExplicitCastExpr>(Val: E), Ptr&: Result); |
| 9256 | } |
| 9257 | } |
| 9258 | }; |
| 9259 | } // end anonymous namespace |
| 9260 | |
| 9261 | /// Get an lvalue to a field of a lambda's closure type. |
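| | /// For example, given
| | ///   auto f = [x](int y) { return x + y; };
| | /// a use of 'x' inside the call operator refers to the closure object's field
| | /// for the capture of 'x'; this helper forms an lvalue to that field (and,
| | /// when the field has reference type, to the object the field refers to).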
| 9262 | static bool HandleLambdaCapture(EvalInfo &Info, const Expr *E, LValue &Result, |
| 9263 | const CXXMethodDecl *MD, const FieldDecl *FD, |
| 9264 | bool LValueToRValueConversion) { |
| 9265 | // Static lambda function call operators can't have captures. We already |
| 9266 | // diagnosed this, so bail out here. |
| 9267 | if (MD->isStatic()) { |
| 9268 | assert(Info.CurrentCall->This == nullptr && |
| 9269 | "This should not be set for a static call operator" ); |
| 9270 | return false; |
| 9271 | } |
| 9272 | |
| 9273 | // Start with 'Result' referring to the complete closure object... |
| 9274 | if (MD->isExplicitObjectMemberFunction()) { |
| 9275 | // Self may be passed by reference or by value. |
| 9276 | const ParmVarDecl *Self = MD->getParamDecl(i: 0); |
| 9277 | if (Self->getType()->isReferenceType()) { |
| 9278 | APValue *RefValue = Info.getParamSlot(Call: Info.CurrentCall->Arguments, PVD: Self); |
| 9279 | if (!RefValue->allowConstexprUnknown() || RefValue->hasValue()) |
| 9280 | Result.setFrom(Ctx: Info.Ctx, V: *RefValue); |
| 9281 | } else { |
| 9282 | const ParmVarDecl *VD = Info.CurrentCall->Arguments.getOrigParam(PVD: Self); |
| 9283 | CallStackFrame *Frame = |
| 9284 | Info.getCallFrameAndDepth(CallIndex: Info.CurrentCall->Arguments.CallIndex) |
| 9285 | .first; |
| 9286 | unsigned Version = Info.CurrentCall->Arguments.Version; |
| 9287 | Result.set(B: {VD, Frame->Index, Version}); |
| 9288 | } |
| 9289 | } else |
| 9290 | Result = *Info.CurrentCall->This; |
| 9291 | |
| 9292 | // ... then update it to refer to the field of the closure object |
| 9293 | // that represents the capture. |
| 9294 | if (!HandleLValueMember(Info, E, LVal&: Result, FD)) |
| 9295 | return false; |
| 9296 | |
| 9297 | // And if the field is of reference type (or if we captured '*this' by |
| 9298 | // reference), update 'Result' to refer to what |
| 9299 | // the field refers to. |
| 9300 | if (LValueToRValueConversion) { |
| 9301 | APValue RVal; |
| 9302 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: FD->getType(), LVal: Result, RVal)) |
| 9303 | return false; |
| 9304 | Result.setFrom(Ctx: Info.Ctx, V: RVal); |
| 9305 | } |
| 9306 | return true; |
| 9307 | } |
| 9308 | |
| 9309 | /// Evaluate an expression as an lvalue. This can be legitimately called on |
| 9310 | /// expressions which are not glvalues, in three cases: |
| 9311 | /// * function designators in C,
| 9312 | /// * "extern void" objects, and
| 9313 | /// * @selector() expressions in Objective-C.
| 9314 | static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info, |
| 9315 | bool InvalidBaseOK) { |
| 9316 | assert(!E->isValueDependent()); |
| 9317 | assert(E->isGLValue() || E->getType()->isFunctionType() || |
| 9318 | E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E->IgnoreParens())); |
| 9319 | return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(S: E); |
| 9320 | } |
| 9321 | |
| 9322 | bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) { |
| 9323 | const ValueDecl *D = E->getDecl(); |
| 9324 | |
| 9325 | // If we are within a lambda's call operator, check whether the 'VD' referred |
| 9326 | // to within 'E' actually represents a lambda-capture that maps to a |
| 9327 | // data-member/field within the closure object, and if so, evaluate to the |
| 9328 | // field or what the field refers to. |
| 9329 | if (Info.CurrentCall && isLambdaCallOperator(DC: Info.CurrentCall->Callee) && |
| 9330 | E->refersToEnclosingVariableOrCapture()) { |
| 9331 | // We don't always have a complete capture-map when checking or inferring
| 9332 | // whether the function call operator meets the requirements of a constexpr
| 9333 | // function, but we don't need to evaluate the captures to determine
| 9334 | // constexprness ([dcl.constexpr] in C++17).
| 9335 | if (Info.checkingPotentialConstantExpression()) |
| 9336 | return false; |
| 9337 | |
| 9338 | if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(Val: D)) { |
| 9339 | const auto *MD = cast<CXXMethodDecl>(Val: Info.CurrentCall->Callee); |
| 9340 | return HandleLambdaCapture(Info, E, Result, MD, FD, |
| 9341 | LValueToRValueConversion: FD->getType()->isReferenceType()); |
| 9342 | } |
| 9343 | } |
| 9344 | |
| 9345 | if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl, |
| 9346 | UnnamedGlobalConstantDecl>(Val: D)) |
| 9347 | return Success(B: cast<ValueDecl>(Val: D)); |
| 9348 | if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) |
| 9349 | return VisitVarDecl(E, VD); |
| 9350 | if (const BindingDecl *BD = dyn_cast<BindingDecl>(Val: D)) |
| 9351 | return Visit(S: BD->getBinding()); |
| 9352 | return Error(E); |
| 9353 | } |
| 9354 | |
| 9355 | bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) { |
| 9356 | CallStackFrame *Frame = nullptr; |
| 9357 | unsigned Version = 0; |
| 9358 | if (VD->hasLocalStorage()) { |
| 9359 | // Only if a local variable was declared in the function currently being |
| 9360 | // evaluated, do we expect to be able to find its value in the current |
| 9361 | // frame. (Otherwise it was likely declared in an enclosing context and |
| 9362 | // could either have a valid evaluatable value (for e.g. a constexpr |
| 9363 | // variable) or be ill-formed (and trigger an appropriate evaluation |
| 9364 | // diagnostic)). |
| 9365 | CallStackFrame *CurrFrame = Info.CurrentCall; |
| 9366 | if (CurrFrame->Callee && CurrFrame->Callee->Equals(DC: VD->getDeclContext())) { |
| 9367 | // Function parameters are stored in some caller's frame. (Usually the |
| 9368 | // immediate caller, but for an inherited constructor they may be more |
| 9369 | // distant.) |
| 9370 | if (auto *PVD = dyn_cast<ParmVarDecl>(Val: VD)) { |
| 9371 | if (CurrFrame->Arguments) { |
| 9372 | VD = CurrFrame->Arguments.getOrigParam(PVD); |
| 9373 | Frame = |
| 9374 | Info.getCallFrameAndDepth(CallIndex: CurrFrame->Arguments.CallIndex).first; |
| 9375 | Version = CurrFrame->Arguments.Version; |
| 9376 | } |
| 9377 | } else { |
| 9378 | Frame = CurrFrame; |
| 9379 | Version = CurrFrame->getCurrentTemporaryVersion(Key: VD); |
| 9380 | } |
| 9381 | } |
| 9382 | } |
| 9383 | |
| 9384 | if (!VD->getType()->isReferenceType()) { |
| 9385 | if (Frame) { |
| 9386 | Result.set(B: {VD, Frame->Index, Version}); |
| 9387 | return true; |
| 9388 | } |
| 9389 | return Success(B: VD); |
| 9390 | } |
| 9391 | |
| 9392 | if (!Info.getLangOpts().CPlusPlus11) { |
| 9393 | Info.CCEDiag(E, DiagId: diag::note_constexpr_ltor_non_integral, ExtraNotes: 1) |
| 9394 | << VD << VD->getType(); |
| 9395 | Info.Note(Loc: VD->getLocation(), DiagId: diag::note_declared_at); |
| 9396 | } |
| 9397 | |
| 9398 | APValue *V; |
| 9399 | if (!evaluateVarDeclInit(Info, E, VD, Frame, Version, Result&: V)) |
| 9400 | return false; |
| 9401 | |
| 9402 | if (!V) { |
| 9403 | Result.set(B: VD); |
| 9404 | Result.AllowConstexprUnknown = true; |
| 9405 | return true; |
| 9406 | } |
| 9407 | |
| 9408 | return Success(V: *V, E); |
| 9409 | } |
| 9410 | |
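| | // Calls to std::move, std::forward, std::forward_like, std::as_const and
| | // std::move_if_noexcept are recognized below; when the library declares them
| | // constexpr they are folded by simply visiting the argument, since e.g.
| | // std::move(x) designates the same object as 'x':
| | //   constexpr int x = 42;
| | //   static_assert(std::move(x) == 42, "");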
| 9411 | bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) { |
| 9412 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 9413 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 9414 | |
| 9415 | switch (E->getBuiltinCallee()) { |
| 9416 | default: |
| 9417 | return false; |
| 9418 | case Builtin::BIas_const: |
| 9419 | case Builtin::BIforward: |
| 9420 | case Builtin::BIforward_like: |
| 9421 | case Builtin::BImove: |
| 9422 | case Builtin::BImove_if_noexcept: |
| 9423 | if (cast<FunctionDecl>(Val: E->getCalleeDecl())->isConstexpr()) |
| 9424 | return Visit(S: E->getArg(Arg: 0)); |
| 9425 | break; |
| 9426 | } |
| 9427 | |
| 9428 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 9429 | } |
| 9430 | |
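| | // A temporary bound to a reference with static storage duration outlives this
| | // evaluation, so its value is stored on the MaterializeTemporaryExpr node
| | // itself; for example
| | //   constexpr const int &r = 42;
| | // lifetime-extends a temporary int holding 42 for the duration of the program.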
| 9431 | bool LValueExprEvaluator::VisitMaterializeTemporaryExpr( |
| 9432 | const MaterializeTemporaryExpr *E) { |
| 9433 | // Walk through the expression to find the materialized temporary itself. |
| 9434 | SmallVector<const Expr *, 2> CommaLHSs; |
| 9435 | SmallVector<SubobjectAdjustment, 2> Adjustments; |
| 9436 | const Expr *Inner = |
| 9437 | E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHS&: CommaLHSs, Adjustments); |
| 9438 | |
| 9439 | // If we passed any comma operators, evaluate their LHSs. |
| 9440 | for (const Expr *E : CommaLHSs) |
| 9441 | if (!EvaluateIgnoredValue(Info, E)) |
| 9442 | return false; |
| 9443 | |
| 9444 | // A materialized temporary with static storage duration can appear within the |
| 9445 | // result of a constant expression evaluation, so we need to preserve its |
| 9446 | // value for use outside this evaluation. |
| 9447 | APValue *Value; |
| 9448 | if (E->getStorageDuration() == SD_Static) { |
| 9449 | if (Info.EvalMode == EvaluationMode::ConstantFold) |
| 9450 | return false; |
| 9451 | // FIXME: What about SD_Thread? |
| 9452 | Value = E->getOrCreateValue(MayCreate: true); |
| 9453 | *Value = APValue(); |
| 9454 | Result.set(B: E); |
| 9455 | } else { |
| 9456 | Value = &Info.CurrentCall->createTemporary( |
| 9457 | Key: E, T: Inner->getType(), |
| 9458 | Scope: E->getStorageDuration() == SD_FullExpression ? ScopeKind::FullExpression |
| 9459 | : ScopeKind::Block, |
| 9460 | LV&: Result); |
| 9461 | } |
| 9462 | |
| 9463 | QualType Type = Inner->getType(); |
| 9464 | |
| 9465 | // Materialize the temporary itself. |
| 9466 | if (!EvaluateInPlace(Result&: *Value, Info, This: Result, E: Inner)) { |
| 9467 | *Value = APValue(); |
| 9468 | return false; |
| 9469 | } |
| 9470 | |
| 9471 | // Adjust our lvalue to refer to the desired subobject. |
| 9472 | for (unsigned I = Adjustments.size(); I != 0; /**/) { |
| 9473 | --I; |
| 9474 | switch (Adjustments[I].Kind) { |
| 9475 | case SubobjectAdjustment::DerivedToBaseAdjustment: |
| 9476 | if (!HandleLValueBasePath(Info, E: Adjustments[I].DerivedToBase.BasePath, |
| 9477 | Type, Result)) |
| 9478 | return false; |
| 9479 | Type = Adjustments[I].DerivedToBase.BasePath->getType(); |
| 9480 | break; |
| 9481 | |
| 9482 | case SubobjectAdjustment::FieldAdjustment: |
| 9483 | if (!HandleLValueMember(Info, E, LVal&: Result, FD: Adjustments[I].Field)) |
| 9484 | return false; |
| 9485 | Type = Adjustments[I].Field->getType(); |
| 9486 | break; |
| 9487 | |
| 9488 | case SubobjectAdjustment::MemberPointerAdjustment: |
| 9489 | if (!HandleMemberPointerAccess(Info&: this->Info, LVType: Type, LV&: Result, |
| 9490 | RHS: Adjustments[I].Ptr.RHS)) |
| 9491 | return false; |
| 9492 | Type = Adjustments[I].Ptr.MPT->getPointeeType(); |
| 9493 | break; |
| 9494 | } |
| 9495 | } |
| 9496 | |
| 9497 | return true; |
| 9498 | } |
| 9499 | |
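| | // In C, a compound literal is an lvalue, and at file scope it has static
| | // storage duration, so its address is a valid constant initializer:
| | //   static const int *p = (const int[]){1, 2, 3};   // C, file scope
| | // The static case stores the evaluated value in the ASTContext; the
| | // block-scope case (C only) uses a temporary in the current call frame.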
| 9500 | bool |
| 9501 | LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { |
| 9502 | assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) && |
| 9503 | "lvalue compound literal in c++?" ); |
| 9504 | APValue *Lit; |
| 9505 | // If the CompoundLiteral has static storage, its value can be used outside
| 9506 | // this expression, so evaluate it once and store it in the ASTContext.
| 9507 | if (E->hasStaticStorage()) { |
| 9508 | Lit = &E->getOrCreateStaticValue(Ctx&: Info.Ctx); |
| 9509 | Result.set(B: E); |
| 9510 | // Reset any previously evaluated state, otherwise evaluation below might |
| 9511 | // fail. |
| 9512 | // FIXME: Should we just re-use the previously evaluated value instead? |
| 9513 | *Lit = APValue(); |
| 9514 | } else { |
| 9515 | assert(!Info.getLangOpts().CPlusPlus); |
| 9516 | Lit = &Info.CurrentCall->createTemporary(Key: E, T: E->getInitializer()->getType(), |
| 9517 | Scope: ScopeKind::Block, LV&: Result); |
| 9518 | } |
| 9519 | // FIXME: Evaluating in place isn't always right. We should figure out how to
| 9520 | // use an appropriate evaluation context here; see
| 9521 | // clang/test/AST/static-compound-literals-reeval.cpp for a failure.
| 9522 | if (!EvaluateInPlace(Result&: *Lit, Info, This: Result, E: E->getInitializer())) { |
| 9523 | *Lit = APValue(); |
| 9524 | return false; |
| 9525 | } |
| 9526 | return true; |
| 9527 | } |
| 9528 | |
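| | // typeid yields an lvalue designating a std::type_info object:
| | //   typeid(int)        // always constant: the type_info for 'int'
| | //   typeid(*basePtr)   // polymorphic glvalue: needs the dynamic type, which
| | //                      // is only permitted in constant expressions in C++20
| | // where 'basePtr' stands for any pointer to a polymorphic class type.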
| 9529 | bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) { |
| 9530 | TypeInfoLValue TypeInfo; |
| 9531 | |
| 9532 | if (!E->isPotentiallyEvaluated()) { |
| 9533 | if (E->isTypeOperand()) |
| 9534 | TypeInfo = TypeInfoLValue(E->getTypeOperand(Context: Info.Ctx).getTypePtr()); |
| 9535 | else |
| 9536 | TypeInfo = TypeInfoLValue(E->getExprOperand()->getType().getTypePtr()); |
| 9537 | } else { |
| 9538 | if (!Info.Ctx.getLangOpts().CPlusPlus20) { |
| 9539 | Info.CCEDiag(E, DiagId: diag::note_constexpr_typeid_polymorphic) |
| 9540 | << E->getExprOperand()->getType() |
| 9541 | << E->getExprOperand()->getSourceRange(); |
| 9542 | } |
| 9543 | |
| 9544 | if (!Visit(S: E->getExprOperand())) |
| 9545 | return false; |
| 9546 | |
| 9547 | std::optional<DynamicType> DynType = |
| 9548 | ComputeDynamicType(Info, E, This&: Result, AK: AK_TypeId); |
| 9549 | if (!DynType) |
| 9550 | return false; |
| 9551 | |
| 9552 | TypeInfo = TypeInfoLValue( |
| 9553 | Info.Ctx.getCanonicalTagType(TD: DynType->Type).getTypePtr()); |
| 9554 | } |
| 9555 | |
| 9556 | return Success(B: APValue::LValueBase::getTypeInfo(LV: TypeInfo, TypeInfo: E->getType())); |
| 9557 | } |
| 9558 | |
| 9559 | bool LValueExprEvaluator::VisitCXXUuidofExpr(const CXXUuidofExpr *E) { |
| 9560 | return Success(B: E->getGuidDecl()); |
| 9561 | } |
| 9562 | |
| 9563 | bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) { |
| 9564 | // Handle static data members. |
| 9565 | if (const VarDecl *VD = dyn_cast<VarDecl>(Val: E->getMemberDecl())) { |
| 9566 | VisitIgnoredBaseExpression(E: E->getBase()); |
| 9567 | return VisitVarDecl(E, VD); |
| 9568 | } |
| 9569 | |
| 9570 | // Handle static member functions. |
| 9571 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: E->getMemberDecl())) { |
| 9572 | if (MD->isStatic()) { |
| 9573 | VisitIgnoredBaseExpression(E: E->getBase()); |
| 9574 | return Success(B: MD); |
| 9575 | } |
| 9576 | } |
| 9577 | |
| 9578 | // Handle non-static data members. |
| 9579 | return LValueExprEvaluatorBaseTy::VisitMemberExpr(E); |
| 9580 | } |
| 9581 | |
| 9582 | bool LValueExprEvaluator::VisitExtVectorElementExpr( |
| 9583 | const ExtVectorElementExpr *E) { |
| 9584 | bool Success = true; |
| 9585 | |
| 9586 | APValue Val; |
| 9587 | if (!Evaluate(Result&: Val, Info, E: E->getBase())) { |
| 9588 | if (!Info.noteFailure()) |
| 9589 | return false; |
| 9590 | Success = false; |
| 9591 | } |
| 9592 | |
| 9593 | SmallVector<uint32_t, 4> Indices; |
| 9594 | E->getEncodedElementAccess(Elts&: Indices); |
| 9595 | // FIXME: support accessing more than one element |
| 9596 | if (Indices.size() > 1) |
| 9597 | return false; |
| 9598 | |
| 9599 | if (Success) { |
| 9600 | Result.setFrom(Ctx: Info.Ctx, V: Val); |
| 9601 | QualType BaseType = E->getBase()->getType(); |
| 9602 | if (E->isArrow()) |
| 9603 | BaseType = BaseType->getPointeeType(); |
| 9604 | const auto *VT = BaseType->castAs<VectorType>(); |
| 9605 | HandleLValueVectorElement(Info, E, LVal&: Result, EltTy: VT->getElementType(), |
| 9606 | Size: VT->getNumElements(), Idx: Indices[0]); |
| 9607 | } |
| 9608 | |
| 9609 | return Success; |
| 9610 | } |
| 9611 | |
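| | // Subscripting folds to a pointer base plus an element offset; for example
| | //   constexpr int a[3] = {10, 20, 30};
| | //   constexpr const int *p = &a[1];   // base 'a', offset of one element
| | // Since C++17 the left-hand operand must be evaluated before the right-hand
| | // one even when the index is written on the left, as in '1[a]'.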
| 9612 | bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { |
| 9613 | if (E->getBase()->getType()->isSveVLSBuiltinType()) |
| 9614 | return Error(E); |
| 9615 | |
| 9616 | APSInt Index; |
| 9617 | bool Success = true; |
| 9618 | |
| 9619 | if (const auto *VT = E->getBase()->getType()->getAs<VectorType>()) { |
| 9620 | APValue Val; |
| 9621 | if (!Evaluate(Result&: Val, Info, E: E->getBase())) { |
| 9622 | if (!Info.noteFailure()) |
| 9623 | return false; |
| 9624 | Success = false; |
| 9625 | } |
| 9626 | |
| 9627 | if (!EvaluateInteger(E: E->getIdx(), Result&: Index, Info)) { |
| 9628 | if (!Info.noteFailure()) |
| 9629 | return false; |
| 9630 | Success = false; |
| 9631 | } |
| 9632 | |
| 9633 | if (Success) { |
| 9634 | Result.setFrom(Ctx: Info.Ctx, V: Val); |
| 9635 | HandleLValueVectorElement(Info, E, LVal&: Result, EltTy: VT->getElementType(), |
| 9636 | Size: VT->getNumElements(), Idx: Index.getExtValue()); |
| 9637 | } |
| 9638 | |
| 9639 | return Success; |
| 9640 | } |
| 9641 | |
| 9642 | // C++17's rules require us to evaluate the LHS first, regardless of which |
| 9643 | // side is the base. |
| 9644 | for (const Expr *SubExpr : {E->getLHS(), E->getRHS()}) { |
| 9645 | if (SubExpr == E->getBase() ? !evaluatePointer(E: SubExpr, Result) |
| 9646 | : !EvaluateInteger(E: SubExpr, Result&: Index, Info)) { |
| 9647 | if (!Info.noteFailure()) |
| 9648 | return false; |
| 9649 | Success = false; |
| 9650 | } |
| 9651 | } |
| 9652 | |
| 9653 | return Success && |
| 9654 | HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: E->getType(), Adjustment: Index); |
| 9655 | } |
| 9656 | |
| 9657 | bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) { |
| 9658 | bool Success = evaluatePointer(E: E->getSubExpr(), Result); |
| 9659 | // [C++26][expr.unary.op] |
| 9660 | // If the operand points to an object or function, the result |
| 9661 | // denotes that object or function; otherwise, the behavior is undefined. |
| 9662 | // Because &(*(type*)0) is a common pattern, we do not fail the evaluation |
| 9663 | // immediately. |
| 9664 | if (!Success || !E->getType().getNonReferenceType()->isObjectType()) |
| 9665 | return Success; |
| 9666 | return bool(findCompleteObject(Info, E, AK: AK_Dereference, LVal: Result, |
| 9667 | LValType: E->getType())) || |
| 9668 | Info.noteUndefinedBehavior(); |
| 9669 | } |
| 9670 | |
| 9671 | bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { |
| 9672 | if (!Visit(S: E->getSubExpr())) |
| 9673 | return false; |
| 9674 | // __real is a no-op on scalar lvalues. |
| 9675 | if (E->getSubExpr()->getType()->isAnyComplexType()) |
| 9676 | HandleLValueComplexElement(Info, E, LVal&: Result, EltTy: E->getType(), Imag: false); |
| 9677 | return true; |
| 9678 | } |
| 9679 | |
| 9680 | bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { |
| 9681 | assert(E->getSubExpr()->getType()->isAnyComplexType() && |
| 9682 | "lvalue __imag__ on scalar?" ); |
| 9683 | if (!Visit(S: E->getSubExpr())) |
| 9684 | return false; |
| 9685 | HandleLValueComplexElement(Info, E, LVal&: Result, EltTy: E->getType(), Imag: true); |
| 9686 | return true; |
| 9687 | } |
| 9688 | |
| 9689 | bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) { |
| 9690 | if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) |
| 9691 | return Error(E: UO); |
| 9692 | |
| 9693 | if (!this->Visit(S: UO->getSubExpr())) |
| 9694 | return false; |
| 9695 | |
| 9696 | return handleIncDec( |
| 9697 | Info&: this->Info, E: UO, LVal: Result, LValType: UO->getSubExpr()->getType(), |
| 9698 | IsIncrement: UO->isIncrementOp(), Old: nullptr); |
| 9699 | } |
| 9700 | |
| 9701 | bool LValueExprEvaluator::VisitCompoundAssignOperator( |
| 9702 | const CompoundAssignOperator *CAO) { |
| 9703 | if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) |
| 9704 | return Error(E: CAO); |
| 9705 | |
| 9706 | bool Success = true; |
| 9707 | |
| 9708 | // C++17 onwards requires that we evaluate the RHS first.
| 9709 | APValue RHS; |
| 9710 | if (!Evaluate(Result&: RHS, Info&: this->Info, E: CAO->getRHS())) { |
| 9711 | if (!Info.noteFailure()) |
| 9712 | return false; |
| 9713 | Success = false; |
| 9714 | } |
| 9715 | |
| 9716 | // The overall lvalue result is the result of evaluating the LHS. |
| 9717 | if (!this->Visit(S: CAO->getLHS()) || !Success) |
| 9718 | return false; |
| 9719 | |
| 9720 | return handleCompoundAssignment( |
| 9721 | Info&: this->Info, E: CAO, |
| 9722 | LVal: Result, LValType: CAO->getLHS()->getType(), PromotedLValType: CAO->getComputationLHSType(), |
| 9723 | Opcode: CAO->getOpForCompoundAssignment(Opc: CAO->getOpcode()), RVal: RHS); |
| 9724 | } |
| 9725 | |
| 9726 | bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) { |
| 9727 | if (!Info.getLangOpts().CPlusPlus14 && !Info.keepEvaluatingAfterFailure()) |
| 9728 | return Error(E); |
| 9729 | |
| 9730 | bool Success = true; |
| 9731 | |
| 9732 | // C++17 onwards requires that we evaluate the RHS first.
| 9733 | APValue NewVal; |
| 9734 | if (!Evaluate(Result&: NewVal, Info&: this->Info, E: E->getRHS())) { |
| 9735 | if (!Info.noteFailure()) |
| 9736 | return false; |
| 9737 | Success = false; |
| 9738 | } |
| 9739 | |
| 9740 | if (!this->Visit(S: E->getLHS()) || !Success) |
| 9741 | return false; |
| 9742 | |
| 9743 | if (Info.getLangOpts().CPlusPlus20 && |
| 9744 | !MaybeHandleUnionActiveMemberChange(Info, LHSExpr: E->getLHS(), LHS: Result)) |
| 9745 | return false; |
| 9746 | |
| 9747 | return handleAssignment(Info&: this->Info, E, LVal: Result, LValType: E->getLHS()->getType(), |
| 9748 | Val&: NewVal); |
| 9749 | } |
| 9750 | |
| 9751 | //===----------------------------------------------------------------------===// |
| 9752 | // Pointer Evaluation |
| 9753 | //===----------------------------------------------------------------------===// |
| 9754 | |
| 9755 | /// Convenience function. LVal's base must be a call to an alloc_size |
| 9756 | /// function. |
| 9757 | static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx, |
| 9758 | const LValue &LVal, |
| 9759 | llvm::APInt &Result) { |
| 9760 | assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) && |
| 9761 | "Can't get the size of a non alloc_size function" ); |
| 9762 | const auto *Base = LVal.getLValueBase().get<const Expr *>(); |
| 9763 | const CallExpr *CE = tryUnwrapAllocSizeCall(E: Base); |
| 9764 | std::optional<llvm::APInt> Size = |
| 9765 | CE->evaluateBytesReturnedByAllocSizeCall(Ctx); |
| 9766 | if (!Size) |
| 9767 | return false; |
| 9768 | |
| 9769 | Result = std::move(*Size); |
| 9770 | return true; |
| 9771 | } |
| 9772 | |
| 9773 | /// Attempts to evaluate the given LValueBase as the result of a call to |
| 9774 | /// a function with the alloc_size attribute. If it was possible to do so, this |
| 9775 | /// function will return true, make Result's Base point to said function call, |
| 9776 | /// and mark Result's Base as invalid. |
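| | /// For example, assuming a hypothetical declaration
| | ///   void *my_alloc(unsigned long) __attribute__((alloc_size(1)));
| | /// a local 'void *const p = my_alloc(16);' lets __builtin_object_size(p, 0)
| | /// fold to 16 even though the call itself cannot be evaluated.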
| 9777 | static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base, |
| 9778 | LValue &Result) { |
| 9779 | if (Base.isNull()) |
| 9780 | return false; |
| 9781 | |
| 9782 | // Because we do no form of static analysis, we only support const variables. |
| 9783 | // |
| 9784 | // Additionally, we can't support parameters, nor can we support static |
| 9785 | // variables (in the latter case, use-before-assign isn't UB; in the former, |
| 9786 | // we have no clue what they'll be assigned to). |
| 9787 | const auto *VD = |
| 9788 | dyn_cast_or_null<VarDecl>(Val: Base.dyn_cast<const ValueDecl *>()); |
| 9789 | if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified()) |
| 9790 | return false; |
| 9791 | |
| 9792 | const Expr *Init = VD->getAnyInitializer(); |
| 9793 | if (!Init || Init->getType().isNull()) |
| 9794 | return false; |
| 9795 | |
| 9796 | const Expr *E = Init->IgnoreParens(); |
| 9797 | if (!tryUnwrapAllocSizeCall(E)) |
| 9798 | return false; |
| 9799 | |
| 9800 | // Store E instead of E unwrapped so that the type of the LValue's base is |
| 9801 | // what the user wanted. |
| 9802 | Result.setInvalid(B: E); |
| 9803 | |
| 9804 | QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType(); |
| 9805 | Result.addUnsizedArray(Info, E, ElemTy: Pointee); |
| 9806 | return true; |
| 9807 | } |
| 9808 | |
| 9809 | namespace { |
| 9810 | class PointerExprEvaluator |
| 9811 | : public ExprEvaluatorBase<PointerExprEvaluator> { |
| 9812 | LValue &Result; |
| 9813 | bool InvalidBaseOK; |
| 9814 | |
| 9815 | bool Success(const Expr *E) { |
| 9816 | Result.set(B: E); |
| 9817 | return true; |
| 9818 | } |
| 9819 | |
| 9820 | bool evaluateLValue(const Expr *E, LValue &Result) { |
| 9821 | return EvaluateLValue(E, Result, Info, InvalidBaseOK); |
| 9822 | } |
| 9823 | |
| 9824 | bool evaluatePointer(const Expr *E, LValue &Result) { |
| 9825 | return EvaluatePointer(E, Result, Info, InvalidBaseOK); |
| 9826 | } |
| 9827 | |
| 9828 | bool visitNonBuiltinCallExpr(const CallExpr *E); |
| 9829 | public: |
| 9830 | |
| 9831 | PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK) |
| 9832 | : ExprEvaluatorBaseTy(info), Result(Result), |
| 9833 | InvalidBaseOK(InvalidBaseOK) {} |
| 9834 | |
| 9835 | bool Success(const APValue &V, const Expr *E) { |
| 9836 | Result.setFrom(Ctx: Info.Ctx, V); |
| 9837 | return true; |
| 9838 | } |
| 9839 | bool ZeroInitialization(const Expr *E) { |
| 9840 | Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType()); |
| 9841 | return true; |
| 9842 | } |
| 9843 | |
| 9844 | bool VisitBinaryOperator(const BinaryOperator *E); |
| 9845 | bool VisitCastExpr(const CastExpr* E); |
| 9846 | bool VisitUnaryAddrOf(const UnaryOperator *E); |
| 9847 | bool VisitObjCStringLiteral(const ObjCStringLiteral *E) |
| 9848 | { return Success(E); } |
| 9849 | bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { |
| 9850 | if (E->isExpressibleAsConstantInitializer()) |
| 9851 | return Success(E); |
| 9852 | if (Info.noteFailure()) |
| 9853 | EvaluateIgnoredValue(Info, E: E->getSubExpr()); |
| 9854 | return Error(E); |
| 9855 | } |
| 9856 | bool VisitAddrLabelExpr(const AddrLabelExpr *E) |
| 9857 | { return Success(E); } |
| 9858 | bool VisitCallExpr(const CallExpr *E); |
| 9859 | bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp); |
| 9860 | bool VisitBlockExpr(const BlockExpr *E) { |
| 9861 | if (!E->getBlockDecl()->hasCaptures()) |
| 9862 | return Success(E); |
| 9863 | return Error(E); |
| 9864 | } |
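| | // 'this' is only meaningful while a member function (or a lambda call
| | // operator with a 'this' capture) is on the evaluation stack; for example
| | //   struct S { int n; constexpr int get() const { return this->n; } };
| | //   static_assert(S{3}.get() == 3, "");
| | // Here 'this' designates the temporary S object created for the call.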
| 9865 | bool VisitCXXThisExpr(const CXXThisExpr *E) { |
| 9866 | auto DiagnoseInvalidUseOfThis = [&] { |
| 9867 | if (Info.getLangOpts().CPlusPlus11) |
| 9868 | Info.FFDiag(E, DiagId: diag::note_constexpr_this) << E->isImplicit(); |
| 9869 | else |
| 9870 | Info.FFDiag(E); |
| 9871 | }; |
| 9872 | |
| 9873 | // Can't look at 'this' when checking a potential constant expression. |
| 9874 | if (Info.checkingPotentialConstantExpression()) |
| 9875 | return false; |
| 9876 | |
| 9877 | bool IsExplicitLambda = |
| 9878 | isLambdaCallWithExplicitObjectParameter(DC: Info.CurrentCall->Callee); |
| 9879 | if (!IsExplicitLambda) { |
| 9880 | if (!Info.CurrentCall->This) { |
| 9881 | DiagnoseInvalidUseOfThis(); |
| 9882 | return false; |
| 9883 | } |
| 9884 | |
| 9885 | Result = *Info.CurrentCall->This; |
| 9886 | } |
| 9887 | |
| 9888 | if (isLambdaCallOperator(DC: Info.CurrentCall->Callee)) { |
| 9889 | // Ensure we actually have captured 'this'. If something was wrong with |
| 9890 | // 'this' capture, the error would have been previously reported. |
| 9891 | // Otherwise we can be inside a default initialization of an object
| 9892 | // declared in the lambda's body, so there is no need to return false.
| 9893 | if (!Info.CurrentCall->LambdaThisCaptureField) { |
| 9894 | if (IsExplicitLambda && !Info.CurrentCall->This) { |
| 9895 | DiagnoseInvalidUseOfThis(); |
| 9896 | return false; |
| 9897 | } |
| 9898 | |
| 9899 | return true; |
| 9900 | } |
| 9901 | |
| 9902 | const auto *MD = cast<CXXMethodDecl>(Val: Info.CurrentCall->Callee); |
| 9903 | return HandleLambdaCapture( |
| 9904 | Info, E, Result, MD, FD: Info.CurrentCall->LambdaThisCaptureField, |
| 9905 | LValueToRValueConversion: Info.CurrentCall->LambdaThisCaptureField->getType()->isPointerType()); |
| 9906 | } |
| 9907 | return true; |
| 9908 | } |
| 9909 | |
| 9910 | bool VisitCXXNewExpr(const CXXNewExpr *E); |
| 9911 | |
| 9912 | bool VisitSourceLocExpr(const SourceLocExpr *E) { |
| 9913 | assert(!E->isIntType() && "SourceLocExpr isn't a pointer type?");
| 9914 | APValue LValResult = E->EvaluateInContext( |
| 9915 | Ctx: Info.Ctx, DefaultExpr: Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr()); |
| 9916 | Result.setFrom(Ctx: Info.Ctx, V: LValResult); |
| 9917 | return true; |
| 9918 | } |
| 9919 | |
| 9920 | bool VisitEmbedExpr(const EmbedExpr *E) { |
| 9921 | llvm::report_fatal_error(reason: "Not yet implemented for ExprConstant.cpp");
| 9922 | return true; |
| 9923 | } |
| 9924 | |
| 9925 | bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) { |
| 9926 | std::string ResultStr = E->ComputeName(Context&: Info.Ctx); |
| 9927 | |
| 9928 | QualType CharTy = Info.Ctx.CharTy.withConst(); |
| 9929 | APInt Size(Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType()), |
| 9930 | ResultStr.size() + 1); |
| 9931 | QualType ArrayTy = Info.Ctx.getConstantArrayType( |
| 9932 | EltTy: CharTy, ArySize: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0); |
| 9933 | |
| 9934 | StringLiteral *SL = |
| 9935 | StringLiteral::Create(Ctx: Info.Ctx, Str: ResultStr, Kind: StringLiteralKind::Ordinary, |
| 9936 | /*Pascal*/ false, Ty: ArrayTy, Locs: E->getLocation()); |
| 9937 | |
| 9938 | evaluateLValue(E: SL, Result); |
| 9939 | Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val&: ArrayTy)); |
| 9940 | return true; |
| 9941 | } |
| 9942 | |
| 9943 | // FIXME: Missing: @protocol, @selector |
| 9944 | }; |
| 9945 | } // end anonymous namespace |
| 9946 | |
| 9947 | static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info, |
| 9948 | bool InvalidBaseOK) { |
| 9949 | assert(!E->isValueDependent()); |
| 9950 | assert(E->isPRValue() && E->getType()->hasPointerRepresentation()); |
| 9951 | return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(S: E); |
| 9952 | } |
| 9953 | |
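| | // Pointer arithmetic is folded by evaluating the pointer operand and the
| | // integer operand, then applying the (negated, for subtraction) offset as an
| | // array adjustment; for example
| | //   constexpr int a[4] = {0, 1, 2, 3};
| | //   constexpr const int *p = a + 2;   // base 'a', offset two elements
| | //   constexpr const int *q = p - 1;   // base 'a', offset one element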
| 9954 | bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { |
| 9955 | if (E->getOpcode() != BO_Add && |
| 9956 | E->getOpcode() != BO_Sub) |
| 9957 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 9958 | |
| 9959 | const Expr *PExp = E->getLHS(); |
| 9960 | const Expr *IExp = E->getRHS(); |
| 9961 | if (IExp->getType()->isPointerType()) |
| 9962 | std::swap(a&: PExp, b&: IExp); |
| 9963 | |
| 9964 | bool EvalPtrOK = evaluatePointer(E: PExp, Result); |
| 9965 | if (!EvalPtrOK && !Info.noteFailure()) |
| 9966 | return false; |
| 9967 | |
| 9968 | llvm::APSInt Offset; |
| 9969 | if (!EvaluateInteger(E: IExp, Result&: Offset, Info) || !EvalPtrOK) |
| 9970 | return false; |
| 9971 | |
| 9972 | if (E->getOpcode() == BO_Sub) |
| 9973 | negateAsSigned(Int&: Offset); |
| 9974 | |
| 9975 | QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType(); |
| 9976 | return HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: Pointee, Adjustment: Offset); |
| 9977 | } |
| 9978 | |
| 9979 | bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) { |
| 9980 | return evaluateLValue(E: E->getSubExpr(), Result); |
| 9981 | } |
| 9982 | |
| 9983 | // Is the provided decl 'std::source_location::current'? |
| 9984 | static bool IsDeclSourceLocationCurrent(const FunctionDecl *FD) { |
| 9985 | if (!FD) |
| 9986 | return false; |
| 9987 | const IdentifierInfo *FnII = FD->getIdentifier(); |
| 9988 | if (!FnII || !FnII->isStr(Str: "current"))
| 9989 | return false; |
| 9990 | |
| 9991 | const auto *RD = dyn_cast<RecordDecl>(Val: FD->getParent()); |
| 9992 | if (!RD) |
| 9993 | return false; |
| 9994 | |
| 9995 | const IdentifierInfo *ClassII = RD->getIdentifier(); |
| 9996 | return RD->isInStdNamespace() && ClassII && ClassII->isStr(Str: "source_location");
| 9997 | } |
| 9998 | |
| 9999 | bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 10000 | const Expr *SubExpr = E->getSubExpr(); |
| 10001 | |
| 10002 | switch (E->getCastKind()) { |
| 10003 | default: |
| 10004 | break; |
| 10005 | case CK_BitCast: |
| 10006 | case CK_CPointerToObjCPointerCast: |
| 10007 | case CK_BlockPointerToObjCPointerCast: |
| 10008 | case CK_AnyPointerToBlockPointerCast: |
| 10009 | case CK_AddressSpaceConversion: |
| 10010 | if (!Visit(S: SubExpr)) |
| 10011 | return false; |
| 10012 | if (E->getType()->isFunctionPointerType() || |
| 10013 | SubExpr->getType()->isFunctionPointerType()) { |
| 10014 | // Casting between two function pointer types, or between a function |
| 10015 | // pointer and an object pointer, is always a reinterpret_cast. |
| 10016 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 10017 | << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret |
| 10018 | << Info.Ctx.getLangOpts().CPlusPlus; |
| 10019 | Result.Designator.setInvalid(); |
| 10020 | } else if (!E->getType()->isVoidPointerType()) { |
| 10021 | // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are |
| 10022 | // permitted in constant expressions in C++11. Bitcasts from cv void* are |
| 10023 | // also static_casts, but we disallow them as a resolution to DR1312. |
| 10024 | // |
| 10025 | // In some circumstances, we permit casting from void* to cv1 T*, when the |
| 10026 | // actual pointee object is actually a cv2 T. |
| 10027 | bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid && |
| 10028 | !Result.IsNullPtr; |
| 10029 | bool VoidPtrCastMaybeOK = |
| 10030 | Result.IsNullPtr || |
| 10031 | (HasValidResult && |
| 10032 | Info.Ctx.hasSimilarType(T1: Result.Designator.getType(Ctx&: Info.Ctx), |
| 10033 | T2: E->getType()->getPointeeType())); |
| 10034 | // 1. We'll allow it in std::allocator::allocate, and anything which that |
| 10035 | // calls. |
| 10036 | // 2. HACK 2022-03-28: Work around an issue with libstdc++'s |
| 10037 | // <source_location> header. Fixed in GCC 12 and later (2022-04-??). |
| 10038 | // We'll allow it in the body of std::source_location::current. GCC's |
| 10039 | // implementation had a parameter of type `void*`, and casts from |
| 10040 | // that back to `const __impl*` in its body. |
| 10041 | if (VoidPtrCastMaybeOK && |
| 10042 | (Info.getStdAllocatorCaller(FnName: "allocate") ||
| 10043 | IsDeclSourceLocationCurrent(FD: Info.CurrentCall->Callee) || |
| 10044 | Info.getLangOpts().CPlusPlus26)) { |
| 10045 | // Permitted. |
| 10046 | } else { |
| 10047 | if (SubExpr->getType()->isVoidPointerType() && |
| 10048 | Info.getLangOpts().CPlusPlus) { |
| 10049 | if (HasValidResult) |
| 10050 | CCEDiag(E, D: diag::note_constexpr_invalid_void_star_cast) |
| 10051 | << SubExpr->getType() << Info.getLangOpts().CPlusPlus26 |
| 10052 | << Result.Designator.getType(Ctx&: Info.Ctx).getCanonicalType() |
| 10053 | << E->getType()->getPointeeType(); |
| 10054 | else |
| 10055 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 10056 | << diag::ConstexprInvalidCastKind::CastFrom |
| 10057 | << SubExpr->getType(); |
| 10058 | } else |
| 10059 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 10060 | << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret |
| 10061 | << Info.Ctx.getLangOpts().CPlusPlus; |
| 10062 | Result.Designator.setInvalid(); |
| 10063 | } |
| 10064 | } |
| 10065 | if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr) |
| 10066 | ZeroInitialization(E); |
| 10067 | return true; |
| 10068 | |
| 10069 | case CK_DerivedToBase: |
| 10070 | case CK_UncheckedDerivedToBase: |
| 10071 | if (!evaluatePointer(E: E->getSubExpr(), Result)) |
| 10072 | return false; |
| 10073 | if (!Result.Base && Result.Offset.isZero()) |
| 10074 | return true; |
| 10075 | |
| 10076 | // Now figure out the necessary offset to add to the base LV to get from |
| 10077 | // the derived class to the base class. |
| 10078 | return HandleLValueBasePath(Info, E, Type: E->getSubExpr()->getType()-> |
| 10079 | castAs<PointerType>()->getPointeeType(), |
| 10080 | Result); |
| 10081 | |
| 10082 | case CK_BaseToDerived: |
| 10083 | if (!Visit(S: E->getSubExpr())) |
| 10084 | return false; |
| 10085 | if (!Result.Base && Result.Offset.isZero()) |
| 10086 | return true; |
| 10087 | return HandleBaseToDerivedCast(Info, E, Result); |
| 10088 | |
| 10089 | case CK_Dynamic: |
| 10090 | if (!Visit(S: E->getSubExpr())) |
| 10091 | return false; |
| 10092 | return HandleDynamicCast(Info, E: cast<ExplicitCastExpr>(Val: E), Ptr&: Result); |
| 10093 | |
| 10094 | case CK_NullToPointer: |
| 10095 | VisitIgnoredValue(E: E->getSubExpr()); |
| 10096 | return ZeroInitialization(E); |
| 10097 | |
| 10098 | case CK_IntegralToPointer: { |
| 10099 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 10100 | << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret |
| 10101 | << Info.Ctx.getLangOpts().CPlusPlus; |
| 10102 | |
| 10103 | APValue Value; |
| 10104 | if (!EvaluateIntegerOrLValue(E: SubExpr, Result&: Value, Info)) |
| 10105 | break; |
| 10106 | |
| 10107 | if (Value.isInt()) { |
| 10108 | unsigned Size = Info.Ctx.getTypeSize(T: E->getType()); |
| 10109 | uint64_t N = Value.getInt().extOrTrunc(width: Size).getZExtValue(); |
| 10110 | if (N == Info.Ctx.getTargetNullPointerValue(QT: E->getType())) { |
| 10111 | Result.setNull(Ctx&: Info.Ctx, PointerTy: E->getType()); |
| 10112 | } else { |
| 10113 | Result.Base = (Expr *)nullptr; |
| 10114 | Result.InvalidBase = false; |
| 10115 | Result.Offset = CharUnits::fromQuantity(Quantity: N); |
| 10116 | Result.Designator.setInvalid(); |
| 10117 | Result.IsNullPtr = false; |
| 10118 | } |
| 10119 | return true; |
| 10120 | } else { |
| 10121 | // In rare instances, the value isn't an lvalue. |
| 10122 | // For example, when the value is the difference between the addresses of |
| 10123 | // two labels. We reject that as a constant expression because we can't |
| 10124 | // compute a valid offset to convert into a pointer. |
| 10125 | if (!Value.isLValue()) |
| 10126 | return false; |
| 10127 | |
| 10128 | // Cast is of an lvalue, no need to change value. |
| 10129 | Result.setFrom(Ctx: Info.Ctx, V: Value); |
| 10130 | return true; |
| 10131 | } |
| 10132 | } |
| 10133 | |
| 10134 | case CK_ArrayToPointerDecay: { |
| 10135 | if (SubExpr->isGLValue()) { |
| 10136 | if (!evaluateLValue(E: SubExpr, Result)) |
| 10137 | return false; |
| 10138 | } else { |
| 10139 | APValue &Value = Info.CurrentCall->createTemporary( |
| 10140 | Key: SubExpr, T: SubExpr->getType(), Scope: ScopeKind::FullExpression, LV&: Result); |
| 10141 | if (!EvaluateInPlace(Result&: Value, Info, This: Result, E: SubExpr)) |
| 10142 | return false; |
| 10143 | } |
| 10144 | // The result is a pointer to the first element of the array. |
| 10145 | auto *AT = Info.Ctx.getAsArrayType(T: SubExpr->getType()); |
| 10146 | if (auto *CAT = dyn_cast<ConstantArrayType>(Val: AT)) |
| 10147 | Result.addArray(Info, E, CAT); |
| 10148 | else |
| 10149 | Result.addUnsizedArray(Info, E, ElemTy: AT->getElementType()); |
| 10150 | return true; |
| 10151 | } |
| 10152 | |
| 10153 | case CK_FunctionToPointerDecay: |
| 10154 | return evaluateLValue(E: SubExpr, Result); |
| 10155 | |
| 10156 | case CK_LValueToRValue: { |
| 10157 | LValue LVal; |
| 10158 | if (!evaluateLValue(E: E->getSubExpr(), Result&: LVal)) |
| 10159 | return false; |
| 10160 | |
| 10161 | APValue RVal; |
| 10162 | // Note, we use the subexpression's type in order to retain cv-qualifiers. |
| 10163 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getSubExpr()->getType(), |
| 10164 | LVal, RVal)) |
| 10165 | return InvalidBaseOK && |
| 10166 | evaluateLValueAsAllocSize(Info, Base: LVal.Base, Result); |
| 10167 | return Success(V: RVal, E); |
| 10168 | } |
| 10169 | } |
| 10170 | |
| 10171 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 10172 | } |
| 10173 | |
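| | // The helpers below distinguish the ABI alignment (alignof/_Alignof) from the
| | // preferred alignment (__alignof); on 32-bit x86 Linux, for example,
| | //   alignof(double)   == 4   // ABI-required alignment
| | //   __alignof(double) == 8   // preferred alignment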
| 10174 | static CharUnits GetAlignOfType(const ASTContext &Ctx, QualType T, |
| 10175 | UnaryExprOrTypeTrait ExprKind) { |
| 10176 | // C++ [expr.alignof]p3: |
| 10177 | // When alignof is applied to a reference type, the result is the |
| 10178 | // alignment of the referenced type. |
| 10179 | T = T.getNonReferenceType(); |
| 10180 | |
| 10181 | if (T.getQualifiers().hasUnaligned()) |
| 10182 | return CharUnits::One(); |
| 10183 | |
| 10184 | const bool AlignOfReturnsPreferred = |
| 10185 | Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7; |
| 10186 | |
| 10187 | // __alignof is defined to return the preferred alignment. |
| 10188 | // Before 8, clang returned the preferred alignment for alignof and _Alignof |
| 10189 | // as well. |
| 10190 | if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred) |
| 10191 | return Ctx.toCharUnitsFromBits(BitSize: Ctx.getPreferredTypeAlign(T: T.getTypePtr())); |
| 10192 | // alignof and _Alignof are defined to return the ABI alignment. |
| 10193 | else if (ExprKind == UETT_AlignOf) |
| 10194 | return Ctx.getTypeAlignInChars(T: T.getTypePtr()); |
| 10195 | else |
| 10196 | llvm_unreachable("GetAlignOfType on a non-alignment ExprKind" ); |
| 10197 | } |
| 10198 | |
| 10199 | CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, |
| 10200 | UnaryExprOrTypeTrait ExprKind) { |
| 10201 | E = E->IgnoreParens(); |
| 10202 | |
| 10203 | // The kinds of expressions that we have special-case logic here for |
| 10204 | // should be kept up to date with the special checks for those |
| 10205 | // expressions in Sema. |
| 10206 | |
| 10207 | // alignof decl is always accepted, even if it doesn't make sense: we default |
| 10208 | // to 1 in those cases. |
| 10209 | if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E)) |
| 10210 | return Ctx.getDeclAlign(D: DRE->getDecl(), |
| 10211 | /*RefAsPointee*/ ForAlignof: true); |
| 10212 | |
| 10213 | if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: E)) |
| 10214 | return Ctx.getDeclAlign(D: ME->getMemberDecl(), |
| 10215 | /*RefAsPointee*/ ForAlignof: true); |
| 10216 | |
| 10217 | return GetAlignOfType(Ctx, T: E->getType(), ExprKind); |
| 10218 | } |
| 10219 | |
| 10220 | static CharUnits getBaseAlignment(EvalInfo &Info, const LValue &Value) { |
| 10221 | if (const auto *VD = Value.Base.dyn_cast<const ValueDecl *>()) |
| 10222 | return Info.Ctx.getDeclAlign(D: VD); |
| 10223 | if (const auto *E = Value.Base.dyn_cast<const Expr *>()) |
| 10224 | return GetAlignOfExpr(Ctx: Info.Ctx, E, ExprKind: UETT_AlignOf); |
| 10225 | return GetAlignOfType(Ctx: Info.Ctx, T: Value.Base.getTypeInfoType(), ExprKind: UETT_AlignOf); |
| 10226 | } |
| 10227 | |
| 10228 | /// Evaluate the value of the alignment argument to __builtin_align_{up,down}, |
| 10229 | /// __builtin_is_aligned and __builtin_assume_aligned. |
| 10230 | static bool getAlignmentArgument(const Expr *E, QualType ForType, |
| 10231 | EvalInfo &Info, APSInt &Alignment) { |
| 10232 | if (!EvaluateInteger(E, Result&: Alignment, Info)) |
| 10233 | return false; |
| 10234 | if (Alignment < 0 || !Alignment.isPowerOf2()) { |
| 10235 | Info.FFDiag(E, DiagId: diag::note_constexpr_invalid_alignment) << Alignment; |
| 10236 | return false; |
| 10237 | } |
| 10238 | unsigned SrcWidth = Info.Ctx.getIntWidth(T: ForType); |
| 10239 | APSInt MaxValue(APInt::getOneBitSet(numBits: SrcWidth, BitNo: SrcWidth - 1)); |
| 10240 | if (APSInt::compareValues(I1: Alignment, I2: MaxValue) > 0) { |
| 10241 | Info.FFDiag(E, DiagId: diag::note_constexpr_alignment_too_big) |
| 10242 | << MaxValue << ForType << Alignment; |
| 10243 | return false; |
| 10244 | } |
| 10245 | // Ensure both alignment and source value have the same bit width so that we |
| 10246 | // don't assert when computing the resulting value. |
| 10247 | APSInt ExtAlignment = |
| 10248 | APSInt(Alignment.zextOrTrunc(width: SrcWidth), /*isUnsigned=*/true); |
| 10249 | assert(APSInt::compareValues(Alignment, ExtAlignment) == 0 && |
| 10250 | "Alignment should not be changed by ext/trunc" ); |
| 10251 | Alignment = ExtAlignment; |
| 10252 | assert(Alignment.getBitWidth() == SrcWidth); |
| 10253 | return true; |
| 10254 | } |
| 10255 | |
| 10256 | // To be clear: this happily visits unsupported builtins. Better name welcomed. |
| 10257 | bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) { |
| 10258 | if (ExprEvaluatorBaseTy::VisitCallExpr(E)) |
| 10259 | return true; |
| 10260 | |
| 10261 | if (!(InvalidBaseOK && E->getCalleeAllocSizeAttr())) |
| 10262 | return false; |
| 10263 | |
| 10264 | Result.setInvalid(B: E); |
| 10265 | QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType(); |
| 10266 | Result.addUnsizedArray(Info, E, ElemTy: PointeeTy); |
| 10267 | return true; |
| 10268 | } |
| 10269 | |
| 10270 | bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) { |
| 10271 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 10272 | return visitNonBuiltinCallExpr(E); |
| 10273 | return VisitBuiltinCallExpr(E, BuiltinOp: E->getBuiltinCallee()); |
| 10274 | } |
| 10275 | |
| 10276 | // Determine if T is a character type for which we guarantee that |
| 10277 | // sizeof(T) == 1. |
| 10278 | static bool isOneByteCharacterType(QualType T) { |
| 10279 | return T->isCharType() || T->isChar8Type(); |
| 10280 | } |
| 10281 | |
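| | // Several pointer-returning builtins fold directly to their argument; e.g.
| | //   __builtin_addressof(x)           // lvalue designating 'x'
| | //   __builtin_launder(p)             // same pointer value as 'p'
| | //   __builtin_assume_aligned(p, 16)  // 'p', but only when the evaluator can
| | //                                    // prove the asserted alignment holds,
| | //                                    // since violating it is UB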
| 10282 | bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, |
| 10283 | unsigned BuiltinOp) { |
| 10284 | if (IsOpaqueConstantCall(E)) |
| 10285 | return Success(E); |
| 10286 | |
| 10287 | switch (BuiltinOp) { |
| 10288 | case Builtin::BIaddressof: |
| 10289 | case Builtin::BI__addressof: |
| 10290 | case Builtin::BI__builtin_addressof: |
| 10291 | return evaluateLValue(E: E->getArg(Arg: 0), Result); |
| 10292 | case Builtin::BI__builtin_assume_aligned: { |
| 10293 | // We need to be very careful here: if the pointer does not have the
| 10294 | // asserted alignment, then the behavior is undefined, and undefined
| 10295 | // behavior is non-constant.
| 10296 | if (!evaluatePointer(E: E->getArg(Arg: 0), Result)) |
| 10297 | return false; |
| 10298 | |
| 10299 | LValue OffsetResult(Result); |
| 10300 | APSInt Alignment; |
| 10301 | if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: E->getArg(Arg: 0)->getType(), Info, |
| 10302 | Alignment)) |
| 10303 | return false; |
| 10304 | CharUnits Align = CharUnits::fromQuantity(Quantity: Alignment.getZExtValue()); |
| 10305 | |
| 10306 | if (E->getNumArgs() > 2) { |
| 10307 | APSInt Offset; |
| 10308 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Offset, Info)) |
| 10309 | return false; |
| 10310 | |
| 10311 | int64_t AdditionalOffset = -Offset.getZExtValue(); |
| 10312 | OffsetResult.Offset += CharUnits::fromQuantity(Quantity: AdditionalOffset); |
| 10313 | } |
| 10314 | |
| 10315 | // If there is a base object, then it must have the correct alignment. |
| 10316 | if (OffsetResult.Base) { |
| 10317 | CharUnits BaseAlignment = getBaseAlignment(Info, Value: OffsetResult); |
| 10318 | |
| 10319 | if (BaseAlignment < Align) { |
| 10320 | Result.Designator.setInvalid(); |
| 10321 | CCEDiag(E: E->getArg(Arg: 0), D: diag::note_constexpr_baa_insufficient_alignment) |
| 10322 | << 0 << BaseAlignment.getQuantity() << Align.getQuantity(); |
| 10323 | return false; |
| 10324 | } |
| 10325 | } |
| 10326 | |
| 10327 | // The offset must also have the correct alignment. |
| 10328 | if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) { |
| 10329 | Result.Designator.setInvalid(); |
| 10330 | |
| 10331 | (OffsetResult.Base |
| 10332 | ? CCEDiag(E: E->getArg(Arg: 0), |
| 10333 | D: diag::note_constexpr_baa_insufficient_alignment) |
| 10334 | << 1 |
| 10335 | : CCEDiag(E: E->getArg(Arg: 0), |
| 10336 | D: diag::note_constexpr_baa_value_insufficient_alignment)) |
| 10337 | << OffsetResult.Offset.getQuantity() << Align.getQuantity(); |
| 10338 | return false; |
| 10339 | } |
| 10340 | |
| 10341 | return true; |
| 10342 | } |
| 10343 | case Builtin::BI__builtin_align_up: |
| 10344 | case Builtin::BI__builtin_align_down: { |
| 10345 | if (!evaluatePointer(E: E->getArg(Arg: 0), Result)) |
| 10346 | return false; |
| 10347 | APSInt Alignment; |
| 10348 | if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: E->getArg(Arg: 0)->getType(), Info, |
| 10349 | Alignment)) |
| 10350 | return false; |
| 10351 | CharUnits BaseAlignment = getBaseAlignment(Info, Value: Result); |
| 10352 | CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(offset: Result.Offset); |
| 10353 | // For align_up/align_down, we can return the same value if the alignment |
| 10354 | // is known to be greater or equal to the requested value. |
| 10355 | if (PtrAlign.getQuantity() >= Alignment) |
| 10356 | return true; |
| 10357 | |
| 10358 | // The alignment could be greater than the minimum at run-time, so we cannot |
| 10359 | // infer much about the resulting pointer value. One case is possible: |
| 10360 | // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we |
| 10361 | // can infer the correct index if the requested alignment is no greater
| 10362 | // than the base alignment, so we can perform the computation on the offset.
| 10363 | if (BaseAlignment.getQuantity() >= Alignment) { |
| 10364 | assert(Alignment.getBitWidth() <= 64 && |
| 10365 | "Cannot handle > 64-bit address-space" ); |
| 10366 | uint64_t Alignment64 = Alignment.getZExtValue(); |
| 10367 | CharUnits NewOffset = CharUnits::fromQuantity( |
| 10368 | Quantity: BuiltinOp == Builtin::BI__builtin_align_down |
| 10369 | ? llvm::alignDown(Value: Result.Offset.getQuantity(), Align: Alignment64) |
| 10370 | : llvm::alignTo(Value: Result.Offset.getQuantity(), Align: Alignment64)); |
| 10371 | Result.adjustOffset(N: NewOffset - Result.Offset); |
| 10372 | // TODO: diagnose out-of-bounds values/only allow for arrays? |
| 10373 | return true; |
| 10374 | } |
| 10375 | // Otherwise, we cannot constant-evaluate the result. |
| 10376 | Info.FFDiag(E: E->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_adjust) |
| 10377 | << Alignment; |
| 10378 | return false; |
| 10379 | } |
| 10380 | case Builtin::BI__builtin_operator_new: |
| 10381 | return HandleOperatorNewCall(Info, E, Result); |
| 10382 | case Builtin::BI__builtin_launder: |
| 10383 | return evaluatePointer(E: E->getArg(Arg: 0), Result); |
| 10384 | case Builtin::BIstrchr: |
| 10385 | case Builtin::BIwcschr: |
| 10386 | case Builtin::BImemchr: |
| 10387 | case Builtin::BIwmemchr: |
| 10388 | if (Info.getLangOpts().CPlusPlus11) |
| 10389 | Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function) |
| 10390 | << /*isConstexpr*/ 0 << /*isConstructor*/ 0 |
| 10391 | << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp); |
| 10392 | else |
| 10393 | Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 10394 | [[fallthrough]]; |
| 10395 | case Builtin::BI__builtin_strchr: |
| 10396 | case Builtin::BI__builtin_wcschr: |
| 10397 | case Builtin::BI__builtin_memchr: |
| 10398 | case Builtin::BI__builtin_char_memchr: |
| 10399 | case Builtin::BI__builtin_wmemchr: { |
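|       | // A sketch of the folding done here (illustrative):
|       | //   constexpr const char *p = __builtin_strchr("hello", 'l');
|       | // scans the string object element by element and yields a pointer to the
|       | // first matching element, or a null pointer if no match is found.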
| 10400 | if (!Visit(S: E->getArg(Arg: 0))) |
| 10401 | return false; |
| 10402 | APSInt Desired; |
| 10403 | if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: Desired, Info)) |
| 10404 | return false; |
| 10405 | uint64_t MaxLength = uint64_t(-1); |
| 10406 | if (BuiltinOp != Builtin::BIstrchr && |
| 10407 | BuiltinOp != Builtin::BIwcschr && |
| 10408 | BuiltinOp != Builtin::BI__builtin_strchr && |
| 10409 | BuiltinOp != Builtin::BI__builtin_wcschr) { |
| 10410 | APSInt N; |
| 10411 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info)) |
| 10412 | return false; |
| 10413 | MaxLength = N.getZExtValue(); |
| 10414 | } |
| 10415 | // We cannot find the value if there are no candidates to match against. |
| 10416 | if (MaxLength == 0u) |
| 10417 | return ZeroInitialization(E); |
| 10418 | if (!Result.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) || |
| 10419 | Result.Designator.Invalid) |
| 10420 | return false; |
| 10421 | QualType CharTy = Result.Designator.getType(Ctx&: Info.Ctx); |
| 10422 | bool IsRawByte = BuiltinOp == Builtin::BImemchr || |
| 10423 | BuiltinOp == Builtin::BI__builtin_memchr; |
| 10424 | assert(IsRawByte || |
| 10425 | Info.Ctx.hasSameUnqualifiedType( |
| 10426 | CharTy, E->getArg(0)->getType()->getPointeeType())); |
| 10427 | // Pointers to const void may point to objects of incomplete type. |
| 10428 | if (IsRawByte && CharTy->isIncompleteType()) { |
| 10429 | Info.FFDiag(E, DiagId: diag::note_constexpr_ltor_incomplete_type) << CharTy; |
| 10430 | return false; |
| 10431 | } |
| 10432 | // Give up on byte-oriented matching against multibyte elements. |
| 10433 | // FIXME: We can compare the bytes in the correct order. |
| 10434 | if (IsRawByte && !isOneByteCharacterType(T: CharTy)) { |
| 10435 | Info.FFDiag(E, DiagId: diag::note_constexpr_memchr_unsupported) |
| 10436 | << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp) << CharTy; |
| 10437 | return false; |
| 10438 | } |
| 10439 | // Figure out what value we're actually looking for (after converting to |
| 10440 | // the corresponding unsigned type if necessary). |
| 10441 | uint64_t DesiredVal; |
| 10442 | bool StopAtNull = false; |
| 10443 | switch (BuiltinOp) { |
| 10444 | case Builtin::BIstrchr: |
| 10445 | case Builtin::BI__builtin_strchr: |
| 10446 | // strchr compares directly to the passed integer, and therefore |
| 10447 | // always fails if given an int that is not a char. |
| 10448 | if (!APSInt::isSameValue(I1: HandleIntToIntCast(Info, E, DestType: CharTy, |
| 10449 | SrcType: E->getArg(Arg: 1)->getType(), |
| 10450 | Value: Desired), |
| 10451 | I2: Desired)) |
| 10452 | return ZeroInitialization(E); |
| 10453 | StopAtNull = true; |
| 10454 | [[fallthrough]]; |
| 10455 | case Builtin::BImemchr: |
| 10456 | case Builtin::BI__builtin_memchr: |
| 10457 | case Builtin::BI__builtin_char_memchr: |
| 10458 | // memchr compares by converting both sides to unsigned char. That's also |
| 10459 | // correct for strchr if we get this far (to cope with plain char being |
| 10460 | // unsigned in the strchr case). |
| 10461 | DesiredVal = Desired.trunc(width: Info.Ctx.getCharWidth()).getZExtValue(); |
| 10462 | break; |
| 10463 | |
| 10464 | case Builtin::BIwcschr: |
| 10465 | case Builtin::BI__builtin_wcschr: |
| 10466 | StopAtNull = true; |
| 10467 | [[fallthrough]]; |
| 10468 | case Builtin::BIwmemchr: |
| 10469 | case Builtin::BI__builtin_wmemchr: |
| 10470 | // wcschr and wmemchr are given a wchar_t to look for. Just use it. |
| 10471 | DesiredVal = Desired.getZExtValue(); |
| 10472 | break; |
| 10473 | } |
| 10474 | |
| 10475 | for (; MaxLength; --MaxLength) { |
| 10476 | APValue Char; |
| 10477 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: CharTy, LVal: Result, RVal&: Char) || |
| 10478 | !Char.isInt()) |
| 10479 | return false; |
| 10480 | if (Char.getInt().getZExtValue() == DesiredVal) |
| 10481 | return true; |
| 10482 | if (StopAtNull && !Char.getInt()) |
| 10483 | break; |
| 10484 | if (!HandleLValueArrayAdjustment(Info, E, LVal&: Result, EltTy: CharTy, Adjustment: 1)) |
| 10485 | return false; |
| 10486 | } |
| 10487 | // Not found: return nullptr. |
| 10488 | return ZeroInitialization(E); |
| 10489 | } |
| 10490 | |
| 10491 | case Builtin::BImemcpy: |
| 10492 | case Builtin::BImemmove: |
| 10493 | case Builtin::BIwmemcpy: |
| 10494 | case Builtin::BIwmemmove: |
| 10495 | if (Info.getLangOpts().CPlusPlus11) |
| 10496 | Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function) |
| 10497 | << /*isConstexpr*/ 0 << /*isConstructor*/ 0 |
| 10498 | << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp); |
| 10499 | else |
| 10500 | Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 10501 | [[fallthrough]]; |
| 10502 | case Builtin::BI__builtin_memcpy: |
| 10503 | case Builtin::BI__builtin_memmove: |
| 10504 | case Builtin::BI__builtin_wmemcpy: |
| 10505 | case Builtin::BI__builtin_wmemmove: { |
| 10506 | bool WChar = BuiltinOp == Builtin::BIwmemcpy || |
| 10507 | BuiltinOp == Builtin::BIwmemmove || |
| 10508 | BuiltinOp == Builtin::BI__builtin_wmemcpy || |
| 10509 | BuiltinOp == Builtin::BI__builtin_wmemmove; |
| 10510 | bool Move = BuiltinOp == Builtin::BImemmove || |
| 10511 | BuiltinOp == Builtin::BIwmemmove || |
| 10512 | BuiltinOp == Builtin::BI__builtin_memmove || |
| 10513 | BuiltinOp == Builtin::BI__builtin_wmemmove; |
| 10514 | |
| 10515 | // The result of mem* is the first argument. |
| 10516 | if (!Visit(S: E->getArg(Arg: 0))) |
| 10517 | return false; |
| 10518 | LValue Dest = Result; |
| 10519 | |
| 10520 | LValue Src; |
| 10521 | if (!EvaluatePointer(E: E->getArg(Arg: 1), Result&: Src, Info)) |
| 10522 | return false; |
| 10523 | |
| 10524 | APSInt N; |
| 10525 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info)) |
| 10526 | return false; |
| 10527 | assert(!N.isSigned() && "memcpy and friends take an unsigned size" ); |
| 10528 | |
| 10529 | // If the size is zero, we treat this as always being a valid no-op. |
| 10530 | // (Even if one of the src and dest pointers is null.) |
| 10531 | if (!N) |
| 10532 | return true; |
| 10533 | |
| 10534 | // Otherwise, if either of the operands is null, we can't proceed. Don't |
| 10535 | // try to determine the type of the copied objects, because there aren't |
| 10536 | // any. |
| 10537 | if (!Src.Base || !Dest.Base) { |
| 10538 | APValue Val; |
| 10539 | (!Src.Base ? Src : Dest).moveInto(V&: Val); |
| 10540 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_null) |
| 10541 | << Move << WChar << !!Src.Base |
| 10542 | << Val.getAsString(Ctx: Info.Ctx, Ty: E->getArg(Arg: 0)->getType()); |
| 10543 | return false; |
| 10544 | } |
| 10545 | if (Src.Designator.Invalid || Dest.Designator.Invalid) |
| 10546 | return false; |
| 10547 | |
| 10548 | // We require that Src and Dest are both pointers to arrays of |
| 10549 | // trivially-copyable type. (For the wide version, the designator will be |
| 10550 | // invalid if the designated object is not a wchar_t.) |
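|       | // For example (illustrative), copying between two int arrays can be folded,
|       | // whereas `__builtin_memcpy(&f, &i, sizeof(float))` with float f and int i is
|       | // a type pun and is rejected by the check below.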
| 10551 | QualType T = Dest.Designator.getType(Ctx&: Info.Ctx); |
| 10552 | QualType SrcT = Src.Designator.getType(Ctx&: Info.Ctx); |
| 10553 | if (!Info.Ctx.hasSameUnqualifiedType(T1: T, T2: SrcT)) { |
| 10554 | // FIXME: Consider using our bit_cast implementation to support this. |
| 10555 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T; |
| 10556 | return false; |
| 10557 | } |
| 10558 | if (T->isIncompleteType()) { |
| 10559 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_incomplete_type) << Move << T; |
| 10560 | return false; |
| 10561 | } |
| 10562 | if (!T.isTriviallyCopyableType(Context: Info.Ctx)) { |
| 10563 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_nontrivial) << Move << T; |
| 10564 | return false; |
| 10565 | } |
| 10566 | |
| 10567 | // Figure out how many T's we're copying. |
| 10568 | uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity(); |
| 10569 | if (TSize == 0) |
| 10570 | return false; |
| 10571 | if (!WChar) { |
| 10572 | uint64_t Remainder; |
| 10573 | llvm::APInt OrigN = N; |
| 10574 | llvm::APInt::udivrem(LHS: OrigN, RHS: TSize, Quotient&: N, Remainder); |
| 10575 | if (Remainder) { |
| 10576 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_unsupported) |
| 10577 | << Move << WChar << 0 << T << toString(I: OrigN, Radix: 10, /*Signed*/false) |
| 10578 | << (unsigned)TSize; |
| 10579 | return false; |
| 10580 | } |
| 10581 | } |
| 10582 | |
| 10583 | // Check that the copying will remain within the arrays, just so that we |
| 10584 | // can give a more meaningful diagnostic. This implicitly also checks that |
| 10585 | // N fits into 64 bits. |
| 10586 | uint64_t RemainingSrcSize = Src.Designator.validIndexAdjustments().second; |
| 10587 | uint64_t RemainingDestSize = Dest.Designator.validIndexAdjustments().second; |
| 10588 | if (N.ugt(RHS: RemainingSrcSize) || N.ugt(RHS: RemainingDestSize)) { |
| 10589 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_unsupported) |
| 10590 | << Move << WChar << (N.ugt(RHS: RemainingSrcSize) ? 1 : 2) << T |
| 10591 | << toString(I: N, Radix: 10, /*Signed*/false); |
| 10592 | return false; |
| 10593 | } |
| 10594 | uint64_t NElems = N.getZExtValue(); |
| 10595 | uint64_t NBytes = NElems * TSize; |
| 10596 | |
| 10597 | // Check for overlap. |
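|       | // memcpy with overlapping regions is undefined; only memmove may copy within
|       | // the same object. For a forward overlap, e.g. (illustrative)
|       | // `wmemmove(&buf[1], &buf[0], 2)`, we copy backwards starting from the last
|       | // element so earlier elements are not clobbered before they are read.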
| 10598 | int Direction = 1; |
| 10599 | if (HasSameBase(A: Src, B: Dest)) { |
| 10600 | uint64_t SrcOffset = Src.getLValueOffset().getQuantity(); |
| 10601 | uint64_t DestOffset = Dest.getLValueOffset().getQuantity(); |
| 10602 | if (DestOffset >= SrcOffset && DestOffset - SrcOffset < NBytes) { |
| 10603 | // Dest is inside the source region. |
| 10604 | if (!Move) { |
| 10605 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_overlap) << WChar; |
| 10606 | return false; |
| 10607 | } |
| 10608 | // For memmove and friends, copy backwards. |
| 10609 | if (!HandleLValueArrayAdjustment(Info, E, LVal&: Src, EltTy: T, Adjustment: NElems - 1) || |
| 10610 | !HandleLValueArrayAdjustment(Info, E, LVal&: Dest, EltTy: T, Adjustment: NElems - 1)) |
| 10611 | return false; |
| 10612 | Direction = -1; |
| 10613 | } else if (!Move && SrcOffset >= DestOffset && |
| 10614 | SrcOffset - DestOffset < NBytes) { |
| 10615 | // Src is inside the destination region for memcpy: invalid. |
| 10616 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcpy_overlap) << WChar; |
| 10617 | return false; |
| 10618 | } |
| 10619 | } |
| 10620 | |
| 10621 | while (true) { |
| 10622 | APValue Val; |
| 10623 | // FIXME: Set WantObjectRepresentation to true if we're copying a |
| 10624 | // char-like type? |
| 10625 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: T, LVal: Src, RVal&: Val) || |
| 10626 | !handleAssignment(Info, E, LVal: Dest, LValType: T, Val)) |
| 10627 | return false; |
| 10628 | // Do not iterate past the last element; if we're copying backwards, that |
| 10629 | // might take us off the start of the array. |
| 10630 | if (--NElems == 0) |
| 10631 | return true; |
| 10632 | if (!HandleLValueArrayAdjustment(Info, E, LVal&: Src, EltTy: T, Adjustment: Direction) || |
| 10633 | !HandleLValueArrayAdjustment(Info, E, LVal&: Dest, EltTy: T, Adjustment: Direction)) |
| 10634 | return false; |
| 10635 | } |
| 10636 | } |
| 10637 | |
| 10638 | default: |
| 10639 | return false; |
| 10640 | } |
| 10641 | } |
| 10642 | |
| 10643 | static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This, |
| 10644 | APValue &Result, const InitListExpr *ILE, |
| 10645 | QualType AllocType); |
| 10646 | static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This, |
| 10647 | APValue &Result, |
| 10648 | const CXXConstructExpr *CCE, |
| 10649 | QualType AllocType); |
| 10650 | |
| 10651 | bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { |
| 10652 | if (!Info.getLangOpts().CPlusPlus20) |
| 10653 | Info.CCEDiag(E, DiagId: diag::note_constexpr_new); |
| 10654 | |
| 10655 | // We cannot speculatively evaluate a new-expression.
| 10656 | if (Info.SpeculativeEvaluationDepth) |
| 10657 | return false; |
| 10658 | |
| 10659 | FunctionDecl *OperatorNew = E->getOperatorNew(); |
| 10660 | QualType AllocType = E->getAllocatedType(); |
| 10661 | QualType TargetType = AllocType; |
| 10662 | |
| 10663 | bool IsNothrow = false; |
| 10664 | bool IsPlacement = false; |
| 10665 | |
| 10666 | if (E->getNumPlacementArgs() == 1 && |
| 10667 | E->getPlacementArg(I: 0)->getType()->isNothrowT()) { |
| 10668 | // The only new-placement list we support is of the form (std::nothrow). |
| 10669 | // |
| 10670 | // FIXME: There is no restriction on this, but it's not clear that any |
| 10671 | // other form makes any sense. We get here for cases such as: |
| 10672 | // |
| 10673 | // new (std::align_val_t{N}) X(int) |
| 10674 | // |
| 10675 | // (which should presumably be valid only if N is a multiple of |
| 10676 | // alignof(int), and in any case can't be deallocated unless N is |
| 10677 | // alignof(X) and X has new-extended alignment). |
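|       | // e.g. (illustrative) `new (std::nothrow) int[n]` reaches this branch; we
|       | // evaluate the placement argument and remember that failure should produce
|       | // a null pointer rather than a diagnostic.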
| 10678 | LValue Nothrow; |
| 10679 | if (!EvaluateLValue(E: E->getPlacementArg(I: 0), Result&: Nothrow, Info)) |
| 10680 | return false; |
| 10681 | IsNothrow = true; |
| 10682 | } else if (OperatorNew->isReservedGlobalPlacementOperator()) { |
| 10683 | if (Info.CurrentCall->isStdFunction() || Info.getLangOpts().CPlusPlus26 || |
| 10684 | (Info.CurrentCall->CanEvalMSConstexpr && |
| 10685 | OperatorNew->hasAttr<MSConstexprAttr>())) { |
| 10686 | if (!EvaluatePointer(E: E->getPlacementArg(I: 0), Result, Info)) |
| 10687 | return false; |
| 10688 | if (Result.Designator.Invalid) |
| 10689 | return false; |
| 10690 | TargetType = E->getPlacementArg(I: 0)->getType(); |
| 10691 | IsPlacement = true; |
| 10692 | } else { |
| 10693 | Info.FFDiag(E, DiagId: diag::note_constexpr_new_placement) |
| 10694 | << /*C++26 feature*/ 1 << E->getSourceRange(); |
| 10695 | return false; |
| 10696 | } |
| 10697 | } else if (E->getNumPlacementArgs()) { |
| 10698 | Info.FFDiag(E, DiagId: diag::note_constexpr_new_placement) |
| 10699 | << /*Unsupported*/ 0 << E->getSourceRange(); |
| 10700 | return false; |
| 10701 | } else if (!OperatorNew |
| 10702 | ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) { |
| 10703 | Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable) |
| 10704 | << isa<CXXMethodDecl>(Val: OperatorNew) << OperatorNew; |
| 10705 | return false; |
| 10706 | } |
| 10707 | |
| 10708 | const Expr *Init = E->getInitializer(); |
| 10709 | const InitListExpr *ResizedArrayILE = nullptr; |
| 10710 | const CXXConstructExpr *ResizedArrayCCE = nullptr; |
| 10711 | bool ValueInit = false; |
| 10712 | |
| 10713 | if (std::optional<const Expr *> ArraySize = E->getArraySize()) { |
| 10714 | const Expr *Stripped = *ArraySize; |
| 10715 | for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Val: Stripped); |
| 10716 | Stripped = ICE->getSubExpr()) |
| 10717 | if (ICE->getCastKind() != CK_NoOp && |
| 10718 | ICE->getCastKind() != CK_IntegralCast) |
| 10719 | break; |
| 10720 | |
| 10721 | llvm::APSInt ArrayBound; |
| 10722 | if (!EvaluateInteger(E: Stripped, Result&: ArrayBound, Info)) |
| 10723 | return false; |
| 10724 | |
| 10725 | // C++ [expr.new]p9: |
| 10726 | // The expression is erroneous if: |
| 10727 | // -- [...] its value before converting to size_t [or] applying the |
| 10728 | // second standard conversion sequence is less than zero |
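|       | // e.g. (illustrative) a bound that evaluates to -1 makes `new int[bound]`
|       | // erroneous; with the nothrow form we instead fold the result to null below.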
| 10729 | if (ArrayBound.isSigned() && ArrayBound.isNegative()) { |
| 10730 | if (IsNothrow) |
| 10731 | return ZeroInitialization(E); |
| 10732 | |
| 10733 | Info.FFDiag(E: *ArraySize, DiagId: diag::note_constexpr_new_negative) |
| 10734 | << ArrayBound << (*ArraySize)->getSourceRange(); |
| 10735 | return false; |
| 10736 | } |
| 10737 | |
| 10738 | // -- its value is such that the size of the allocated object would |
| 10739 | // exceed the implementation-defined limit |
| 10740 | if (!Info.CheckArraySize(Loc: ArraySize.value()->getExprLoc(), |
| 10741 | BitWidth: ConstantArrayType::getNumAddressingBits( |
| 10742 | Context: Info.Ctx, ElementType: AllocType, NumElements: ArrayBound), |
| 10743 | ElemCount: ArrayBound.getZExtValue(), /*Diag=*/!IsNothrow)) { |
| 10744 | if (IsNothrow) |
| 10745 | return ZeroInitialization(E); |
| 10746 | return false; |
| 10747 | } |
| 10748 | |
| 10749 | // -- the new-initializer is a braced-init-list and the number of |
| 10750 | // array elements for which initializers are provided [...] |
| 10751 | // exceeds the number of elements to initialize |
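|       | // e.g. (illustrative) `new int[n]{1, 2, 3}` where n evaluates to 2 here
|       | // provides more initializers than elements and is rejected further down.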
| 10752 | if (!Init) { |
| 10753 | // No initialization is performed. |
| 10754 | } else if (isa<CXXScalarValueInitExpr>(Val: Init) || |
| 10755 | isa<ImplicitValueInitExpr>(Val: Init)) { |
| 10756 | ValueInit = true; |
| 10757 | } else if (auto *CCE = dyn_cast<CXXConstructExpr>(Val: Init)) { |
| 10758 | ResizedArrayCCE = CCE; |
| 10759 | } else { |
| 10760 | auto *CAT = Info.Ctx.getAsConstantArrayType(T: Init->getType()); |
| 10761 | assert(CAT && "unexpected type for array initializer" ); |
| 10762 | |
| 10763 | unsigned Bits = |
| 10764 | std::max(a: CAT->getSizeBitWidth(), b: ArrayBound.getBitWidth()); |
| 10765 | llvm::APInt InitBound = CAT->getSize().zext(width: Bits); |
| 10766 | llvm::APInt AllocBound = ArrayBound.zext(width: Bits); |
| 10767 | if (InitBound.ugt(RHS: AllocBound)) { |
| 10768 | if (IsNothrow) |
| 10769 | return ZeroInitialization(E); |
| 10770 | |
| 10771 | Info.FFDiag(E: *ArraySize, DiagId: diag::note_constexpr_new_too_small) |
| 10772 | << toString(I: AllocBound, Radix: 10, /*Signed=*/false) |
| 10773 | << toString(I: InitBound, Radix: 10, /*Signed=*/false) |
| 10774 | << (*ArraySize)->getSourceRange(); |
| 10775 | return false; |
| 10776 | } |
| 10777 | |
| 10778 | // If the sizes differ, we must have an initializer list, and we need |
| 10779 | // special handling for this case when we initialize. |
| 10780 | if (InitBound != AllocBound) |
| 10781 | ResizedArrayILE = cast<InitListExpr>(Val: Init); |
| 10782 | } |
| 10783 | |
| 10784 | AllocType = Info.Ctx.getConstantArrayType(EltTy: AllocType, ArySize: ArrayBound, SizeExpr: nullptr, |
| 10785 | ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0); |
| 10786 | } else { |
| 10787 | assert(!AllocType->isArrayType() && |
| 10788 | "array allocation with non-array new" ); |
| 10789 | } |
| 10790 | |
| 10791 | APValue *Val; |
| 10792 | if (IsPlacement) { |
| 10793 | AccessKinds AK = AK_Construct; |
| 10794 | struct FindObjectHandler { |
| 10795 | EvalInfo &Info; |
| 10796 | const Expr *E; |
| 10797 | QualType AllocType; |
| 10798 | const AccessKinds AccessKind; |
| 10799 | APValue *Value; |
| 10800 | |
| 10801 | typedef bool result_type; |
| 10802 | bool failed() { return false; } |
| 10803 | bool checkConst(QualType QT) { |
| 10804 | if (QT.isConstQualified()) { |
| 10805 | Info.FFDiag(E, DiagId: diag::note_constexpr_modify_const_type) << QT; |
| 10806 | return false; |
| 10807 | } |
| 10808 | return true; |
| 10809 | } |
| 10810 | bool found(APValue &Subobj, QualType SubobjType) { |
| 10811 | if (!checkConst(QT: SubobjType)) |
| 10812 | return false; |
| 10813 | // FIXME: Reject the cases where [basic.life]p8 would not permit the |
| 10814 | // old name of the object to be used to name the new object. |
| 10815 | unsigned SubobjectSize = 1; |
| 10816 | unsigned AllocSize = 1; |
| 10817 | if (auto *CAT = dyn_cast<ConstantArrayType>(Val&: AllocType)) |
| 10818 | AllocSize = CAT->getZExtSize(); |
| 10819 | if (auto *CAT = dyn_cast<ConstantArrayType>(Val&: SubobjType)) |
| 10820 | SubobjectSize = CAT->getZExtSize(); |
| 10821 | if (SubobjectSize < AllocSize || |
| 10822 | !Info.Ctx.hasSimilarType(T1: Info.Ctx.getBaseElementType(QT: SubobjType), |
| 10823 | T2: Info.Ctx.getBaseElementType(QT: AllocType))) { |
| 10824 | Info.FFDiag(E, DiagId: diag::note_constexpr_placement_new_wrong_type) |
| 10825 | << SubobjType << AllocType; |
| 10826 | return false; |
| 10827 | } |
| 10828 | Value = &Subobj; |
| 10829 | return true; |
| 10830 | } |
| 10831 | bool found(APSInt &Value, QualType SubobjType) { |
| 10832 | Info.FFDiag(E, DiagId: diag::note_constexpr_construct_complex_elem); |
| 10833 | return false; |
| 10834 | } |
| 10835 | bool found(APFloat &Value, QualType SubobjType) { |
| 10836 | Info.FFDiag(E, DiagId: diag::note_constexpr_construct_complex_elem); |
| 10837 | return false; |
| 10838 | } |
| 10839 | } Handler = {.Info: Info, .E: E, .AllocType: AllocType, .AccessKind: AK, .Value: nullptr}; |
| 10840 | |
| 10841 | CompleteObject Obj = findCompleteObject(Info, E, AK, LVal: Result, LValType: AllocType); |
| 10842 | if (!Obj || !findSubobject(Info, E, Obj, Sub: Result.Designator, handler&: Handler)) |
| 10843 | return false; |
| 10844 | |
| 10845 | Val = Handler.Value; |
| 10846 | |
| 10847 | // [basic.life]p1: |
| 10848 | // The lifetime of an object o of type T ends when [...] the storage |
| 10849 | // which the object occupies is [...] reused by an object that is not |
| 10850 | // nested within o (6.6.2). |
| 10851 | *Val = APValue(); |
| 10852 | } else { |
| 10853 | // Perform the allocation and obtain a pointer to the resulting object. |
| 10854 | Val = Info.createHeapAlloc(E, T: AllocType, LV&: Result); |
| 10855 | if (!Val) |
| 10856 | return false; |
| 10857 | } |
| 10858 | |
| 10859 | if (ValueInit) { |
| 10860 | ImplicitValueInitExpr VIE(AllocType); |
| 10861 | if (!EvaluateInPlace(Result&: *Val, Info, This: Result, E: &VIE)) |
| 10862 | return false; |
| 10863 | } else if (ResizedArrayILE) { |
| 10864 | if (!EvaluateArrayNewInitList(Info, This&: Result, Result&: *Val, ILE: ResizedArrayILE, |
| 10865 | AllocType)) |
| 10866 | return false; |
| 10867 | } else if (ResizedArrayCCE) { |
| 10868 | if (!EvaluateArrayNewConstructExpr(Info, This&: Result, Result&: *Val, CCE: ResizedArrayCCE, |
| 10869 | AllocType)) |
| 10870 | return false; |
| 10871 | } else if (Init) { |
| 10872 | if (!EvaluateInPlace(Result&: *Val, Info, This: Result, E: Init)) |
| 10873 | return false; |
| 10874 | } else if (!handleDefaultInitValue(T: AllocType, Result&: *Val)) { |
| 10875 | return false; |
| 10876 | } |
| 10877 | |
| 10878 | // Array new returns a pointer to the first element, not a pointer to the |
| 10879 | // array. |
| 10880 | if (auto *AT = AllocType->getAsArrayTypeUnsafe()) |
| 10881 | Result.addArray(Info, E, CAT: cast<ConstantArrayType>(Val: AT)); |
| 10882 | |
| 10883 | return true; |
| 10884 | } |
| 10885 | //===----------------------------------------------------------------------===// |
| 10886 | // Member Pointer Evaluation |
| 10887 | //===----------------------------------------------------------------------===// |
| 10888 | |
| 10889 | namespace { |
| 10890 | class MemberPointerExprEvaluator |
| 10891 | : public ExprEvaluatorBase<MemberPointerExprEvaluator> { |
| 10892 | MemberPtr &Result; |
| 10893 | |
| 10894 | bool Success(const ValueDecl *D) { |
| 10895 | Result = MemberPtr(D); |
| 10896 | return true; |
| 10897 | } |
| 10898 | public: |
| 10899 | |
| 10900 | MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result) |
| 10901 | : ExprEvaluatorBaseTy(Info), Result(Result) {} |
| 10902 | |
| 10903 | bool Success(const APValue &V, const Expr *E) { |
| 10904 | Result.setFrom(V); |
| 10905 | return true; |
| 10906 | } |
| 10907 | bool ZeroInitialization(const Expr *E) { |
| 10908 | return Success(D: (const ValueDecl*)nullptr); |
| 10909 | } |
| 10910 | |
| 10911 | bool VisitCastExpr(const CastExpr *E); |
| 10912 | bool VisitUnaryAddrOf(const UnaryOperator *E); |
| 10913 | }; |
| 10914 | } // end anonymous namespace |
| 10915 | |
| 10916 | static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result, |
| 10917 | EvalInfo &Info) { |
| 10918 | assert(!E->isValueDependent()); |
| 10919 | assert(E->isPRValue() && E->getType()->isMemberPointerType()); |
| 10920 | return MemberPointerExprEvaluator(Info, Result).Visit(S: E); |
| 10921 | } |
| 10922 | |
| 10923 | bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 10924 | switch (E->getCastKind()) { |
| 10925 | default: |
| 10926 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 10927 | |
| 10928 | case CK_NullToMemberPointer: |
| 10929 | VisitIgnoredValue(E: E->getSubExpr()); |
| 10930 | return ZeroInitialization(E); |
| 10931 | |
| 10932 | case CK_BaseToDerivedMemberPointer: { |
| 10933 | if (!Visit(S: E->getSubExpr())) |
| 10934 | return false; |
| 10935 | if (E->path_empty()) |
| 10936 | return true; |
| 10937 | // Base-to-derived member pointer casts store the path in derived-to-base |
| 10938 | // order, so iterate backwards. The CXXBaseSpecifier also provides us with |
| 10939 | // the wrong end of the derived->base arc, so stagger the path by one class. |
| 10940 | typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter; |
| 10941 | for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin()); |
| 10942 | PathI != PathE; ++PathI) { |
| 10943 | assert(!(*PathI)->isVirtual() && "memptr cast through vbase" ); |
| 10944 | const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl(); |
| 10945 | if (!Result.castToDerived(Derived)) |
| 10946 | return Error(E); |
| 10947 | } |
| 10948 | if (!Result.castToDerived(Derived: E->getType() |
| 10949 | ->castAs<MemberPointerType>() |
| 10950 | ->getMostRecentCXXRecordDecl())) |
| 10951 | return Error(E); |
| 10952 | return true; |
| 10953 | } |
| 10954 | |
| 10955 | case CK_DerivedToBaseMemberPointer: |
| 10956 | if (!Visit(S: E->getSubExpr())) |
| 10957 | return false; |
| 10958 | for (CastExpr::path_const_iterator PathI = E->path_begin(), |
| 10959 | PathE = E->path_end(); PathI != PathE; ++PathI) { |
| 10960 | assert(!(*PathI)->isVirtual() && "memptr cast through vbase" ); |
| 10961 | const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl(); |
| 10962 | if (!Result.castToBase(Base)) |
| 10963 | return Error(E); |
| 10964 | } |
| 10965 | return true; |
| 10966 | } |
| 10967 | } |
| 10968 | |
| 10969 | bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) { |
| 10970 | // C++11 [expr.unary.op]p3 has very strict rules on how the address of a |
| 10971 | // member can be formed. |
| 10972 | return Success(D: cast<DeclRefExpr>(Val: E->getSubExpr())->getDecl()); |
| 10973 | } |
| 10974 | |
| 10975 | //===----------------------------------------------------------------------===// |
| 10976 | // Record Evaluation |
| 10977 | //===----------------------------------------------------------------------===// |
| 10978 | |
| 10979 | namespace { |
| 10980 | class RecordExprEvaluator |
| 10981 | : public ExprEvaluatorBase<RecordExprEvaluator> { |
| 10982 | const LValue &This; |
| 10983 | APValue &Result; |
| 10984 | public: |
| 10985 | |
| 10986 | RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result) |
| 10987 | : ExprEvaluatorBaseTy(info), This(This), Result(Result) {} |
| 10988 | |
| 10989 | bool Success(const APValue &V, const Expr *E) { |
| 10990 | Result = V; |
| 10991 | return true; |
| 10992 | } |
| 10993 | bool ZeroInitialization(const Expr *E) { |
| 10994 | return ZeroInitialization(E, T: E->getType()); |
| 10995 | } |
| 10996 | bool ZeroInitialization(const Expr *E, QualType T); |
| 10997 | |
| 10998 | bool VisitCallExpr(const CallExpr *E) { |
| 10999 | return handleCallExpr(E, Result, ResultSlot: &This); |
| 11000 | } |
| 11001 | bool VisitCastExpr(const CastExpr *E); |
| 11002 | bool VisitInitListExpr(const InitListExpr *E); |
| 11003 | bool VisitCXXConstructExpr(const CXXConstructExpr *E) { |
| 11004 | return VisitCXXConstructExpr(E, T: E->getType()); |
| 11005 | } |
| 11006 | bool VisitLambdaExpr(const LambdaExpr *E); |
| 11007 | bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); |
| 11008 | bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T); |
| 11009 | bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E); |
| 11010 | bool VisitBinCmp(const BinaryOperator *E); |
| 11011 | bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E); |
| 11012 | bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit, |
| 11013 | ArrayRef<Expr *> Args); |
| 11014 | }; |
| 11015 | } // end anonymous namespace

| 11016 | |
| 11017 | /// Perform zero-initialization on an object of non-union class type. |
| 11018 | /// C++11 [dcl.init]p5: |
| 11019 | /// To zero-initialize an object or reference of type T means: |
| 11020 | /// [...] |
| 11021 | /// -- if T is a (possibly cv-qualified) non-union class type, |
| 11022 | /// each non-static data member and each base-class subobject is |
| 11023 | /// zero-initialized |
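|       | /// For instance (illustrative), zero-initializing `struct S : B { int x; };`
|       | /// zero-initializes the B base subobject recursively and then the member x.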
| 11024 | static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E, |
| 11025 | const RecordDecl *RD, |
| 11026 | const LValue &This, APValue &Result) { |
| 11027 | assert(!RD->isUnion() && "Expected non-union class type" ); |
| 11028 | const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(Val: RD); |
| 11029 | Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0, |
| 11030 | RD->getNumFields()); |
| 11031 | |
| 11032 | if (RD->isInvalidDecl()) return false; |
| 11033 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 11034 | |
| 11035 | if (CD) { |
| 11036 | unsigned Index = 0; |
| 11037 | for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(), |
| 11038 | End = CD->bases_end(); I != End; ++I, ++Index) { |
| 11039 | const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl(); |
| 11040 | LValue Subobject = This; |
| 11041 | if (!HandleLValueDirectBase(Info, E, Obj&: Subobject, Derived: CD, Base, RL: &Layout)) |
| 11042 | return false; |
| 11043 | if (!HandleClassZeroInitialization(Info, E, RD: Base, This: Subobject, |
| 11044 | Result&: Result.getStructBase(i: Index))) |
| 11045 | return false; |
| 11046 | } |
| 11047 | } |
| 11048 | |
| 11049 | for (const auto *I : RD->fields()) { |
| 11050 | // -- if T is a reference type, no initialization is performed. |
| 11051 | if (I->isUnnamedBitField() || I->getType()->isReferenceType()) |
| 11052 | continue; |
| 11053 | |
| 11054 | LValue Subobject = This; |
| 11055 | if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: I, RL: &Layout)) |
| 11056 | return false; |
| 11057 | |
| 11058 | ImplicitValueInitExpr VIE(I->getType()); |
| 11059 | if (!EvaluateInPlace( |
| 11060 | Result&: Result.getStructField(i: I->getFieldIndex()), Info, This: Subobject, E: &VIE)) |
| 11061 | return false; |
| 11062 | } |
| 11063 | |
| 11064 | return true; |
| 11065 | } |
| 11066 | |
| 11067 | bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) { |
| 11068 | const auto *RD = T->castAsRecordDecl(); |
| 11069 | if (RD->isInvalidDecl()) return false; |
| 11070 | if (RD->isUnion()) { |
| 11071 | // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the |
| 11072 | // object's first non-static named data member is zero-initialized |
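|       | // e.g. (illustrative) for `union U { int a; float b; };` only `a` is
|       | // zero-initialized here.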
| 11073 | RecordDecl::field_iterator I = RD->field_begin(); |
| 11074 | while (I != RD->field_end() && (*I)->isUnnamedBitField()) |
| 11075 | ++I; |
| 11076 | if (I == RD->field_end()) { |
| 11077 | Result = APValue((const FieldDecl*)nullptr); |
| 11078 | return true; |
| 11079 | } |
| 11080 | |
| 11081 | LValue Subobject = This; |
| 11082 | if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: *I)) |
| 11083 | return false; |
| 11084 | Result = APValue(*I); |
| 11085 | ImplicitValueInitExpr VIE(I->getType()); |
| 11086 | return EvaluateInPlace(Result&: Result.getUnionValue(), Info, This: Subobject, E: &VIE); |
| 11087 | } |
| 11088 | |
| 11089 | if (isa<CXXRecordDecl>(Val: RD) && cast<CXXRecordDecl>(Val: RD)->getNumVBases()) { |
| 11090 | Info.FFDiag(E, DiagId: diag::note_constexpr_virtual_base) << RD; |
| 11091 | return false; |
| 11092 | } |
| 11093 | |
| 11094 | return HandleClassZeroInitialization(Info, E, RD, This, Result); |
| 11095 | } |
| 11096 | |
| 11097 | bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 11098 | switch (E->getCastKind()) { |
| 11099 | default: |
| 11100 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 11101 | |
| 11102 | case CK_ConstructorConversion: |
| 11103 | return Visit(S: E->getSubExpr()); |
| 11104 | |
| 11105 | case CK_DerivedToBase: |
| 11106 | case CK_UncheckedDerivedToBase: { |
| 11107 | APValue DerivedObject; |
| 11108 | if (!Evaluate(Result&: DerivedObject, Info, E: E->getSubExpr())) |
| 11109 | return false; |
| 11110 | if (!DerivedObject.isStruct()) |
| 11111 | return Error(E: E->getSubExpr()); |
| 11112 | |
| 11113 | // Derived-to-base rvalue conversion: just slice off the derived part. |
| 11114 | APValue *Value = &DerivedObject; |
| 11115 | const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl(); |
| 11116 | for (CastExpr::path_const_iterator PathI = E->path_begin(), |
| 11117 | PathE = E->path_end(); PathI != PathE; ++PathI) { |
| 11118 | assert(!(*PathI)->isVirtual() && "record rvalue with virtual base" ); |
| 11119 | const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl(); |
| 11120 | Value = &Value->getStructBase(i: getBaseIndex(Derived: RD, Base)); |
| 11121 | RD = Base; |
| 11122 | } |
| 11123 | Result = *Value; |
| 11124 | return true; |
| 11125 | } |
| 11126 | case CK_HLSLAggregateSplatCast: { |
| 11127 | APValue Val; |
| 11128 | QualType ValTy; |
| 11129 | |
| 11130 | if (!hlslAggSplatHelper(Info, E: E->getSubExpr(), SrcVal&: Val, SrcTy&: ValTy)) |
| 11131 | return false; |
| 11132 | |
| 11133 | unsigned NEls = elementwiseSize(Info, BaseTy: E->getType()); |
| 11134 | // Splat our Val.
| 11135 | SmallVector<APValue> SplatEls(NEls, Val); |
| 11136 | SmallVector<QualType> SplatType(NEls, ValTy); |
| 11137 | |
| 11138 | // Cast the elements and construct our struct result.
| 11139 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 11140 | if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SplatEls, |
| 11141 | ElTypes&: SplatType)) |
| 11142 | return false; |
| 11143 | |
| 11144 | return true; |
| 11145 | } |
| 11146 | case CK_HLSLElementwiseCast: { |
| 11147 | SmallVector<APValue> SrcEls; |
| 11148 | SmallVector<QualType> SrcTypes; |
| 11149 | |
| 11150 | if (!hlslElementwiseCastHelper(Info, E: E->getSubExpr(), DestTy: E->getType(), SrcVals&: SrcEls, |
| 11151 | SrcTypes)) |
| 11152 | return false; |
| 11153 | |
| 11154 | // Cast the elements and construct our struct result.
| 11155 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 11156 | if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SrcEls, |
| 11157 | ElTypes&: SrcTypes)) |
| 11158 | return false; |
| 11159 | |
| 11160 | return true; |
| 11161 | } |
| 11162 | } |
| 11163 | } |
| 11164 | |
| 11165 | bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { |
| 11166 | if (E->isTransparent()) |
| 11167 | return Visit(S: E->getInit(Init: 0)); |
| 11168 | return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->inits()); |
| 11169 | } |
| 11170 | |
| 11171 | bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr( |
| 11172 | const Expr *ExprToVisit, ArrayRef<Expr *> Args) { |
| 11173 | const auto *RD = ExprToVisit->getType()->castAsRecordDecl(); |
| 11174 | if (RD->isInvalidDecl()) return false; |
| 11175 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: RD); |
| 11176 | auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD); |
| 11177 | |
| 11178 | EvalInfo::EvaluatingConstructorRAII EvalObj( |
| 11179 | Info, |
| 11180 | ObjectUnderConstruction{.Base: This.getLValueBase(), .Path: This.Designator.Entries}, |
| 11181 | CXXRD && CXXRD->getNumBases()); |
| 11182 | |
| 11183 | if (RD->isUnion()) { |
| 11184 | const FieldDecl *Field; |
| 11185 | if (auto *ILE = dyn_cast<InitListExpr>(Val: ExprToVisit)) { |
| 11186 | Field = ILE->getInitializedFieldInUnion(); |
| 11187 | } else if (auto *PLIE = dyn_cast<CXXParenListInitExpr>(Val: ExprToVisit)) { |
| 11188 | Field = PLIE->getInitializedFieldInUnion(); |
| 11189 | } else { |
| 11190 | llvm_unreachable( |
| 11191 | "Expression is neither an init list nor a C++ paren list" ); |
| 11192 | } |
| 11193 | |
| 11194 | Result = APValue(Field); |
| 11195 | if (!Field) |
| 11196 | return true; |
| 11197 | |
| 11198 | // If the initializer list for a union does not contain any elements, the |
| 11199 | // first element of the union is value-initialized. |
| 11200 | // FIXME: The element should be initialized from an initializer list. |
| 11201 | // Is this difference ever observable for initializer lists which |
| 11202 | // we don't build? |
| 11203 | ImplicitValueInitExpr VIE(Field->getType()); |
| 11204 | const Expr *InitExpr = Args.empty() ? &VIE : Args[0]; |
| 11205 | |
| 11206 | LValue Subobject = This; |
| 11207 | if (!HandleLValueMember(Info, E: InitExpr, LVal&: Subobject, FD: Field, RL: &Layout)) |
| 11208 | return false; |
| 11209 | |
| 11210 | // Temporarily override This, in case there's a CXXDefaultInitExpr in here. |
| 11211 | ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This, |
| 11212 | isa<CXXDefaultInitExpr>(Val: InitExpr)); |
| 11213 | |
| 11214 | if (EvaluateInPlace(Result&: Result.getUnionValue(), Info, This: Subobject, E: InitExpr)) { |
| 11215 | if (Field->isBitField()) |
| 11216 | return truncateBitfieldValue(Info, E: InitExpr, Value&: Result.getUnionValue(), |
| 11217 | FD: Field); |
| 11218 | return true; |
| 11219 | } |
| 11220 | |
| 11221 | return false; |
| 11222 | } |
| 11223 | |
| 11224 | if (!Result.hasValue()) |
| 11225 | Result = APValue(APValue::UninitStruct(), CXXRD ? CXXRD->getNumBases() : 0, |
| 11226 | RD->getNumFields()); |
| 11227 | unsigned ElementNo = 0; |
| 11228 | bool Success = true; |
| 11229 | |
| 11230 | // Initialize base classes. |
| 11231 | if (CXXRD && CXXRD->getNumBases()) { |
| 11232 | for (const auto &Base : CXXRD->bases()) { |
| 11233 | assert(ElementNo < Args.size() && "missing init for base class" ); |
| 11234 | const Expr *Init = Args[ElementNo]; |
| 11235 | |
| 11236 | LValue Subobject = This; |
| 11237 | if (!HandleLValueBase(Info, E: Init, Obj&: Subobject, DerivedDecl: CXXRD, Base: &Base)) |
| 11238 | return false; |
| 11239 | |
| 11240 | APValue &FieldVal = Result.getStructBase(i: ElementNo); |
| 11241 | if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: Init)) { |
| 11242 | if (!Info.noteFailure()) |
| 11243 | return false; |
| 11244 | Success = false; |
| 11245 | } |
| 11246 | ++ElementNo; |
| 11247 | } |
| 11248 | |
| 11249 | EvalObj.finishedConstructingBases(); |
| 11250 | } |
| 11251 | |
| 11252 | // Initialize members. |
| 11253 | for (const auto *Field : RD->fields()) { |
| 11254 | // Anonymous bit-fields are not considered members of the class for |
| 11255 | // purposes of aggregate initialization. |
| 11256 | if (Field->isUnnamedBitField()) |
| 11257 | continue; |
| 11258 | |
| 11259 | LValue Subobject = This; |
| 11260 | |
| 11261 | bool HaveInit = ElementNo < Args.size(); |
| 11262 | |
| 11263 | // FIXME: Diagnostics here should point to the end of the initializer |
| 11264 | // list, not the start. |
| 11265 | if (!HandleLValueMember(Info, E: HaveInit ? Args[ElementNo] : ExprToVisit, |
| 11266 | LVal&: Subobject, FD: Field, RL: &Layout)) |
| 11267 | return false; |
| 11268 | |
| 11269 | // Perform an implicit value-initialization for members beyond the end of |
| 11270 | // the initializer list. |
| 11271 | ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType()); |
| 11272 | const Expr *Init = HaveInit ? Args[ElementNo++] : &VIE; |
| 11273 | |
| 11274 | if (Field->getType()->isIncompleteArrayType()) { |
| 11275 | if (auto *CAT = Info.Ctx.getAsConstantArrayType(T: Init->getType())) { |
| 11276 | if (!CAT->isZeroSize()) { |
| 11277 | // Bail out for now. This might sort of "work", but the rest of the |
| 11278 | // code isn't really prepared to handle it. |
| 11279 | Info.FFDiag(E: Init, DiagId: diag::note_constexpr_unsupported_flexible_array); |
| 11280 | return false; |
| 11281 | } |
| 11282 | } |
| 11283 | } |
| 11284 | |
| 11285 | // Temporarily override This, in case there's a CXXDefaultInitExpr in here. |
| 11286 | ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This, |
| 11287 | isa<CXXDefaultInitExpr>(Val: Init)); |
| 11288 | |
| 11289 | APValue &FieldVal = Result.getStructField(i: Field->getFieldIndex()); |
| 11290 | if (Field->getType()->isReferenceType()) { |
| 11291 | LValue Result; |
| 11292 | if (!EvaluateInitForDeclOfReferenceType(Info, D: Field, Init, Result, |
| 11293 | Val&: FieldVal)) { |
| 11294 | if (!Info.noteFailure()) |
| 11295 | return false; |
| 11296 | Success = false; |
| 11297 | } |
| 11298 | } else if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: Init) || |
| 11299 | (Field->isBitField() && |
| 11300 | !truncateBitfieldValue(Info, E: Init, Value&: FieldVal, FD: Field))) { |
| 11301 | if (!Info.noteFailure()) |
| 11302 | return false; |
| 11303 | Success = false; |
| 11304 | } |
| 11305 | } |
| 11306 | |
| 11307 | EvalObj.finishedConstructingFields(); |
| 11308 | |
| 11309 | return Success; |
| 11310 | } |
| 11311 | |
| 11312 | bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, |
| 11313 | QualType T) { |
| 11314 | // Note that E's type is not necessarily the type of our class here; we might |
| 11315 | // be initializing an array element instead. |
| 11316 | const CXXConstructorDecl *FD = E->getConstructor(); |
| 11317 | if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false; |
| 11318 | |
| 11319 | bool ZeroInit = E->requiresZeroInitialization(); |
| 11320 | if (CheckTrivialDefaultConstructor(Info, Loc: E->getExprLoc(), CD: FD, IsValueInitialization: ZeroInit)) { |
| 11321 | if (ZeroInit) |
| 11322 | return ZeroInitialization(E, T); |
| 11323 | |
| 11324 | return handleDefaultInitValue(T, Result); |
| 11325 | } |
| 11326 | |
| 11327 | const FunctionDecl *Definition = nullptr; |
| 11328 | auto Body = FD->getBody(Definition); |
| 11329 | |
| 11330 | if (!CheckConstexprFunction(Info, CallLoc: E->getExprLoc(), Declaration: FD, Definition, Body)) |
| 11331 | return false; |
| 11332 | |
| 11333 | // Avoid materializing a temporary for an elidable copy/move constructor. |
| 11334 | if (E->isElidable() && !ZeroInit) { |
| 11335 | // FIXME: This only handles the simplest case, where the source object |
| 11336 | // is passed directly as the first argument to the constructor. |
| 11337 | // This should also handle stepping through implicit casts and
| 11338 | // conversion sequences which involve two steps, with a
| 11339 | // conversion operator followed by a converting constructor. |
| 11340 | const Expr *SrcObj = E->getArg(Arg: 0); |
| 11341 | assert(SrcObj->isTemporaryObject(Info.Ctx, FD->getParent())); |
| 11342 | assert(Info.Ctx.hasSameUnqualifiedType(E->getType(), SrcObj->getType())); |
| 11343 | if (const MaterializeTemporaryExpr *ME = |
| 11344 | dyn_cast<MaterializeTemporaryExpr>(Val: SrcObj)) |
| 11345 | return Visit(S: ME->getSubExpr()); |
| 11346 | } |
| 11347 | |
| 11348 | if (ZeroInit && !ZeroInitialization(E, T)) |
| 11349 | return false; |
| 11350 | |
| 11351 | auto Args = ArrayRef(E->getArgs(), E->getNumArgs()); |
| 11352 | return HandleConstructorCall(E, This, Args, |
| 11353 | Definition: cast<CXXConstructorDecl>(Val: Definition), Info, |
| 11354 | Result); |
| 11355 | } |
| 11356 | |
| 11357 | bool RecordExprEvaluator::VisitCXXInheritedCtorInitExpr( |
| 11358 | const CXXInheritedCtorInitExpr *E) { |
| 11359 | if (!Info.CurrentCall) { |
| 11360 | assert(Info.checkingPotentialConstantExpression()); |
| 11361 | return false; |
| 11362 | } |
| 11363 | |
| 11364 | const CXXConstructorDecl *FD = E->getConstructor(); |
| 11365 | if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) |
| 11366 | return false; |
| 11367 | |
| 11368 | const FunctionDecl *Definition = nullptr; |
| 11369 | auto Body = FD->getBody(Definition); |
| 11370 | |
| 11371 | if (!CheckConstexprFunction(Info, CallLoc: E->getExprLoc(), Declaration: FD, Definition, Body)) |
| 11372 | return false; |
| 11373 | |
| 11374 | return HandleConstructorCall(E, This, Call: Info.CurrentCall->Arguments, |
| 11375 | Definition: cast<CXXConstructorDecl>(Val: Definition), Info, |
| 11376 | Result); |
| 11377 | } |
| 11378 | |
| 11379 | bool RecordExprEvaluator::VisitCXXStdInitializerListExpr( |
| 11380 | const CXXStdInitializerListExpr *E) { |
| 11381 | const ConstantArrayType *ArrayType = |
| 11382 | Info.Ctx.getAsConstantArrayType(T: E->getSubExpr()->getType()); |
| 11383 | |
| 11384 | LValue Array; |
| 11385 | if (!EvaluateLValue(E: E->getSubExpr(), Result&: Array, Info)) |
| 11386 | return false; |
| 11387 | |
| 11388 | assert(ArrayType && "unexpected type for array initializer" ); |
| 11389 | |
| 11390 | // Get a pointer to the first element of the array. |
| 11391 | Array.addArray(Info, E, CAT: ArrayType); |
| 11392 | |
| 11393 | // FIXME: What if the initializer_list type has base classes, etc? |
| 11394 | Result = APValue(APValue::UninitStruct(), 0, 2); |
| 11395 | Array.moveInto(V&: Result.getStructField(i: 0)); |
| 11396 | |
| 11397 | auto *Record = E->getType()->castAsRecordDecl(); |
| 11398 | RecordDecl::field_iterator Field = Record->field_begin(); |
| 11399 | assert(Field != Record->field_end() && |
| 11400 | Info.Ctx.hasSameType(Field->getType()->getPointeeType(), |
| 11401 | ArrayType->getElementType()) && |
| 11402 | "Expected std::initializer_list first field to be const E *" ); |
| 11403 | ++Field; |
| 11404 | assert(Field != Record->field_end() && |
| 11405 | "Expected std::initializer_list to have two fields" ); |
| 11406 | |
| 11407 | if (Info.Ctx.hasSameType(T1: Field->getType(), T2: Info.Ctx.getSizeType())) { |
| 11408 | // Length. |
| 11409 | Result.getStructField(i: 1) = APValue(APSInt(ArrayType->getSize())); |
| 11410 | } else { |
| 11411 | // End pointer. |
| 11412 | assert(Info.Ctx.hasSameType(Field->getType()->getPointeeType(), |
| 11413 | ArrayType->getElementType()) && |
| 11414 | "Expected std::initializer_list second field to be const E *" ); |
| 11415 | if (!HandleLValueArrayAdjustment(Info, E, LVal&: Array, |
| 11416 | EltTy: ArrayType->getElementType(), |
| 11417 | Adjustment: ArrayType->getZExtSize())) |
| 11418 | return false; |
| 11419 | Array.moveInto(V&: Result.getStructField(i: 1)); |
| 11420 | } |
| 11421 | |
| 11422 | assert(++Field == Record->field_end() && |
| 11423 | "Expected std::initializer_list to only have two fields" ); |
| 11424 | |
| 11425 | return true; |
| 11426 | } |
| 11427 | |
| 11428 | bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) { |
| 11429 | const CXXRecordDecl *ClosureClass = E->getLambdaClass(); |
| 11430 | if (ClosureClass->isInvalidDecl()) |
| 11431 | return false; |
| 11432 | |
| 11433 | const size_t NumFields = ClosureClass->getNumFields(); |
| 11434 | |
| 11435 | assert(NumFields == (size_t)std::distance(E->capture_init_begin(), |
| 11436 | E->capture_init_end()) && |
| 11437 | "The number of lambda capture initializers should equal the number of " |
| 11438 | "fields within the closure type" ); |
| 11439 | |
| 11440 | Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields); |
| 11441 | // Iterate through all the lambda's closure object's fields and initialize |
| 11442 | // them. |
| 11443 | auto *CaptureInitIt = E->capture_init_begin(); |
| 11444 | bool Success = true; |
| 11445 | const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(D: ClosureClass); |
| 11446 | for (const auto *Field : ClosureClass->fields()) { |
| 11447 | assert(CaptureInitIt != E->capture_init_end()); |
| 11448 | // Get the initializer for this field |
| 11449 | Expr *const CurFieldInit = *CaptureInitIt++; |
| 11450 | |
| 11451 | // If there is no initializer, either this is a VLA or an error has |
| 11452 | // occurred. |
| 11453 | if (!CurFieldInit || CurFieldInit->containsErrors()) |
| 11454 | return Error(E); |
| 11455 | |
| 11456 | LValue Subobject = This; |
| 11457 | |
| 11458 | if (!HandleLValueMember(Info, E, LVal&: Subobject, FD: Field, RL: &Layout)) |
| 11459 | return false; |
| 11460 | |
| 11461 | APValue &FieldVal = Result.getStructField(i: Field->getFieldIndex()); |
| 11462 | if (!EvaluateInPlace(Result&: FieldVal, Info, This: Subobject, E: CurFieldInit)) { |
| 11463 | if (!Info.keepEvaluatingAfterFailure()) |
| 11464 | return false; |
| 11465 | Success = false; |
| 11466 | } |
| 11467 | } |
| 11468 | return Success; |
| 11469 | } |
| 11470 | |
| 11471 | static bool EvaluateRecord(const Expr *E, const LValue &This, |
| 11472 | APValue &Result, EvalInfo &Info) { |
| 11473 | assert(!E->isValueDependent()); |
| 11474 | assert(E->isPRValue() && E->getType()->isRecordType() && |
| 11475 | "can't evaluate expression as a record rvalue" ); |
| 11476 | return RecordExprEvaluator(Info, This, Result).Visit(S: E); |
| 11477 | } |
| 11478 | |
| 11479 | //===----------------------------------------------------------------------===// |
| 11480 | // Temporary Evaluation |
| 11481 | // |
| 11482 | // Temporaries are represented in the AST as rvalues, but generally behave like |
| 11483 | // lvalues. The full-object of which the temporary is a subobject is implicitly |
| 11484 | // materialized so that a reference can bind to it. |
| 11485 | //===----------------------------------------------------------------------===// |
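|       | // For example (illustrative): in `const X &r = X(42);` the prvalue X(42) is
|       | // evaluated into a materialized temporary so that the reference can bind.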
| 11486 | namespace { |
| 11487 | class TemporaryExprEvaluator |
| 11488 | : public LValueExprEvaluatorBase<TemporaryExprEvaluator> { |
| 11489 | public: |
| 11490 | TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) : |
| 11491 | LValueExprEvaluatorBaseTy(Info, Result, false) {} |
| 11492 | |
| 11493 | /// Visit an expression which constructs the value of this temporary. |
| 11494 | bool VisitConstructExpr(const Expr *E) { |
| 11495 | APValue &Value = Info.CurrentCall->createTemporary( |
| 11496 | Key: E, T: E->getType(), Scope: ScopeKind::FullExpression, LV&: Result); |
| 11497 | return EvaluateInPlace(Result&: Value, Info, This: Result, E); |
| 11498 | } |
| 11499 | |
| 11500 | bool VisitCastExpr(const CastExpr *E) { |
| 11501 | switch (E->getCastKind()) { |
| 11502 | default: |
| 11503 | return LValueExprEvaluatorBaseTy::VisitCastExpr(E); |
| 11504 | |
| 11505 | case CK_ConstructorConversion: |
| 11506 | return VisitConstructExpr(E: E->getSubExpr()); |
| 11507 | } |
| 11508 | } |
| 11509 | bool VisitInitListExpr(const InitListExpr *E) { |
| 11510 | return VisitConstructExpr(E); |
| 11511 | } |
| 11512 | bool VisitCXXConstructExpr(const CXXConstructExpr *E) { |
| 11513 | return VisitConstructExpr(E); |
| 11514 | } |
| 11515 | bool VisitCallExpr(const CallExpr *E) { |
| 11516 | return VisitConstructExpr(E); |
| 11517 | } |
| 11518 | bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) { |
| 11519 | return VisitConstructExpr(E); |
| 11520 | } |
| 11521 | bool VisitLambdaExpr(const LambdaExpr *E) { |
| 11522 | return VisitConstructExpr(E); |
| 11523 | } |
| 11524 | }; |
| 11525 | } // end anonymous namespace |
| 11526 | |
| 11527 | /// Evaluate an expression of record type as a temporary. |
| 11528 | static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) { |
| 11529 | assert(!E->isValueDependent()); |
| 11530 | assert(E->isPRValue() && E->getType()->isRecordType()); |
| 11531 | return TemporaryExprEvaluator(Info, Result).Visit(S: E); |
| 11532 | } |
| 11533 | |
| 11534 | //===----------------------------------------------------------------------===// |
| 11535 | // Vector Evaluation |
| 11536 | //===----------------------------------------------------------------------===// |
| 11537 | |
| 11538 | namespace { |
| 11539 | class VectorExprEvaluator |
| 11540 | : public ExprEvaluatorBase<VectorExprEvaluator> { |
| 11541 | APValue &Result; |
| 11542 | public: |
| 11543 | |
| 11544 | VectorExprEvaluator(EvalInfo &info, APValue &Result) |
| 11545 | : ExprEvaluatorBaseTy(info), Result(Result) {} |
| 11546 | |
| 11547 | bool Success(ArrayRef<APValue> V, const Expr *E) { |
| 11548 | assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements()); |
| 11549 | // FIXME: remove this APValue copy. |
| 11550 | Result = APValue(V.data(), V.size()); |
| 11551 | return true; |
| 11552 | } |
| 11553 | bool Success(const APValue &V, const Expr *E) { |
| 11554 | assert(V.isVector()); |
| 11555 | Result = V; |
| 11556 | return true; |
| 11557 | } |
| 11558 | bool ZeroInitialization(const Expr *E); |
| 11559 | |
| 11560 | bool VisitUnaryReal(const UnaryOperator *E) |
| 11561 | { return Visit(S: E->getSubExpr()); } |
| 11562 | bool VisitCastExpr(const CastExpr* E); |
| 11563 | bool VisitInitListExpr(const InitListExpr *E); |
| 11564 | bool VisitUnaryImag(const UnaryOperator *E); |
| 11565 | bool VisitBinaryOperator(const BinaryOperator *E); |
| 11566 | bool VisitUnaryOperator(const UnaryOperator *E); |
| 11567 | bool VisitCallExpr(const CallExpr *E); |
| 11568 | bool VisitConvertVectorExpr(const ConvertVectorExpr *E); |
| 11569 | bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E); |
| 11570 | |
| 11571 | // FIXME: Missing: conditional operator (for GNU |
| 11572 | // conditional select), ExtVectorElementExpr |
| 11573 | }; |
| 11574 | } // end anonymous namespace |
| 11575 | |
| 11576 | static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) { |
| 11577 | assert(E->isPRValue() && E->getType()->isVectorType() && |
| 11578 | "not a vector prvalue" ); |
| 11579 | return VectorExprEvaluator(Info, Result).Visit(S: E); |
| 11580 | } |
| 11581 | |
| 11582 | static llvm::APInt ConvertBoolVectorToInt(const APValue &Val) { |
| 11583 | assert(Val.isVector() && "expected vector APValue" ); |
| 11584 | unsigned NumElts = Val.getVectorLength(); |
| 11585 | |
| 11586 | // Each element is one bit, so create an integer with NumElts bits. |
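|       | // e.g. (illustrative) a 4-element bool vector {1, 0, 1, 1} produces the
|       | // 4-bit integer 0b1101, with vector element 0 mapping to bit 0.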
| 11587 | llvm::APInt Result(NumElts, 0); |
| 11588 | |
| 11589 | for (unsigned I = 0; I < NumElts; ++I) { |
| 11590 | const APValue &Elt = Val.getVectorElt(I); |
| 11591 | assert(Elt.isInt() && "expected integer element in bool vector" ); |
| 11592 | |
| 11593 | if (Elt.getInt().getBoolValue()) |
| 11594 | Result.setBit(I); |
| 11595 | } |
| 11596 | |
| 11597 | return Result; |
| 11598 | } |
| 11599 | |
| 11600 | bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 11601 | const VectorType *VTy = E->getType()->castAs<VectorType>(); |
| 11602 | unsigned NElts = VTy->getNumElements(); |
| 11603 | |
| 11604 | const Expr *SE = E->getSubExpr(); |
| 11605 | QualType SETy = SE->getType(); |
| 11606 | |
| 11607 | switch (E->getCastKind()) { |
| 11608 | case CK_VectorSplat: { |
| 11609 | APValue Val = APValue(); |
| 11610 | if (SETy->isIntegerType()) { |
| 11611 | APSInt IntResult; |
| 11612 | if (!EvaluateInteger(E: SE, Result&: IntResult, Info)) |
| 11613 | return false; |
| 11614 | Val = APValue(std::move(IntResult)); |
| 11615 | } else if (SETy->isRealFloatingType()) { |
| 11616 | APFloat FloatResult(0.0); |
| 11617 | if (!EvaluateFloat(E: SE, Result&: FloatResult, Info)) |
| 11618 | return false; |
| 11619 | Val = APValue(std::move(FloatResult)); |
| 11620 | } else { |
| 11621 | return Error(E); |
| 11622 | } |
| 11623 | |
| 11624 | // Splat and create vector APValue. |
| 11625 | SmallVector<APValue, 4> Elts(NElts, Val); |
| 11626 | return Success(V: Elts, E); |
| 11627 | } |
| 11628 | case CK_BitCast: { |
| 11629 | APValue SVal; |
| 11630 | if (!Evaluate(Result&: SVal, Info, E: SE)) |
| 11631 | return false; |
| 11632 | |
| 11633 | if (!SVal.isInt() && !SVal.isFloat() && !SVal.isVector()) { |
| 11634 | // Give up if the input isn't an int, float, or vector. For example, we |
| 11635 | // reject "(v4i16)(intptr_t)&a". |
| 11636 | Info.FFDiag(E, DiagId: diag::note_constexpr_invalid_cast) |
| 11637 | << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret |
| 11638 | << Info.Ctx.getLangOpts().CPlusPlus; |
| 11639 | return false; |
| 11640 | } |
| 11641 | |
| 11642 | if (!handleRValueToRValueBitCast(Info, DestValue&: Result, SourceRValue: SVal, BCE: E)) |
| 11643 | return false; |
| 11644 | |
| 11645 | return true; |
| 11646 | } |
| 11647 | case CK_HLSLVectorTruncation: { |
| 11648 | APValue Val; |
| 11649 | SmallVector<APValue, 4> Elements; |
| 11650 | if (!EvaluateVector(E: SE, Result&: Val, Info)) |
| 11651 | return Error(E); |
| 11652 | for (unsigned I = 0; I < NElts; I++) |
| 11653 | Elements.push_back(Elt: Val.getVectorElt(I)); |
| 11654 | return Success(V: Elements, E); |
| 11655 | } |
| 11656 | case CK_HLSLMatrixTruncation: { |
| 11657 | // TODO: See #168935. Add matrix truncation support to expr constant. |
| 11658 | return Error(E); |
| 11659 | } |
| 11660 | case CK_HLSLAggregateSplatCast: { |
| 11661 | APValue Val; |
| 11662 | QualType ValTy; |
| 11663 | |
| 11664 | if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy)) |
| 11665 | return false; |
| 11666 | |
    // Cast the source value to the element type once, then splat it.
| 11668 | APValue Result; |
| 11669 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 11670 | if (!handleScalarCast(Info, FPO, E, SourceTy: ValTy, DestTy: VTy->getElementType(), Original: Val, |
| 11671 | Result)) |
| 11672 | return false; |
| 11673 | |
| 11674 | SmallVector<APValue, 4> SplatEls(NElts, Result); |
| 11675 | return Success(V: SplatEls, E); |
| 11676 | } |
| 11677 | case CK_HLSLElementwiseCast: { |
| 11678 | SmallVector<APValue> SrcVals; |
| 11679 | SmallVector<QualType> SrcTypes; |
| 11680 | |
| 11681 | if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals, SrcTypes)) |
| 11682 | return false; |
| 11683 | |
| 11684 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 11685 | SmallVector<QualType, 4> DestTypes(NElts, VTy->getElementType()); |
| 11686 | SmallVector<APValue, 4> ResultEls(NElts); |
| 11687 | if (!handleElementwiseCast(Info, E, FPO, Elements&: SrcVals, SrcTypes, DestTypes, |
| 11688 | Results&: ResultEls)) |
| 11689 | return false; |
| 11690 | return Success(V: ResultEls, E); |
| 11691 | } |
| 11692 | default: |
| 11693 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 11694 | } |
| 11695 | } |
| 11696 | |
| 11697 | bool |
| 11698 | VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) { |
| 11699 | const VectorType *VT = E->getType()->castAs<VectorType>(); |
| 11700 | unsigned NumInits = E->getNumInits(); |
| 11701 | unsigned NumElements = VT->getNumElements(); |
| 11702 | |
| 11703 | QualType EltTy = VT->getElementType(); |
| 11704 | SmallVector<APValue, 4> Elements; |
| 11705 | |
  // The MFloat8 type has no constant representation, so constant folding
  // is impossible.
| 11708 | if (EltTy->isMFloat8Type()) |
| 11709 | return false; |
| 11710 | |
| 11711 | // The number of initializers can be less than the number of |
| 11712 | // vector elements. For OpenCL, this can be due to nested vector |
| 11713 | // initialization. For GCC compatibility, missing trailing elements |
| 11714 | // should be initialized with zeroes. |
| 11715 | unsigned CountInits = 0, CountElts = 0; |
| 11716 | while (CountElts < NumElements) { |
| 11717 | // Handle nested vector initialization. |
| 11718 | if (CountInits < NumInits |
| 11719 | && E->getInit(Init: CountInits)->getType()->isVectorType()) { |
| 11720 | APValue v; |
| 11721 | if (!EvaluateVector(E: E->getInit(Init: CountInits), Result&: v, Info)) |
| 11722 | return Error(E); |
| 11723 | unsigned vlen = v.getVectorLength(); |
| 11724 | for (unsigned j = 0; j < vlen; j++) |
| 11725 | Elements.push_back(Elt: v.getVectorElt(I: j)); |
| 11726 | CountElts += vlen; |
| 11727 | } else if (EltTy->isIntegerType()) { |
| 11728 | llvm::APSInt sInt(32); |
| 11729 | if (CountInits < NumInits) { |
| 11730 | if (!EvaluateInteger(E: E->getInit(Init: CountInits), Result&: sInt, Info)) |
| 11731 | return false; |
| 11732 | } else // trailing integer zero. |
| 11733 | sInt = Info.Ctx.MakeIntValue(Value: 0, Type: EltTy); |
| 11734 | Elements.push_back(Elt: APValue(sInt)); |
| 11735 | CountElts++; |
| 11736 | } else { |
| 11737 | llvm::APFloat f(0.0); |
| 11738 | if (CountInits < NumInits) { |
| 11739 | if (!EvaluateFloat(E: E->getInit(Init: CountInits), Result&: f, Info)) |
| 11740 | return false; |
| 11741 | } else // trailing float zero. |
| 11742 | f = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: EltTy)); |
| 11743 | Elements.push_back(Elt: APValue(f)); |
| 11744 | CountElts++; |
| 11745 | } |
| 11746 | CountInits++; |
| 11747 | } |
| 11748 | return Success(V: Elements, E); |
| 11749 | } |
| 11750 | |
| 11751 | bool |
| 11752 | VectorExprEvaluator::ZeroInitialization(const Expr *E) { |
| 11753 | const auto *VT = E->getType()->castAs<VectorType>(); |
| 11754 | QualType EltTy = VT->getElementType(); |
| 11755 | APValue ZeroElement; |
| 11756 | if (EltTy->isIntegerType()) |
| 11757 | ZeroElement = APValue(Info.Ctx.MakeIntValue(Value: 0, Type: EltTy)); |
| 11758 | else |
| 11759 | ZeroElement = |
| 11760 | APValue(APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: EltTy))); |
| 11761 | |
| 11762 | SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement); |
| 11763 | return Success(V: Elements, E); |
| 11764 | } |
| 11765 | |
| 11766 | bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { |
| 11767 | VisitIgnoredValue(E: E->getSubExpr()); |
| 11768 | return ZeroInitialization(E); |
| 11769 | } |
| 11770 | |
| 11771 | bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { |
| 11772 | BinaryOperatorKind Op = E->getOpcode(); |
  assert(Op != BO_PtrMemD && Op != BO_PtrMemI && Op != BO_Cmp &&
         "Operation not supported on vector types");
| 11775 | |
| 11776 | if (Op == BO_Comma) |
| 11777 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 11778 | |
| 11779 | Expr *LHS = E->getLHS(); |
| 11780 | Expr *RHS = E->getRHS(); |
| 11781 | |
  assert(LHS->getType()->isVectorType() && RHS->getType()->isVectorType() &&
         "Must both be vector types");
  // Checking just that the types are the same would be fine, except that
  // shifts don't need matching operand types (you always shift by an int).
| 11786 | assert(LHS->getType()->castAs<VectorType>()->getNumElements() == |
| 11787 | E->getType()->castAs<VectorType>()->getNumElements() && |
| 11788 | RHS->getType()->castAs<VectorType>()->getNumElements() == |
             E->getType()->castAs<VectorType>()->getNumElements() &&
         "All operands must be the same size.");
| 11791 | |
| 11792 | APValue LHSValue; |
| 11793 | APValue RHSValue; |
| 11794 | bool LHSOK = Evaluate(Result&: LHSValue, Info, E: LHS); |
| 11795 | if (!LHSOK && !Info.noteFailure()) |
| 11796 | return false; |
| 11797 | if (!Evaluate(Result&: RHSValue, Info, E: RHS) || !LHSOK) |
| 11798 | return false; |
| 11799 | |
| 11800 | if (!handleVectorVectorBinOp(Info, E, Opcode: Op, LHSValue, RHSValue)) |
| 11801 | return false; |
| 11802 | |
| 11803 | return Success(V: LHSValue, E); |
| 11804 | } |
| 11805 | |
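/// Apply a unary operator to a single vector element. Only +, -, ~ and ! are
/// handled; any other operator yields std::nullopt. Vector operator! uses
/// -1 (all bits set) for 'true' and 0 for 'false'; for a floating-point
/// element the result is an integer element of type \p ResultTy.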
| 11806 | static std::optional<APValue> handleVectorUnaryOperator(ASTContext &Ctx, |
| 11807 | QualType ResultTy, |
| 11808 | UnaryOperatorKind Op, |
| 11809 | APValue Elt) { |
| 11810 | switch (Op) { |
| 11811 | case UO_Plus: |
| 11812 | // Nothing to do here. |
| 11813 | return Elt; |
| 11814 | case UO_Minus: |
| 11815 | if (Elt.getKind() == APValue::Int) { |
| 11816 | Elt.getInt().negate(); |
| 11817 | } else { |
      assert(Elt.getKind() == APValue::Float &&
             "Vector can only be int or float type");
| 11820 | Elt.getFloat().changeSign(); |
| 11821 | } |
| 11822 | return Elt; |
| 11823 | case UO_Not: |
| 11824 | // This is only valid for integral types anyway, so we don't have to handle |
| 11825 | // float here. |
    assert(Elt.getKind() == APValue::Int &&
           "Vector operator ~ can only be int");
| 11828 | Elt.getInt().flipAllBits(); |
| 11829 | return Elt; |
| 11830 | case UO_LNot: { |
| 11831 | if (Elt.getKind() == APValue::Int) { |
| 11832 | Elt.getInt() = !Elt.getInt(); |
| 11833 | // operator ! on vectors returns -1 for 'truth', so negate it. |
| 11834 | Elt.getInt().negate(); |
| 11835 | return Elt; |
| 11836 | } |
    assert(Elt.getKind() == APValue::Float &&
           "Vector can only be int or float type");
| 11839 | // Float types result in an int of the same size, but -1 for true, or 0 for |
| 11840 | // false. |
| 11841 | APSInt EltResult{Ctx.getIntWidth(T: ResultTy), |
| 11842 | ResultTy->isUnsignedIntegerType()}; |
| 11843 | if (Elt.getFloat().isZero()) |
| 11844 | EltResult.setAllBits(); |
| 11845 | else |
| 11846 | EltResult.clearAllBits(); |
| 11847 | |
| 11848 | return APValue{EltResult}; |
| 11849 | } |
| 11850 | default: |
| 11851 | // FIXME: Implement the rest of the unary operators. |
| 11852 | return std::nullopt; |
| 11853 | } |
| 11854 | } |
| 11855 | |
| 11856 | bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { |
| 11857 | Expr *SubExpr = E->getSubExpr(); |
| 11858 | const auto *VD = SubExpr->getType()->castAs<VectorType>(); |
| 11859 | // This result element type differs in the case of negating a floating point |
  // vector, since the result type is a vector of the equivalently sized
| 11861 | // integer. |
| 11862 | const QualType ResultEltTy = VD->getElementType(); |
| 11863 | UnaryOperatorKind Op = E->getOpcode(); |
| 11864 | |
| 11865 | APValue SubExprValue; |
| 11866 | if (!Evaluate(Result&: SubExprValue, Info, E: SubExpr)) |
| 11867 | return false; |
| 11868 | |
| 11869 | // FIXME: This vector evaluator someday needs to be changed to be LValue |
| 11870 | // aware/keep LValue information around, rather than dealing with just vector |
| 11871 | // types directly. Until then, we cannot handle cases where the operand to |
| 11872 | // these unary operators is an LValue. The only case I've been able to see |
| 11873 | // cause this is operator++ assigning to a member expression (only valid in |
| 11874 | // altivec compilations) in C mode, so this shouldn't limit us too much. |
| 11875 | if (SubExprValue.isLValue()) |
| 11876 | return false; |
| 11877 | |
  assert(SubExprValue.getVectorLength() == VD->getNumElements() &&
         "Vector length doesn't match type?");
| 11880 | |
| 11881 | SmallVector<APValue, 4> ResultElements; |
| 11882 | for (unsigned EltNum = 0; EltNum < VD->getNumElements(); ++EltNum) { |
| 11883 | std::optional<APValue> Elt = handleVectorUnaryOperator( |
| 11884 | Ctx&: Info.Ctx, ResultTy: ResultEltTy, Op, Elt: SubExprValue.getVectorElt(I: EltNum)); |
| 11885 | if (!Elt) |
| 11886 | return false; |
| 11887 | ResultElements.push_back(Elt: *Elt); |
| 11888 | } |
| 11889 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 11890 | } |
| 11891 | |
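/// Convert a single scalar element value from \p SourceTy to \p DestTy,
/// covering the int<->int, int<->float and float<->float combinations.
/// Any other combination is diagnosed as an unsupported vector cast.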
| 11892 | static bool handleVectorElementCast(EvalInfo &Info, const FPOptions FPO, |
| 11893 | const Expr *E, QualType SourceTy, |
| 11894 | QualType DestTy, APValue const &Original, |
| 11895 | APValue &Result) { |
| 11896 | if (SourceTy->isIntegerType()) { |
| 11897 | if (DestTy->isRealFloatingType()) { |
| 11898 | Result = APValue(APFloat(0.0)); |
| 11899 | return HandleIntToFloatCast(Info, E, FPO, SrcType: SourceTy, Value: Original.getInt(), |
| 11900 | DestType: DestTy, Result&: Result.getFloat()); |
| 11901 | } |
| 11902 | if (DestTy->isIntegerType()) { |
| 11903 | Result = APValue( |
| 11904 | HandleIntToIntCast(Info, E, DestType: DestTy, SrcType: SourceTy, Value: Original.getInt())); |
| 11905 | return true; |
| 11906 | } |
| 11907 | } else if (SourceTy->isRealFloatingType()) { |
| 11908 | if (DestTy->isRealFloatingType()) { |
| 11909 | Result = Original; |
| 11910 | return HandleFloatToFloatCast(Info, E, SrcType: SourceTy, DestType: DestTy, |
| 11911 | Result&: Result.getFloat()); |
| 11912 | } |
| 11913 | if (DestTy->isIntegerType()) { |
| 11914 | Result = APValue(APSInt()); |
| 11915 | return HandleFloatToIntCast(Info, E, SrcType: SourceTy, Value: Original.getFloat(), |
| 11916 | DestType: DestTy, Result&: Result.getInt()); |
| 11917 | } |
| 11918 | } |
| 11919 | |
| 11920 | Info.FFDiag(E, DiagId: diag::err_convertvector_constexpr_unsupported_vector_cast) |
| 11921 | << SourceTy << DestTy; |
| 11922 | return false; |
| 11923 | } |
| 11924 | |
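/// Evaluate an x86 vector pack builtin. Both operands are evaluated, and for
/// each 128-bit lane the narrowed LHS elements are emitted followed by the
/// narrowed RHS elements, where \p PackFn performs the per-element narrowing
/// (e.g. signed or unsigned saturation to half the source width).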
| 11925 | static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result, |
| 11926 | llvm::function_ref<APInt(const APSInt &)> PackFn) { |
| 11927 | APValue LHS, RHS; |
| 11928 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: LHS) || |
| 11929 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: RHS)) |
| 11930 | return false; |
| 11931 | |
| 11932 | unsigned LHSVecLen = LHS.getVectorLength(); |
| 11933 | unsigned RHSVecLen = RHS.getVectorLength(); |
| 11934 | |
  assert(LHSVecLen != 0 && LHSVecLen == RHSVecLen &&
         "pack builtin LHSVecLen must equal RHSVecLen");
| 11937 | |
| 11938 | const VectorType *VT0 = E->getArg(Arg: 0)->getType()->castAs<VectorType>(); |
| 11939 | const unsigned SrcBits = Info.Ctx.getIntWidth(T: VT0->getElementType()); |
| 11940 | |
| 11941 | const VectorType *DstVT = E->getType()->castAs<VectorType>(); |
| 11942 | QualType DstElemTy = DstVT->getElementType(); |
| 11943 | const bool DstIsUnsigned = DstElemTy->isUnsignedIntegerType(); |
| 11944 | |
| 11945 | const unsigned SrcPerLane = 128 / SrcBits; |
| 11946 | const unsigned Lanes = LHSVecLen * SrcBits / 128; |
| 11947 | |
| 11948 | SmallVector<APValue, 64> Out; |
| 11949 | Out.reserve(N: LHSVecLen + RHSVecLen); |
| 11950 | |
| 11951 | for (unsigned Lane = 0; Lane != Lanes; ++Lane) { |
| 11952 | unsigned base = Lane * SrcPerLane; |
| 11953 | for (unsigned I = 0; I != SrcPerLane; ++I) |
| 11954 | Out.emplace_back(Args: APValue( |
| 11955 | APSInt(PackFn(LHS.getVectorElt(I: base + I).getInt()), DstIsUnsigned))); |
| 11956 | for (unsigned I = 0; I != SrcPerLane; ++I) |
| 11957 | Out.emplace_back(Args: APValue( |
| 11958 | APSInt(PackFn(RHS.getVectorElt(I: base + I).getInt()), DstIsUnsigned))); |
| 11959 | } |
| 11960 | |
| 11961 | Result = APValue(Out.data(), Out.size()); |
| 11962 | return true; |
| 11963 | } |
| 11964 | |
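/// Generic helper for shuffle-style builtins. \p GetSourceIndex maps a
/// destination element index and a shuffle mask to a (source vector, source
/// element) pair: source vector index 0 selects the first operand and any
/// other index selects the second, while a negative source element index
/// zero-fills the destination element. The mask may be an integer immediate
/// or a per-element mask vector, and single-operand forms reuse the first
/// operand for both sources.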
| 11965 | static bool evalShuffleGeneric( |
| 11966 | EvalInfo &Info, const CallExpr *Call, APValue &Out, |
| 11967 | llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)> |
| 11968 | GetSourceIndex) { |
| 11969 | |
| 11970 | const auto *VT = Call->getType()->getAs<VectorType>(); |
| 11971 | if (!VT) |
| 11972 | return false; |
| 11973 | |
| 11974 | unsigned ShuffleMask = 0; |
| 11975 | APValue A, MaskVector, B; |
| 11976 | bool IsVectorMask = false; |
| 11977 | bool IsSingleOperand = (Call->getNumArgs() == 2); |
| 11978 | |
| 11979 | if (IsSingleOperand) { |
| 11980 | QualType MaskType = Call->getArg(Arg: 1)->getType(); |
| 11981 | if (MaskType->isVectorType()) { |
| 11982 | IsVectorMask = true; |
| 11983 | if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) || |
| 11984 | !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: MaskVector)) |
| 11985 | return false; |
| 11986 | B = A; |
| 11987 | } else if (MaskType->isIntegerType()) { |
| 11988 | APSInt MaskImm; |
| 11989 | if (!EvaluateInteger(E: Call->getArg(Arg: 1), Result&: MaskImm, Info)) |
| 11990 | return false; |
| 11991 | ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue()); |
| 11992 | if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A)) |
| 11993 | return false; |
| 11994 | B = A; |
| 11995 | } else { |
| 11996 | return false; |
| 11997 | } |
| 11998 | } else { |
| 11999 | QualType Arg2Type = Call->getArg(Arg: 2)->getType(); |
| 12000 | if (Arg2Type->isVectorType()) { |
| 12001 | IsVectorMask = true; |
| 12002 | if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) || |
| 12003 | !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: MaskVector) || |
| 12004 | !EvaluateAsRValue(Info, E: Call->getArg(Arg: 2), Result&: B)) |
| 12005 | return false; |
| 12006 | } else if (Arg2Type->isIntegerType()) { |
| 12007 | APSInt MaskImm; |
| 12008 | if (!EvaluateInteger(E: Call->getArg(Arg: 2), Result&: MaskImm, Info)) |
| 12009 | return false; |
| 12010 | ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue()); |
| 12011 | if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: A) || |
| 12012 | !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: B)) |
| 12013 | return false; |
| 12014 | } else { |
| 12015 | return false; |
| 12016 | } |
| 12017 | } |
| 12018 | |
| 12019 | unsigned NumElts = VT->getNumElements(); |
| 12020 | SmallVector<APValue, 64> ResultElements; |
| 12021 | ResultElements.reserve(N: NumElts); |
| 12022 | |
| 12023 | for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) { |
| 12024 | if (IsVectorMask) { |
| 12025 | ShuffleMask = static_cast<unsigned>( |
| 12026 | MaskVector.getVectorElt(I: DstIdx).getInt().getZExtValue()); |
| 12027 | } |
| 12028 | auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask); |
| 12029 | |
| 12030 | if (SrcIdx < 0) { |
| 12031 | // Zero out this element |
| 12032 | QualType ElemTy = VT->getElementType(); |
| 12033 | if (ElemTy->isRealFloatingType()) { |
| 12034 | ResultElements.push_back( |
| 12035 | Elt: APValue(APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: ElemTy)))); |
| 12036 | } else if (ElemTy->isIntegerType()) { |
| 12037 | APValue Zero(Info.Ctx.MakeIntValue(Value: 0, Type: ElemTy)); |
| 12038 | ResultElements.push_back(Elt: APValue(Zero)); |
| 12039 | } else { |
        // Fall back to an empty APValue for other element types.
| 12041 | ResultElements.push_back(Elt: APValue()); |
| 12042 | } |
| 12043 | } else { |
| 12044 | const APValue &Src = (SrcVecIdx == 0) ? A : B; |
| 12045 | ResultElements.push_back(Elt: Src.getVectorElt(I: SrcIdx)); |
| 12046 | } |
| 12047 | } |
| 12048 | |
| 12049 | Out = APValue(ResultElements.data(), ResultElements.size()); |
| 12050 | return true; |
| 12051 | } |
| 12052 | static bool ConvertDoubleToFloatStrict(EvalInfo &Info, const Expr *E, |
| 12053 | APFloat OrigVal, APValue &Result) { |
| 12054 | |
| 12055 | if (OrigVal.isInfinity()) { |
| 12056 | Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << 0; |
| 12057 | return false; |
| 12058 | } |
| 12059 | if (OrigVal.isNaN()) { |
| 12060 | Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic) << 1; |
| 12061 | return false; |
| 12062 | } |
| 12063 | |
| 12064 | APFloat Val = OrigVal; |
| 12065 | bool LosesInfo = false; |
| 12066 | APFloat::opStatus Status = Val.convert( |
| 12067 | ToSemantics: APFloat::IEEEsingle(), RM: APFloat::rmNearestTiesToEven, losesInfo: &LosesInfo); |
| 12068 | |
| 12069 | if (LosesInfo || Val.isDenormal()) { |
| 12070 | Info.CCEDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict); |
| 12071 | return false; |
| 12072 | } |
| 12073 | |
| 12074 | if (Status != APFloat::opOK) { |
| 12075 | Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 12076 | return false; |
| 12077 | } |
| 12078 | |
| 12079 | Result = APValue(Val); |
| 12080 | return true; |
| 12081 | } |
| 12082 | static bool evalShiftWithCount( |
| 12083 | EvalInfo &Info, const CallExpr *Call, APValue &Out, |
| 12084 | llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp, |
| 12085 | llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) { |
| 12086 | |
| 12087 | APValue Source, Count; |
| 12088 | if (!EvaluateAsRValue(Info, E: Call->getArg(Arg: 0), Result&: Source) || |
| 12089 | !EvaluateAsRValue(Info, E: Call->getArg(Arg: 1), Result&: Count)) |
| 12090 | return false; |
| 12091 | |
| 12092 | assert(Call->getNumArgs() == 2); |
| 12093 | |
| 12094 | QualType SourceTy = Call->getArg(Arg: 0)->getType(); |
| 12095 | assert(SourceTy->isVectorType() && |
| 12096 | Call->getArg(1)->getType()->isVectorType()); |
| 12097 | |
| 12098 | QualType DestEltTy = SourceTy->castAs<VectorType>()->getElementType(); |
| 12099 | unsigned DestEltWidth = Source.getVectorElt(I: 0).getInt().getBitWidth(); |
| 12100 | unsigned DestLen = Source.getVectorLength(); |
| 12101 | bool IsDestUnsigned = DestEltTy->isUnsignedIntegerType(); |
| 12102 | unsigned CountEltWidth = Count.getVectorElt(I: 0).getInt().getBitWidth(); |
| 12103 | unsigned NumBitsInQWord = 64; |
| 12104 | unsigned NumCountElts = NumBitsInQWord / CountEltWidth; |
| 12105 | SmallVector<APValue, 64> Result; |
| 12106 | Result.reserve(N: DestLen); |
| 12107 | |
| 12108 | uint64_t CountLQWord = 0; |
| 12109 | for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) { |
| 12110 | uint64_t Elt = Count.getVectorElt(I: EltIdx).getInt().getZExtValue(); |
| 12111 | CountLQWord |= (Elt << (EltIdx * CountEltWidth)); |
| 12112 | } |
| 12113 | |
| 12114 | for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) { |
| 12115 | APInt Elt = Source.getVectorElt(I: EltIdx).getInt(); |
| 12116 | if (CountLQWord < DestEltWidth) { |
| 12117 | Result.push_back( |
| 12118 | Elt: APValue(APSInt(ShiftOp(Elt, CountLQWord), IsDestUnsigned))); |
| 12119 | } else { |
| 12120 | Result.push_back( |
| 12121 | Elt: APValue(APSInt(OverflowOp(Elt, DestEltWidth), IsDestUnsigned))); |
| 12122 | } |
| 12123 | } |
| 12124 | Out = APValue(Result.data(), Result.size()); |
| 12125 | return true; |
| 12126 | } |
| 12127 | |
| 12128 | bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { |
| 12129 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 12130 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 12131 | |
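  // Element-wise integer binary builtins: the second operand is either a
  // vector of the same length or a scalar that is applied to every element.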
| 12132 | auto EvaluateBinOpExpr = |
| 12133 | [&](llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) { |
| 12134 | APValue SourceLHS, SourceRHS; |
| 12135 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 12136 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 12137 | return false; |
| 12138 | |
| 12139 | auto *DestTy = E->getType()->castAs<VectorType>(); |
| 12140 | QualType DestEltTy = DestTy->getElementType(); |
| 12141 | bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType(); |
| 12142 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 12143 | SmallVector<APValue, 4> ResultElements; |
| 12144 | ResultElements.reserve(N: SourceLen); |
| 12145 | |
| 12146 | if (SourceRHS.isInt()) { |
| 12147 | const APSInt &RHS = SourceRHS.getInt(); |
| 12148 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12149 | const APSInt &LHS = SourceLHS.getVectorElt(I: EltNum).getInt(); |
| 12150 | ResultElements.push_back( |
| 12151 | Elt: APValue(APSInt(Fn(LHS, RHS), DestUnsigned))); |
| 12152 | } |
| 12153 | } else { |
| 12154 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12155 | const APSInt &LHS = SourceLHS.getVectorElt(I: EltNum).getInt(); |
| 12156 | const APSInt &RHS = SourceRHS.getVectorElt(I: EltNum).getInt(); |
| 12157 | ResultElements.push_back( |
| 12158 | Elt: APValue(APSInt(Fn(LHS, RHS), DestUnsigned))); |
| 12159 | } |
| 12160 | } |
| 12161 | return Success(V: APValue(ResultElements.data(), SourceLen), E); |
| 12162 | }; |
| 12163 | |
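  // Element-wise floating-point binary builtins; an optional third argument
  // is an integer rounding-mode immediate that is forwarded to the callback.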
| 12164 | auto EvaluateFpBinOpExpr = |
| 12165 | [&](llvm::function_ref<std::optional<APFloat>( |
| 12166 | const APFloat &, const APFloat &, std::optional<APSInt>)> |
| 12167 | Fn) { |
| 12168 | assert(E->getNumArgs() == 2 || E->getNumArgs() == 3); |
| 12169 | APValue A, B; |
| 12170 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) || |
| 12171 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B)) |
| 12172 | return false; |
| 12173 | |
| 12174 | assert(A.isVector() && B.isVector()); |
| 12175 | assert(A.getVectorLength() == B.getVectorLength()); |
| 12176 | |
| 12177 | std::optional<APSInt> RoundingMode; |
| 12178 | if (E->getNumArgs() == 3) { |
| 12179 | APSInt Imm; |
| 12180 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info)) |
| 12181 | return false; |
| 12182 | RoundingMode = Imm; |
| 12183 | } |
| 12184 | |
| 12185 | unsigned NumElems = A.getVectorLength(); |
| 12186 | SmallVector<APValue, 4> ResultElements; |
| 12187 | ResultElements.reserve(N: NumElems); |
| 12188 | |
| 12189 | for (unsigned EltNum = 0; EltNum < NumElems; ++EltNum) { |
| 12190 | const APFloat &EltA = A.getVectorElt(I: EltNum).getFloat(); |
| 12191 | const APFloat &EltB = B.getVectorElt(I: EltNum).getFloat(); |
| 12192 | std::optional<APFloat> Result = Fn(EltA, EltB, RoundingMode); |
| 12193 | if (!Result) |
| 12194 | return false; |
| 12195 | ResultElements.push_back(Elt: APValue(*Result)); |
| 12196 | } |
| 12197 | return Success(V: APValue(ResultElements.data(), NumElems), E); |
| 12198 | }; |
| 12199 | |
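  // AVX-512 masked scalar select of a Len-element vector: element 0 comes
  // from the second argument when bit 0 of the mask is set, otherwise from
  // the third; all remaining elements are copied from the third argument.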
| 12200 | auto EvalSelectScalar = [&](unsigned Len) -> bool { |
| 12201 | APSInt Mask; |
| 12202 | APValue AVal, WVal; |
| 12203 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Mask, Info) || |
| 12204 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: AVal) || |
| 12205 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: WVal)) |
| 12206 | return false; |
| 12207 | |
| 12208 | bool TakeA0 = (Mask.getZExtValue() & 1u) != 0; |
| 12209 | SmallVector<APValue, 4> Res; |
| 12210 | Res.reserve(N: Len); |
| 12211 | Res.push_back(Elt: TakeA0 ? AVal.getVectorElt(I: 0) : WVal.getVectorElt(I: 0)); |
| 12212 | for (unsigned I = 1; I < Len; ++I) |
| 12213 | Res.push_back(Elt: WVal.getVectorElt(I)); |
| 12214 | APValue V(Res.data(), Res.size()); |
| 12215 | return Success(V, E); |
| 12216 | }; |
| 12217 | |
| 12218 | switch (E->getBuiltinCallee()) { |
| 12219 | default: |
| 12220 | return false; |
| 12221 | case Builtin::BI__builtin_elementwise_popcount: |
| 12222 | case Builtin::BI__builtin_elementwise_bitreverse: { |
| 12223 | APValue Source; |
| 12224 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source)) |
| 12225 | return false; |
| 12226 | |
| 12227 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 12228 | unsigned SourceLen = Source.getVectorLength(); |
| 12229 | SmallVector<APValue, 4> ResultElements; |
| 12230 | ResultElements.reserve(N: SourceLen); |
| 12231 | |
| 12232 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12233 | APSInt Elt = Source.getVectorElt(I: EltNum).getInt(); |
| 12234 | switch (E->getBuiltinCallee()) { |
| 12235 | case Builtin::BI__builtin_elementwise_popcount: |
| 12236 | ResultElements.push_back(Elt: APValue( |
| 12237 | APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), Elt.popcount()), |
| 12238 | DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 12239 | break; |
| 12240 | case Builtin::BI__builtin_elementwise_bitreverse: |
| 12241 | ResultElements.push_back( |
| 12242 | Elt: APValue(APSInt(Elt.reverseBits(), |
| 12243 | DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 12244 | break; |
| 12245 | } |
| 12246 | } |
| 12247 | |
| 12248 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12249 | } |
| 12250 | case Builtin::BI__builtin_elementwise_abs: { |
| 12251 | APValue Source; |
| 12252 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source)) |
| 12253 | return false; |
| 12254 | |
| 12255 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 12256 | unsigned SourceLen = Source.getVectorLength(); |
| 12257 | SmallVector<APValue, 4> ResultElements; |
| 12258 | ResultElements.reserve(N: SourceLen); |
| 12259 | |
| 12260 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12261 | APValue CurrentEle = Source.getVectorElt(I: EltNum); |
| 12262 | APValue Val = DestEltTy->isFloatingType() |
| 12263 | ? APValue(llvm::abs(X: CurrentEle.getFloat())) |
| 12264 | : APValue(APSInt( |
| 12265 | CurrentEle.getInt().abs(), |
| 12266 | DestEltTy->isUnsignedIntegerOrEnumerationType())); |
| 12267 | ResultElements.push_back(Elt: Val); |
| 12268 | } |
| 12269 | |
| 12270 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12271 | } |
| 12272 | |
| 12273 | case Builtin::BI__builtin_elementwise_add_sat: |
| 12274 | return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) { |
| 12275 | return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS); |
| 12276 | }); |
| 12277 | |
| 12278 | case Builtin::BI__builtin_elementwise_sub_sat: |
| 12279 | return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) { |
| 12280 | return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS); |
| 12281 | }); |
| 12282 | |
| 12283 | case X86::BI__builtin_ia32_extract128i256: |
| 12284 | case X86::BI__builtin_ia32_vextractf128_pd256: |
| 12285 | case X86::BI__builtin_ia32_vextractf128_ps256: |
| 12286 | case X86::BI__builtin_ia32_vextractf128_si256: { |
| 12287 | APValue SourceVec, SourceImm; |
| 12288 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceVec) || |
| 12289 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceImm)) |
| 12290 | return false; |
| 12291 | |
| 12292 | if (!SourceVec.isVector()) |
| 12293 | return false; |
| 12294 | |
| 12295 | const auto *RetVT = E->getType()->castAs<VectorType>(); |
| 12296 | unsigned RetLen = RetVT->getNumElements(); |
| 12297 | unsigned Idx = SourceImm.getInt().getZExtValue() & 1; |
| 12298 | |
| 12299 | SmallVector<APValue, 32> ResultElements; |
| 12300 | ResultElements.reserve(N: RetLen); |
| 12301 | |
| 12302 | for (unsigned I = 0; I < RetLen; I++) |
| 12303 | ResultElements.push_back(Elt: SourceVec.getVectorElt(I: Idx * RetLen + I)); |
| 12304 | |
| 12305 | return Success(V: APValue(ResultElements.data(), RetLen), E); |
| 12306 | } |
| 12307 | |
| 12308 | case clang::X86::BI__builtin_ia32_cvtmask2b128: |
| 12309 | case clang::X86::BI__builtin_ia32_cvtmask2b256: |
| 12310 | case clang::X86::BI__builtin_ia32_cvtmask2b512: |
| 12311 | case clang::X86::BI__builtin_ia32_cvtmask2w128: |
| 12312 | case clang::X86::BI__builtin_ia32_cvtmask2w256: |
| 12313 | case clang::X86::BI__builtin_ia32_cvtmask2w512: |
| 12314 | case clang::X86::BI__builtin_ia32_cvtmask2d128: |
| 12315 | case clang::X86::BI__builtin_ia32_cvtmask2d256: |
| 12316 | case clang::X86::BI__builtin_ia32_cvtmask2d512: |
| 12317 | case clang::X86::BI__builtin_ia32_cvtmask2q128: |
| 12318 | case clang::X86::BI__builtin_ia32_cvtmask2q256: |
| 12319 | case clang::X86::BI__builtin_ia32_cvtmask2q512: { |
| 12320 | assert(E->getNumArgs() == 1); |
| 12321 | APSInt Mask; |
| 12322 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Mask, Info)) |
| 12323 | return false; |
| 12324 | |
| 12325 | QualType VecTy = E->getType(); |
| 12326 | const VectorType *VT = VecTy->castAs<VectorType>(); |
| 12327 | unsigned VectorLen = VT->getNumElements(); |
| 12328 | QualType ElemTy = VT->getElementType(); |
| 12329 | unsigned ElemWidth = Info.Ctx.getTypeSize(T: ElemTy); |
| 12330 | |
| 12331 | SmallVector<APValue, 16> Elems; |
| 12332 | for (unsigned I = 0; I != VectorLen; ++I) { |
| 12333 | bool BitSet = Mask[I]; |
| 12334 | APSInt ElemVal(ElemWidth, /*isUnsigned=*/false); |
| 12335 | if (BitSet) { |
| 12336 | ElemVal.setAllBits(); |
| 12337 | } |
| 12338 | Elems.push_back(Elt: APValue(ElemVal)); |
| 12339 | } |
| 12340 | return Success(V: APValue(Elems.data(), VectorLen), E); |
| 12341 | } |
| 12342 | |
| 12343 | case X86::BI__builtin_ia32_extracti32x4_256_mask: |
| 12344 | case X86::BI__builtin_ia32_extractf32x4_256_mask: |
| 12345 | case X86::BI__builtin_ia32_extracti32x4_mask: |
| 12346 | case X86::BI__builtin_ia32_extractf32x4_mask: |
| 12347 | case X86::BI__builtin_ia32_extracti32x8_mask: |
| 12348 | case X86::BI__builtin_ia32_extractf32x8_mask: |
| 12349 | case X86::BI__builtin_ia32_extracti64x2_256_mask: |
| 12350 | case X86::BI__builtin_ia32_extractf64x2_256_mask: |
| 12351 | case X86::BI__builtin_ia32_extracti64x2_512_mask: |
| 12352 | case X86::BI__builtin_ia32_extractf64x2_512_mask: |
| 12353 | case X86::BI__builtin_ia32_extracti64x4_mask: |
| 12354 | case X86::BI__builtin_ia32_extractf64x4_mask: { |
| 12355 | APValue SourceVec, MergeVec; |
| 12356 | APSInt Imm, MaskImm; |
| 12357 | |
| 12358 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceVec) || |
| 12359 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Imm, Info) || |
| 12360 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: MergeVec) || |
| 12361 | !EvaluateInteger(E: E->getArg(Arg: 3), Result&: MaskImm, Info)) |
| 12362 | return false; |
| 12363 | |
| 12364 | const auto *RetVT = E->getType()->castAs<VectorType>(); |
| 12365 | unsigned RetLen = RetVT->getNumElements(); |
| 12366 | |
| 12367 | if (!SourceVec.isVector() || !MergeVec.isVector()) |
| 12368 | return false; |
| 12369 | unsigned SrcLen = SourceVec.getVectorLength(); |
| 12370 | unsigned Lanes = SrcLen / RetLen; |
| 12371 | unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes); |
| 12372 | unsigned Base = Lane * RetLen; |
| 12373 | |
| 12374 | SmallVector<APValue, 32> ResultElements; |
| 12375 | ResultElements.reserve(N: RetLen); |
| 12376 | for (unsigned I = 0; I < RetLen; ++I) { |
| 12377 | if (MaskImm[I]) |
| 12378 | ResultElements.push_back(Elt: SourceVec.getVectorElt(I: Base + I)); |
| 12379 | else |
| 12380 | ResultElements.push_back(Elt: MergeVec.getVectorElt(I)); |
| 12381 | } |
| 12382 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12383 | } |
| 12384 | |
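  // Unsigned rounding average per element: (a + b + 1) >> 1.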
| 12385 | case clang::X86::BI__builtin_ia32_pavgb128: |
| 12386 | case clang::X86::BI__builtin_ia32_pavgw128: |
| 12387 | case clang::X86::BI__builtin_ia32_pavgb256: |
| 12388 | case clang::X86::BI__builtin_ia32_pavgw256: |
| 12389 | case clang::X86::BI__builtin_ia32_pavgb512: |
| 12390 | case clang::X86::BI__builtin_ia32_pavgw512: |
| 12391 | return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU); |
| 12392 | |
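  // Rounded scaled multiply-high: take the 32-bit signed product, shift it
  // right by 14, add 1, and keep bits [16:1] of the result.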
| 12393 | case clang::X86::BI__builtin_ia32_pmulhrsw128: |
| 12394 | case clang::X86::BI__builtin_ia32_pmulhrsw256: |
| 12395 | case clang::X86::BI__builtin_ia32_pmulhrsw512: |
| 12396 | return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) { |
| 12397 | return (llvm::APIntOps::mulsExtended(C1: LHS, C2: RHS).ashr(ShiftAmt: 14) + 1) |
| 12398 | .extractBits(numBits: 16, bitPosition: 1); |
| 12399 | }); |
| 12400 | |
| 12401 | case clang::X86::BI__builtin_ia32_pmaddubsw128: |
| 12402 | case clang::X86::BI__builtin_ia32_pmaddubsw256: |
| 12403 | case clang::X86::BI__builtin_ia32_pmaddubsw512: |
| 12404 | case clang::X86::BI__builtin_ia32_pmaddwd128: |
| 12405 | case clang::X86::BI__builtin_ia32_pmaddwd256: |
| 12406 | case clang::X86::BI__builtin_ia32_pmaddwd512: { |
| 12407 | APValue SourceLHS, SourceRHS; |
| 12408 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 12409 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 12410 | return false; |
| 12411 | |
| 12412 | auto *DestTy = E->getType()->castAs<VectorType>(); |
| 12413 | QualType DestEltTy = DestTy->getElementType(); |
| 12414 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 12415 | bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType(); |
| 12416 | SmallVector<APValue, 4> ResultElements; |
| 12417 | ResultElements.reserve(N: SourceLen / 2); |
| 12418 | |
| 12419 | for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) { |
| 12420 | const APSInt &LoLHS = SourceLHS.getVectorElt(I: EltNum).getInt(); |
| 12421 | const APSInt &HiLHS = SourceLHS.getVectorElt(I: EltNum + 1).getInt(); |
| 12422 | const APSInt &LoRHS = SourceRHS.getVectorElt(I: EltNum).getInt(); |
| 12423 | const APSInt &HiRHS = SourceRHS.getVectorElt(I: EltNum + 1).getInt(); |
| 12424 | unsigned BitWidth = 2 * LoLHS.getBitWidth(); |
| 12425 | |
| 12426 | switch (E->getBuiltinCallee()) { |
| 12427 | case clang::X86::BI__builtin_ia32_pmaddubsw128: |
| 12428 | case clang::X86::BI__builtin_ia32_pmaddubsw256: |
| 12429 | case clang::X86::BI__builtin_ia32_pmaddubsw512: |
| 12430 | ResultElements.push_back(Elt: APValue( |
| 12431 | APSInt((LoLHS.zext(width: BitWidth) * LoRHS.sext(width: BitWidth)) |
| 12432 | .sadd_sat(RHS: (HiLHS.zext(width: BitWidth) * HiRHS.sext(width: BitWidth))), |
| 12433 | DestUnsigned))); |
| 12434 | break; |
| 12435 | case clang::X86::BI__builtin_ia32_pmaddwd128: |
| 12436 | case clang::X86::BI__builtin_ia32_pmaddwd256: |
| 12437 | case clang::X86::BI__builtin_ia32_pmaddwd512: |
| 12438 | ResultElements.push_back( |
| 12439 | Elt: APValue(APSInt((LoLHS.sext(width: BitWidth) * LoRHS.sext(width: BitWidth)) + |
| 12440 | (HiLHS.sext(width: BitWidth) * HiRHS.sext(width: BitWidth)), |
| 12441 | DestUnsigned))); |
| 12442 | break; |
| 12443 | } |
| 12444 | } |
| 12445 | |
| 12446 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12447 | } |
| 12448 | |
| 12449 | case clang::X86::BI__builtin_ia32_pmulhuw128: |
| 12450 | case clang::X86::BI__builtin_ia32_pmulhuw256: |
| 12451 | case clang::X86::BI__builtin_ia32_pmulhuw512: |
| 12452 | return EvaluateBinOpExpr(llvm::APIntOps::mulhu); |
| 12453 | |
| 12454 | case clang::X86::BI__builtin_ia32_pmulhw128: |
| 12455 | case clang::X86::BI__builtin_ia32_pmulhw256: |
| 12456 | case clang::X86::BI__builtin_ia32_pmulhw512: |
| 12457 | return EvaluateBinOpExpr(llvm::APIntOps::mulhs); |
| 12458 | |
| 12459 | case clang::X86::BI__builtin_ia32_psllv2di: |
| 12460 | case clang::X86::BI__builtin_ia32_psllv4di: |
| 12461 | case clang::X86::BI__builtin_ia32_psllv4si: |
| 12462 | case clang::X86::BI__builtin_ia32_psllv8di: |
| 12463 | case clang::X86::BI__builtin_ia32_psllv8hi: |
| 12464 | case clang::X86::BI__builtin_ia32_psllv8si: |
| 12465 | case clang::X86::BI__builtin_ia32_psllv16hi: |
| 12466 | case clang::X86::BI__builtin_ia32_psllv16si: |
| 12467 | case clang::X86::BI__builtin_ia32_psllv32hi: |
| 12468 | case clang::X86::BI__builtin_ia32_psllwi128: |
| 12469 | case clang::X86::BI__builtin_ia32_pslldi128: |
| 12470 | case clang::X86::BI__builtin_ia32_psllqi128: |
| 12471 | case clang::X86::BI__builtin_ia32_psllwi256: |
| 12472 | case clang::X86::BI__builtin_ia32_pslldi256: |
| 12473 | case clang::X86::BI__builtin_ia32_psllqi256: |
| 12474 | case clang::X86::BI__builtin_ia32_psllwi512: |
| 12475 | case clang::X86::BI__builtin_ia32_pslldi512: |
| 12476 | case clang::X86::BI__builtin_ia32_psllqi512: |
| 12477 | return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) { |
| 12478 | if (RHS.uge(RHS: LHS.getBitWidth())) { |
| 12479 | return APInt::getZero(numBits: LHS.getBitWidth()); |
| 12480 | } |
| 12481 | return LHS.shl(shiftAmt: RHS.getZExtValue()); |
| 12482 | }); |
| 12483 | |
| 12484 | case clang::X86::BI__builtin_ia32_psrav4si: |
| 12485 | case clang::X86::BI__builtin_ia32_psrav8di: |
| 12486 | case clang::X86::BI__builtin_ia32_psrav8hi: |
| 12487 | case clang::X86::BI__builtin_ia32_psrav8si: |
| 12488 | case clang::X86::BI__builtin_ia32_psrav16hi: |
| 12489 | case clang::X86::BI__builtin_ia32_psrav16si: |
| 12490 | case clang::X86::BI__builtin_ia32_psrav32hi: |
| 12491 | case clang::X86::BI__builtin_ia32_psravq128: |
| 12492 | case clang::X86::BI__builtin_ia32_psravq256: |
| 12493 | case clang::X86::BI__builtin_ia32_psrawi128: |
| 12494 | case clang::X86::BI__builtin_ia32_psradi128: |
| 12495 | case clang::X86::BI__builtin_ia32_psraqi128: |
| 12496 | case clang::X86::BI__builtin_ia32_psrawi256: |
| 12497 | case clang::X86::BI__builtin_ia32_psradi256: |
| 12498 | case clang::X86::BI__builtin_ia32_psraqi256: |
| 12499 | case clang::X86::BI__builtin_ia32_psrawi512: |
| 12500 | case clang::X86::BI__builtin_ia32_psradi512: |
| 12501 | case clang::X86::BI__builtin_ia32_psraqi512: |
| 12502 | return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) { |
| 12503 | if (RHS.uge(RHS: LHS.getBitWidth())) { |
| 12504 | return LHS.ashr(ShiftAmt: LHS.getBitWidth() - 1); |
| 12505 | } |
| 12506 | return LHS.ashr(ShiftAmt: RHS.getZExtValue()); |
| 12507 | }); |
| 12508 | |
| 12509 | case clang::X86::BI__builtin_ia32_psrlv2di: |
| 12510 | case clang::X86::BI__builtin_ia32_psrlv4di: |
| 12511 | case clang::X86::BI__builtin_ia32_psrlv4si: |
| 12512 | case clang::X86::BI__builtin_ia32_psrlv8di: |
| 12513 | case clang::X86::BI__builtin_ia32_psrlv8hi: |
| 12514 | case clang::X86::BI__builtin_ia32_psrlv8si: |
| 12515 | case clang::X86::BI__builtin_ia32_psrlv16hi: |
| 12516 | case clang::X86::BI__builtin_ia32_psrlv16si: |
| 12517 | case clang::X86::BI__builtin_ia32_psrlv32hi: |
| 12518 | case clang::X86::BI__builtin_ia32_psrlwi128: |
| 12519 | case clang::X86::BI__builtin_ia32_psrldi128: |
| 12520 | case clang::X86::BI__builtin_ia32_psrlqi128: |
| 12521 | case clang::X86::BI__builtin_ia32_psrlwi256: |
| 12522 | case clang::X86::BI__builtin_ia32_psrldi256: |
| 12523 | case clang::X86::BI__builtin_ia32_psrlqi256: |
| 12524 | case clang::X86::BI__builtin_ia32_psrlwi512: |
| 12525 | case clang::X86::BI__builtin_ia32_psrldi512: |
| 12526 | case clang::X86::BI__builtin_ia32_psrlqi512: |
| 12527 | return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) { |
| 12528 | if (RHS.uge(RHS: LHS.getBitWidth())) { |
| 12529 | return APInt::getZero(numBits: LHS.getBitWidth()); |
| 12530 | } |
| 12531 | return LHS.lshr(shiftAmt: RHS.getZExtValue()); |
| 12532 | }); |
| 12533 | case X86::BI__builtin_ia32_packsswb128: |
| 12534 | case X86::BI__builtin_ia32_packsswb256: |
| 12535 | case X86::BI__builtin_ia32_packsswb512: |
| 12536 | case X86::BI__builtin_ia32_packssdw128: |
| 12537 | case X86::BI__builtin_ia32_packssdw256: |
| 12538 | case X86::BI__builtin_ia32_packssdw512: |
| 12539 | return evalPackBuiltin(E, Info, Result, PackFn: [](const APSInt &Src) { |
| 12540 | return APSInt(Src).truncSSat(width: Src.getBitWidth() / 2); |
| 12541 | }); |
| 12542 | case X86::BI__builtin_ia32_packusdw128: |
| 12543 | case X86::BI__builtin_ia32_packusdw256: |
| 12544 | case X86::BI__builtin_ia32_packusdw512: |
| 12545 | case X86::BI__builtin_ia32_packuswb128: |
| 12546 | case X86::BI__builtin_ia32_packuswb256: |
| 12547 | case X86::BI__builtin_ia32_packuswb512: |
| 12548 | return evalPackBuiltin(E, Info, Result, PackFn: [](const APSInt &Src) { |
| 12549 | return APSInt(Src).truncSSatU(width: Src.getBitWidth() / 2); |
| 12550 | }); |
| 12551 | case clang::X86::BI__builtin_ia32_selectss_128: |
| 12552 | return EvalSelectScalar(4); |
| 12553 | case clang::X86::BI__builtin_ia32_selectsd_128: |
| 12554 | return EvalSelectScalar(2); |
| 12555 | case clang::X86::BI__builtin_ia32_selectsh_128: |
| 12556 | case clang::X86::BI__builtin_ia32_selectsbf_128: |
| 12557 | return EvalSelectScalar(8); |
| 12558 | case clang::X86::BI__builtin_ia32_pmuldq128: |
| 12559 | case clang::X86::BI__builtin_ia32_pmuldq256: |
| 12560 | case clang::X86::BI__builtin_ia32_pmuldq512: |
| 12561 | case clang::X86::BI__builtin_ia32_pmuludq128: |
| 12562 | case clang::X86::BI__builtin_ia32_pmuludq256: |
| 12563 | case clang::X86::BI__builtin_ia32_pmuludq512: { |
| 12564 | APValue SourceLHS, SourceRHS; |
| 12565 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 12566 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 12567 | return false; |
| 12568 | |
| 12569 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 12570 | SmallVector<APValue, 4> ResultElements; |
| 12571 | ResultElements.reserve(N: SourceLen / 2); |
| 12572 | |
| 12573 | for (unsigned EltNum = 0; EltNum < SourceLen; EltNum += 2) { |
| 12574 | APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt(); |
| 12575 | APSInt RHS = SourceRHS.getVectorElt(I: EltNum).getInt(); |
| 12576 | |
| 12577 | switch (E->getBuiltinCallee()) { |
| 12578 | case clang::X86::BI__builtin_ia32_pmuludq128: |
| 12579 | case clang::X86::BI__builtin_ia32_pmuludq256: |
| 12580 | case clang::X86::BI__builtin_ia32_pmuludq512: |
| 12581 | ResultElements.push_back( |
| 12582 | Elt: APValue(APSInt(llvm::APIntOps::muluExtended(C1: LHS, C2: RHS), true))); |
| 12583 | break; |
| 12584 | case clang::X86::BI__builtin_ia32_pmuldq128: |
| 12585 | case clang::X86::BI__builtin_ia32_pmuldq256: |
| 12586 | case clang::X86::BI__builtin_ia32_pmuldq512: |
| 12587 | ResultElements.push_back( |
| 12588 | Elt: APValue(APSInt(llvm::APIntOps::mulsExtended(C1: LHS, C2: RHS), false))); |
| 12589 | break; |
| 12590 | } |
| 12591 | } |
| 12592 | |
| 12593 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12594 | } |
| 12595 | |
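  // IFMA low: multiply the low 52 bits of B and C and add the low 52 bits of
  // the product to the 64-bit accumulator A.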
| 12596 | case X86::BI__builtin_ia32_vpmadd52luq128: |
| 12597 | case X86::BI__builtin_ia32_vpmadd52luq256: |
| 12598 | case X86::BI__builtin_ia32_vpmadd52luq512: { |
| 12599 | APValue A, B, C; |
| 12600 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) || |
| 12601 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B) || |
| 12602 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: C)) |
| 12603 | return false; |
| 12604 | |
| 12605 | unsigned ALen = A.getVectorLength(); |
| 12606 | SmallVector<APValue, 4> ResultElements; |
| 12607 | ResultElements.reserve(N: ALen); |
| 12608 | |
| 12609 | for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) { |
| 12610 | APInt AElt = A.getVectorElt(I: EltNum).getInt(); |
| 12611 | APInt BElt = B.getVectorElt(I: EltNum).getInt().trunc(width: 52); |
| 12612 | APInt CElt = C.getVectorElt(I: EltNum).getInt().trunc(width: 52); |
| 12613 | APSInt ResElt(AElt + (BElt * CElt).zext(width: 64), false); |
| 12614 | ResultElements.push_back(Elt: APValue(ResElt)); |
| 12615 | } |
| 12616 | |
| 12617 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12618 | } |
| 12619 | case X86::BI__builtin_ia32_vpmadd52huq128: |
| 12620 | case X86::BI__builtin_ia32_vpmadd52huq256: |
| 12621 | case X86::BI__builtin_ia32_vpmadd52huq512: { |
| 12622 | APValue A, B, C; |
| 12623 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) || |
| 12624 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B) || |
| 12625 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: C)) |
| 12626 | return false; |
| 12627 | |
| 12628 | unsigned ALen = A.getVectorLength(); |
| 12629 | SmallVector<APValue, 4> ResultElements; |
| 12630 | ResultElements.reserve(N: ALen); |
| 12631 | |
| 12632 | for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) { |
| 12633 | APInt AElt = A.getVectorElt(I: EltNum).getInt(); |
| 12634 | APInt BElt = B.getVectorElt(I: EltNum).getInt().trunc(width: 52); |
| 12635 | APInt CElt = C.getVectorElt(I: EltNum).getInt().trunc(width: 52); |
| 12636 | APSInt ResElt(AElt + llvm::APIntOps::mulhu(C1: BElt, C2: CElt).zext(width: 64), false); |
| 12637 | ResultElements.push_back(Elt: APValue(ResElt)); |
| 12638 | } |
| 12639 | |
| 12640 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12641 | } |
| 12642 | |
| 12643 | case clang::X86::BI__builtin_ia32_vprotbi: |
| 12644 | case clang::X86::BI__builtin_ia32_vprotdi: |
| 12645 | case clang::X86::BI__builtin_ia32_vprotqi: |
| 12646 | case clang::X86::BI__builtin_ia32_vprotwi: |
| 12647 | case clang::X86::BI__builtin_ia32_prold128: |
| 12648 | case clang::X86::BI__builtin_ia32_prold256: |
| 12649 | case clang::X86::BI__builtin_ia32_prold512: |
| 12650 | case clang::X86::BI__builtin_ia32_prolq128: |
| 12651 | case clang::X86::BI__builtin_ia32_prolq256: |
| 12652 | case clang::X86::BI__builtin_ia32_prolq512: |
| 12653 | return EvaluateBinOpExpr( |
| 12654 | [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(rotateAmt: RHS); }); |
| 12655 | |
| 12656 | case clang::X86::BI__builtin_ia32_prord128: |
| 12657 | case clang::X86::BI__builtin_ia32_prord256: |
| 12658 | case clang::X86::BI__builtin_ia32_prord512: |
| 12659 | case clang::X86::BI__builtin_ia32_prorq128: |
| 12660 | case clang::X86::BI__builtin_ia32_prorq256: |
| 12661 | case clang::X86::BI__builtin_ia32_prorq512: |
| 12662 | return EvaluateBinOpExpr( |
| 12663 | [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(rotateAmt: RHS); }); |
| 12664 | |
| 12665 | case Builtin::BI__builtin_elementwise_max: |
| 12666 | case Builtin::BI__builtin_elementwise_min: { |
| 12667 | APValue SourceLHS, SourceRHS; |
| 12668 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 12669 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 12670 | return false; |
| 12671 | |
| 12672 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 12673 | |
| 12674 | if (!DestEltTy->isIntegerType()) |
| 12675 | return false; |
| 12676 | |
| 12677 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 12678 | SmallVector<APValue, 4> ResultElements; |
| 12679 | ResultElements.reserve(N: SourceLen); |
| 12680 | |
| 12681 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12682 | APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt(); |
| 12683 | APSInt RHS = SourceRHS.getVectorElt(I: EltNum).getInt(); |
| 12684 | switch (E->getBuiltinCallee()) { |
| 12685 | case Builtin::BI__builtin_elementwise_max: |
| 12686 | ResultElements.push_back( |
| 12687 | Elt: APValue(APSInt(std::max(a: LHS, b: RHS), |
| 12688 | DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 12689 | break; |
| 12690 | case Builtin::BI__builtin_elementwise_min: |
| 12691 | ResultElements.push_back( |
| 12692 | Elt: APValue(APSInt(std::min(a: LHS, b: RHS), |
| 12693 | DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 12694 | break; |
| 12695 | } |
| 12696 | } |
| 12697 | |
| 12698 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12699 | } |
| 12700 | case X86::BI__builtin_ia32_vpshldd128: |
| 12701 | case X86::BI__builtin_ia32_vpshldd256: |
| 12702 | case X86::BI__builtin_ia32_vpshldd512: |
| 12703 | case X86::BI__builtin_ia32_vpshldq128: |
| 12704 | case X86::BI__builtin_ia32_vpshldq256: |
| 12705 | case X86::BI__builtin_ia32_vpshldq512: |
| 12706 | case X86::BI__builtin_ia32_vpshldw128: |
| 12707 | case X86::BI__builtin_ia32_vpshldw256: |
| 12708 | case X86::BI__builtin_ia32_vpshldw512: { |
| 12709 | APValue SourceHi, SourceLo, SourceAmt; |
| 12710 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceHi) || |
| 12711 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLo) || |
| 12712 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceAmt)) |
| 12713 | return false; |
| 12714 | |
| 12715 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 12716 | unsigned SourceLen = SourceHi.getVectorLength(); |
| 12717 | SmallVector<APValue, 32> ResultElements; |
| 12718 | ResultElements.reserve(N: SourceLen); |
| 12719 | |
| 12720 | APInt Amt = SourceAmt.getInt(); |
| 12721 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12722 | APInt Hi = SourceHi.getVectorElt(I: EltNum).getInt(); |
| 12723 | APInt Lo = SourceLo.getVectorElt(I: EltNum).getInt(); |
| 12724 | APInt R = llvm::APIntOps::fshl(Hi, Lo, Shift: Amt); |
| 12725 | ResultElements.push_back( |
| 12726 | Elt: APValue(APSInt(R, DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 12727 | } |
| 12728 | |
| 12729 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12730 | } |
| 12731 | case X86::BI__builtin_ia32_vpshrdd128: |
| 12732 | case X86::BI__builtin_ia32_vpshrdd256: |
| 12733 | case X86::BI__builtin_ia32_vpshrdd512: |
| 12734 | case X86::BI__builtin_ia32_vpshrdq128: |
| 12735 | case X86::BI__builtin_ia32_vpshrdq256: |
| 12736 | case X86::BI__builtin_ia32_vpshrdq512: |
| 12737 | case X86::BI__builtin_ia32_vpshrdw128: |
| 12738 | case X86::BI__builtin_ia32_vpshrdw256: |
| 12739 | case X86::BI__builtin_ia32_vpshrdw512: { |
| 12740 | // NOTE: Reversed Hi/Lo operands. |
| 12741 | APValue SourceHi, SourceLo, SourceAmt; |
| 12742 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLo) || |
| 12743 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceHi) || |
| 12744 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceAmt)) |
| 12745 | return false; |
| 12746 | |
| 12747 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 12748 | unsigned SourceLen = SourceHi.getVectorLength(); |
| 12749 | SmallVector<APValue, 32> ResultElements; |
| 12750 | ResultElements.reserve(N: SourceLen); |
| 12751 | |
| 12752 | APInt Amt = SourceAmt.getInt(); |
| 12753 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12754 | APInt Hi = SourceHi.getVectorElt(I: EltNum).getInt(); |
| 12755 | APInt Lo = SourceLo.getVectorElt(I: EltNum).getInt(); |
| 12756 | APInt R = llvm::APIntOps::fshr(Hi, Lo, Shift: Amt); |
| 12757 | ResultElements.push_back( |
| 12758 | Elt: APValue(APSInt(R, DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 12759 | } |
| 12760 | |
| 12761 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12762 | } |
| 12763 | case X86::BI__builtin_ia32_vpconflictsi_128: |
| 12764 | case X86::BI__builtin_ia32_vpconflictsi_256: |
| 12765 | case X86::BI__builtin_ia32_vpconflictsi_512: |
| 12766 | case X86::BI__builtin_ia32_vpconflictdi_128: |
| 12767 | case X86::BI__builtin_ia32_vpconflictdi_256: |
| 12768 | case X86::BI__builtin_ia32_vpconflictdi_512: { |
| 12769 | APValue Source; |
| 12770 | |
| 12771 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source)) |
| 12772 | return false; |
| 12773 | |
| 12774 | unsigned SourceLen = Source.getVectorLength(); |
| 12775 | SmallVector<APValue, 32> ResultElements; |
| 12776 | ResultElements.reserve(N: SourceLen); |
| 12777 | |
| 12778 | const auto *VecT = E->getType()->castAs<VectorType>(); |
| 12779 | bool DestUnsigned = |
| 12780 | VecT->getElementType()->isUnsignedIntegerOrEnumerationType(); |
| 12781 | |
| 12782 | for (unsigned I = 0; I != SourceLen; ++I) { |
| 12783 | const APValue &EltI = Source.getVectorElt(I); |
| 12784 | |
| 12785 | APInt ConflictMask(EltI.getInt().getBitWidth(), 0); |
| 12786 | for (unsigned J = 0; J != I; ++J) { |
| 12787 | const APValue &EltJ = Source.getVectorElt(I: J); |
| 12788 | ConflictMask.setBitVal(BitPosition: J, BitValue: EltI.getInt() == EltJ.getInt()); |
| 12789 | } |
| 12790 | ResultElements.push_back(Elt: APValue(APSInt(ConflictMask, DestUnsigned))); |
| 12791 | } |
| 12792 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12793 | } |
| 12794 | case X86::BI__builtin_ia32_blendpd: |
| 12795 | case X86::BI__builtin_ia32_blendpd256: |
| 12796 | case X86::BI__builtin_ia32_blendps: |
| 12797 | case X86::BI__builtin_ia32_blendps256: |
| 12798 | case X86::BI__builtin_ia32_pblendw128: |
| 12799 | case X86::BI__builtin_ia32_pblendw256: |
| 12800 | case X86::BI__builtin_ia32_pblendd128: |
| 12801 | case X86::BI__builtin_ia32_pblendd256: { |
| 12802 | APValue SourceF, SourceT, SourceC; |
| 12803 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceF) || |
| 12804 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceT) || |
| 12805 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceC)) |
| 12806 | return false; |
| 12807 | |
| 12808 | const APInt &C = SourceC.getInt(); |
| 12809 | unsigned SourceLen = SourceF.getVectorLength(); |
| 12810 | SmallVector<APValue, 32> ResultElements; |
| 12811 | ResultElements.reserve(N: SourceLen); |
| 12812 | for (unsigned EltNum = 0; EltNum != SourceLen; ++EltNum) { |
| 12813 | const APValue &F = SourceF.getVectorElt(I: EltNum); |
| 12814 | const APValue &T = SourceT.getVectorElt(I: EltNum); |
| 12815 | ResultElements.push_back(Elt: C[EltNum % 8] ? T : F); |
| 12816 | } |
| 12817 | |
| 12818 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12819 | } |
| 12820 | |
| 12821 | case X86::BI__builtin_ia32_psignb128: |
| 12822 | case X86::BI__builtin_ia32_psignb256: |
| 12823 | case X86::BI__builtin_ia32_psignw128: |
| 12824 | case X86::BI__builtin_ia32_psignw256: |
| 12825 | case X86::BI__builtin_ia32_psignd128: |
| 12826 | case X86::BI__builtin_ia32_psignd256: |
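    // PSIGN: zero, negate, or pass through each element of A according to
    // whether the matching element of B is zero, negative, or positive.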
| 12827 | return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) { |
| 12828 | if (BElem.isZero()) |
| 12829 | return APInt::getZero(numBits: AElem.getBitWidth()); |
| 12830 | if (BElem.isNegative()) |
| 12831 | return -AElem; |
| 12832 | return AElem; |
| 12833 | }); |
| 12834 | |
| 12835 | case X86::BI__builtin_ia32_blendvpd: |
| 12836 | case X86::BI__builtin_ia32_blendvpd256: |
| 12837 | case X86::BI__builtin_ia32_blendvps: |
| 12838 | case X86::BI__builtin_ia32_blendvps256: |
| 12839 | case X86::BI__builtin_ia32_pblendvb128: |
| 12840 | case X86::BI__builtin_ia32_pblendvb256: { |
| 12841 | // SSE blendv by mask signbit: "Result = C[] < 0 ? T[] : F[]". |
| 12842 | APValue SourceF, SourceT, SourceC; |
| 12843 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceF) || |
| 12844 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceT) || |
| 12845 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceC)) |
| 12846 | return false; |
| 12847 | |
| 12848 | unsigned SourceLen = SourceF.getVectorLength(); |
| 12849 | SmallVector<APValue, 32> ResultElements; |
| 12850 | ResultElements.reserve(N: SourceLen); |
| 12851 | |
| 12852 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12853 | const APValue &F = SourceF.getVectorElt(I: EltNum); |
| 12854 | const APValue &T = SourceT.getVectorElt(I: EltNum); |
| 12855 | const APValue &C = SourceC.getVectorElt(I: EltNum); |
| 12856 | APInt M = C.isInt() ? (APInt)C.getInt() : C.getFloat().bitcastToAPInt(); |
| 12857 | ResultElements.push_back(Elt: M.isNegative() ? T : F); |
| 12858 | } |
| 12859 | |
| 12860 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12861 | } |
| 12862 | case X86::BI__builtin_ia32_selectb_128: |
| 12863 | case X86::BI__builtin_ia32_selectb_256: |
| 12864 | case X86::BI__builtin_ia32_selectb_512: |
| 12865 | case X86::BI__builtin_ia32_selectw_128: |
| 12866 | case X86::BI__builtin_ia32_selectw_256: |
| 12867 | case X86::BI__builtin_ia32_selectw_512: |
| 12868 | case X86::BI__builtin_ia32_selectd_128: |
| 12869 | case X86::BI__builtin_ia32_selectd_256: |
| 12870 | case X86::BI__builtin_ia32_selectd_512: |
| 12871 | case X86::BI__builtin_ia32_selectq_128: |
| 12872 | case X86::BI__builtin_ia32_selectq_256: |
| 12873 | case X86::BI__builtin_ia32_selectq_512: |
| 12874 | case X86::BI__builtin_ia32_selectph_128: |
| 12875 | case X86::BI__builtin_ia32_selectph_256: |
| 12876 | case X86::BI__builtin_ia32_selectph_512: |
| 12877 | case X86::BI__builtin_ia32_selectpbf_128: |
| 12878 | case X86::BI__builtin_ia32_selectpbf_256: |
| 12879 | case X86::BI__builtin_ia32_selectpbf_512: |
| 12880 | case X86::BI__builtin_ia32_selectps_128: |
| 12881 | case X86::BI__builtin_ia32_selectps_256: |
| 12882 | case X86::BI__builtin_ia32_selectps_512: |
| 12883 | case X86::BI__builtin_ia32_selectpd_128: |
| 12884 | case X86::BI__builtin_ia32_selectpd_256: |
| 12885 | case X86::BI__builtin_ia32_selectpd_512: { |
| 12886 | // AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]". |
| 12887 | APValue SourceMask, SourceLHS, SourceRHS; |
| 12888 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceMask) || |
| 12889 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLHS) || |
| 12890 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceRHS)) |
| 12891 | return false; |
| 12892 | |
| 12893 | APSInt Mask = SourceMask.getInt(); |
| 12894 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 12895 | SmallVector<APValue, 4> ResultElements; |
| 12896 | ResultElements.reserve(N: SourceLen); |
| 12897 | |
| 12898 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 12899 | const APValue &LHS = SourceLHS.getVectorElt(I: EltNum); |
| 12900 | const APValue &RHS = SourceRHS.getVectorElt(I: EltNum); |
| 12901 | ResultElements.push_back(Elt: Mask[EltNum] ? LHS : RHS); |
| 12902 | } |
| 12903 | |
| 12904 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 12905 | } |
| 12906 | |
| 12907 | case X86::BI__builtin_ia32_cvtsd2ss: { |
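    // CVTSD2SS: convert the low double of B to float; the remaining result
    // elements are copied from A.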
| 12908 | APValue VecA, VecB; |
| 12909 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) || |
| 12910 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB)) |
| 12911 | return false; |
| 12912 | |
| 12913 | SmallVector<APValue, 4> Elements; |
| 12914 | |
| 12915 | APValue ResultVal; |
| 12916 | if (!ConvertDoubleToFloatStrict(Info, E, OrigVal: VecB.getVectorElt(I: 0).getFloat(), |
| 12917 | Result&: ResultVal)) |
| 12918 | return false; |
| 12919 | |
| 12920 | Elements.push_back(Elt: ResultVal); |
| 12921 | |
| 12922 | unsigned NumEltsA = VecA.getVectorLength(); |
| 12923 | for (unsigned I = 1; I < NumEltsA; ++I) { |
| 12924 | Elements.push_back(Elt: VecA.getVectorElt(I)); |
| 12925 | } |
| 12926 | |
| 12927 | return Success(V: Elements, E); |
| 12928 | } |
| 12929 | case X86::BI__builtin_ia32_cvtsd2ss_round_mask: { |
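    // Masked CVTSD2SS: if the low mask bit is set, convert the low double of
    // B, otherwise take the low element of Src; the rest are copied from A.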
| 12930 | APValue VecA, VecB, VecSrc, MaskValue; |
| 12931 | |
| 12932 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: VecA) || |
| 12933 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: VecB) || |
| 12934 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: VecSrc) || |
| 12935 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: MaskValue)) |
| 12936 | return false; |
| 12937 | |
| 12938 | unsigned Mask = MaskValue.getInt().getZExtValue(); |
| 12939 | SmallVector<APValue, 4> Elements; |
| 12940 | |
| 12941 | if (Mask & 1) { |
| 12942 | APValue ResultVal; |
| 12943 | if (!ConvertDoubleToFloatStrict(Info, E, OrigVal: VecB.getVectorElt(I: 0).getFloat(), |
| 12944 | Result&: ResultVal)) |
| 12945 | return false; |
| 12946 | Elements.push_back(Elt: ResultVal); |
| 12947 | } else { |
| 12948 | Elements.push_back(Elt: VecSrc.getVectorElt(I: 0)); |
| 12949 | } |
| 12950 | |
| 12951 | unsigned NumEltsA = VecA.getVectorLength(); |
| 12952 | for (unsigned I = 1; I < NumEltsA; ++I) { |
| 12953 | Elements.push_back(Elt: VecA.getVectorElt(I)); |
| 12954 | } |
| 12955 | |
| 12956 | return Success(V: Elements, E); |
| 12957 | } |
| 12958 | case X86::BI__builtin_ia32_cvtpd2ps: |
| 12959 | case X86::BI__builtin_ia32_cvtpd2ps256: |
| 12960 | case X86::BI__builtin_ia32_cvtpd2ps_mask: |
| 12961 | case X86::BI__builtin_ia32_cvtpd2ps512_mask: { |
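    // CVTPD2PS: convert each packed double to float. Result elements beyond
    // the input width are zero, and masked-off elements are merged from the
    // passthrough operand.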
| 12962 | |
| 12963 | const auto BuiltinID = E->getBuiltinCallee(); |
| 12964 | bool IsMasked = (BuiltinID == X86::BI__builtin_ia32_cvtpd2ps_mask || |
| 12965 | BuiltinID == X86::BI__builtin_ia32_cvtpd2ps512_mask); |
| 12966 | |
| 12967 | APValue InputValue; |
| 12968 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: InputValue)) |
| 12969 | return false; |
| 12970 | |
    APValue MergeValue;
    unsigned Mask = 0xFFFFFFFF;
    bool NeedsMerge = false;
    unsigned NumEltsResult =
        E->getType()->castAs<VectorType>()->getNumElements();
    if (IsMasked) {
      APValue MaskValue;
      if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: MaskValue))
        return false;
      Mask = MaskValue.getInt().getZExtValue();
      for (unsigned I = 0; I < NumEltsResult; ++I) {
        if (!((Mask >> I) & 1)) {
          NeedsMerge = true;
          break;
        }
      }
      if (NeedsMerge) {
        if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: MergeValue))
          return false;
      }
    }

| 12994 | unsigned NumEltsInput = InputValue.getVectorLength(); |
| 12995 | SmallVector<APValue, 8> Elements; |
| 12996 | for (unsigned I = 0; I < NumEltsResult; ++I) { |
| 12997 | if (IsMasked && !((Mask >> I) & 1)) { |
| 12998 | if (!NeedsMerge) { |
| 12999 | return false; |
| 13000 | } |
| 13001 | Elements.push_back(Elt: MergeValue.getVectorElt(I)); |
| 13002 | continue; |
| 13003 | } |
| 13004 | |
| 13005 | if (I >= NumEltsInput) { |
| 13006 | Elements.push_back(Elt: APValue(APFloat::getZero(Sem: APFloat::IEEEsingle()))); |
| 13007 | continue; |
| 13008 | } |
| 13009 | |
| 13010 | APValue ResultVal; |
| 13011 | if (!ConvertDoubleToFloatStrict( |
| 13012 | Info, E, OrigVal: InputValue.getVectorElt(I).getFloat(), Result&: ResultVal)) |
| 13013 | return false; |
| 13014 | |
| 13015 | Elements.push_back(Elt: ResultVal); |
| 13016 | } |
| 13017 | return Success(V: Elements, E); |
| 13018 | } |
| 13019 | |
| 13020 | case X86::BI__builtin_ia32_shufps: |
| 13021 | case X86::BI__builtin_ia32_shufps256: |
| 13022 | case X86::BI__builtin_ia32_shufps512: { |
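    // SHUFPS: per 128-bit lane, the low two result elements select from A and
    // the high two from B, using 2-bit index fields of the immediate.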
| 13023 | APValue R; |
| 13024 | if (!evalShuffleGeneric( |
| 13025 | Info, Call: E, Out&: R, |
| 13026 | GetSourceIndex: [](unsigned DstIdx, |
| 13027 | unsigned ShuffleMask) -> std::pair<unsigned, int> { |
| 13028 | constexpr unsigned LaneBits = 128u; |
| 13029 | unsigned NumElemPerLane = LaneBits / 32; |
| 13030 | unsigned NumSelectableElems = NumElemPerLane / 2; |
| 13031 | unsigned BitsPerElem = 2; |
| 13032 | unsigned IndexMask = (1u << BitsPerElem) - 1; |
| 13033 | unsigned MaskBits = 8; |
| 13034 | unsigned Lane = DstIdx / NumElemPerLane; |
| 13035 | unsigned ElemInLane = DstIdx % NumElemPerLane; |
| 13036 | unsigned LaneOffset = Lane * NumElemPerLane; |
| 13037 | unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; |
| 13038 | unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1; |
| 13039 | unsigned Index = (ShuffleMask >> BitIndex) & IndexMask; |
| 13040 | return {SrcIdx, static_cast<int>(LaneOffset + Index)}; |
| 13041 | })) |
| 13042 | return false; |
| 13043 | return Success(V: R, E); |
| 13044 | } |
| 13045 | case X86::BI__builtin_ia32_shufpd: |
| 13046 | case X86::BI__builtin_ia32_shufpd256: |
| 13047 | case X86::BI__builtin_ia32_shufpd512: { |
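    // SHUFPD: like SHUFPS, but on 64-bit elements with one immediate bit per
    // result element.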
| 13048 | APValue R; |
| 13049 | if (!evalShuffleGeneric( |
| 13050 | Info, Call: E, Out&: R, |
| 13051 | GetSourceIndex: [](unsigned DstIdx, |
| 13052 | unsigned ShuffleMask) -> std::pair<unsigned, int> { |
| 13053 | constexpr unsigned LaneBits = 128u; |
| 13054 | unsigned NumElemPerLane = LaneBits / 64; |
| 13055 | unsigned NumSelectableElems = NumElemPerLane / 2; |
| 13056 | unsigned BitsPerElem = 1; |
| 13057 | unsigned IndexMask = (1u << BitsPerElem) - 1; |
| 13058 | unsigned MaskBits = 8; |
| 13059 | unsigned Lane = DstIdx / NumElemPerLane; |
| 13060 | unsigned ElemInLane = DstIdx % NumElemPerLane; |
| 13061 | unsigned LaneOffset = Lane * NumElemPerLane; |
| 13062 | unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; |
| 13063 | unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1; |
| 13064 | unsigned Index = (ShuffleMask >> BitIndex) & IndexMask; |
| 13065 | return {SrcIdx, static_cast<int>(LaneOffset + Index)}; |
| 13066 | })) |
| 13067 | return false; |
| 13068 | return Success(V: R, E); |
| 13069 | } |
| 13070 | case X86::BI__builtin_ia32_insertps128: { |
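    // INSERTPS: insert one selected element of B into a selected position of
    // A, then zero the elements flagged in the low four immediate bits.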
| 13071 | APValue R; |
| 13072 | if (!evalShuffleGeneric( |
| 13073 | Info, Call: E, Out&: R, |
| 13074 | GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> { |
| 13075 | // Bits [3:0]: zero mask - if bit is set, zero this element |
| 13076 | if ((Mask & (1 << DstIdx)) != 0) { |
| 13077 | return {0, -1}; |
| 13078 | } |
                // Bits [7:6]: select element from source vector B (0-3)
| 13080 | // Bits [5:4]: select destination position (0-3) |
| 13081 | unsigned SrcElem = (Mask >> 6) & 0x3; |
| 13082 | unsigned DstElem = (Mask >> 4) & 0x3; |
| 13083 | if (DstIdx == DstElem) { |
| 13084 | // Insert element from source vector (B) at this position |
| 13085 | return {1, static_cast<int>(SrcElem)}; |
| 13086 | } else { |
| 13087 | // Copy from destination vector (A) |
| 13088 | return {0, static_cast<int>(DstIdx)}; |
| 13089 | } |
| 13090 | })) |
| 13091 | return false; |
| 13092 | return Success(V: R, E); |
| 13093 | } |
| 13094 | case X86::BI__builtin_ia32_pshufb128: |
| 13095 | case X86::BI__builtin_ia32_pshufb256: |
| 13096 | case X86::BI__builtin_ia32_pshufb512: { |
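    // PSHUFB: byte shuffle within each 128-bit lane; a control byte with the
    // high bit set zeroes the destination byte.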
| 13097 | APValue R; |
| 13098 | if (!evalShuffleGeneric( |
| 13099 | Info, Call: E, Out&: R, |
| 13100 | GetSourceIndex: [](unsigned DstIdx, |
| 13101 | unsigned ShuffleMask) -> std::pair<unsigned, int> { |
| 13102 | uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask); |
| 13103 | if (Ctlb & 0x80) |
| 13104 | return std::make_pair(x: 0, y: -1); |
| 13105 | |
| 13106 | unsigned LaneBase = (DstIdx / 16) * 16; |
| 13107 | unsigned SrcOffset = Ctlb & 0x0F; |
| 13108 | unsigned SrcIdx = LaneBase + SrcOffset; |
| 13109 | return std::make_pair(x: 0, y: static_cast<int>(SrcIdx)); |
| 13110 | })) |
| 13111 | return false; |
| 13112 | return Success(V: R, E); |
| 13113 | } |
| 13114 | |
| 13115 | case X86::BI__builtin_ia32_pshuflw: |
| 13116 | case X86::BI__builtin_ia32_pshuflw256: |
| 13117 | case X86::BI__builtin_ia32_pshuflw512: { |
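    // PSHUFLW: shuffle the low four 16-bit elements of each 128-bit lane; the
    // high four are passed through unchanged.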
| 13118 | APValue R; |
| 13119 | if (!evalShuffleGeneric( |
| 13120 | Info, Call: E, Out&: R, |
| 13121 | GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> { |
| 13122 | constexpr unsigned LaneBits = 128u; |
| 13123 | constexpr unsigned ElemBits = 16u; |
| 13124 | constexpr unsigned LaneElts = LaneBits / ElemBits; |
| 13125 | constexpr unsigned HalfSize = 4; |
| 13126 | unsigned LaneBase = (DstIdx / LaneElts) * LaneElts; |
| 13127 | unsigned LaneIdx = DstIdx % LaneElts; |
| 13128 | if (LaneIdx < HalfSize) { |
| 13129 | unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3; |
| 13130 | return std::make_pair(x: 0, y: static_cast<int>(LaneBase + Sel)); |
| 13131 | } |
| 13132 | return std::make_pair(x: 0, y: static_cast<int>(DstIdx)); |
| 13133 | })) |
| 13134 | return false; |
| 13135 | return Success(V: R, E); |
| 13136 | } |
| 13137 | |
| 13138 | case X86::BI__builtin_ia32_pshufhw: |
| 13139 | case X86::BI__builtin_ia32_pshufhw256: |
| 13140 | case X86::BI__builtin_ia32_pshufhw512: { |
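    // PSHUFHW: shuffle the high four 16-bit elements of each 128-bit lane;
    // the low four are passed through unchanged.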
| 13141 | APValue R; |
| 13142 | if (!evalShuffleGeneric( |
| 13143 | Info, Call: E, Out&: R, |
| 13144 | GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> { |
| 13145 | constexpr unsigned LaneBits = 128u; |
| 13146 | constexpr unsigned ElemBits = 16u; |
| 13147 | constexpr unsigned LaneElts = LaneBits / ElemBits; |
| 13148 | constexpr unsigned HalfSize = 4; |
| 13149 | unsigned LaneBase = (DstIdx / LaneElts) * LaneElts; |
| 13150 | unsigned LaneIdx = DstIdx % LaneElts; |
| 13151 | if (LaneIdx >= HalfSize) { |
| 13152 | unsigned Rel = LaneIdx - HalfSize; |
| 13153 | unsigned Sel = (Mask >> (2 * Rel)) & 0x3; |
| 13154 | return std::make_pair( |
| 13155 | x: 0, y: static_cast<int>(LaneBase + HalfSize + Sel)); |
| 13156 | } |
| 13157 | return std::make_pair(x: 0, y: static_cast<int>(DstIdx)); |
| 13158 | })) |
| 13159 | return false; |
| 13160 | return Success(V: R, E); |
| 13161 | } |
| 13162 | |
| 13163 | case X86::BI__builtin_ia32_pshufd: |
| 13164 | case X86::BI__builtin_ia32_pshufd256: |
| 13165 | case X86::BI__builtin_ia32_pshufd512: |
| 13166 | case X86::BI__builtin_ia32_vpermilps: |
| 13167 | case X86::BI__builtin_ia32_vpermilps256: |
| 13168 | case X86::BI__builtin_ia32_vpermilps512: { |
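    // PSHUFD/VPERMILPS: shuffle 32-bit elements within each 128-bit lane
    // using 2-bit fields of the immediate.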
| 13169 | APValue R; |
| 13170 | if (!evalShuffleGeneric( |
| 13171 | Info, Call: E, Out&: R, |
| 13172 | GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> { |
| 13173 | constexpr unsigned LaneBits = 128u; |
| 13174 | constexpr unsigned ElemBits = 32u; |
| 13175 | constexpr unsigned LaneElts = LaneBits / ElemBits; |
| 13176 | unsigned LaneBase = (DstIdx / LaneElts) * LaneElts; |
| 13177 | unsigned LaneIdx = DstIdx % LaneElts; |
| 13178 | unsigned Sel = (Mask >> (2 * LaneIdx)) & 0x3; |
| 13179 | return std::make_pair(x: 0, y: static_cast<int>(LaneBase + Sel)); |
| 13180 | })) |
| 13181 | return false; |
| 13182 | return Success(V: R, E); |
| 13183 | } |
| 13184 | |
| 13185 | case X86::BI__builtin_ia32_vpermilvarpd: |
| 13186 | case X86::BI__builtin_ia32_vpermilvarpd256: |
| 13187 | case X86::BI__builtin_ia32_vpermilvarpd512: { |
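    // VPERMILVARPD: bit 1 of each control element selects the low or high
    // double within its 128-bit lane.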
| 13188 | APValue R; |
| 13189 | if (!evalShuffleGeneric( |
| 13190 | Info, Call: E, Out&: R, |
| 13191 | GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> { |
| 13192 | unsigned NumElemPerLane = 2; |
| 13193 | unsigned Lane = DstIdx / NumElemPerLane; |
| 13194 | unsigned Offset = Mask & 0b10 ? 1 : 0; |
| 13195 | return std::make_pair( |
| 13196 | x: 0, y: static_cast<int>(Lane * NumElemPerLane + Offset)); |
| 13197 | })) |
| 13198 | return false; |
| 13199 | return Success(V: R, E); |
| 13200 | } |
| 13201 | |
| 13202 | case X86::BI__builtin_ia32_vpermilpd: |
| 13203 | case X86::BI__builtin_ia32_vpermilpd256: |
| 13204 | case X86::BI__builtin_ia32_vpermilpd512: { |
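    // VPERMILPD: one immediate bit per destination element selects the low or
    // high double within its 128-bit lane.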
| 13205 | APValue R; |
| 13206 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Control) { |
| 13207 | unsigned NumElemPerLane = 2; |
| 13208 | unsigned BitsPerElem = 1; |
| 13209 | unsigned MaskBits = 8; |
| 13210 | unsigned IndexMask = 0x1; |
| 13211 | unsigned Lane = DstIdx / NumElemPerLane; |
| 13212 | unsigned LaneOffset = Lane * NumElemPerLane; |
| 13213 | unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; |
| 13214 | unsigned Index = (Control >> BitIndex) & IndexMask; |
| 13215 | return std::make_pair(x: 0, y: static_cast<int>(LaneOffset + Index)); |
| 13216 | })) |
| 13217 | return false; |
| 13218 | return Success(V: R, E); |
| 13219 | } |
| 13220 | |
| 13221 | case X86::BI__builtin_ia32_permdf256: |
| 13222 | case X86::BI__builtin_ia32_permdi256: { |
| 13223 | APValue R; |
| 13224 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Control) { |
| 13225 | // permute4x64 operates on 4 64-bit elements |
| 13226 | // For element i (0-3), extract bits [2*i+1:2*i] from Control |
| 13227 | unsigned Index = (Control >> (2 * DstIdx)) & 0x3; |
| 13228 | return std::make_pair(x: 0, y: static_cast<int>(Index)); |
| 13229 | })) |
| 13230 | return false; |
| 13231 | return Success(V: R, E); |
| 13232 | } |
| 13233 | |
| 13234 | case X86::BI__builtin_ia32_vpermilvarps: |
| 13235 | case X86::BI__builtin_ia32_vpermilvarps256: |
| 13236 | case X86::BI__builtin_ia32_vpermilvarps512: { |
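    // VPERMILVARPS: the low two bits of each control element select a 32-bit
    // element within its 128-bit lane.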
| 13237 | APValue R; |
| 13238 | if (!evalShuffleGeneric( |
| 13239 | Info, Call: E, Out&: R, |
| 13240 | GetSourceIndex: [](unsigned DstIdx, unsigned Mask) -> std::pair<unsigned, int> { |
| 13241 | unsigned NumElemPerLane = 4; |
| 13242 | unsigned Lane = DstIdx / NumElemPerLane; |
| 13243 | unsigned Offset = Mask & 0b11; |
| 13244 | return std::make_pair( |
| 13245 | x: 0, y: static_cast<int>(Lane * NumElemPerLane + Offset)); |
| 13246 | })) |
| 13247 | return false; |
| 13248 | return Success(V: R, E); |
| 13249 | } |
| 13250 | |
| 13251 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
| 13252 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
| 13253 | case X86::BI__builtin_ia32_vpmultishiftqb512: { |
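    // VPMULTISHIFTQB: each destination byte is an 8-bit field of its 64-bit
    // source word, read (with wraparound) at the bit offset given by the
    // control byte.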
| 13254 | assert(E->getNumArgs() == 2); |
| 13255 | |
| 13256 | APValue A, B; |
| 13257 | if (!Evaluate(Result&: A, Info, E: E->getArg(Arg: 0)) || !Evaluate(Result&: B, Info, E: E->getArg(Arg: 1))) |
| 13258 | return false; |
| 13259 | |
| 13260 | assert(A.getVectorLength() == B.getVectorLength()); |
| 13261 | unsigned NumBytesInQWord = 8; |
| 13262 | unsigned NumBitsInByte = 8; |
| 13263 | unsigned NumBytes = A.getVectorLength(); |
| 13264 | unsigned NumQWords = NumBytes / NumBytesInQWord; |
| 13265 | SmallVector<APValue, 64> Result; |
| 13266 | Result.reserve(N: NumBytes); |
| 13267 | |
| 13268 | for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) { |
| 13269 | APInt BQWord(64, 0); |
| 13270 | for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { |
| 13271 | unsigned Idx = QWordId * NumBytesInQWord + ByteIdx; |
| 13272 | uint64_t Byte = B.getVectorElt(I: Idx).getInt().getZExtValue(); |
| 13273 | BQWord.insertBits(SubBits: APInt(8, Byte & 0xFF), bitPosition: ByteIdx * NumBitsInByte); |
| 13274 | } |
| 13275 | |
| 13276 | for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { |
| 13277 | unsigned Idx = QWordId * NumBytesInQWord + ByteIdx; |
| 13278 | uint64_t Ctrl = A.getVectorElt(I: Idx).getInt().getZExtValue() & 0x3F; |
| 13279 | |
| 13280 | APInt Byte(8, 0); |
| 13281 | for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) { |
| 13282 | Byte.setBitVal(BitPosition: BitIdx, BitValue: BQWord[(Ctrl + BitIdx) & 0x3F]); |
| 13283 | } |
| 13284 | Result.push_back(Elt: APValue(APSInt(Byte, /*isUnsigned*/ true))); |
| 13285 | } |
| 13286 | } |
| 13287 | return Success(V: APValue(Result.data(), Result.size()), E); |
| 13288 | } |
| 13289 | |
| 13290 | case X86::BI__builtin_ia32_phminposuw128: { |
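    // PHMINPOSUW: result element 0 is the minimum unsigned word of the
    // source, element 1 is its index, and the remaining elements are zero.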
| 13291 | APValue Source; |
| 13292 | if (!Evaluate(Result&: Source, Info, E: E->getArg(Arg: 0))) |
| 13293 | return false; |
| 13294 | unsigned SourceLen = Source.getVectorLength(); |
| 13295 | const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>(); |
| 13296 | QualType ElemQT = VT->getElementType(); |
| 13297 | unsigned ElemBitWidth = Info.Ctx.getTypeSize(T: ElemQT); |
| 13298 | |
| 13299 | APInt MinIndex(ElemBitWidth, 0); |
| 13300 | APInt MinVal = Source.getVectorElt(I: 0).getInt(); |
| 13301 | for (unsigned I = 1; I != SourceLen; ++I) { |
| 13302 | APInt Val = Source.getVectorElt(I).getInt(); |
| 13303 | if (MinVal.ugt(RHS: Val)) { |
| 13304 | MinVal = Val; |
| 13305 | MinIndex = I; |
| 13306 | } |
| 13307 | } |
| 13308 | |
| 13309 | bool ResultUnsigned = E->getCallReturnType(Ctx: Info.Ctx) |
| 13310 | ->castAs<VectorType>() |
| 13311 | ->getElementType() |
| 13312 | ->isUnsignedIntegerOrEnumerationType(); |
| 13313 | |
| 13314 | SmallVector<APValue, 8> Result; |
| 13315 | Result.reserve(N: SourceLen); |
| 13316 | Result.emplace_back(Args: APSInt(MinVal, ResultUnsigned)); |
| 13317 | Result.emplace_back(Args: APSInt(MinIndex, ResultUnsigned)); |
| 13318 | for (unsigned I = 0; I != SourceLen - 2; ++I) { |
| 13319 | Result.emplace_back(Args: APSInt(APInt(ElemBitWidth, 0), ResultUnsigned)); |
| 13320 | } |
| 13321 | return Success(V: APValue(Result.data(), Result.size()), E); |
| 13322 | } |
| 13323 | |
| 13324 | case X86::BI__builtin_ia32_psraq128: |
| 13325 | case X86::BI__builtin_ia32_psraq256: |
| 13326 | case X86::BI__builtin_ia32_psraq512: |
| 13327 | case X86::BI__builtin_ia32_psrad128: |
| 13328 | case X86::BI__builtin_ia32_psrad256: |
| 13329 | case X86::BI__builtin_ia32_psrad512: |
| 13330 | case X86::BI__builtin_ia32_psraw128: |
| 13331 | case X86::BI__builtin_ia32_psraw256: |
| 13332 | case X86::BI__builtin_ia32_psraw512: { |
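    // PSRA*: arithmetic right shift of each element by the 64-bit count;
    // counts of at least the element width fill the element with its sign bit.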
| 13333 | APValue R; |
| 13334 | if (!evalShiftWithCount( |
| 13335 | Info, Call: E, Out&: R, |
| 13336 | ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.ashr(ShiftAmt: Count); }, |
| 13337 | OverflowOp: [](const APInt &Elt, unsigned Width) { |
| 13338 | return Elt.ashr(ShiftAmt: Width - 1); |
| 13339 | })) |
| 13340 | return false; |
| 13341 | return Success(V: R, E); |
| 13342 | } |
| 13343 | |
| 13344 | case X86::BI__builtin_ia32_psllq128: |
| 13345 | case X86::BI__builtin_ia32_psllq256: |
| 13346 | case X86::BI__builtin_ia32_psllq512: |
| 13347 | case X86::BI__builtin_ia32_pslld128: |
| 13348 | case X86::BI__builtin_ia32_pslld256: |
| 13349 | case X86::BI__builtin_ia32_pslld512: |
| 13350 | case X86::BI__builtin_ia32_psllw128: |
| 13351 | case X86::BI__builtin_ia32_psllw256: |
| 13352 | case X86::BI__builtin_ia32_psllw512: { |
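    // PSLL*: logical left shift of each element by the 64-bit count; counts
    // of at least the element width yield zero.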
| 13353 | APValue R; |
| 13354 | if (!evalShiftWithCount( |
| 13355 | Info, Call: E, Out&: R, |
| 13356 | ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.shl(shiftAmt: Count); }, |
| 13357 | OverflowOp: [](const APInt &Elt, unsigned Width) { |
| 13358 | return APInt::getZero(numBits: Width); |
| 13359 | })) |
| 13360 | return false; |
| 13361 | return Success(V: R, E); |
| 13362 | } |
| 13363 | |
| 13364 | case X86::BI__builtin_ia32_psrlq128: |
| 13365 | case X86::BI__builtin_ia32_psrlq256: |
| 13366 | case X86::BI__builtin_ia32_psrlq512: |
| 13367 | case X86::BI__builtin_ia32_psrld128: |
| 13368 | case X86::BI__builtin_ia32_psrld256: |
| 13369 | case X86::BI__builtin_ia32_psrld512: |
| 13370 | case X86::BI__builtin_ia32_psrlw128: |
| 13371 | case X86::BI__builtin_ia32_psrlw256: |
| 13372 | case X86::BI__builtin_ia32_psrlw512: { |
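    // PSRL*: logical right shift of each element by the 64-bit count; counts
    // of at least the element width yield zero.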
| 13373 | APValue R; |
| 13374 | if (!evalShiftWithCount( |
| 13375 | Info, Call: E, Out&: R, |
| 13376 | ShiftOp: [](const APInt &Elt, uint64_t Count) { return Elt.lshr(shiftAmt: Count); }, |
| 13377 | OverflowOp: [](const APInt &Elt, unsigned Width) { |
| 13378 | return APInt::getZero(numBits: Width); |
| 13379 | })) |
| 13380 | return false; |
| 13381 | return Success(V: R, E); |
| 13382 | } |
| 13383 | |
| 13384 | case X86::BI__builtin_ia32_pternlogd128_mask: |
| 13385 | case X86::BI__builtin_ia32_pternlogd256_mask: |
| 13386 | case X86::BI__builtin_ia32_pternlogd512_mask: |
| 13387 | case X86::BI__builtin_ia32_pternlogq128_mask: |
| 13388 | case X86::BI__builtin_ia32_pternlogq256_mask: |
| 13389 | case X86::BI__builtin_ia32_pternlogq512_mask: { |
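    // PTERNLOG (merge masking): each result bit is Imm[(a << 2) | (b << 1) | c]
    // over the corresponding bits of A, B and C; elements whose mask bit is
    // clear keep the value from A.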
| 13390 | APValue AValue, BValue, CValue, ImmValue, UValue; |
| 13391 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: AValue) || |
| 13392 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: BValue) || |
| 13393 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: CValue) || |
| 13394 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: ImmValue) || |
| 13395 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 4), Result&: UValue)) |
| 13396 | return false; |
| 13397 | |
| 13398 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13399 | bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType(); |
| 13400 | APInt Imm = ImmValue.getInt(); |
| 13401 | APInt U = UValue.getInt(); |
| 13402 | unsigned ResultLen = AValue.getVectorLength(); |
| 13403 | SmallVector<APValue, 16> ResultElements; |
| 13404 | ResultElements.reserve(N: ResultLen); |
| 13405 | |
| 13406 | for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) { |
| 13407 | APInt ALane = AValue.getVectorElt(I: EltNum).getInt(); |
| 13408 | APInt BLane = BValue.getVectorElt(I: EltNum).getInt(); |
| 13409 | APInt CLane = CValue.getVectorElt(I: EltNum).getInt(); |
| 13410 | |
| 13411 | if (U[EltNum]) { |
| 13412 | unsigned BitWidth = ALane.getBitWidth(); |
| 13413 | APInt ResLane(BitWidth, 0); |
| 13414 | |
| 13415 | for (unsigned Bit = 0; Bit < BitWidth; ++Bit) { |
| 13416 | unsigned ABit = ALane[Bit]; |
| 13417 | unsigned BBit = BLane[Bit]; |
| 13418 | unsigned CBit = CLane[Bit]; |
| 13419 | |
| 13420 | unsigned Idx = (ABit << 2) | (BBit << 1) | CBit; |
| 13421 | ResLane.setBitVal(BitPosition: Bit, BitValue: Imm[Idx]); |
| 13422 | } |
| 13423 | ResultElements.push_back(Elt: APValue(APSInt(ResLane, DestUnsigned))); |
| 13424 | } else { |
| 13425 | ResultElements.push_back(Elt: APValue(APSInt(ALane, DestUnsigned))); |
| 13426 | } |
| 13427 | } |
| 13428 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13429 | } |
| 13430 | case X86::BI__builtin_ia32_pternlogd128_maskz: |
| 13431 | case X86::BI__builtin_ia32_pternlogd256_maskz: |
| 13432 | case X86::BI__builtin_ia32_pternlogd512_maskz: |
| 13433 | case X86::BI__builtin_ia32_pternlogq128_maskz: |
| 13434 | case X86::BI__builtin_ia32_pternlogq256_maskz: |
| 13435 | case X86::BI__builtin_ia32_pternlogq512_maskz: { |
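    // PTERNLOG (zero masking): as the merge-masking form, but elements whose
    // mask bit is clear are zeroed.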
| 13436 | APValue AValue, BValue, CValue, ImmValue, UValue; |
| 13437 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: AValue) || |
| 13438 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: BValue) || |
| 13439 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: CValue) || |
| 13440 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 3), Result&: ImmValue) || |
| 13441 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 4), Result&: UValue)) |
| 13442 | return false; |
| 13443 | |
| 13444 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13445 | bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType(); |
| 13446 | APInt Imm = ImmValue.getInt(); |
| 13447 | APInt U = UValue.getInt(); |
| 13448 | unsigned ResultLen = AValue.getVectorLength(); |
| 13449 | SmallVector<APValue, 16> ResultElements; |
| 13450 | ResultElements.reserve(N: ResultLen); |
| 13451 | |
| 13452 | for (unsigned EltNum = 0; EltNum < ResultLen; ++EltNum) { |
| 13453 | APInt ALane = AValue.getVectorElt(I: EltNum).getInt(); |
| 13454 | APInt BLane = BValue.getVectorElt(I: EltNum).getInt(); |
| 13455 | APInt CLane = CValue.getVectorElt(I: EltNum).getInt(); |
| 13456 | |
| 13457 | unsigned BitWidth = ALane.getBitWidth(); |
| 13458 | APInt ResLane(BitWidth, 0); |
| 13459 | |
| 13460 | if (U[EltNum]) { |
| 13461 | for (unsigned Bit = 0; Bit < BitWidth; ++Bit) { |
| 13462 | unsigned ABit = ALane[Bit]; |
| 13463 | unsigned BBit = BLane[Bit]; |
| 13464 | unsigned CBit = CLane[Bit]; |
| 13465 | |
| 13466 | unsigned Idx = (ABit << 2) | (BBit << 1) | CBit; |
| 13467 | ResLane.setBitVal(BitPosition: Bit, BitValue: Imm[Idx]); |
| 13468 | } |
| 13469 | } |
| 13470 | ResultElements.push_back(Elt: APValue(APSInt(ResLane, DestUnsigned))); |
| 13471 | } |
| 13472 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13473 | } |
| 13474 | |
| 13475 | case Builtin::BI__builtin_elementwise_clzg: |
| 13476 | case Builtin::BI__builtin_elementwise_ctzg: { |
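    // Elementwise count of leading/trailing zero bits. A zero element takes
    // its value from the optional fallback vector, or is diagnosed.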
| 13477 | APValue SourceLHS; |
| 13478 | std::optional<APValue> Fallback; |
| 13479 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS)) |
| 13480 | return false; |
| 13481 | if (E->getNumArgs() > 1) { |
| 13482 | APValue FallbackTmp; |
| 13483 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: FallbackTmp)) |
| 13484 | return false; |
| 13485 | Fallback = FallbackTmp; |
| 13486 | } |
| 13487 | |
| 13488 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13489 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 13490 | SmallVector<APValue, 4> ResultElements; |
| 13491 | ResultElements.reserve(N: SourceLen); |
| 13492 | |
| 13493 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 13494 | APSInt LHS = SourceLHS.getVectorElt(I: EltNum).getInt(); |
| 13495 | if (!LHS) { |
| 13496 | // Without a fallback, a zero element is undefined |
| 13497 | if (!Fallback) { |
| 13498 | Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero) |
| 13499 | << /*IsTrailing=*/(E->getBuiltinCallee() == |
| 13500 | Builtin::BI__builtin_elementwise_ctzg); |
| 13501 | return false; |
| 13502 | } |
| 13503 | ResultElements.push_back(Elt: Fallback->getVectorElt(I: EltNum)); |
| 13504 | continue; |
| 13505 | } |
| 13506 | switch (E->getBuiltinCallee()) { |
| 13507 | case Builtin::BI__builtin_elementwise_clzg: |
| 13508 | ResultElements.push_back(Elt: APValue( |
| 13509 | APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), LHS.countl_zero()), |
| 13510 | DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 13511 | break; |
| 13512 | case Builtin::BI__builtin_elementwise_ctzg: |
| 13513 | ResultElements.push_back(Elt: APValue( |
| 13514 | APSInt(APInt(Info.Ctx.getIntWidth(T: DestEltTy), LHS.countr_zero()), |
| 13515 | DestEltTy->isUnsignedIntegerOrEnumerationType()))); |
| 13516 | break; |
| 13517 | } |
| 13518 | } |
| 13519 | |
| 13520 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13521 | } |
| 13522 | |
| 13523 | case Builtin::BI__builtin_elementwise_fma: { |
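    // Elementwise fused multiply-add: Result[i] = X[i] * Y[i] + Z[i] with a
    // single rounding.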
| 13524 | APValue SourceX, SourceY, SourceZ; |
| 13525 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceX) || |
| 13526 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceY) || |
| 13527 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceZ)) |
| 13528 | return false; |
| 13529 | |
| 13530 | unsigned SourceLen = SourceX.getVectorLength(); |
| 13531 | SmallVector<APValue> ResultElements; |
| 13532 | ResultElements.reserve(N: SourceLen); |
| 13533 | llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E); |
| 13534 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 13535 | const APFloat &X = SourceX.getVectorElt(I: EltNum).getFloat(); |
| 13536 | const APFloat &Y = SourceY.getVectorElt(I: EltNum).getFloat(); |
| 13537 | const APFloat &Z = SourceZ.getVectorElt(I: EltNum).getFloat(); |
| 13538 | APFloat Result(X); |
| 13539 | (void)Result.fusedMultiplyAdd(Multiplicand: Y, Addend: Z, RM); |
| 13540 | ResultElements.push_back(Elt: APValue(Result)); |
| 13541 | } |
| 13542 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13543 | } |
| 13544 | |
| 13545 | case clang::X86::BI__builtin_ia32_phaddw128: |
| 13546 | case clang::X86::BI__builtin_ia32_phaddw256: |
| 13547 | case clang::X86::BI__builtin_ia32_phaddd128: |
| 13548 | case clang::X86::BI__builtin_ia32_phaddd256: |
| 13549 | case clang::X86::BI__builtin_ia32_phaddsw128: |
| 13550 | case clang::X86::BI__builtin_ia32_phaddsw256: |
| 13551 | |
| 13552 | case clang::X86::BI__builtin_ia32_phsubw128: |
| 13553 | case clang::X86::BI__builtin_ia32_phsubw256: |
| 13554 | case clang::X86::BI__builtin_ia32_phsubd128: |
| 13555 | case clang::X86::BI__builtin_ia32_phsubd256: |
| 13556 | case clang::X86::BI__builtin_ia32_phsubsw128: |
| 13557 | case clang::X86::BI__builtin_ia32_phsubsw256: { |
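    // Horizontal add/sub: within each 128-bit lane, adjacent pairs of LHS
    // elements and then RHS elements are combined (with signed saturation for
    // the saturating forms).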
| 13558 | APValue SourceLHS, SourceRHS; |
| 13559 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 13560 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 13561 | return false; |
| 13562 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13563 | bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType(); |
| 13564 | |
| 13565 | unsigned NumElts = SourceLHS.getVectorLength(); |
| 13566 | unsigned EltBits = Info.Ctx.getIntWidth(T: DestEltTy); |
| 13567 | unsigned EltsPerLane = 128 / EltBits; |
| 13568 | SmallVector<APValue, 4> ResultElements; |
| 13569 | ResultElements.reserve(N: NumElts); |
| 13570 | |
| 13571 | for (unsigned LaneStart = 0; LaneStart != NumElts; |
| 13572 | LaneStart += EltsPerLane) { |
| 13573 | for (unsigned I = 0; I != EltsPerLane; I += 2) { |
| 13574 | APSInt LHSA = SourceLHS.getVectorElt(I: LaneStart + I).getInt(); |
| 13575 | APSInt LHSB = SourceLHS.getVectorElt(I: LaneStart + I + 1).getInt(); |
| 13576 | switch (E->getBuiltinCallee()) { |
| 13577 | case clang::X86::BI__builtin_ia32_phaddw128: |
| 13578 | case clang::X86::BI__builtin_ia32_phaddw256: |
| 13579 | case clang::X86::BI__builtin_ia32_phaddd128: |
| 13580 | case clang::X86::BI__builtin_ia32_phaddd256: { |
| 13581 | APSInt Res(LHSA + LHSB, DestUnsigned); |
| 13582 | ResultElements.push_back(Elt: APValue(Res)); |
| 13583 | break; |
| 13584 | } |
| 13585 | case clang::X86::BI__builtin_ia32_phaddsw128: |
| 13586 | case clang::X86::BI__builtin_ia32_phaddsw256: { |
| 13587 | APSInt Res(LHSA.sadd_sat(RHS: LHSB)); |
| 13588 | ResultElements.push_back(Elt: APValue(Res)); |
| 13589 | break; |
| 13590 | } |
| 13591 | case clang::X86::BI__builtin_ia32_phsubw128: |
| 13592 | case clang::X86::BI__builtin_ia32_phsubw256: |
| 13593 | case clang::X86::BI__builtin_ia32_phsubd128: |
| 13594 | case clang::X86::BI__builtin_ia32_phsubd256: { |
| 13595 | APSInt Res(LHSA - LHSB, DestUnsigned); |
| 13596 | ResultElements.push_back(Elt: APValue(Res)); |
| 13597 | break; |
| 13598 | } |
| 13599 | case clang::X86::BI__builtin_ia32_phsubsw128: |
| 13600 | case clang::X86::BI__builtin_ia32_phsubsw256: { |
| 13601 | APSInt Res(LHSA.ssub_sat(RHS: LHSB)); |
| 13602 | ResultElements.push_back(Elt: APValue(Res)); |
| 13603 | break; |
| 13604 | } |
| 13605 | } |
| 13606 | } |
| 13607 | for (unsigned I = 0; I != EltsPerLane; I += 2) { |
| 13608 | APSInt RHSA = SourceRHS.getVectorElt(I: LaneStart + I).getInt(); |
| 13609 | APSInt RHSB = SourceRHS.getVectorElt(I: LaneStart + I + 1).getInt(); |
| 13610 | switch (E->getBuiltinCallee()) { |
| 13611 | case clang::X86::BI__builtin_ia32_phaddw128: |
| 13612 | case clang::X86::BI__builtin_ia32_phaddw256: |
| 13613 | case clang::X86::BI__builtin_ia32_phaddd128: |
| 13614 | case clang::X86::BI__builtin_ia32_phaddd256: { |
| 13615 | APSInt Res(RHSA + RHSB, DestUnsigned); |
| 13616 | ResultElements.push_back(Elt: APValue(Res)); |
| 13617 | break; |
| 13618 | } |
| 13619 | case clang::X86::BI__builtin_ia32_phaddsw128: |
| 13620 | case clang::X86::BI__builtin_ia32_phaddsw256: { |
| 13621 | APSInt Res(RHSA.sadd_sat(RHS: RHSB)); |
| 13622 | ResultElements.push_back(Elt: APValue(Res)); |
| 13623 | break; |
| 13624 | } |
| 13625 | case clang::X86::BI__builtin_ia32_phsubw128: |
| 13626 | case clang::X86::BI__builtin_ia32_phsubw256: |
| 13627 | case clang::X86::BI__builtin_ia32_phsubd128: |
| 13628 | case clang::X86::BI__builtin_ia32_phsubd256: { |
| 13629 | APSInt Res(RHSA - RHSB, DestUnsigned); |
| 13630 | ResultElements.push_back(Elt: APValue(Res)); |
| 13631 | break; |
| 13632 | } |
| 13633 | case clang::X86::BI__builtin_ia32_phsubsw128: |
| 13634 | case clang::X86::BI__builtin_ia32_phsubsw256: { |
| 13635 | APSInt Res(RHSA.ssub_sat(RHS: RHSB)); |
| 13636 | ResultElements.push_back(Elt: APValue(Res)); |
| 13637 | break; |
| 13638 | } |
| 13639 | } |
| 13640 | } |
| 13641 | } |
| 13642 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13643 | } |
| 13644 | case clang::X86::BI__builtin_ia32_haddpd: |
| 13645 | case clang::X86::BI__builtin_ia32_haddps: |
| 13646 | case clang::X86::BI__builtin_ia32_haddps256: |
| 13647 | case clang::X86::BI__builtin_ia32_haddpd256: |
| 13648 | case clang::X86::BI__builtin_ia32_hsubpd: |
| 13649 | case clang::X86::BI__builtin_ia32_hsubps: |
| 13650 | case clang::X86::BI__builtin_ia32_hsubps256: |
| 13651 | case clang::X86::BI__builtin_ia32_hsubpd256: { |
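    // Horizontal FP add/sub: within each 128-bit lane, adjacent pairs of LHS
    // elements and then RHS elements are added or subtracted.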
| 13652 | APValue SourceLHS, SourceRHS; |
| 13653 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 13654 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 13655 | return false; |
| 13656 | unsigned NumElts = SourceLHS.getVectorLength(); |
| 13657 | SmallVector<APValue, 4> ResultElements; |
| 13658 | ResultElements.reserve(N: NumElts); |
| 13659 | llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E); |
| 13660 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13661 | unsigned EltBits = Info.Ctx.getTypeSize(T: DestEltTy); |
| 13662 | unsigned NumLanes = NumElts * EltBits / 128; |
| 13663 | unsigned NumElemsPerLane = NumElts / NumLanes; |
| 13664 | unsigned HalfElemsPerLane = NumElemsPerLane / 2; |
| 13665 | |
| 13666 | for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) { |
| 13667 | for (unsigned I = 0; I != HalfElemsPerLane; ++I) { |
| 13668 | APFloat LHSA = SourceLHS.getVectorElt(I: L + (2 * I) + 0).getFloat(); |
| 13669 | APFloat LHSB = SourceLHS.getVectorElt(I: L + (2 * I) + 1).getFloat(); |
| 13670 | switch (E->getBuiltinCallee()) { |
| 13671 | case clang::X86::BI__builtin_ia32_haddpd: |
| 13672 | case clang::X86::BI__builtin_ia32_haddps: |
| 13673 | case clang::X86::BI__builtin_ia32_haddps256: |
| 13674 | case clang::X86::BI__builtin_ia32_haddpd256: |
| 13675 | LHSA.add(RHS: LHSB, RM); |
| 13676 | break; |
| 13677 | case clang::X86::BI__builtin_ia32_hsubpd: |
| 13678 | case clang::X86::BI__builtin_ia32_hsubps: |
| 13679 | case clang::X86::BI__builtin_ia32_hsubps256: |
| 13680 | case clang::X86::BI__builtin_ia32_hsubpd256: |
| 13681 | LHSA.subtract(RHS: LHSB, RM); |
| 13682 | break; |
| 13683 | } |
| 13684 | ResultElements.push_back(Elt: APValue(LHSA)); |
| 13685 | } |
| 13686 | for (unsigned I = 0; I != HalfElemsPerLane; ++I) { |
| 13687 | APFloat RHSA = SourceRHS.getVectorElt(I: L + (2 * I) + 0).getFloat(); |
| 13688 | APFloat RHSB = SourceRHS.getVectorElt(I: L + (2 * I) + 1).getFloat(); |
| 13689 | switch (E->getBuiltinCallee()) { |
| 13690 | case clang::X86::BI__builtin_ia32_haddpd: |
| 13691 | case clang::X86::BI__builtin_ia32_haddps: |
| 13692 | case clang::X86::BI__builtin_ia32_haddps256: |
| 13693 | case clang::X86::BI__builtin_ia32_haddpd256: |
| 13694 | RHSA.add(RHS: RHSB, RM); |
| 13695 | break; |
| 13696 | case clang::X86::BI__builtin_ia32_hsubpd: |
| 13697 | case clang::X86::BI__builtin_ia32_hsubps: |
| 13698 | case clang::X86::BI__builtin_ia32_hsubps256: |
| 13699 | case clang::X86::BI__builtin_ia32_hsubpd256: |
| 13700 | RHSA.subtract(RHS: RHSB, RM); |
| 13701 | break; |
| 13702 | } |
| 13703 | ResultElements.push_back(Elt: APValue(RHSA)); |
| 13704 | } |
| 13705 | } |
| 13706 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13707 | } |
| 13708 | case clang::X86::BI__builtin_ia32_addsubpd: |
| 13709 | case clang::X86::BI__builtin_ia32_addsubps: |
| 13710 | case clang::X86::BI__builtin_ia32_addsubpd256: |
| 13711 | case clang::X86::BI__builtin_ia32_addsubps256: { |
| 13712 | // Addsub: alternates between subtraction and addition |
| 13713 | // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i]) |
| 13714 | APValue SourceLHS, SourceRHS; |
| 13715 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 13716 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 13717 | return false; |
| 13718 | unsigned NumElems = SourceLHS.getVectorLength(); |
| 13719 | SmallVector<APValue, 8> ResultElements; |
| 13720 | ResultElements.reserve(N: NumElems); |
| 13721 | llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E); |
| 13722 | |
| 13723 | for (unsigned I = 0; I != NumElems; ++I) { |
| 13724 | APFloat LHS = SourceLHS.getVectorElt(I).getFloat(); |
| 13725 | APFloat RHS = SourceRHS.getVectorElt(I).getFloat(); |
| 13726 | if (I % 2 == 0) { |
| 13727 | // Even indices: subtract |
| 13728 | LHS.subtract(RHS, RM); |
| 13729 | } else { |
| 13730 | // Odd indices: add |
| 13731 | LHS.add(RHS, RM); |
| 13732 | } |
| 13733 | ResultElements.push_back(Elt: APValue(LHS)); |
| 13734 | } |
| 13735 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13736 | } |
| 13737 | case clang::X86::BI__builtin_ia32_pclmulqdq128: |
| 13738 | case clang::X86::BI__builtin_ia32_pclmulqdq256: |
| 13739 | case clang::X86::BI__builtin_ia32_pclmulqdq512: { |
| 13740 | // PCLMULQDQ: carry-less multiplication of selected 64-bit halves |
| 13741 | // imm8 bit 0: selects lower (0) or upper (1) 64 bits of first operand |
| 13742 | // imm8 bit 4: selects lower (0) or upper (1) 64 bits of second operand |
| 13743 | APValue SourceLHS, SourceRHS; |
| 13744 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 13745 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 13746 | return false; |
| 13747 | |
| 13748 | APSInt Imm8; |
| 13749 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm8, Info)) |
| 13750 | return false; |
| 13751 | |
| 13752 | // Extract bits 0 and 4 from imm8 |
| 13753 | bool SelectUpperA = (Imm8 & 0x01) != 0; |
| 13754 | bool SelectUpperB = (Imm8 & 0x10) != 0; |
| 13755 | |
| 13756 | unsigned NumElems = SourceLHS.getVectorLength(); |
| 13757 | SmallVector<APValue, 8> ResultElements; |
| 13758 | ResultElements.reserve(N: NumElems); |
| 13759 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13760 | bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType(); |
| 13761 | |
| 13762 | // Process each 128-bit lane |
| 13763 | for (unsigned Lane = 0; Lane < NumElems; Lane += 2) { |
| 13764 | // Get the two 64-bit halves of the first operand |
| 13765 | APSInt A0 = SourceLHS.getVectorElt(I: Lane + 0).getInt(); |
| 13766 | APSInt A1 = SourceLHS.getVectorElt(I: Lane + 1).getInt(); |
| 13767 | // Get the two 64-bit halves of the second operand |
| 13768 | APSInt B0 = SourceRHS.getVectorElt(I: Lane + 0).getInt(); |
| 13769 | APSInt B1 = SourceRHS.getVectorElt(I: Lane + 1).getInt(); |
| 13770 | |
| 13771 | // Select the appropriate 64-bit values based on imm8 |
| 13772 | APInt A = SelectUpperA ? A1 : A0; |
| 13773 | APInt B = SelectUpperB ? B1 : B0; |
| 13774 | |
| 13775 | // Extend both operands to 128 bits for carry-less multiplication |
| 13776 | APInt A128 = A.zext(width: 128); |
| 13777 | APInt B128 = B.zext(width: 128); |
| 13778 | |
| 13779 | // Use APIntOps::clmul for carry-less multiplication |
| 13780 | APInt Result = llvm::APIntOps::clmul(LHS: A128, RHS: B128); |
| 13781 | |
| 13782 | // Split the 128-bit result into two 64-bit halves |
| 13783 | APSInt ResultLow(Result.extractBits(numBits: 64, bitPosition: 0), DestUnsigned); |
| 13784 | APSInt ResultHigh(Result.extractBits(numBits: 64, bitPosition: 64), DestUnsigned); |
| 13785 | |
| 13786 | ResultElements.push_back(Elt: APValue(ResultLow)); |
| 13787 | ResultElements.push_back(Elt: APValue(ResultHigh)); |
| 13788 | } |
| 13789 | |
| 13790 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13791 | } |
| 13792 | case Builtin::BI__builtin_elementwise_fshl: |
| 13793 | case Builtin::BI__builtin_elementwise_fshr: { |
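    // Elementwise funnel shift of the double-width value Hi:Lo by the
    // per-element shift amount (modulo the element width).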
| 13794 | APValue SourceHi, SourceLo, SourceShift; |
| 13795 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceHi) || |
| 13796 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceLo) || |
| 13797 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 2), Result&: SourceShift)) |
| 13798 | return false; |
| 13799 | |
| 13800 | QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 13801 | if (!DestEltTy->isIntegerType()) |
| 13802 | return false; |
| 13803 | |
| 13804 | unsigned SourceLen = SourceHi.getVectorLength(); |
| 13805 | SmallVector<APValue> ResultElements; |
| 13806 | ResultElements.reserve(N: SourceLen); |
| 13807 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 13808 | const APSInt &Hi = SourceHi.getVectorElt(I: EltNum).getInt(); |
| 13809 | const APSInt &Lo = SourceLo.getVectorElt(I: EltNum).getInt(); |
| 13810 | const APSInt &Shift = SourceShift.getVectorElt(I: EltNum).getInt(); |
| 13811 | switch (E->getBuiltinCallee()) { |
| 13812 | case Builtin::BI__builtin_elementwise_fshl: |
| 13813 | ResultElements.push_back(Elt: APValue( |
| 13814 | APSInt(llvm::APIntOps::fshl(Hi, Lo, Shift), Hi.isUnsigned()))); |
| 13815 | break; |
| 13816 | case Builtin::BI__builtin_elementwise_fshr: |
| 13817 | ResultElements.push_back(Elt: APValue( |
| 13818 | APSInt(llvm::APIntOps::fshr(Hi, Lo, Shift), Hi.isUnsigned()))); |
| 13819 | break; |
| 13820 | } |
| 13821 | } |
| 13822 | |
| 13823 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 13824 | } |
| 13825 | |
| 13826 | case X86::BI__builtin_ia32_shuf_f32x4_256: |
| 13827 | case X86::BI__builtin_ia32_shuf_i32x4_256: |
| 13828 | case X86::BI__builtin_ia32_shuf_f64x2_256: |
| 13829 | case X86::BI__builtin_ia32_shuf_i64x2_256: |
| 13830 | case X86::BI__builtin_ia32_shuf_f32x4: |
| 13831 | case X86::BI__builtin_ia32_shuf_i32x4: |
| 13832 | case X86::BI__builtin_ia32_shuf_f64x2: |
| 13833 | case X86::BI__builtin_ia32_shuf_i64x2: { |
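    // SHUF[F/I]32X4 / [F/I]64X2: build the result from whole 128-bit lanes;
    // the low result lanes come from A and the high lanes from B, with the
    // immediate selecting the source lane for each.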
| 13834 | APValue SourceA, SourceB; |
| 13835 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceA) || |
| 13836 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceB)) |
| 13837 | return false; |
| 13838 | |
| 13839 | APSInt Imm; |
| 13840 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info)) |
| 13841 | return false; |
| 13842 | |
| 13843 | // Destination and sources A, B all have the same type. |
| 13844 | unsigned NumElems = SourceA.getVectorLength(); |
| 13845 | const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>(); |
| 13846 | QualType ElemQT = VT->getElementType(); |
| 13847 | unsigned ElemBits = Info.Ctx.getTypeSize(T: ElemQT); |
| 13848 | unsigned LaneBits = 128u; |
| 13849 | unsigned NumLanes = (NumElems * ElemBits) / LaneBits; |
| 13850 | unsigned NumElemsPerLane = LaneBits / ElemBits; |
| 13851 | |
| 13856 | APValue R; |
| 13857 | if (!evalShuffleGeneric( |
| 13858 | Info, Call: E, Out&: R, |
| 13859 | GetSourceIndex: [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) |
| 13860 | -> std::pair<unsigned, int> { |
| 13861 | // DstIdx determines source. ShuffleMask selects lane in source. |
| 13862 | unsigned BitsPerElem = NumLanes / 2; |
| 13863 | unsigned IndexMask = (1u << BitsPerElem) - 1; |
| 13864 | unsigned Lane = DstIdx / NumElemsPerLane; |
| 13865 | unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1; |
| 13866 | unsigned BitIdx = BitsPerElem * Lane; |
| 13867 | unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask; |
| 13868 | unsigned ElemInLane = DstIdx % NumElemsPerLane; |
| 13869 | unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane; |
| 13870 | return {SrcIdx, IdxToPick}; |
| 13871 | })) |
| 13872 | return false; |
| 13873 | return Success(V: R, E); |
| 13874 | } |
| 13875 | |
| 13876 | case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi: |
| 13877 | case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi: |
| 13878 | case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi: |
| 13879 | case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi: |
| 13880 | case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi: |
| 13881 | case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi: { |
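    // GF2P8AFFINEQB: per byte, apply the GF(2) affine transform A * x + Imm,
    // where A is the 8x8 bit matrix held in the matching 64-bit element; the
    // inverse forms first invert x in GF(2^8).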
| 13882 | |
| 13883 | APValue X, A; |
| 13884 | APSInt Imm; |
| 13885 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: X) || |
| 13886 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: A) || |
| 13887 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info)) |
| 13888 | return false; |
| 13889 | |
| 13890 | assert(X.isVector() && A.isVector()); |
| 13891 | assert(X.getVectorLength() == A.getVectorLength()); |
| 13892 | |
| 13893 | bool IsInverse = false; |
| 13894 | switch (E->getBuiltinCallee()) { |
| 13895 | case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi: |
| 13896 | case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi: |
    case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
      IsInverse = true;
      break;
    }
| 13901 | |
| 13902 | unsigned NumBitsInByte = 8; |
| 13903 | unsigned NumBytesInQWord = 8; |
| 13904 | unsigned NumBitsInQWord = 64; |
| 13905 | unsigned NumBytes = A.getVectorLength(); |
| 13906 | unsigned NumQWords = NumBytes / NumBytesInQWord; |
| 13907 | SmallVector<APValue, 64> Result; |
| 13908 | Result.reserve(N: NumBytes); |
| 13909 | |
    // Compute the affine transform A * X + Imm for each byte of the qword.
| 13911 | for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) { |
| 13912 | // Extract the QWords from X, A |
| 13913 | APInt XQWord(NumBitsInQWord, 0); |
| 13914 | APInt AQWord(NumBitsInQWord, 0); |
| 13915 | for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { |
| 13916 | unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx; |
| 13917 | APInt XByte = X.getVectorElt(I: Idx).getInt(); |
| 13918 | APInt AByte = A.getVectorElt(I: Idx).getInt(); |
| 13919 | XQWord.insertBits(SubBits: XByte, bitPosition: ByteIdx * NumBitsInByte); |
| 13920 | AQWord.insertBits(SubBits: AByte, bitPosition: ByteIdx * NumBitsInByte); |
| 13921 | } |
| 13922 | |
| 13923 | for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { |
| 13924 | uint8_t XByte = |
| 13925 | XQWord.lshr(shiftAmt: ByteIdx * NumBitsInByte).getLoBits(numBits: 8).getZExtValue(); |
| 13926 | Result.push_back(Elt: APValue(APSInt( |
| 13927 | APInt(8, GFNIAffine(XByte, AQword: AQWord, Imm, Inverse: IsInverse)), false))); |
| 13928 | } |
| 13929 | } |
| 13930 | |
| 13931 | return Success(V: APValue(Result.data(), Result.size()), E); |
| 13932 | } |
| 13933 | |
| 13934 | case X86::BI__builtin_ia32_vgf2p8mulb_v16qi: |
| 13935 | case X86::BI__builtin_ia32_vgf2p8mulb_v32qi: |
| 13936 | case X86::BI__builtin_ia32_vgf2p8mulb_v64qi: { |
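    // GF2P8MULB: multiply corresponding bytes in GF(2^8).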
| 13937 | APValue A, B; |
| 13938 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: A) || |
| 13939 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: B)) |
| 13940 | return false; |
| 13941 | |
| 13942 | assert(A.isVector() && B.isVector()); |
| 13943 | assert(A.getVectorLength() == B.getVectorLength()); |
| 13944 | |
| 13945 | unsigned NumBytes = A.getVectorLength(); |
| 13946 | SmallVector<APValue, 64> Result; |
| 13947 | Result.reserve(N: NumBytes); |
| 13948 | |
| 13949 | for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) { |
| 13950 | uint8_t AByte = A.getVectorElt(I: ByteIdx).getInt().getZExtValue(); |
| 13951 | uint8_t BByte = B.getVectorElt(I: ByteIdx).getInt().getZExtValue(); |
| 13952 | Result.push_back(Elt: APValue( |
| 13953 | APSInt(APInt(8, GFNIMul(AByte, BByte)), /*IsUnsigned=*/false))); |
| 13954 | } |
| 13955 | |
| 13956 | return Success(V: APValue(Result.data(), Result.size()), E); |
| 13957 | } |
| 13958 | |
| 13959 | case X86::BI__builtin_ia32_insertf32x4_256: |
| 13960 | case X86::BI__builtin_ia32_inserti32x4_256: |
| 13961 | case X86::BI__builtin_ia32_insertf64x2_256: |
| 13962 | case X86::BI__builtin_ia32_inserti64x2_256: |
| 13963 | case X86::BI__builtin_ia32_insertf32x4: |
| 13964 | case X86::BI__builtin_ia32_inserti32x4: |
| 13965 | case X86::BI__builtin_ia32_insertf64x2_512: |
| 13966 | case X86::BI__builtin_ia32_inserti64x2_512: |
| 13967 | case X86::BI__builtin_ia32_insertf32x8: |
| 13968 | case X86::BI__builtin_ia32_inserti32x8: |
| 13969 | case X86::BI__builtin_ia32_insertf64x4: |
| 13970 | case X86::BI__builtin_ia32_inserti64x4: |
| 13971 | case X86::BI__builtin_ia32_vinsertf128_ps256: |
| 13972 | case X86::BI__builtin_ia32_vinsertf128_pd256: |
| 13973 | case X86::BI__builtin_ia32_vinsertf128_si256: |
| 13974 | case X86::BI__builtin_ia32_insert128i256: { |
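    // Insert the sub-vector operand into the lane of the destination selected
    // by the immediate; all other elements are copied from the destination.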
| 13975 | APValue SourceDst, SourceSub; |
| 13976 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceDst) || |
| 13977 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceSub)) |
| 13978 | return false; |
| 13979 | |
| 13980 | APSInt Imm; |
| 13981 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: Imm, Info)) |
| 13982 | return false; |
| 13983 | |
| 13984 | assert(SourceDst.isVector() && SourceSub.isVector()); |
| 13985 | unsigned DstLen = SourceDst.getVectorLength(); |
| 13986 | unsigned SubLen = SourceSub.getVectorLength(); |
| 13987 | assert(SubLen != 0 && DstLen != 0 && (DstLen % SubLen) == 0); |
| 13988 | unsigned NumLanes = DstLen / SubLen; |
| 13989 | unsigned LaneIdx = (Imm.getZExtValue() % NumLanes) * SubLen; |
| 13990 | |
| 13991 | SmallVector<APValue, 16> ResultElements; |
| 13992 | ResultElements.reserve(N: DstLen); |
| 13993 | |
| 13994 | for (unsigned EltNum = 0; EltNum < DstLen; ++EltNum) { |
| 13995 | if (EltNum >= LaneIdx && EltNum < LaneIdx + SubLen) |
| 13996 | ResultElements.push_back(Elt: SourceSub.getVectorElt(I: EltNum - LaneIdx)); |
| 13997 | else |
| 13998 | ResultElements.push_back(Elt: SourceDst.getVectorElt(I: EltNum)); |
| 13999 | } |
| 14000 | |
| 14001 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 14002 | } |
| 14003 | |
| 14004 | case clang::X86::BI__builtin_ia32_vec_set_v4hi: |
| 14005 | case clang::X86::BI__builtin_ia32_vec_set_v16qi: |
| 14006 | case clang::X86::BI__builtin_ia32_vec_set_v8hi: |
| 14007 | case clang::X86::BI__builtin_ia32_vec_set_v4si: |
| 14008 | case clang::X86::BI__builtin_ia32_vec_set_v2di: |
| 14009 | case clang::X86::BI__builtin_ia32_vec_set_v32qi: |
| 14010 | case clang::X86::BI__builtin_ia32_vec_set_v16hi: |
| 14011 | case clang::X86::BI__builtin_ia32_vec_set_v8si: |
| 14012 | case clang::X86::BI__builtin_ia32_vec_set_v4di: { |
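    // vec_set: replace the element at the (wrapped) index with the scalar,
    // truncated or extended to the element width.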
| 14013 | APValue VecVal; |
| 14014 | APSInt Scalar, IndexAPS; |
| 14015 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: VecVal, Info) || |
| 14016 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Scalar, Info) || |
| 14017 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: IndexAPS, Info)) |
| 14018 | return false; |
| 14019 | |
| 14020 | QualType ElemTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 14021 | unsigned ElemWidth = Info.Ctx.getIntWidth(T: ElemTy); |
| 14022 | bool ElemUnsigned = ElemTy->isUnsignedIntegerOrEnumerationType(); |
| 14023 | Scalar.setIsUnsigned(ElemUnsigned); |
| 14024 | APSInt ElemAPS = Scalar.extOrTrunc(width: ElemWidth); |
| 14025 | APValue ElemAV(ElemAPS); |
| 14026 | |
| 14027 | unsigned NumElems = VecVal.getVectorLength(); |
| 14028 | unsigned Index = |
| 14029 | static_cast<unsigned>(IndexAPS.getZExtValue() & (NumElems - 1)); |
| 14030 | |
| 14031 | SmallVector<APValue, 4> Elems; |
| 14032 | Elems.reserve(N: NumElems); |
| 14033 | for (unsigned ElemNum = 0; ElemNum != NumElems; ++ElemNum) |
| 14034 | Elems.push_back(Elt: ElemNum == Index ? ElemAV : VecVal.getVectorElt(I: ElemNum)); |
| 14035 | |
| 14036 | return Success(V: APValue(Elems.data(), NumElems), E); |
| 14037 | } |
| 14038 | |
| 14039 | case X86::BI__builtin_ia32_pslldqi128_byteshift: |
| 14040 | case X86::BI__builtin_ia32_pslldqi256_byteshift: |
| 14041 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { |
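// Shift each 128-bit lane left by N bytes, filling with zeros; e.g.
// _mm_slli_si128(v, 4) yields a vector whose byte i is 0 for i < 4 and
// v's byte (i - 4) otherwise.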
| 14042 | APValue R; |
| 14043 | if (!evalShuffleGeneric( |
| 14044 | Info, Call: E, Out&: R, |
| 14045 | GetSourceIndex: [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> { |
| 14046 | unsigned LaneBase = (DstIdx / 16) * 16; |
| 14047 | unsigned LaneIdx = DstIdx % 16; |
| 14048 | if (LaneIdx < Shift) |
| 14049 | return std::make_pair(x: 0, y: -1); |
| 14050 | |
| 14051 | return std::make_pair( |
| 14052 | x: 0, y: static_cast<int>(LaneBase + LaneIdx - Shift)); |
| 14053 | })) |
| 14054 | return false; |
| 14055 | return Success(V: R, E); |
| 14056 | } |
| 14057 | |
| 14058 | case X86::BI__builtin_ia32_psrldqi128_byteshift: |
| 14059 | case X86::BI__builtin_ia32_psrldqi256_byteshift: |
| 14060 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { |
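// The mirror image of the left byte shift above: each 128-bit lane is
// shifted right by N bytes, so e.g. _mm_srli_si128(v, 4) sets byte i to
// v's byte (i + 4) when i + 4 < 16 and to 0 otherwise.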
| 14061 | APValue R; |
| 14062 | if (!evalShuffleGeneric( |
| 14063 | Info, Call: E, Out&: R, |
| 14064 | GetSourceIndex: [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> { |
| 14065 | unsigned LaneBase = (DstIdx / 16) * 16; |
| 14066 | unsigned LaneIdx = DstIdx % 16; |
| 14067 | if (LaneIdx + Shift < 16) |
| 14068 | return std::make_pair( |
| 14069 | x: 0, y: static_cast<int>(LaneBase + LaneIdx + Shift)); |
| 14070 | |
| 14071 | return std::make_pair(x: 0, y: -1); |
| 14072 | })) |
| 14073 | return false; |
| 14074 | return Success(V: R, E); |
| 14075 | } |
| 14076 | |
| 14077 | case X86::BI__builtin_ia32_palignr128: |
| 14078 | case X86::BI__builtin_ia32_palignr256: |
| 14079 | case X86::BI__builtin_ia32_palignr512: { |
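// PALIGNR concatenates the two sources (first operand high) within each
// 128-bit lane and extracts a byte-aligned window; e.g. in
// _mm_alignr_epi8(a, b, n) result byte i is concat(a, b)[i + n] for
// i + n < 32 and 0 otherwise.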
| 14080 | APValue R; |
| 14081 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, GetSourceIndex: [](unsigned DstIdx, unsigned Shift) { |
| 14082 | // Default to -1 → zero-fill this destination element |
| 14083 | unsigned VecIdx = 1; |
| 14084 | int ElemIdx = -1; |
| 14085 | |
| 14086 | int Lane = DstIdx / 16; |
| 14087 | int Offset = DstIdx % 16; |
| 14088 | |
| 14089 | // Elements come from VecB first, then VecA after the shift boundary |
| 14090 | unsigned ShiftedIdx = Offset + (Shift & 0xFF); |
| 14091 | if (ShiftedIdx < 16) { // from VecB |
| 14092 | ElemIdx = ShiftedIdx + (Lane * 16); |
| 14093 | } else if (ShiftedIdx < 32) { // from VecA |
| 14094 | VecIdx = 0; |
| 14095 | ElemIdx = (ShiftedIdx - 16) + (Lane * 16); |
| 14096 | } |
| 14097 | |
| 14098 | return std::pair<unsigned, int>{VecIdx, ElemIdx}; |
| 14099 | })) |
| 14100 | return false; |
| 14101 | return Success(V: R, E); |
| 14102 | } |
| 14103 | case X86::BI__builtin_ia32_alignd128: |
| 14104 | case X86::BI__builtin_ia32_alignd256: |
| 14105 | case X86::BI__builtin_ia32_alignd512: |
| 14106 | case X86::BI__builtin_ia32_alignq128: |
| 14107 | case X86::BI__builtin_ia32_alignq256: |
| 14108 | case X86::BI__builtin_ia32_alignq512: { |
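// VALIGND/VALIGNQ work like PALIGNR but at element granularity and across
// the whole vector: destination element i comes from the concatenation
// (first operand high, second operand low) at position i + (imm % NumElems).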
| 14109 | APValue R; |
| 14110 | unsigned NumElems = E->getType()->castAs<VectorType>()->getNumElements(); |
| 14111 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14112 | GetSourceIndex: [NumElems](unsigned DstIdx, unsigned Shift) { |
| 14113 | unsigned Imm = Shift & 0xFF; |
| 14114 | unsigned EffectiveShift = Imm & (NumElems - 1); |
| 14115 | unsigned SourcePos = DstIdx + EffectiveShift; |
| 14116 | unsigned VecIdx = SourcePos < NumElems ? 1 : 0; |
| 14117 | unsigned ElemIdx = SourcePos & (NumElems - 1); |
| 14118 | |
| 14119 | return std::pair<unsigned, int>{ |
| 14120 | VecIdx, static_cast<int>(ElemIdx)}; |
| 14121 | })) |
| 14122 | return false; |
| 14123 | return Success(V: R, E); |
| 14124 | } |
| 14125 | case X86::BI__builtin_ia32_permvarsi256: |
| 14126 | case X86::BI__builtin_ia32_permvarsf256: |
| 14127 | case X86::BI__builtin_ia32_permvardf512: |
| 14128 | case X86::BI__builtin_ia32_permvardi512: |
| 14129 | case X86::BI__builtin_ia32_permvarhi128: { |
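// Variable permute: destination element i is the source element addressed
// by the low bits of index element i (the index is taken modulo the element
// count, hence the masks below); e.g. _mm256_permutevar8x32_epi32 uses the
// low 3 bits of each 32-bit index.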
| 14130 | APValue R; |
| 14131 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14132 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14133 | int Offset = ShuffleMask & 0x7; |
| 14134 | return std::pair<unsigned, int>{0, Offset}; |
| 14135 | })) |
| 14136 | return false; |
| 14137 | return Success(V: R, E); |
| 14138 | } |
| 14139 | case X86::BI__builtin_ia32_permvarqi128: |
| 14140 | case X86::BI__builtin_ia32_permvarhi256: |
| 14141 | case X86::BI__builtin_ia32_permvarsi512: |
| 14142 | case X86::BI__builtin_ia32_permvarsf512: { |
| 14143 | APValue R; |
| 14144 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14145 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14146 | int Offset = ShuffleMask & 0xF; |
| 14147 | return std::pair<unsigned, int>{0, Offset}; |
| 14148 | })) |
| 14149 | return false; |
| 14150 | return Success(V: R, E); |
| 14151 | } |
| 14152 | case X86::BI__builtin_ia32_permvardi256: |
| 14153 | case X86::BI__builtin_ia32_permvardf256: { |
| 14154 | APValue R; |
| 14155 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14156 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14157 | int Offset = ShuffleMask & 0x3; |
| 14158 | return std::pair<unsigned, int>{0, Offset}; |
| 14159 | })) |
| 14160 | return false; |
| 14161 | return Success(V: R, E); |
| 14162 | } |
| 14163 | case X86::BI__builtin_ia32_permvarqi256: |
| 14164 | case X86::BI__builtin_ia32_permvarhi512: { |
| 14165 | APValue R; |
| 14166 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14167 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14168 | int Offset = ShuffleMask & 0x1F; |
| 14169 | return std::pair<unsigned, int>{0, Offset}; |
| 14170 | })) |
| 14171 | return false; |
| 14172 | return Success(V: R, E); |
| 14173 | } |
| 14174 | case X86::BI__builtin_ia32_permvarqi512: { |
| 14175 | APValue R; |
| 14176 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14177 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14178 | int Offset = ShuffleMask & 0x3F; |
| 14179 | return std::pair<unsigned, int>{0, Offset}; |
| 14180 | })) |
| 14181 | return false; |
| 14182 | return Success(V: R, E); |
| 14183 | } |
| 14184 | case X86::BI__builtin_ia32_vpermi2varq128: |
| 14185 | case X86::BI__builtin_ia32_vpermi2varpd128: { |
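// Two-source variable permute: for each destination element, the low
// log2(NumElems) bits of the index select an element and the next bit
// selects which of the two data operands supplies it, hence the widening
// masks and shift amounts in the cases below.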
| 14186 | APValue R; |
| 14187 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14188 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14189 | int Offset = ShuffleMask & 0x1; |
| 14190 | unsigned SrcIdx = (ShuffleMask >> 1) & 0x1; |
| 14191 | return std::pair<unsigned, int>{SrcIdx, Offset}; |
| 14192 | })) |
| 14193 | return false; |
| 14194 | return Success(V: R, E); |
| 14195 | } |
| 14196 | case X86::BI__builtin_ia32_vpermi2vard128: |
| 14197 | case X86::BI__builtin_ia32_vpermi2varps128: |
| 14198 | case X86::BI__builtin_ia32_vpermi2varq256: |
| 14199 | case X86::BI__builtin_ia32_vpermi2varpd256: { |
| 14200 | APValue R; |
| 14201 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14202 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14203 | int Offset = ShuffleMask & 0x3; |
| 14204 | unsigned SrcIdx = (ShuffleMask >> 2) & 0x1; |
| 14205 | return std::pair<unsigned, int>{SrcIdx, Offset}; |
| 14206 | })) |
| 14207 | return false; |
| 14208 | return Success(V: R, E); |
| 14209 | } |
| 14210 | case X86::BI__builtin_ia32_vpermi2varhi128: |
| 14211 | case X86::BI__builtin_ia32_vpermi2vard256: |
| 14212 | case X86::BI__builtin_ia32_vpermi2varps256: |
| 14213 | case X86::BI__builtin_ia32_vpermi2varq512: |
| 14214 | case X86::BI__builtin_ia32_vpermi2varpd512: { |
| 14215 | APValue R; |
| 14216 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14217 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14218 | int Offset = ShuffleMask & 0x7; |
| 14219 | unsigned SrcIdx = (ShuffleMask >> 3) & 0x1; |
| 14220 | return std::pair<unsigned, int>{SrcIdx, Offset}; |
| 14221 | })) |
| 14222 | return false; |
| 14223 | return Success(V: R, E); |
| 14224 | } |
| 14225 | case X86::BI__builtin_ia32_vpermi2varqi128: |
| 14226 | case X86::BI__builtin_ia32_vpermi2varhi256: |
| 14227 | case X86::BI__builtin_ia32_vpermi2vard512: |
| 14228 | case X86::BI__builtin_ia32_vpermi2varps512: { |
| 14229 | APValue R; |
| 14230 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14231 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14232 | int Offset = ShuffleMask & 0xF; |
| 14233 | unsigned SrcIdx = (ShuffleMask >> 4) & 0x1; |
| 14234 | return std::pair<unsigned, int>{SrcIdx, Offset}; |
| 14235 | })) |
| 14236 | return false; |
| 14237 | return Success(V: R, E); |
| 14238 | } |
| 14239 | case X86::BI__builtin_ia32_vpermi2varqi256: |
| 14240 | case X86::BI__builtin_ia32_vpermi2varhi512: { |
| 14241 | APValue R; |
| 14242 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14243 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14244 | int Offset = ShuffleMask & 0x1F; |
| 14245 | unsigned SrcIdx = (ShuffleMask >> 5) & 0x1; |
| 14246 | return std::pair<unsigned, int>{SrcIdx, Offset}; |
| 14247 | })) |
| 14248 | return false; |
| 14249 | return Success(V: R, E); |
| 14250 | } |
| 14251 | case X86::BI__builtin_ia32_vpermi2varqi512: { |
| 14252 | APValue R; |
| 14253 | if (!evalShuffleGeneric(Info, Call: E, Out&: R, |
| 14254 | GetSourceIndex: [](unsigned DstIdx, unsigned ShuffleMask) { |
| 14255 | int Offset = ShuffleMask & 0x3F; |
| 14256 | unsigned SrcIdx = (ShuffleMask >> 6) & 0x1; |
| 14257 | return std::pair<unsigned, int>{SrcIdx, Offset}; |
| 14258 | })) |
| 14259 | return false; |
| 14260 | return Success(V: R, E); |
| 14261 | } |
| 14262 | |
| 14263 | case clang::X86::BI__builtin_ia32_minps: |
| 14264 | case clang::X86::BI__builtin_ia32_minpd: |
| 14265 | case clang::X86::BI__builtin_ia32_minps256: |
| 14266 | case clang::X86::BI__builtin_ia32_minpd256: |
| 14267 | case clang::X86::BI__builtin_ia32_minps512: |
| 14268 | case clang::X86::BI__builtin_ia32_minpd512: |
| 14269 | case clang::X86::BI__builtin_ia32_minph128: |
| 14270 | case clang::X86::BI__builtin_ia32_minph256: |
| 14271 | case clang::X86::BI__builtin_ia32_minph512: |
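// Fold only finite, normal inputs: NaNs, infinities and denormals are left
// for runtime, where MINPS/MAXPS semantics (returning the second operand
// for NaNs, MXCSR DAZ/FTZ effects for denormals) can differ from IEEE
// minimum/maximum. When both inputs are zero, the second operand is
// returned, matching the hardware. The max builtins below mirror this.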
| 14272 | return EvaluateFpBinOpExpr( |
| 14273 | [](const APFloat &A, const APFloat &B, |
| 14274 | std::optional<APSInt>) -> std::optional<APFloat> { |
| 14275 | if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() || |
| 14276 | B.isInfinity() || B.isDenormal()) |
| 14277 | return std::nullopt; |
| 14278 | if (A.isZero() && B.isZero()) |
| 14279 | return B; |
| 14280 | return llvm::minimum(A, B); |
| 14281 | }); |
| 14282 | |
| 14283 | case clang::X86::BI__builtin_ia32_maxps: |
| 14284 | case clang::X86::BI__builtin_ia32_maxpd: |
| 14285 | case clang::X86::BI__builtin_ia32_maxps256: |
| 14286 | case clang::X86::BI__builtin_ia32_maxpd256: |
| 14287 | case clang::X86::BI__builtin_ia32_maxps512: |
| 14288 | case clang::X86::BI__builtin_ia32_maxpd512: |
| 14289 | case clang::X86::BI__builtin_ia32_maxph128: |
| 14290 | case clang::X86::BI__builtin_ia32_maxph256: |
| 14291 | case clang::X86::BI__builtin_ia32_maxph512: |
| 14292 | return EvaluateFpBinOpExpr( |
| 14293 | [](const APFloat &A, const APFloat &B, |
| 14294 | std::optional<APSInt>) -> std::optional<APFloat> { |
| 14295 | if (A.isNaN() || A.isInfinity() || A.isDenormal() || B.isNaN() || |
| 14296 | B.isInfinity() || B.isDenormal()) |
| 14297 | return std::nullopt; |
| 14298 | if (A.isZero() && B.isZero()) |
| 14299 | return B; |
| 14300 | return llvm::maximum(A, B); |
| 14301 | }); |
| 14302 | |
| 14303 | case clang::X86::BI__builtin_ia32_vcvtps2ph: |
| 14304 | case clang::X86::BI__builtin_ia32_vcvtps2ph256: { |
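// The imm8 encodes the rounding mode: bits [1:0] select it directly
// (00 nearest-even, 01 down, 10 up, 11 toward zero) and bit 2 requests the
// MXCSR rounding mode instead; e.g. _mm_cvtps_ph(v, 0) converts four floats
// to halves with round-to-nearest-even and zero-fills the upper elements.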
| 14305 | APValue SrcVec; |
| 14306 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SrcVec)) |
| 14307 | return false; |
| 14308 | |
| 14309 | APSInt Imm; |
| 14310 | if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: Imm, Info)) |
| 14311 | return false; |
| 14312 | |
| 14313 | const auto *SrcVTy = E->getArg(Arg: 0)->getType()->castAs<VectorType>(); |
| 14314 | unsigned SrcNumElems = SrcVTy->getNumElements(); |
| 14315 | const auto *DstVTy = E->getType()->castAs<VectorType>(); |
| 14316 | unsigned DstNumElems = DstVTy->getNumElements(); |
| 14317 | QualType DstElemTy = DstVTy->getElementType(); |
| 14318 | |
| 14319 | const llvm::fltSemantics &HalfSem = |
| 14320 | Info.Ctx.getFloatTypeSemantics(T: Info.Ctx.HalfTy); |
| 14321 | |
| 14322 | int ImmVal = Imm.getZExtValue(); |
| 14323 | bool UseMXCSR = (ImmVal & 4) != 0; |
| 14324 | bool IsFPConstrained = |
| 14325 | E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()).isFPConstrained(); |
| 14326 | |
| 14327 | llvm::RoundingMode RM; |
| 14328 | if (!UseMXCSR) { |
| 14329 | switch (ImmVal & 3) { |
| 14330 | case 0: |
| 14331 | RM = llvm::RoundingMode::NearestTiesToEven; |
| 14332 | break; |
| 14333 | case 1: |
| 14334 | RM = llvm::RoundingMode::TowardNegative; |
| 14335 | break; |
| 14336 | case 2: |
| 14337 | RM = llvm::RoundingMode::TowardPositive; |
| 14338 | break; |
| 14339 | case 3: |
| 14340 | RM = llvm::RoundingMode::TowardZero; |
| 14341 | break; |
| 14342 | default: |
| 14343 | llvm_unreachable("Invalid immediate rounding mode" ); |
| 14344 | } |
| 14345 | } else { |
| 14346 | RM = llvm::RoundingMode::NearestTiesToEven; |
| 14347 | } |
| 14348 | |
| 14349 | SmallVector<APValue, 8> ResultElements; |
| 14350 | ResultElements.reserve(N: DstNumElems); |
| 14351 | |
| 14352 | for (unsigned I = 0; I < SrcNumElems; ++I) { |
| 14353 | APFloat SrcVal = SrcVec.getVectorElt(I).getFloat(); |
| 14354 | |
| 14355 | bool LostInfo; |
| 14356 | APFloat::opStatus St = SrcVal.convert(ToSemantics: HalfSem, RM, losesInfo: &LostInfo); |
| 14357 | |
| 14358 | if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) { |
| 14359 | Info.FFDiag(E, DiagId: diag::note_constexpr_dynamic_rounding); |
| 14360 | return false; |
| 14361 | } |
| 14362 | |
| 14363 | APSInt DstInt(SrcVal.bitcastToAPInt(), |
| 14364 | DstElemTy->isUnsignedIntegerOrEnumerationType()); |
| 14365 | ResultElements.push_back(Elt: APValue(DstInt)); |
| 14366 | } |
| 14367 | |
| 14368 | if (DstNumElems > SrcNumElems) { |
| 14369 | APSInt Zero = Info.Ctx.MakeIntValue(Value: 0, Type: DstElemTy); |
| 14370 | for (unsigned I = SrcNumElems; I < DstNumElems; ++I) { |
| 14371 | ResultElements.push_back(Elt: APValue(Zero)); |
| 14372 | } |
| 14373 | } |
| 14374 | |
| 14375 | return Success(V: ResultElements, E); |
| 14376 | } |
| 14377 | case X86::BI__builtin_ia32_vperm2f128_pd256: |
| 14378 | case X86::BI__builtin_ia32_vperm2f128_ps256: |
| 14379 | case X86::BI__builtin_ia32_vperm2f128_si256: |
| 14380 | case X86::BI__builtin_ia32_permti256: { |
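// Each 128-bit half of the result is described by a 4-bit field of the
// immediate: bit 3 zeroes the half, bit 1 picks the source vector and bit 0
// picks which half of that source; e.g. _mm256_permute2f128_ps(a, b, 0x21)
// yields { high half of a, low half of b }.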
| 14381 | unsigned NumElements = |
| 14382 | E->getArg(Arg: 0)->getType()->getAs<VectorType>()->getNumElements(); |
| 14383 | unsigned PreservedBitsCnt = NumElements >> 2; |
| 14384 | APValue R; |
| 14385 | if (!evalShuffleGeneric( |
| 14386 | Info, Call: E, Out&: R, |
| 14387 | GetSourceIndex: [PreservedBitsCnt](unsigned DstIdx, unsigned ShuffleMask) { |
unsigned ControlBitsCnt = (DstIdx >> PreservedBitsCnt) << 2;
| 14389 | unsigned ControlBits = ShuffleMask >> ControlBitsCnt; |
| 14390 | |
| 14391 | if (ControlBits & 0b1000) |
| 14392 | return std::make_pair(x: 0u, y: -1); |
| 14393 | |
| 14394 | unsigned SrcVecIdx = (ControlBits & 0b10) >> 1; |
| 14395 | unsigned PreservedBitsMask = (1 << PreservedBitsCnt) - 1; |
| 14396 | int SrcIdx = ((ControlBits & 0b1) << PreservedBitsCnt) | |
| 14397 | (DstIdx & PreservedBitsMask); |
| 14398 | return std::make_pair(x&: SrcVecIdx, y&: SrcIdx); |
| 14399 | })) |
| 14400 | return false; |
| 14401 | return Success(V: R, E); |
| 14402 | } |
| 14403 | } |
| 14404 | } |
| 14405 | |
| 14406 | bool VectorExprEvaluator::VisitConvertVectorExpr(const ConvertVectorExpr *E) { |
| 14407 | APValue Source; |
| 14408 | QualType SourceVecType = E->getSrcExpr()->getType(); |
| 14409 | if (!EvaluateAsRValue(Info, E: E->getSrcExpr(), Result&: Source)) |
| 14410 | return false; |
| 14411 | |
| 14412 | QualType DestTy = E->getType()->castAs<VectorType>()->getElementType(); |
| 14413 | QualType SourceTy = SourceVecType->castAs<VectorType>()->getElementType(); |
| 14414 | |
| 14415 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 14416 | |
| 14417 | auto SourceLen = Source.getVectorLength(); |
| 14418 | SmallVector<APValue, 4> ResultElements; |
| 14419 | ResultElements.reserve(N: SourceLen); |
| 14420 | for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { |
| 14421 | APValue Elt; |
| 14422 | if (!handleVectorElementCast(Info, FPO, E, SourceTy, DestTy, |
| 14423 | Original: Source.getVectorElt(I: EltNum), Result&: Elt)) |
| 14424 | return false; |
| 14425 | ResultElements.push_back(Elt: std::move(Elt)); |
| 14426 | } |
| 14427 | |
| 14428 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 14429 | } |
| 14430 | |
| 14431 | static bool handleVectorShuffle(EvalInfo &Info, const ShuffleVectorExpr *E, |
| 14432 | QualType ElemType, APValue const &VecVal1, |
| 14433 | APValue const &VecVal2, unsigned EltNum, |
| 14434 | APValue &Result) { |
| 14435 | unsigned const TotalElementsInInputVector1 = VecVal1.getVectorLength(); |
| 14436 | unsigned const TotalElementsInInputVector2 = VecVal2.getVectorLength(); |
| 14437 | |
| 14438 | APSInt IndexVal = E->getShuffleMaskIdx(N: EltNum); |
| 14439 | int64_t index = IndexVal.getExtValue(); |
| 14440 | // The spec says that -1 should be treated as undef for optimizations, |
| 14441 | // but in constexpr we'd have to produce an APValue::Indeterminate, |
| 14442 | // which is prohibited from being a top-level constant value. Emit a |
| 14443 | // diagnostic instead. |
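// For example, constexpr evaluation of __builtin_shufflevector(v, v, 0, -1)
// is rejected with this diagnostic rather than producing an unspecified
// element.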
| 14444 | if (index == -1) { |
| 14445 | Info.FFDiag( |
| 14446 | E, DiagId: diag::err_shufflevector_minus_one_is_undefined_behavior_constexpr) |
| 14447 | << EltNum; |
| 14448 | return false; |
| 14449 | } |
| 14450 | |
| 14451 | if (index < 0 || |
| 14452 | index >= TotalElementsInInputVector1 + TotalElementsInInputVector2) |
| 14453 | llvm_unreachable("Out of bounds shuffle index" ); |
| 14454 | |
| 14455 | if (index >= TotalElementsInInputVector1) |
| 14456 | Result = VecVal2.getVectorElt(I: index - TotalElementsInInputVector1); |
| 14457 | else |
| 14458 | Result = VecVal1.getVectorElt(I: index); |
| 14459 | return true; |
| 14460 | } |
| 14461 | |
| 14462 | bool VectorExprEvaluator::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) { |
// FIXME: The unary form (a single input vector plus a mask vector, i.e.
// exactly two sub-expressions) is not currently supported.
| 14464 | if (E->getNumSubExprs() == 2) |
| 14465 | return Error(E); |
| 14466 | APValue VecVal1; |
| 14467 | const Expr *Vec1 = E->getExpr(Index: 0); |
| 14468 | if (!EvaluateAsRValue(Info, E: Vec1, Result&: VecVal1)) |
| 14469 | return false; |
| 14470 | APValue VecVal2; |
| 14471 | const Expr *Vec2 = E->getExpr(Index: 1); |
| 14472 | if (!EvaluateAsRValue(Info, E: Vec2, Result&: VecVal2)) |
| 14473 | return false; |
| 14474 | |
| 14475 | VectorType const *DestVecTy = E->getType()->castAs<VectorType>(); |
| 14476 | QualType DestElTy = DestVecTy->getElementType(); |
| 14477 | |
| 14478 | auto TotalElementsInOutputVector = DestVecTy->getNumElements(); |
| 14479 | |
| 14480 | SmallVector<APValue, 4> ResultElements; |
| 14481 | ResultElements.reserve(N: TotalElementsInOutputVector); |
| 14482 | for (unsigned EltNum = 0; EltNum < TotalElementsInOutputVector; ++EltNum) { |
| 14483 | APValue Elt; |
| 14484 | if (!handleVectorShuffle(Info, E, ElemType: DestElTy, VecVal1, VecVal2, EltNum, Result&: Elt)) |
| 14485 | return false; |
| 14486 | ResultElements.push_back(Elt: std::move(Elt)); |
| 14487 | } |
| 14488 | |
| 14489 | return Success(V: APValue(ResultElements.data(), ResultElements.size()), E); |
| 14490 | } |
| 14491 | |
| 14492 | //===----------------------------------------------------------------------===// |
| 14493 | // Array Evaluation |
| 14494 | //===----------------------------------------------------------------------===// |
| 14495 | |
| 14496 | namespace { |
| 14497 | class ArrayExprEvaluator |
| 14498 | : public ExprEvaluatorBase<ArrayExprEvaluator> { |
| 14499 | const LValue &This; |
| 14500 | APValue &Result; |
| 14501 | public: |
| 14502 | |
| 14503 | ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result) |
| 14504 | : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {} |
| 14505 | |
| 14506 | bool Success(const APValue &V, const Expr *E) { |
| 14507 | assert(V.isArray() && "expected array" ); |
| 14508 | Result = V; |
| 14509 | return true; |
| 14510 | } |
| 14511 | |
| 14512 | bool ZeroInitialization(const Expr *E) { |
| 14513 | const ConstantArrayType *CAT = |
| 14514 | Info.Ctx.getAsConstantArrayType(T: E->getType()); |
| 14515 | if (!CAT) { |
| 14516 | if (E->getType()->isIncompleteArrayType()) { |
| 14517 | // We can be asked to zero-initialize a flexible array member; this |
| 14518 | // is represented as an ImplicitValueInitExpr of incomplete array |
| 14519 | // type. In this case, the array has zero elements. |
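// For example, value-initializing 'struct S { int n; int tail[]; };' in a
// constant expression reaches this path for 'tail' and produces an array
// value with zero elements.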
| 14520 | Result = APValue(APValue::UninitArray(), 0, 0); |
| 14521 | return true; |
| 14522 | } |
| 14523 | // FIXME: We could handle VLAs here. |
| 14524 | return Error(E); |
| 14525 | } |
| 14526 | |
| 14527 | Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize()); |
| 14528 | if (!Result.hasArrayFiller()) |
| 14529 | return true; |
| 14530 | |
| 14531 | // Zero-initialize all elements. |
| 14532 | LValue Subobject = This; |
| 14533 | Subobject.addArray(Info, E, CAT); |
| 14534 | ImplicitValueInitExpr VIE(CAT->getElementType()); |
| 14535 | return EvaluateInPlace(Result&: Result.getArrayFiller(), Info, This: Subobject, E: &VIE); |
| 14536 | } |
| 14537 | |
| 14538 | bool VisitCallExpr(const CallExpr *E) { |
| 14539 | return handleCallExpr(E, Result, ResultSlot: &This); |
| 14540 | } |
| 14541 | bool VisitCastExpr(const CastExpr *E); |
| 14542 | bool VisitInitListExpr(const InitListExpr *E, |
| 14543 | QualType AllocType = QualType()); |
| 14544 | bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E); |
| 14545 | bool VisitCXXConstructExpr(const CXXConstructExpr *E); |
| 14546 | bool VisitCXXConstructExpr(const CXXConstructExpr *E, |
| 14547 | const LValue &Subobject, |
| 14548 | APValue *Value, QualType Type); |
| 14549 | bool VisitStringLiteral(const StringLiteral *E, |
| 14550 | QualType AllocType = QualType()) { |
| 14551 | expandStringLiteral(Info, S: E, Result, AllocType); |
| 14552 | return true; |
| 14553 | } |
| 14554 | bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E); |
| 14555 | bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit, |
| 14556 | ArrayRef<Expr *> Args, |
| 14557 | const Expr *ArrayFiller, |
| 14558 | QualType AllocType = QualType()); |
| 14559 | }; |
| 14560 | } // end anonymous namespace |
| 14561 | |
| 14562 | static bool EvaluateArray(const Expr *E, const LValue &This, |
| 14563 | APValue &Result, EvalInfo &Info) { |
| 14564 | assert(!E->isValueDependent()); |
| 14565 | assert(E->isPRValue() && E->getType()->isArrayType() && |
| 14566 | "not an array prvalue" ); |
| 14567 | return ArrayExprEvaluator(Info, This, Result).Visit(S: E); |
| 14568 | } |
| 14569 | |
| 14570 | static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This, |
| 14571 | APValue &Result, const InitListExpr *ILE, |
| 14572 | QualType AllocType) { |
| 14573 | assert(!ILE->isValueDependent()); |
| 14574 | assert(ILE->isPRValue() && ILE->getType()->isArrayType() && |
| 14575 | "not an array prvalue" ); |
| 14576 | return ArrayExprEvaluator(Info, This, Result) |
| 14577 | .VisitInitListExpr(E: ILE, AllocType); |
| 14578 | } |
| 14579 | |
| 14580 | static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This, |
| 14581 | APValue &Result, |
| 14582 | const CXXConstructExpr *CCE, |
| 14583 | QualType AllocType) { |
| 14584 | assert(!CCE->isValueDependent()); |
| 14585 | assert(CCE->isPRValue() && CCE->getType()->isArrayType() && |
| 14586 | "not an array prvalue" ); |
| 14587 | return ArrayExprEvaluator(Info, This, Result) |
| 14588 | .VisitCXXConstructExpr(E: CCE, Subobject: This, Value: &Result, Type: AllocType); |
| 14589 | } |
| 14590 | |
| 14591 | // Return true iff the given array filler may depend on the element index. |
| 14592 | static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) { |
| 14593 | // For now, just allow non-class value-initialization and initialization |
| 14594 | // lists comprised of them. |
| 14595 | if (isa<ImplicitValueInitExpr>(Val: FillerExpr)) |
| 14596 | return false; |
| 14597 | if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Val: FillerExpr)) { |
| 14598 | for (unsigned I = 0, E = ILE->getNumInits(); I != E; ++I) { |
| 14599 | if (MaybeElementDependentArrayFiller(FillerExpr: ILE->getInit(Init: I))) |
| 14600 | return true; |
| 14601 | } |
| 14602 | |
| 14603 | if (ILE->hasArrayFiller() && |
| 14604 | MaybeElementDependentArrayFiller(FillerExpr: ILE->getArrayFiller())) |
| 14605 | return true; |
| 14606 | |
| 14607 | return false; |
| 14608 | } |
| 14609 | return true; |
| 14610 | } |
| 14611 | |
| 14612 | bool ArrayExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 14613 | const Expr *SE = E->getSubExpr(); |
| 14614 | |
| 14615 | switch (E->getCastKind()) { |
| 14616 | default: |
| 14617 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 14618 | case CK_HLSLAggregateSplatCast: { |
| 14619 | APValue Val; |
| 14620 | QualType ValTy; |
| 14621 | |
| 14622 | if (!hlslAggSplatHelper(Info, E: SE, SrcVal&: Val, SrcTy&: ValTy)) |
| 14623 | return false; |
| 14624 | |
| 14625 | unsigned NEls = elementwiseSize(Info, BaseTy: E->getType()); |
| 14626 | |
| 14627 | SmallVector<APValue> SplatEls(NEls, Val); |
| 14628 | SmallVector<QualType> SplatType(NEls, ValTy); |
| 14629 | |
| 14630 | // cast the elements |
| 14631 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 14632 | if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SplatEls, |
| 14633 | ElTypes&: SplatType)) |
| 14634 | return false; |
| 14635 | |
| 14636 | return true; |
| 14637 | } |
| 14638 | case CK_HLSLElementwiseCast: { |
| 14639 | SmallVector<APValue> SrcEls; |
| 14640 | SmallVector<QualType> SrcTypes; |
| 14641 | |
| 14642 | if (!hlslElementwiseCastHelper(Info, E: SE, DestTy: E->getType(), SrcVals&: SrcEls, SrcTypes)) |
| 14643 | return false; |
| 14644 | |
| 14645 | // cast the elements |
| 14646 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 14647 | if (!constructAggregate(Info, FPO, E, Result, ResultType: E->getType(), Elements&: SrcEls, |
| 14648 | ElTypes&: SrcTypes)) |
| 14649 | return false; |
| 14650 | return true; |
| 14651 | } |
| 14652 | } |
| 14653 | } |
| 14654 | |
| 14655 | bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E, |
| 14656 | QualType AllocType) { |
| 14657 | const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType( |
| 14658 | T: AllocType.isNull() ? E->getType() : AllocType); |
| 14659 | if (!CAT) |
| 14660 | return Error(E); |
| 14661 | |
| 14662 | // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...] |
| 14663 | // an appropriately-typed string literal enclosed in braces. |
| 14664 | if (E->isStringLiteralInit()) { |
| 14665 | auto *SL = dyn_cast<StringLiteral>(Val: E->getInit(Init: 0)->IgnoreParenImpCasts()); |
| 14666 | // FIXME: Support ObjCEncodeExpr here once we support it in |
| 14667 | // ArrayExprEvaluator generally. |
| 14668 | if (!SL) |
| 14669 | return Error(E); |
| 14670 | return VisitStringLiteral(E: SL, AllocType); |
| 14671 | } |
| 14672 | // Any other transparent list init will need proper handling of the |
| 14673 | // AllocType; we can't just recurse to the inner initializer. |
| 14674 | assert(!E->isTransparent() && |
| 14675 | "transparent array list initialization is not string literal init?" ); |
| 14676 | |
| 14677 | return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->inits(), ArrayFiller: E->getArrayFiller(), |
| 14678 | AllocType); |
| 14679 | } |
| 14680 | |
| 14681 | bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr( |
| 14682 | const Expr *ExprToVisit, ArrayRef<Expr *> Args, const Expr *ArrayFiller, |
| 14683 | QualType AllocType) { |
| 14684 | const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType( |
| 14685 | T: AllocType.isNull() ? ExprToVisit->getType() : AllocType); |
| 14686 | |
| 14687 | bool Success = true; |
| 14688 | |
| 14689 | assert((!Result.isArray() || Result.getArrayInitializedElts() == 0) && |
| 14690 | "zero-initialized array shouldn't have any initialized elts" ); |
| 14691 | APValue Filler; |
| 14692 | if (Result.isArray() && Result.hasArrayFiller()) |
| 14693 | Filler = Result.getArrayFiller(); |
| 14694 | |
| 14695 | unsigned NumEltsToInit = Args.size(); |
| 14696 | unsigned NumElts = CAT->getZExtSize(); |
| 14697 | |
| 14698 | // If the initializer might depend on the array index, run it for each |
| 14699 | // array element. |
| 14700 | if (NumEltsToInit != NumElts && |
| 14701 | MaybeElementDependentArrayFiller(FillerExpr: ArrayFiller)) { |
| 14702 | NumEltsToInit = NumElts; |
| 14703 | } else { |
| 14704 | for (auto *Init : Args) { |
| 14705 | if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) |
| 14706 | NumEltsToInit += EmbedS->getDataElementCount() - 1; |
| 14707 | } |
| 14708 | if (NumEltsToInit > NumElts) |
| 14709 | NumEltsToInit = NumElts; |
| 14710 | } |
| 14711 | |
| 14712 | LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: " |
| 14713 | << NumEltsToInit << ".\n" ); |
| 14714 | |
| 14715 | Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts); |
| 14716 | |
| 14717 | // If the array was previously zero-initialized, preserve the |
| 14718 | // zero-initialized values. |
| 14719 | if (Filler.hasValue()) { |
| 14720 | for (unsigned I = 0, E = Result.getArrayInitializedElts(); I != E; ++I) |
| 14721 | Result.getArrayInitializedElt(I) = Filler; |
| 14722 | if (Result.hasArrayFiller()) |
| 14723 | Result.getArrayFiller() = Filler; |
| 14724 | } |
| 14725 | |
| 14726 | LValue Subobject = This; |
| 14727 | Subobject.addArray(Info, E: ExprToVisit, CAT); |
| 14728 | auto Eval = [&](const Expr *Init, unsigned ArrayIndex) { |
| 14729 | if (Init->isValueDependent()) |
| 14730 | return EvaluateDependentExpr(E: Init, Info); |
| 14731 | |
| 14732 | if (!EvaluateInPlace(Result&: Result.getArrayInitializedElt(I: ArrayIndex), Info, |
| 14733 | This: Subobject, E: Init) || |
| 14734 | !HandleLValueArrayAdjustment(Info, E: Init, LVal&: Subobject, |
| 14735 | EltTy: CAT->getElementType(), Adjustment: 1)) { |
| 14736 | if (!Info.noteFailure()) |
| 14737 | return false; |
| 14738 | Success = false; |
| 14739 | } |
| 14740 | return true; |
| 14741 | }; |
| 14742 | unsigned ArrayIndex = 0; |
| 14743 | QualType DestTy = CAT->getElementType(); |
| 14744 | APSInt Value(Info.Ctx.getTypeSize(T: DestTy), DestTy->isUnsignedIntegerType()); |
| 14745 | for (unsigned Index = 0; Index != NumEltsToInit; ++Index) { |
| 14746 | const Expr *Init = Index < Args.size() ? Args[Index] : ArrayFiller; |
| 14747 | if (ArrayIndex >= NumEltsToInit) |
| 14748 | break; |
| 14749 | if (auto *EmbedS = dyn_cast<EmbedExpr>(Val: Init->IgnoreParenImpCasts())) { |
| 14750 | StringLiteral *SL = EmbedS->getDataStringLiteral(); |
| 14751 | for (unsigned I = EmbedS->getStartingElementPos(), |
| 14752 | N = EmbedS->getDataElementCount(); |
| 14753 | I != EmbedS->getStartingElementPos() + N; ++I) { |
| 14754 | Value = SL->getCodeUnit(i: I); |
| 14755 | if (DestTy->isIntegerType()) { |
| 14756 | Result.getArrayInitializedElt(I: ArrayIndex) = APValue(Value); |
| 14757 | } else { |
| 14758 | assert(DestTy->isFloatingType() && "unexpected type" ); |
| 14759 | const FPOptions FPO = |
| 14760 | Init->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 14761 | APFloat FValue(0.0); |
| 14762 | if (!HandleIntToFloatCast(Info, E: Init, FPO, SrcType: EmbedS->getType(), Value, |
| 14763 | DestType: DestTy, Result&: FValue)) |
| 14764 | return false; |
| 14765 | Result.getArrayInitializedElt(I: ArrayIndex) = APValue(FValue); |
| 14766 | } |
| 14767 | ArrayIndex++; |
| 14768 | } |
| 14769 | } else { |
| 14770 | if (!Eval(Init, ArrayIndex)) |
| 14771 | return false; |
| 14772 | ++ArrayIndex; |
| 14773 | } |
| 14774 | } |
| 14775 | |
| 14776 | if (!Result.hasArrayFiller()) |
| 14777 | return Success; |
| 14778 | |
| 14779 | // If we get here, we have a trivial filler, which we can just evaluate |
| 14780 | // once and splat over the rest of the array elements. |
| 14781 | assert(ArrayFiller && "no array filler for incomplete init list" ); |
| 14782 | return EvaluateInPlace(Result&: Result.getArrayFiller(), Info, This: Subobject, |
| 14783 | E: ArrayFiller) && |
| 14784 | Success; |
| 14785 | } |
| 14786 | |
| 14787 | bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { |
| 14788 | LValue CommonLV; |
| 14789 | if (E->getCommonExpr() && |
| 14790 | !Evaluate(Result&: Info.CurrentCall->createTemporary( |
| 14791 | Key: E->getCommonExpr(), |
| 14792 | T: getStorageType(Ctx: Info.Ctx, E: E->getCommonExpr()), |
| 14793 | Scope: ScopeKind::FullExpression, LV&: CommonLV), |
| 14794 | Info, E: E->getCommonExpr()->getSourceExpr())) |
| 14795 | return false; |
| 14796 | |
| 14797 | auto *CAT = cast<ConstantArrayType>(Val: E->getType()->castAsArrayTypeUnsafe()); |
| 14798 | |
| 14799 | uint64_t Elements = CAT->getZExtSize(); |
| 14800 | Result = APValue(APValue::UninitArray(), Elements, Elements); |
| 14801 | |
| 14802 | LValue Subobject = This; |
| 14803 | Subobject.addArray(Info, E, CAT); |
| 14804 | |
| 14805 | bool Success = true; |
| 14806 | for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) { |
| 14807 | // C++ [class.temporary]/5 |
| 14808 | // There are four contexts in which temporaries are destroyed at a different |
| 14809 | // point than the end of the full-expression. [...] The second context is |
| 14810 | // when a copy constructor is called to copy an element of an array while |
| 14811 | // the entire array is copied [...]. In either case, if the constructor has |
| 14812 | // one or more default arguments, the destruction of every temporary created |
| 14813 | // in a default argument is sequenced before the construction of the next |
| 14814 | // array element, if any. |
| 14815 | FullExpressionRAII Scope(Info); |
| 14816 | |
| 14817 | if (!EvaluateInPlace(Result&: Result.getArrayInitializedElt(I: Index), |
| 14818 | Info, This: Subobject, E: E->getSubExpr()) || |
| 14819 | !HandleLValueArrayAdjustment(Info, E, LVal&: Subobject, |
| 14820 | EltTy: CAT->getElementType(), Adjustment: 1)) { |
| 14821 | if (!Info.noteFailure()) |
| 14822 | return false; |
| 14823 | Success = false; |
| 14824 | } |
| 14825 | |
| 14826 | // Make sure we run the destructors too. |
| 14827 | Scope.destroy(); |
| 14828 | } |
| 14829 | |
| 14830 | return Success; |
| 14831 | } |
| 14832 | |
| 14833 | bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) { |
| 14834 | return VisitCXXConstructExpr(E, Subobject: This, Value: &Result, Type: E->getType()); |
| 14835 | } |
| 14836 | |
| 14837 | bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, |
| 14838 | const LValue &Subobject, |
| 14839 | APValue *Value, |
| 14840 | QualType Type) { |
| 14841 | bool HadZeroInit = Value->hasValue(); |
| 14842 | |
| 14843 | if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T: Type)) { |
| 14844 | unsigned FinalSize = CAT->getZExtSize(); |
| 14845 | |
| 14846 | // Preserve the array filler if we had prior zero-initialization. |
| 14847 | APValue Filler = |
| 14848 | HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller() |
| 14849 | : APValue(); |
| 14850 | |
| 14851 | *Value = APValue(APValue::UninitArray(), 0, FinalSize); |
| 14852 | if (FinalSize == 0) |
| 14853 | return true; |
| 14854 | |
| 14855 | bool HasTrivialConstructor = CheckTrivialDefaultConstructor( |
| 14856 | Info, Loc: E->getExprLoc(), CD: E->getConstructor(), |
| 14857 | IsValueInitialization: E->requiresZeroInitialization()); |
| 14858 | LValue ArrayElt = Subobject; |
| 14859 | ArrayElt.addArray(Info, E, CAT); |
// We do the whole initialization in two passes: first for just one element,
// then for the whole array. We may discover during the first pass that
// constant initialization is impossible, in which case we avoid allocating a
// potentially large array. We don't do more passes because expanding the
// array requires copying the data, which is wasteful.
| 14865 | for (const unsigned N : {1u, FinalSize}) { |
| 14866 | unsigned OldElts = Value->getArrayInitializedElts(); |
| 14867 | if (OldElts == N) |
| 14868 | break; |
| 14869 | |
| 14870 | // Expand the array to appropriate size. |
| 14871 | APValue NewValue(APValue::UninitArray(), N, FinalSize); |
| 14872 | for (unsigned I = 0; I < OldElts; ++I) |
| 14873 | NewValue.getArrayInitializedElt(I).swap( |
| 14874 | RHS&: Value->getArrayInitializedElt(I)); |
| 14875 | Value->swap(RHS&: NewValue); |
| 14876 | |
| 14877 | if (HadZeroInit) |
| 14878 | for (unsigned I = OldElts; I < N; ++I) |
| 14879 | Value->getArrayInitializedElt(I) = Filler; |
| 14880 | |
| 14881 | if (HasTrivialConstructor && N == FinalSize && FinalSize != 1) { |
| 14882 | // If we have a trivial constructor, only evaluate it once and copy |
| 14883 | // the result into all the array elements. |
| 14884 | APValue &FirstResult = Value->getArrayInitializedElt(I: 0); |
| 14885 | for (unsigned I = OldElts; I < FinalSize; ++I) |
| 14886 | Value->getArrayInitializedElt(I) = FirstResult; |
| 14887 | } else { |
| 14888 | for (unsigned I = OldElts; I < N; ++I) { |
| 14889 | if (!VisitCXXConstructExpr(E, Subobject: ArrayElt, |
| 14890 | Value: &Value->getArrayInitializedElt(I), |
| 14891 | Type: CAT->getElementType()) || |
| 14892 | !HandleLValueArrayAdjustment(Info, E, LVal&: ArrayElt, |
| 14893 | EltTy: CAT->getElementType(), Adjustment: 1)) |
| 14894 | return false; |
// When checking for constant initialization, any diagnostic is considered
// an error.
| 14897 | if (Info.EvalStatus.Diag && !Info.EvalStatus.Diag->empty() && |
| 14898 | !Info.keepEvaluatingAfterFailure()) |
| 14899 | return false; |
| 14900 | } |
| 14901 | } |
| 14902 | } |
| 14903 | |
| 14904 | return true; |
| 14905 | } |
| 14906 | |
| 14907 | if (!Type->isRecordType()) |
| 14908 | return Error(E); |
| 14909 | |
| 14910 | return RecordExprEvaluator(Info, Subobject, *Value) |
| 14911 | .VisitCXXConstructExpr(E, T: Type); |
| 14912 | } |
| 14913 | |
| 14914 | bool ArrayExprEvaluator::VisitCXXParenListInitExpr( |
| 14915 | const CXXParenListInitExpr *E) { |
| 14916 | assert(E->getType()->isConstantArrayType() && |
| 14917 | "Expression result is not a constant array type" ); |
| 14918 | |
| 14919 | return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs(), |
| 14920 | ArrayFiller: E->getArrayFiller()); |
| 14921 | } |
| 14922 | |
| 14923 | //===----------------------------------------------------------------------===// |
| 14924 | // Integer Evaluation |
| 14925 | // |
| 14926 | // As a GNU extension, we support casting pointers to sufficiently-wide integer |
| 14927 | // types and back in constant folding. Integer values are thus represented |
| 14928 | // either as an integer-valued APValue, or as an lvalue-valued APValue. |
| 14929 | //===----------------------------------------------------------------------===// |
| 14930 | |
| 14931 | namespace { |
| 14932 | class IntExprEvaluator |
| 14933 | : public ExprEvaluatorBase<IntExprEvaluator> { |
| 14934 | APValue &Result; |
| 14935 | public: |
| 14936 | IntExprEvaluator(EvalInfo &info, APValue &result) |
| 14937 | : ExprEvaluatorBaseTy(info), Result(result) {} |
| 14938 | |
| 14939 | bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) { |
| 14940 | assert(E->getType()->isIntegralOrEnumerationType() && |
| 14941 | "Invalid evaluation result." ); |
| 14942 | assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() && |
| 14943 | "Invalid evaluation result." ); |
| 14944 | assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && |
| 14945 | "Invalid evaluation result." ); |
| 14946 | Result = APValue(SI); |
| 14947 | return true; |
| 14948 | } |
| 14949 | bool Success(const llvm::APSInt &SI, const Expr *E) { |
| 14950 | return Success(SI, E, Result); |
| 14951 | } |
| 14952 | |
| 14953 | bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) { |
| 14954 | assert(E->getType()->isIntegralOrEnumerationType() && |
| 14955 | "Invalid evaluation result." ); |
| 14956 | assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && |
| 14957 | "Invalid evaluation result." ); |
| 14958 | Result = APValue(APSInt(I)); |
| 14959 | Result.getInt().setIsUnsigned( |
| 14960 | E->getType()->isUnsignedIntegerOrEnumerationType()); |
| 14961 | return true; |
| 14962 | } |
| 14963 | bool Success(const llvm::APInt &I, const Expr *E) { |
| 14964 | return Success(I, E, Result); |
| 14965 | } |
| 14966 | |
| 14967 | bool Success(uint64_t Value, const Expr *E, APValue &Result) { |
| 14968 | assert(E->getType()->isIntegralOrEnumerationType() && |
| 14969 | "Invalid evaluation result." ); |
| 14970 | Result = APValue(Info.Ctx.MakeIntValue(Value, Type: E->getType())); |
| 14971 | return true; |
| 14972 | } |
| 14973 | bool Success(uint64_t Value, const Expr *E) { |
| 14974 | return Success(Value, E, Result); |
| 14975 | } |
| 14976 | |
| 14977 | bool Success(CharUnits Size, const Expr *E) { |
| 14978 | return Success(Value: Size.getQuantity(), E); |
| 14979 | } |
| 14980 | |
| 14981 | bool Success(const APValue &V, const Expr *E) { |
// C++23 [expr.const]p8: if the value refers to an unknown reference or
// pointer, allow further evaluation of it.
| 14984 | if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate() || |
| 14985 | V.allowConstexprUnknown()) { |
| 14986 | Result = V; |
| 14987 | return true; |
| 14988 | } |
| 14989 | return Success(SI: V.getInt(), E); |
| 14990 | } |
| 14991 | |
| 14992 | bool ZeroInitialization(const Expr *E) { return Success(Value: 0, E); } |
| 14993 | |
| 14994 | friend std::optional<bool> EvaluateBuiltinIsWithinLifetime(IntExprEvaluator &, |
| 14995 | const CallExpr *); |
| 14996 | |
| 14997 | //===--------------------------------------------------------------------===// |
| 14998 | // Visitor Methods |
| 14999 | //===--------------------------------------------------------------------===// |
| 15000 | |
| 15001 | bool VisitIntegerLiteral(const IntegerLiteral *E) { |
| 15002 | return Success(I: E->getValue(), E); |
| 15003 | } |
| 15004 | bool VisitCharacterLiteral(const CharacterLiteral *E) { |
| 15005 | return Success(Value: E->getValue(), E); |
| 15006 | } |
| 15007 | |
| 15008 | bool CheckReferencedDecl(const Expr *E, const Decl *D); |
| 15009 | bool VisitDeclRefExpr(const DeclRefExpr *E) { |
| 15010 | if (CheckReferencedDecl(E, D: E->getDecl())) |
| 15011 | return true; |
| 15012 | |
| 15013 | return ExprEvaluatorBaseTy::VisitDeclRefExpr(S: E); |
| 15014 | } |
| 15015 | bool VisitMemberExpr(const MemberExpr *E) { |
| 15016 | if (CheckReferencedDecl(E, D: E->getMemberDecl())) { |
| 15017 | VisitIgnoredBaseExpression(E: E->getBase()); |
| 15018 | return true; |
| 15019 | } |
| 15020 | |
| 15021 | return ExprEvaluatorBaseTy::VisitMemberExpr(E); |
| 15022 | } |
| 15023 | |
| 15024 | bool VisitCallExpr(const CallExpr *E); |
| 15025 | bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp); |
| 15026 | bool VisitBinaryOperator(const BinaryOperator *E); |
| 15027 | bool VisitOffsetOfExpr(const OffsetOfExpr *E); |
| 15028 | bool VisitUnaryOperator(const UnaryOperator *E); |
| 15029 | |
| 15030 | bool VisitCastExpr(const CastExpr* E); |
| 15031 | bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); |
| 15032 | |
| 15033 | bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { |
| 15034 | return Success(Value: E->getValue(), E); |
| 15035 | } |
| 15036 | |
| 15037 | bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { |
| 15038 | return Success(Value: E->getValue(), E); |
| 15039 | } |
| 15040 | |
| 15041 | bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) { |
| 15042 | if (Info.ArrayInitIndex == uint64_t(-1)) { |
| 15043 | // We were asked to evaluate this subexpression independent of the |
| 15044 | // enclosing ArrayInitLoopExpr. We can't do that. |
| 15045 | Info.FFDiag(E); |
| 15046 | return false; |
| 15047 | } |
| 15048 | return Success(Value: Info.ArrayInitIndex, E); |
| 15049 | } |
| 15050 | |
| 15051 | // Note, GNU defines __null as an integer, not a pointer. |
| 15052 | bool VisitGNUNullExpr(const GNUNullExpr *E) { |
| 15053 | return ZeroInitialization(E); |
| 15054 | } |
| 15055 | |
| 15056 | bool VisitTypeTraitExpr(const TypeTraitExpr *E) { |
| 15057 | if (E->isStoredAsBoolean()) |
| 15058 | return Success(Value: E->getBoolValue(), E); |
| 15059 | if (E->getAPValue().isAbsent()) |
| 15060 | return false; |
| 15061 | assert(E->getAPValue().isInt() && "APValue type not supported" ); |
| 15062 | return Success(SI: E->getAPValue().getInt(), E); |
| 15063 | } |
| 15064 | |
| 15065 | bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) { |
| 15066 | return Success(Value: E->getValue(), E); |
| 15067 | } |
| 15068 | |
| 15069 | bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) { |
| 15070 | return Success(Value: E->getValue(), E); |
| 15071 | } |
| 15072 | |
| 15073 | bool VisitOpenACCAsteriskSizeExpr(const OpenACCAsteriskSizeExpr *E) { |
| 15074 | // This should not be evaluated during constant expr evaluation, as it |
| 15075 | // should always be in an unevaluated context (the args list of a 'gang' or |
| 15076 | // 'tile' clause). |
| 15077 | return Error(E); |
| 15078 | } |
| 15079 | |
| 15080 | bool VisitUnaryReal(const UnaryOperator *E); |
| 15081 | bool VisitUnaryImag(const UnaryOperator *E); |
| 15082 | |
| 15083 | bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E); |
| 15084 | bool VisitSizeOfPackExpr(const SizeOfPackExpr *E); |
| 15085 | bool VisitSourceLocExpr(const SourceLocExpr *E); |
| 15086 | bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E); |
| 15087 | bool VisitRequiresExpr(const RequiresExpr *E); |
| 15088 | // FIXME: Missing: array subscript of vector, member of vector |
| 15089 | }; |
| 15090 | |
| 15091 | class FixedPointExprEvaluator |
| 15092 | : public ExprEvaluatorBase<FixedPointExprEvaluator> { |
| 15093 | APValue &Result; |
| 15094 | |
| 15095 | public: |
| 15096 | FixedPointExprEvaluator(EvalInfo &info, APValue &result) |
| 15097 | : ExprEvaluatorBaseTy(info), Result(result) {} |
| 15098 | |
| 15099 | bool Success(const llvm::APInt &I, const Expr *E) { |
| 15100 | return Success( |
| 15101 | V: APFixedPoint(I, Info.Ctx.getFixedPointSemantics(Ty: E->getType())), E); |
| 15102 | } |
| 15103 | |
| 15104 | bool Success(uint64_t Value, const Expr *E) { |
| 15105 | return Success( |
| 15106 | V: APFixedPoint(Value, Info.Ctx.getFixedPointSemantics(Ty: E->getType())), E); |
| 15107 | } |
| 15108 | |
| 15109 | bool Success(const APValue &V, const Expr *E) { |
| 15110 | return Success(V: V.getFixedPoint(), E); |
| 15111 | } |
| 15112 | |
| 15113 | bool Success(const APFixedPoint &V, const Expr *E) { |
| 15114 | assert(E->getType()->isFixedPointType() && "Invalid evaluation result." ); |
| 15115 | assert(V.getWidth() == Info.Ctx.getIntWidth(E->getType()) && |
| 15116 | "Invalid evaluation result." ); |
| 15117 | Result = APValue(V); |
| 15118 | return true; |
| 15119 | } |
| 15120 | |
| 15121 | bool ZeroInitialization(const Expr *E) { |
| 15122 | return Success(Value: 0, E); |
| 15123 | } |
| 15124 | |
| 15125 | //===--------------------------------------------------------------------===// |
| 15126 | // Visitor Methods |
| 15127 | //===--------------------------------------------------------------------===// |
| 15128 | |
| 15129 | bool VisitFixedPointLiteral(const FixedPointLiteral *E) { |
| 15130 | return Success(I: E->getValue(), E); |
| 15131 | } |
| 15132 | |
| 15133 | bool VisitCastExpr(const CastExpr *E); |
| 15134 | bool VisitUnaryOperator(const UnaryOperator *E); |
| 15135 | bool VisitBinaryOperator(const BinaryOperator *E); |
| 15136 | }; |
| 15137 | } // end anonymous namespace |
| 15138 | |
| 15139 | /// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and |
| 15140 | /// produce either the integer value or a pointer. |
| 15141 | /// |
| 15142 | /// GCC has a heinous extension which folds casts between pointer types and |
| 15143 | /// pointer-sized integral types. We support this by allowing the evaluation of |
| 15144 | /// an integer rvalue to produce a pointer (represented as an lvalue) instead. |
| 15145 | /// Some simple arithmetic on such values is supported (they are treated much |
| 15146 | /// like char*). |
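/// For example, a static initializer such as
///   extern int g;
///   __UINTPTR_TYPE__ p = (__UINTPTR_TYPE__)&g + 4;
/// is folded here to an lvalue-valued APValue (&g plus a byte offset) rather
/// than to an integer constant.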
| 15147 | static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result, |
| 15148 | EvalInfo &Info) { |
| 15149 | assert(!E->isValueDependent()); |
| 15150 | assert(E->isPRValue() && E->getType()->isIntegralOrEnumerationType()); |
| 15151 | return IntExprEvaluator(Info, Result).Visit(S: E); |
| 15152 | } |
| 15153 | |
| 15154 | static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) { |
| 15155 | assert(!E->isValueDependent()); |
| 15156 | APValue Val; |
| 15157 | if (!EvaluateIntegerOrLValue(E, Result&: Val, Info)) |
| 15158 | return false; |
| 15159 | if (!Val.isInt()) { |
| 15160 | // FIXME: It would be better to produce the diagnostic for casting |
| 15161 | // a pointer to an integer. |
| 15162 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 15163 | return false; |
| 15164 | } |
| 15165 | Result = Val.getInt(); |
| 15166 | return true; |
| 15167 | } |
| 15168 | |
| 15169 | bool IntExprEvaluator::VisitSourceLocExpr(const SourceLocExpr *E) { |
| 15170 | APValue Evaluated = E->EvaluateInContext( |
| 15171 | Ctx: Info.Ctx, DefaultExpr: Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr()); |
| 15172 | return Success(V: Evaluated, E); |
| 15173 | } |
| 15174 | |
| 15175 | static bool EvaluateFixedPoint(const Expr *E, APFixedPoint &Result, |
| 15176 | EvalInfo &Info) { |
| 15177 | assert(!E->isValueDependent()); |
| 15178 | if (E->getType()->isFixedPointType()) { |
| 15179 | APValue Val; |
| 15180 | if (!FixedPointExprEvaluator(Info, Val).Visit(S: E)) |
| 15181 | return false; |
| 15182 | if (!Val.isFixedPoint()) |
| 15183 | return false; |
| 15184 | |
| 15185 | Result = Val.getFixedPoint(); |
| 15186 | return true; |
| 15187 | } |
| 15188 | return false; |
| 15189 | } |
| 15190 | |
| 15191 | static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result, |
| 15192 | EvalInfo &Info) { |
| 15193 | assert(!E->isValueDependent()); |
| 15194 | if (E->getType()->isIntegerType()) { |
| 15195 | auto FXSema = Info.Ctx.getFixedPointSemantics(Ty: E->getType()); |
| 15196 | APSInt Val; |
| 15197 | if (!EvaluateInteger(E, Result&: Val, Info)) |
| 15198 | return false; |
| 15199 | Result = APFixedPoint(Val, FXSema); |
| 15200 | return true; |
| 15201 | } else if (E->getType()->isFixedPointType()) { |
| 15202 | return EvaluateFixedPoint(E, Result, Info); |
| 15203 | } |
| 15204 | return false; |
| 15205 | } |
| 15206 | |
| 15207 | /// Check whether the given declaration can be directly converted to an integral |
| 15208 | /// rvalue. If not, no diagnostic is produced; there are other things we can |
| 15209 | /// try. |
| 15210 | bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) { |
| 15211 | // Enums are integer constant exprs. |
| 15212 | if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(Val: D)) { |
// Check for signedness/width mismatches between E's type and ECD's value.
| 15214 | bool SameSign = (ECD->getInitVal().isSigned() |
| 15215 | == E->getType()->isSignedIntegerOrEnumerationType()); |
| 15216 | bool SameWidth = (ECD->getInitVal().getBitWidth() |
| 15217 | == Info.Ctx.getIntWidth(T: E->getType())); |
| 15218 | if (SameSign && SameWidth) |
| 15219 | return Success(SI: ECD->getInitVal(), E); |
| 15220 | else { |
| 15221 | // Get rid of mismatch (otherwise Success assertions will fail) |
| 15222 | // by computing a new value matching the type of E. |
| 15223 | llvm::APSInt Val = ECD->getInitVal(); |
| 15224 | if (!SameSign) |
| 15225 | Val.setIsSigned(!ECD->getInitVal().isSigned()); |
| 15226 | if (!SameWidth) |
| 15227 | Val = Val.extOrTrunc(width: Info.Ctx.getIntWidth(T: E->getType())); |
| 15228 | return Success(SI: Val, E); |
| 15229 | } |
| 15230 | } |
| 15231 | return false; |
| 15232 | } |
| 15233 | |
| 15234 | /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way |
| 15235 | /// as GCC. |
| 15236 | GCCTypeClass EvaluateBuiltinClassifyType(QualType T, |
| 15237 | const LangOptions &LangOpts) { |
| 15238 | assert(!T->isDependentType() && "unexpected dependent type" ); |
| 15239 | |
| 15240 | QualType CanTy = T.getCanonicalType(); |
| 15241 | |
| 15242 | switch (CanTy->getTypeClass()) { |
| 15243 | #define TYPE(ID, BASE) |
| 15244 | #define DEPENDENT_TYPE(ID, BASE) case Type::ID: |
| 15245 | #define NON_CANONICAL_TYPE(ID, BASE) case Type::ID: |
| 15246 | #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(ID, BASE) case Type::ID: |
| 15247 | #include "clang/AST/TypeNodes.inc" |
| 15248 | case Type::Auto: |
| 15249 | case Type::DeducedTemplateSpecialization: |
| 15250 | llvm_unreachable("unexpected non-canonical or dependent type" ); |
| 15251 | |
| 15252 | case Type::Builtin: |
| 15253 | switch (cast<BuiltinType>(Val&: CanTy)->getKind()) { |
| 15254 | #define BUILTIN_TYPE(ID, SINGLETON_ID) |
| 15255 | #define SIGNED_TYPE(ID, SINGLETON_ID) \ |
| 15256 | case BuiltinType::ID: return GCCTypeClass::Integer; |
| 15257 | #define FLOATING_TYPE(ID, SINGLETON_ID) \ |
| 15258 | case BuiltinType::ID: return GCCTypeClass::RealFloat; |
| 15259 | #define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \ |
| 15260 | case BuiltinType::ID: break; |
| 15261 | #include "clang/AST/BuiltinTypes.def" |
| 15262 | case BuiltinType::Void: |
| 15263 | return GCCTypeClass::Void; |
| 15264 | |
| 15265 | case BuiltinType::Bool: |
| 15266 | return GCCTypeClass::Bool; |
| 15267 | |
| 15268 | case BuiltinType::Char_U: |
| 15269 | case BuiltinType::UChar: |
| 15270 | case BuiltinType::WChar_U: |
| 15271 | case BuiltinType::Char8: |
| 15272 | case BuiltinType::Char16: |
| 15273 | case BuiltinType::Char32: |
| 15274 | case BuiltinType::UShort: |
| 15275 | case BuiltinType::UInt: |
| 15276 | case BuiltinType::ULong: |
| 15277 | case BuiltinType::ULongLong: |
| 15278 | case BuiltinType::UInt128: |
| 15279 | return GCCTypeClass::Integer; |
| 15280 | |
| 15281 | case BuiltinType::UShortAccum: |
| 15282 | case BuiltinType::UAccum: |
| 15283 | case BuiltinType::ULongAccum: |
| 15284 | case BuiltinType::UShortFract: |
| 15285 | case BuiltinType::UFract: |
| 15286 | case BuiltinType::ULongFract: |
| 15287 | case BuiltinType::SatUShortAccum: |
| 15288 | case BuiltinType::SatUAccum: |
| 15289 | case BuiltinType::SatULongAccum: |
| 15290 | case BuiltinType::SatUShortFract: |
| 15291 | case BuiltinType::SatUFract: |
| 15292 | case BuiltinType::SatULongFract: |
| 15293 | return GCCTypeClass::None; |
| 15294 | |
| 15295 | case BuiltinType::NullPtr: |
| 15296 | |
| 15297 | case BuiltinType::ObjCId: |
| 15298 | case BuiltinType::ObjCClass: |
| 15299 | case BuiltinType::ObjCSel: |
| 15300 | #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ |
| 15301 | case BuiltinType::Id: |
| 15302 | #include "clang/Basic/OpenCLImageTypes.def" |
| 15303 | #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ |
| 15304 | case BuiltinType::Id: |
| 15305 | #include "clang/Basic/OpenCLExtensionTypes.def" |
| 15306 | case BuiltinType::OCLSampler: |
| 15307 | case BuiltinType::OCLEvent: |
| 15308 | case BuiltinType::OCLClkEvent: |
| 15309 | case BuiltinType::OCLQueue: |
| 15310 | case BuiltinType::OCLReserveID: |
| 15311 | #define SVE_TYPE(Name, Id, SingletonId) \ |
| 15312 | case BuiltinType::Id: |
| 15313 | #include "clang/Basic/AArch64ACLETypes.def" |
| 15314 | #define PPC_VECTOR_TYPE(Name, Id, Size) \ |
| 15315 | case BuiltinType::Id: |
| 15316 | #include "clang/Basic/PPCTypes.def" |
| 15317 | #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: |
| 15318 | #include "clang/Basic/RISCVVTypes.def" |
| 15319 | #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: |
| 15320 | #include "clang/Basic/WebAssemblyReferenceTypes.def" |
| 15321 | #define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id: |
| 15322 | #include "clang/Basic/AMDGPUTypes.def" |
| 15323 | #define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: |
| 15324 | #include "clang/Basic/HLSLIntangibleTypes.def" |
| 15325 | return GCCTypeClass::None; |
| 15326 | |
| 15327 | case BuiltinType::Dependent: |
| 15328 | llvm_unreachable("unexpected dependent type" ); |
| 15329 | }; |
| 15330 | llvm_unreachable("unexpected placeholder type" ); |
| 15331 | |
| 15332 | case Type::Enum: |
| 15333 | return LangOpts.CPlusPlus ? GCCTypeClass::Enum : GCCTypeClass::Integer; |
| 15334 | |
| 15335 | case Type::Pointer: |
| 15336 | case Type::ConstantArray: |
| 15337 | case Type::VariableArray: |
| 15338 | case Type::IncompleteArray: |
| 15339 | case Type::FunctionNoProto: |
| 15340 | case Type::FunctionProto: |
| 15341 | case Type::ArrayParameter: |
| 15342 | return GCCTypeClass::Pointer; |
| 15343 | |
| 15344 | case Type::MemberPointer: |
| 15345 | return CanTy->isMemberDataPointerType() |
| 15346 | ? GCCTypeClass::PointerToDataMember |
| 15347 | : GCCTypeClass::PointerToMemberFunction; |
| 15348 | |
| 15349 | case Type::Complex: |
| 15350 | return GCCTypeClass::Complex; |
| 15351 | |
| 15352 | case Type::Record: |
| 15353 | return CanTy->isUnionType() ? GCCTypeClass::Union |
| 15354 | : GCCTypeClass::ClassOrStruct; |
| 15355 | |
| 15356 | case Type::Atomic: |
| 15357 | // GCC classifies _Atomic T the same as T. |
| 15358 | return EvaluateBuiltinClassifyType( |
| 15359 | T: CanTy->castAs<AtomicType>()->getValueType(), LangOpts); |
| 15360 | |
| 15361 | case Type::Vector: |
| 15362 | case Type::ExtVector: |
| 15363 | return GCCTypeClass::Vector; |
| 15364 | |
| 15365 | case Type::BlockPointer: |
| 15366 | case Type::ConstantMatrix: |
| 15367 | case Type::ObjCObject: |
| 15368 | case Type::ObjCInterface: |
| 15369 | case Type::ObjCObjectPointer: |
| 15370 | case Type::Pipe: |
| 15371 | case Type::HLSLAttributedResource: |
| 15372 | case Type::HLSLInlineSpirv: |
| 15373 | // Classify all other types that don't fit into the regular |
| 15374 | // classification the same way. |
| 15375 | return GCCTypeClass::None; |
| 15376 | |
| 15377 | case Type::BitInt: |
| 15378 | return GCCTypeClass::BitInt; |
| 15379 | |
| 15380 | case Type::LValueReference: |
| 15381 | case Type::RValueReference: |
| 15382 | llvm_unreachable("invalid type for expression" ); |
| 15383 | } |
| 15384 | |
| 15385 | llvm_unreachable("unexpected type class" ); |
| 15386 | } |
| 15387 | |
| 15388 | /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way |
| 15389 | /// as GCC. |
| 15390 | static GCCTypeClass |
| 15391 | EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) { |
  // If no argument was supplied, default to None. This isn't ideal, but it is
  // what GCC does.
| 15394 | if (E->getNumArgs() == 0) |
| 15395 | return GCCTypeClass::None; |
| 15396 | |
| 15397 | // FIXME: Bizarrely, GCC treats a call with more than one argument as not |
| 15398 | // being an ICE, but still folds it to a constant using the type of the first |
| 15399 | // argument. |
| 15400 | return EvaluateBuiltinClassifyType(T: E->getArg(Arg: 0)->getType(), LangOpts); |
| 15401 | } |
| 15402 | |
| 15403 | /// EvaluateBuiltinConstantPForLValue - Determine the result of |
| 15404 | /// __builtin_constant_p when applied to the given pointer. |
| 15405 | /// |
| 15406 | /// A pointer is only "constant" if it is null (or a pointer cast to integer) |
| 15407 | /// or it points to the first character of a string literal. |
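///
/// For illustration: under these rules __builtin_constant_p("abc") and
/// __builtin_constant_p((char *)0) fold to 1, while the address of an
/// ordinary object, e.g. __builtin_constant_p(&some_global), folds to 0.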
| 15408 | static bool EvaluateBuiltinConstantPForLValue(const APValue &LV) { |
| 15409 | APValue::LValueBase Base = LV.getLValueBase(); |
| 15410 | if (Base.isNull()) { |
| 15411 | // A null base is acceptable. |
| 15412 | return true; |
| 15413 | } else if (const Expr *E = Base.dyn_cast<const Expr *>()) { |
| 15414 | if (!isa<StringLiteral>(Val: E)) |
| 15415 | return false; |
| 15416 | return LV.getLValueOffset().isZero(); |
| 15417 | } else if (Base.is<TypeInfoLValue>()) { |
| 15418 | // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to |
| 15419 | // evaluate to true. |
| 15420 | return true; |
| 15421 | } else { |
| 15422 | // Any other base is not constant enough for GCC. |
| 15423 | return false; |
| 15424 | } |
| 15425 | } |
| 15426 | |
| 15427 | /// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to |
| 15428 | /// GCC as we can manage. |
| 15429 | static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) { |
| 15430 | // This evaluation is not permitted to have side-effects, so evaluate it in |
| 15431 | // a speculative evaluation context. |
| 15432 | SpeculativeEvaluationRAII SpeculativeEval(Info); |
| 15433 | |
| 15434 | // Constant-folding is always enabled for the operand of __builtin_constant_p |
| 15435 | // (even when the enclosing evaluation context otherwise requires a strict |
| 15436 | // language-specific constant expression). |
| 15437 | FoldConstant Fold(Info, true); |
| 15438 | |
| 15439 | QualType ArgType = Arg->getType(); |
| 15440 | |
| 15441 | // __builtin_constant_p always has one operand. The rules which gcc follows |
| 15442 | // are not precisely documented, but are as follows: |
| 15443 | // |
| 15444 | // - If the operand is of integral, floating, complex or enumeration type, |
| 15445 | // and can be folded to a known value of that type, it returns 1. |
| 15446 | // - If the operand can be folded to a pointer to the first character |
| 15447 | // of a string literal (or such a pointer cast to an integral type) |
| 15448 | // or to a null pointer or an integer cast to a pointer, it returns 1. |
| 15449 | // |
| 15450 | // Otherwise, it returns 0. |
| 15451 | // |
| 15452 | // FIXME: GCC also intends to return 1 for literals of aggregate types, but |
| 15453 | // its support for this did not work prior to GCC 9 and is not yet well |
| 15454 | // understood. |
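  // For illustration (not exhaustive): __builtin_constant_p(2 + 2),
  // __builtin_constant_p(3.5), and __builtin_constant_p("str") fold to 1,
  // while __builtin_constant_p(x) for a runtime-valued x folds to 0.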
| 15455 | if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() || |
| 15456 | ArgType->isAnyComplexType() || ArgType->isPointerType() || |
| 15457 | ArgType->isNullPtrType()) { |
| 15458 | APValue V; |
| 15459 | if (!::EvaluateAsRValue(Info, E: Arg, Result&: V) || Info.EvalStatus.HasSideEffects) { |
| 15460 | Fold.keepDiagnostics(); |
| 15461 | return false; |
| 15462 | } |
| 15463 | |
| 15464 | // For a pointer (possibly cast to integer), there are special rules. |
| 15465 | if (V.getKind() == APValue::LValue) |
| 15466 | return EvaluateBuiltinConstantPForLValue(LV: V); |
| 15467 | |
| 15468 | // Otherwise, any constant value is good enough. |
| 15469 | return V.hasValue(); |
| 15470 | } |
| 15471 | |
| 15472 | // Anything else isn't considered to be sufficiently constant. |
| 15473 | return false; |
| 15474 | } |
| 15475 | |
| 15476 | /// Retrieves the "underlying object type" of the given expression, |
| 15477 | /// as used by __builtin_object_size. |
| 15478 | static QualType getObjectType(APValue::LValueBase B) { |
| 15479 | if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) { |
| 15480 | if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) |
| 15481 | return VD->getType(); |
| 15482 | } else if (const Expr *E = B.dyn_cast<const Expr*>()) { |
| 15483 | if (isa<CompoundLiteralExpr>(Val: E)) |
| 15484 | return E->getType(); |
| 15485 | } else if (B.is<TypeInfoLValue>()) { |
| 15486 | return B.getTypeInfoType(); |
| 15487 | } else if (B.is<DynamicAllocLValue>()) { |
| 15488 | return B.getDynamicAllocType(); |
| 15489 | } |
| 15490 | |
| 15491 | return QualType(); |
| 15492 | } |
| 15493 | |
| 15494 | /// A more selective version of E->IgnoreParenCasts for |
| 15495 | /// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only |
| 15496 | /// to change the type of E. |
| 15497 | /// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo` |
| 15498 | /// |
| 15499 | /// Always returns an RValue with a pointer representation. |
| 15500 | static const Expr *ignorePointerCastsAndParens(const Expr *E) { |
| 15501 | assert(E->isPRValue() && E->getType()->hasPointerRepresentation()); |
| 15502 | |
| 15503 | const Expr *NoParens = E->IgnoreParens(); |
| 15504 | const auto *Cast = dyn_cast<CastExpr>(Val: NoParens); |
| 15505 | if (Cast == nullptr) |
| 15506 | return NoParens; |
| 15507 | |
| 15508 | // We only conservatively allow a few kinds of casts, because this code is |
| 15509 | // inherently a simple solution that seeks to support the common case. |
| 15510 | auto CastKind = Cast->getCastKind(); |
| 15511 | if (CastKind != CK_NoOp && CastKind != CK_BitCast && |
| 15512 | CastKind != CK_AddressSpaceConversion) |
| 15513 | return NoParens; |
| 15514 | |
| 15515 | const auto *SubExpr = Cast->getSubExpr(); |
| 15516 | if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isPRValue()) |
| 15517 | return NoParens; |
| 15518 | return ignorePointerCastsAndParens(E: SubExpr); |
| 15519 | } |
| 15520 | |
| 15521 | /// Checks to see if the given LValue's Designator is at the end of the LValue's |
| 15522 | /// record layout. e.g. |
| 15523 | /// struct { struct { int a, b; } fst, snd; } obj; |
| 15524 | /// obj.fst // no |
| 15525 | /// obj.snd // yes |
| 15526 | /// obj.fst.a // no |
| 15527 | /// obj.fst.b // no |
| 15528 | /// obj.snd.a // no |
| 15529 | /// obj.snd.b // yes |
| 15530 | /// |
| 15531 | /// Please note: this function is specialized for how __builtin_object_size |
| 15532 | /// views "objects". |
| 15533 | /// |
| 15534 | /// If this encounters an invalid RecordDecl or otherwise cannot determine the |
| 15535 | /// correct result, it will always return true. |
| 15536 | static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) { |
| 15537 | assert(!LVal.Designator.Invalid); |
| 15538 | |
| 15539 | auto IsLastOrInvalidFieldDecl = [&Ctx](const FieldDecl *FD) { |
| 15540 | const RecordDecl *Parent = FD->getParent(); |
| 15541 | if (Parent->isInvalidDecl() || Parent->isUnion()) |
| 15542 | return true; |
| 15543 | const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: Parent); |
| 15544 | return FD->getFieldIndex() + 1 == Layout.getFieldCount(); |
| 15545 | }; |
| 15546 | |
| 15547 | auto &Base = LVal.getLValueBase(); |
| 15548 | if (auto *ME = dyn_cast_or_null<MemberExpr>(Val: Base.dyn_cast<const Expr *>())) { |
| 15549 | if (auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl())) { |
| 15550 | if (!IsLastOrInvalidFieldDecl(FD)) |
| 15551 | return false; |
| 15552 | } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(Val: ME->getMemberDecl())) { |
| 15553 | for (auto *FD : IFD->chain()) { |
| 15554 | if (!IsLastOrInvalidFieldDecl(cast<FieldDecl>(Val: FD))) |
| 15555 | return false; |
| 15556 | } |
| 15557 | } |
| 15558 | } |
| 15559 | |
| 15560 | unsigned I = 0; |
| 15561 | QualType BaseType = getType(B: Base); |
| 15562 | if (LVal.Designator.FirstEntryIsAnUnsizedArray) { |
| 15563 | // If we don't know the array bound, conservatively assume we're looking at |
| 15564 | // the final array element. |
| 15565 | ++I; |
| 15566 | if (BaseType->isIncompleteArrayType()) |
| 15567 | BaseType = Ctx.getAsArrayType(T: BaseType)->getElementType(); |
| 15568 | else |
| 15569 | BaseType = BaseType->castAs<PointerType>()->getPointeeType(); |
| 15570 | } |
| 15571 | |
| 15572 | for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) { |
| 15573 | const auto &Entry = LVal.Designator.Entries[I]; |
| 15574 | if (BaseType->isArrayType()) { |
| 15575 | // Because __builtin_object_size treats arrays as objects, we can ignore |
| 15576 | // the index iff this is the last array in the Designator. |
| 15577 | if (I + 1 == E) |
| 15578 | return true; |
| 15579 | const auto *CAT = cast<ConstantArrayType>(Val: Ctx.getAsArrayType(T: BaseType)); |
| 15580 | uint64_t Index = Entry.getAsArrayIndex(); |
| 15581 | if (Index + 1 != CAT->getZExtSize()) |
| 15582 | return false; |
| 15583 | BaseType = CAT->getElementType(); |
| 15584 | } else if (BaseType->isAnyComplexType()) { |
| 15585 | const auto *CT = BaseType->castAs<ComplexType>(); |
| 15586 | uint64_t Index = Entry.getAsArrayIndex(); |
| 15587 | if (Index != 1) |
| 15588 | return false; |
| 15589 | BaseType = CT->getElementType(); |
| 15590 | } else if (auto *FD = getAsField(E: Entry)) { |
| 15591 | if (!IsLastOrInvalidFieldDecl(FD)) |
| 15592 | return false; |
| 15593 | BaseType = FD->getType(); |
| 15594 | } else { |
      assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
| 15596 | return false; |
| 15597 | } |
| 15598 | } |
| 15599 | return true; |
| 15600 | } |
| 15601 | |
| 15602 | /// Tests to see if the LValue has a user-specified designator (that isn't |
| 15603 | /// necessarily valid). Note that this always returns 'true' if the LValue has |
| 15604 | /// an unsized array as its first designator entry, because there's currently no |
| 15605 | /// way to tell if the user typed *foo or foo[0]. |
| 15606 | static bool refersToCompleteObject(const LValue &LVal) { |
| 15607 | if (LVal.Designator.Invalid) |
| 15608 | return false; |
| 15609 | |
| 15610 | if (!LVal.Designator.Entries.empty()) |
| 15611 | return LVal.Designator.isMostDerivedAnUnsizedArray(); |
| 15612 | |
| 15613 | if (!LVal.InvalidBase) |
| 15614 | return true; |
| 15615 | |
| 15616 | // If `E` is a MemberExpr, then the first part of the designator is hiding in |
| 15617 | // the LValueBase. |
| 15618 | const auto *E = LVal.Base.dyn_cast<const Expr *>(); |
| 15619 | return !E || !isa<MemberExpr>(Val: E); |
| 15620 | } |
| 15621 | |
/// Attempts to detect a user writing into a piece of memory whose size cannot
/// be determined from type information alone.
| 15624 | static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) { |
| 15625 | const SubobjectDesignator &Designator = LVal.Designator; |
| 15626 | // Notes: |
| 15627 | // - Users can only write off of the end when we have an invalid base. Invalid |
| 15628 | // bases imply we don't know where the memory came from. |
| 15629 | // - We used to be a bit more aggressive here; we'd only be conservative if |
| 15630 | // the array at the end was flexible, or if it had 0 or 1 elements. This |
| 15631 | // broke some common standard library extensions (PR30346), but was |
| 15632 | // otherwise seemingly fine. It may be useful to reintroduce this behavior |
| 15633 | // with some sort of list. OTOH, it seems that GCC is always |
| 15634 | // conservative with the last element in structs (if it's an array), so our |
| 15635 | // current behavior is more compatible than an explicit list approach would |
| 15636 | // be. |
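  // Summarizing the lambda below (judging purely from the checks it performs):
  // a trailing incomplete array always counts as a flexible array member; with
  // -fstrict-flex-arrays=0 (Default) any trailing array counts; a zero-length
  // trailing array counts unless the level is IncompleteOnly; and a
  // one-element trailing array counts only under the OneZeroOrIncomplete
  // level.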
| 15637 | auto isFlexibleArrayMember = [&] { |
| 15638 | using FAMKind = LangOptions::StrictFlexArraysLevelKind; |
| 15639 | FAMKind StrictFlexArraysLevel = |
| 15640 | Ctx.getLangOpts().getStrictFlexArraysLevel(); |
| 15641 | |
| 15642 | if (Designator.isMostDerivedAnUnsizedArray()) |
| 15643 | return true; |
| 15644 | |
| 15645 | if (StrictFlexArraysLevel == FAMKind::Default) |
| 15646 | return true; |
| 15647 | |
| 15648 | if (Designator.getMostDerivedArraySize() == 0 && |
| 15649 | StrictFlexArraysLevel != FAMKind::IncompleteOnly) |
| 15650 | return true; |
| 15651 | |
| 15652 | if (Designator.getMostDerivedArraySize() == 1 && |
| 15653 | StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete) |
| 15654 | return true; |
| 15655 | |
| 15656 | return false; |
| 15657 | }; |
| 15658 | |
| 15659 | return LVal.InvalidBase && |
| 15660 | Designator.Entries.size() == Designator.MostDerivedPathLength && |
| 15661 | Designator.MostDerivedIsArrayElement && isFlexibleArrayMember() && |
| 15662 | isDesignatorAtObjectEnd(Ctx, LVal); |
| 15663 | } |
| 15664 | |
| 15665 | /// Converts the given APInt to CharUnits, assuming the APInt is unsigned. |
| 15666 | /// Fails if the conversion would cause loss of precision. |
| 15667 | static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int, |
| 15668 | CharUnits &Result) { |
| 15669 | auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max(); |
| 15670 | if (Int.ugt(RHS: CharUnitsMax)) |
| 15671 | return false; |
| 15672 | Result = CharUnits::fromQuantity(Quantity: Int.getZExtValue()); |
| 15673 | return true; |
| 15674 | } |
| 15675 | |
| 15676 | /// If we're evaluating the object size of an instance of a struct that |
| 15677 | /// contains a flexible array member, add the size of the initializer. |
| 15678 | static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T, |
| 15679 | const LValue &LV, CharUnits &Size) { |
| 15680 | if (!T.isNull() && T->isStructureType() && |
| 15681 | T->castAsRecordDecl()->hasFlexibleArrayMember()) |
| 15682 | if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>()) |
| 15683 | if (const auto *VD = dyn_cast<VarDecl>(Val: V)) |
| 15684 | if (VD->hasInit()) |
| 15685 | Size += VD->getFlexibleArrayInitChars(Ctx: Info.Ctx); |
| 15686 | } |
| 15687 | |
| 15688 | /// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will |
| 15689 | /// determine how many bytes exist from the beginning of the object to either |
| 15690 | /// the end of the current subobject, or the end of the object itself, depending |
| 15691 | /// on what the LValue looks like + the value of Type. |
| 15692 | /// |
| 15693 | /// If this returns false, the value of Result is undefined. |
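///
/// As a reminder, the __builtin_object_size 'Type' encoding (per the GCC
/// documentation) is: bit 0 set asks about the closest surrounding subobject
/// rather than the whole object, and bit 1 set asks for a minimum estimate
/// (0 when unknown) rather than a maximum (-1 when unknown).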
| 15694 | static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc, |
| 15695 | unsigned Type, const LValue &LVal, |
| 15696 | CharUnits &EndOffset) { |
| 15697 | bool DetermineForCompleteObject = refersToCompleteObject(LVal); |
| 15698 | |
| 15699 | auto CheckedHandleSizeof = [&](QualType Ty, CharUnits &Result) { |
| 15700 | if (Ty.isNull()) |
| 15701 | return false; |
| 15702 | |
| 15703 | Ty = Ty.getNonReferenceType(); |
| 15704 | |
| 15705 | if (Ty->isIncompleteType() || Ty->isFunctionType()) |
| 15706 | return false; |
| 15707 | |
| 15708 | return HandleSizeof(Info, Loc: ExprLoc, Type: Ty, Size&: Result); |
| 15709 | }; |
| 15710 | |
| 15711 | // We want to evaluate the size of the entire object. This is a valid fallback |
| 15712 | // for when Type=1 and the designator is invalid, because we're asked for an |
| 15713 | // upper-bound. |
| 15714 | if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) { |
| 15715 | // Type=3 wants a lower bound, so we can't fall back to this. |
| 15716 | if (Type == 3 && !DetermineForCompleteObject) |
| 15717 | return false; |
| 15718 | |
| 15719 | llvm::APInt APEndOffset; |
| 15720 | if (isBaseAnAllocSizeCall(Base: LVal.getLValueBase()) && |
| 15721 | getBytesReturnedByAllocSizeCall(Ctx: Info.Ctx, LVal, Result&: APEndOffset)) |
| 15722 | return convertUnsignedAPIntToCharUnits(Int: APEndOffset, Result&: EndOffset); |
| 15723 | |
| 15724 | if (LVal.InvalidBase) |
| 15725 | return false; |
| 15726 | |
| 15727 | QualType BaseTy = getObjectType(B: LVal.getLValueBase()); |
| 15728 | const bool Ret = CheckedHandleSizeof(BaseTy, EndOffset); |
| 15729 | addFlexibleArrayMemberInitSize(Info, T: BaseTy, LV: LVal, Size&: EndOffset); |
| 15730 | return Ret; |
| 15731 | } |
| 15732 | |
| 15733 | // We want to evaluate the size of a subobject. |
| 15734 | const SubobjectDesignator &Designator = LVal.Designator; |
| 15735 | |
| 15736 | // The following is a moderately common idiom in C: |
| 15737 | // |
| 15738 | // struct Foo { int a; char c[1]; }; |
| 15739 | // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar)); |
| 15740 | // strcpy(&F->c[0], Bar); |
| 15741 | // |
| 15742 | // In order to not break too much legacy code, we need to support it. |
| 15743 | if (isUserWritingOffTheEnd(Ctx: Info.Ctx, LVal)) { |
| 15744 | // If we can resolve this to an alloc_size call, we can hand that back, |
| 15745 | // because we know for certain how many bytes there are to write to. |
| 15746 | llvm::APInt APEndOffset; |
| 15747 | if (isBaseAnAllocSizeCall(Base: LVal.getLValueBase()) && |
| 15748 | getBytesReturnedByAllocSizeCall(Ctx: Info.Ctx, LVal, Result&: APEndOffset)) |
| 15749 | return convertUnsignedAPIntToCharUnits(Int: APEndOffset, Result&: EndOffset); |
| 15750 | |
| 15751 | // If we cannot determine the size of the initial allocation, then we can't |
    // give an accurate upper-bound. However, we are still able to give
| 15753 | // conservative lower-bounds for Type=3. |
| 15754 | if (Type == 1) |
| 15755 | return false; |
| 15756 | } |
| 15757 | |
| 15758 | CharUnits BytesPerElem; |
| 15759 | if (!CheckedHandleSizeof(Designator.MostDerivedType, BytesPerElem)) |
| 15760 | return false; |
| 15761 | |
| 15762 | // According to the GCC documentation, we want the size of the subobject |
| 15763 | // denoted by the pointer. But that's not quite right -- what we actually |
| 15764 | // want is the size of the immediately-enclosing array, if there is one. |
| 15765 | int64_t ElemsRemaining; |
| 15766 | if (Designator.MostDerivedIsArrayElement && |
| 15767 | Designator.Entries.size() == Designator.MostDerivedPathLength) { |
| 15768 | uint64_t ArraySize = Designator.getMostDerivedArraySize(); |
| 15769 | uint64_t ArrayIndex = Designator.Entries.back().getAsArrayIndex(); |
| 15770 | ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex; |
| 15771 | } else { |
| 15772 | ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1; |
| 15773 | } |
| 15774 | |
| 15775 | EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining; |
| 15776 | return true; |
| 15777 | } |
| 15778 | |
| 15779 | /// Tries to evaluate the __builtin_object_size for @p E. If successful, |
| 15780 | /// returns true and stores the result in @p Size. |
| 15781 | /// |
| 15782 | /// If @p WasError is non-null, this will report whether the failure to evaluate |
| 15783 | /// is to be treated as an Error in IntExprEvaluator. |
| 15784 | static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type, |
| 15785 | EvalInfo &Info, uint64_t &Size) { |
| 15786 | // Determine the denoted object. |
| 15787 | LValue LVal; |
| 15788 | { |
| 15789 | // The operand of __builtin_object_size is never evaluated for side-effects. |
| 15790 | // If there are any, but we can determine the pointed-to object anyway, then |
| 15791 | // ignore the side-effects. |
| 15792 | SpeculativeEvaluationRAII SpeculativeEval(Info); |
| 15793 | IgnoreSideEffectsRAII Fold(Info); |
| 15794 | |
| 15795 | if (E->isGLValue()) { |
| 15796 | // It's possible for us to be given GLValues if we're called via |
| 15797 | // Expr::tryEvaluateObjectSize. |
| 15798 | APValue RVal; |
| 15799 | if (!EvaluateAsRValue(Info, E, Result&: RVal)) |
| 15800 | return false; |
| 15801 | LVal.setFrom(Ctx: Info.Ctx, V: RVal); |
| 15802 | } else if (!EvaluatePointer(E: ignorePointerCastsAndParens(E), Result&: LVal, Info, |
| 15803 | /*InvalidBaseOK=*/true)) |
| 15804 | return false; |
| 15805 | } |
| 15806 | |
| 15807 | // If we point to before the start of the object, there are no accessible |
| 15808 | // bytes. |
| 15809 | if (LVal.getLValueOffset().isNegative()) { |
| 15810 | Size = 0; |
| 15811 | return true; |
| 15812 | } |
| 15813 | |
| 15814 | CharUnits EndOffset; |
| 15815 | if (!determineEndOffset(Info, ExprLoc: E->getExprLoc(), Type, LVal, EndOffset)) |
| 15816 | return false; |
| 15817 | |
| 15818 | // If we've fallen outside of the end offset, just pretend there's nothing to |
| 15819 | // write to/read from. |
| 15820 | if (EndOffset <= LVal.getLValueOffset()) |
| 15821 | Size = 0; |
| 15822 | else |
| 15823 | Size = (EndOffset - LVal.getLValueOffset()).getQuantity(); |
| 15824 | return true; |
| 15825 | } |
| 15826 | |
| 15827 | bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) { |
| 15828 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 15829 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 15830 | return VisitBuiltinCallExpr(E, BuiltinOp: E->getBuiltinCallee()); |
| 15831 | } |
| 15832 | |
| 15833 | static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info, |
| 15834 | APValue &Val, APSInt &Alignment) { |
| 15835 | QualType SrcTy = E->getArg(Arg: 0)->getType(); |
| 15836 | if (!getAlignmentArgument(E: E->getArg(Arg: 1), ForType: SrcTy, Info, Alignment)) |
| 15837 | return false; |
  // Even though we are evaluating integer expressions, we could get a pointer
  // argument for the __builtin_is_aligned() case.
| 15840 | if (SrcTy->isPointerType()) { |
| 15841 | LValue Ptr; |
| 15842 | if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: Ptr, Info)) |
| 15843 | return false; |
| 15844 | Ptr.moveInto(V&: Val); |
| 15845 | } else if (!SrcTy->isIntegralOrEnumerationType()) { |
| 15846 | Info.FFDiag(E: E->getArg(Arg: 0)); |
| 15847 | return false; |
| 15848 | } else { |
| 15849 | APSInt SrcInt; |
| 15850 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: SrcInt, Info)) |
| 15851 | return false; |
| 15852 | assert(SrcInt.getBitWidth() >= Alignment.getBitWidth() && |
| 15853 | "Bit widths must be the same" ); |
| 15854 | Val = APValue(SrcInt); |
| 15855 | } |
| 15856 | assert(Val.hasValue()); |
| 15857 | return true; |
| 15858 | } |
| 15859 | |
| 15860 | bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, |
| 15861 | unsigned BuiltinOp) { |
| 15862 | auto EvalTestOp = [&](llvm::function_ref<bool(const APInt &, const APInt &)> |
| 15863 | Fn) { |
| 15864 | APValue SourceLHS, SourceRHS; |
| 15865 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: SourceLHS) || |
| 15866 | !EvaluateAsRValue(Info, E: E->getArg(Arg: 1), Result&: SourceRHS)) |
| 15867 | return false; |
| 15868 | |
| 15869 | unsigned SourceLen = SourceLHS.getVectorLength(); |
| 15870 | const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>(); |
| 15871 | QualType ElemQT = VT->getElementType(); |
| 15872 | unsigned LaneWidth = Info.Ctx.getTypeSize(T: ElemQT); |
| 15873 | |
| 15874 | APInt AWide(LaneWidth * SourceLen, 0); |
| 15875 | APInt BWide(LaneWidth * SourceLen, 0); |
| 15876 | |
| 15877 | for (unsigned I = 0; I != SourceLen; ++I) { |
| 15878 | APInt ALane; |
| 15879 | APInt BLane; |
| 15880 | if (ElemQT->isIntegerType()) { // Get value. |
| 15881 | ALane = SourceLHS.getVectorElt(I).getInt(); |
| 15882 | BLane = SourceRHS.getVectorElt(I).getInt(); |
| 15883 | } else if (ElemQT->isFloatingType()) { // Get only sign bit. |
| 15884 | ALane = |
| 15885 | SourceLHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative(); |
| 15886 | BLane = |
| 15887 | SourceRHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative(); |
| 15888 | } else { // Must be integer or floating type. |
| 15889 | return false; |
| 15890 | } |
| 15891 | AWide.insertBits(SubBits: ALane, bitPosition: I * LaneWidth); |
| 15892 | BWide.insertBits(SubBits: BLane, bitPosition: I * LaneWidth); |
| 15893 | } |
| 15894 | return Success(Value: Fn(AWide, BWide), E); |
| 15895 | }; |
| 15896 | |
| 15897 | auto HandleMaskBinOp = |
| 15898 | [&](llvm::function_ref<APSInt(const APSInt &, const APSInt &)> Fn) |
| 15899 | -> bool { |
| 15900 | APValue LHS, RHS; |
| 15901 | if (!Evaluate(Result&: LHS, Info, E: E->getArg(Arg: 0)) || |
| 15902 | !Evaluate(Result&: RHS, Info, E: E->getArg(Arg: 1))) |
| 15903 | return false; |
| 15904 | |
| 15905 | APSInt ResultInt = Fn(LHS.getInt(), RHS.getInt()); |
| 15906 | |
| 15907 | return Success(V: APValue(ResultInt), E); |
| 15908 | }; |
| 15909 | |
| 15910 | auto HandleCRC32 = [&](unsigned DataBytes) -> bool { |
| 15911 | APSInt CRC, Data; |
| 15912 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: CRC, Info) || |
| 15913 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Data, Info)) |
| 15914 | return false; |
| 15915 | |
| 15916 | uint64_t CRCVal = CRC.getZExtValue(); |
| 15917 | uint64_t DataVal = Data.getZExtValue(); |
| 15918 | |
| 15919 | // CRC32C polynomial (iSCSI polynomial, bit-reversed) |
| 15920 | static const uint32_t CRC32C_POLY = 0x82F63B78; |
| 15921 | |
| 15922 | // Process each byte |
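    // using the classic LSB-first, bit-at-a-time update: XOR the byte into
    // the low bits of the running CRC, then for each of its 8 bits shift
    // right and XOR in the reflected polynomial whenever the bit shifted out
    // was set. Note that no initial or final inversion is applied here.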
| 15923 | uint32_t Result = static_cast<uint32_t>(CRCVal); |
| 15924 | for (unsigned I = 0; I != DataBytes; ++I) { |
| 15925 | uint8_t Byte = static_cast<uint8_t>((DataVal >> (I * 8)) & 0xFF); |
| 15926 | Result ^= Byte; |
| 15927 | for (int J = 0; J != 8; ++J) { |
| 15928 | Result = (Result >> 1) ^ ((Result & 1) ? CRC32C_POLY : 0); |
| 15929 | } |
| 15930 | } |
| 15931 | |
| 15932 | return Success(Value: Result, E); |
| 15933 | }; |
| 15934 | |
| 15935 | switch (BuiltinOp) { |
| 15936 | default: |
| 15937 | return false; |
| 15938 | |
| 15939 | case X86::BI__builtin_ia32_crc32qi: |
| 15940 | return HandleCRC32(1); |
| 15941 | case X86::BI__builtin_ia32_crc32hi: |
| 15942 | return HandleCRC32(2); |
| 15943 | case X86::BI__builtin_ia32_crc32si: |
| 15944 | return HandleCRC32(4); |
| 15945 | case X86::BI__builtin_ia32_crc32di: |
| 15946 | return HandleCRC32(8); |
| 15947 | |
| 15948 | case Builtin::BI__builtin_dynamic_object_size: |
| 15949 | case Builtin::BI__builtin_object_size: { |
| 15950 | // The type was checked when we built the expression. |
| 15951 | unsigned Type = |
| 15952 | E->getArg(Arg: 1)->EvaluateKnownConstInt(Ctx: Info.Ctx).getZExtValue(); |
| 15953 | assert(Type <= 3 && "unexpected type" ); |
| 15954 | |
| 15955 | uint64_t Size; |
| 15956 | if (tryEvaluateBuiltinObjectSize(E: E->getArg(Arg: 0), Type, Info, Size)) |
| 15957 | return Success(Value: Size, E); |
| 15958 | |
| 15959 | if (E->getArg(Arg: 0)->HasSideEffects(Ctx: Info.Ctx)) |
| 15960 | return Success(Value: (Type & 2) ? 0 : -1, E); |
| 15961 | |
| 15962 | // Expression had no side effects, but we couldn't statically determine the |
| 15963 | // size of the referenced object. |
| 15964 | switch (Info.EvalMode) { |
| 15965 | case EvaluationMode::ConstantExpression: |
| 15966 | case EvaluationMode::ConstantFold: |
| 15967 | case EvaluationMode::IgnoreSideEffects: |
| 15968 | // Leave it to IR generation. |
| 15969 | return Error(E); |
| 15970 | case EvaluationMode::ConstantExpressionUnevaluated: |
| 15971 | // Reduce it to a constant now. |
| 15972 | return Success(Value: (Type & 2) ? 0 : -1, E); |
| 15973 | } |
| 15974 | |
| 15975 | llvm_unreachable("unexpected EvalMode" ); |
| 15976 | } |
| 15977 | |
| 15978 | case Builtin::BI__builtin_os_log_format_buffer_size: { |
| 15979 | analyze_os_log::OSLogBufferLayout Layout; |
| 15980 | analyze_os_log::computeOSLogBufferLayout(Ctx&: Info.Ctx, E, layout&: Layout); |
| 15981 | return Success(Value: Layout.size().getQuantity(), E); |
| 15982 | } |
| 15983 | |
| 15984 | case Builtin::BI__builtin_is_aligned: { |
| 15985 | APValue Src; |
| 15986 | APSInt Alignment; |
| 15987 | if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment)) |
| 15988 | return false; |
| 15989 | if (Src.isLValue()) { |
| 15990 | // If we evaluated a pointer, check the minimum known alignment. |
| 15991 | LValue Ptr; |
| 15992 | Ptr.setFrom(Ctx: Info.Ctx, V: Src); |
| 15993 | CharUnits BaseAlignment = getBaseAlignment(Info, Value: Ptr); |
| 15994 | CharUnits PtrAlign = BaseAlignment.alignmentAtOffset(offset: Ptr.Offset); |
| 15995 | // We can return true if the known alignment at the computed offset is |
| 15996 | // greater than the requested alignment. |
| 15997 | assert(PtrAlign.isPowerOfTwo()); |
| 15998 | assert(Alignment.isPowerOf2()); |
| 15999 | if (PtrAlign.getQuantity() >= Alignment) |
| 16000 | return Success(Value: 1, E); |
      // If the alignment is not known to be sufficient, some cases could still
      // be aligned at run time. However, if the requested alignment is less
      // than or equal to the base alignment and the offset is not aligned, we
      // know that the run-time value can never be aligned.
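      // Worked example (illustrative): with a base alignment of 16 bytes and
      // an offset of 4, PtrAlign is 4; a request for 8-byte alignment then
      // folds to 0 below, while a request for 32 bytes (more than the base
      // alignment) cannot be decided at compile time.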
| 16005 | if (BaseAlignment.getQuantity() >= Alignment && |
| 16006 | PtrAlign.getQuantity() < Alignment) |
| 16007 | return Success(Value: 0, E); |
| 16008 | // Otherwise we can't infer whether the value is sufficiently aligned. |
      // TODO: __builtin_is_aligned(__builtin_align_{down,up}(expr, N), N)
| 16010 | // in cases where we can't fully evaluate the pointer. |
| 16011 | Info.FFDiag(E: E->getArg(Arg: 0), DiagId: diag::note_constexpr_alignment_compute) |
| 16012 | << Alignment; |
| 16013 | return false; |
| 16014 | } |
| 16015 | assert(Src.isInt()); |
| 16016 | return Success(Value: (Src.getInt() & (Alignment - 1)) == 0 ? 1 : 0, E); |
| 16017 | } |
| 16018 | case Builtin::BI__builtin_align_up: { |
| 16019 | APValue Src; |
| 16020 | APSInt Alignment; |
| 16021 | if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment)) |
| 16022 | return false; |
| 16023 | if (!Src.isInt()) |
| 16024 | return Error(E); |
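    // Round up to the next multiple of the (power-of-two) alignment with the
    // usual (x + (align - 1)) & ~(align - 1) trick; e.g. aligning 13 up to 8
    // yields 16, while 16 stays 16.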
| 16025 | APSInt AlignedVal = |
| 16026 | APSInt((Src.getInt() + (Alignment - 1)) & ~(Alignment - 1), |
| 16027 | Src.getInt().isUnsigned()); |
| 16028 | assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth()); |
| 16029 | return Success(SI: AlignedVal, E); |
| 16030 | } |
| 16031 | case Builtin::BI__builtin_align_down: { |
| 16032 | APValue Src; |
| 16033 | APSInt Alignment; |
| 16034 | if (!getBuiltinAlignArguments(E, Info, Val&: Src, Alignment)) |
| 16035 | return false; |
| 16036 | if (!Src.isInt()) |
| 16037 | return Error(E); |
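    // Round down by clearing the low bits, x & ~(align - 1); e.g. aligning 13
    // down to 8 yields 8.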
| 16038 | APSInt AlignedVal = |
| 16039 | APSInt(Src.getInt() & ~(Alignment - 1), Src.getInt().isUnsigned()); |
| 16040 | assert(AlignedVal.getBitWidth() == Src.getInt().getBitWidth()); |
| 16041 | return Success(SI: AlignedVal, E); |
| 16042 | } |
| 16043 | |
| 16044 | case Builtin::BI__builtin_bitreverse8: |
| 16045 | case Builtin::BI__builtin_bitreverse16: |
| 16046 | case Builtin::BI__builtin_bitreverse32: |
| 16047 | case Builtin::BI__builtin_bitreverse64: |
| 16048 | case Builtin::BI__builtin_elementwise_bitreverse: { |
| 16049 | APSInt Val; |
| 16050 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16051 | return false; |
| 16052 | |
| 16053 | return Success(I: Val.reverseBits(), E); |
| 16054 | } |
| 16055 | case Builtin::BI__builtin_bswapg: |
| 16056 | case Builtin::BI__builtin_bswap16: |
| 16057 | case Builtin::BI__builtin_bswap32: |
| 16058 | case Builtin::BI__builtin_bswap64: { |
| 16059 | APSInt Val; |
| 16060 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16061 | return false; |
| 16062 | if (Val.getBitWidth() == 8) |
| 16063 | return Success(SI: Val, E); |
| 16064 | |
| 16065 | return Success(I: Val.byteSwap(), E); |
| 16066 | } |
| 16067 | |
| 16068 | case Builtin::BI__builtin_classify_type: |
| 16069 | return Success(Value: (int)EvaluateBuiltinClassifyType(E, LangOpts: Info.getLangOpts()), E); |
| 16070 | |
| 16071 | case Builtin::BI__builtin_clrsb: |
| 16072 | case Builtin::BI__builtin_clrsbl: |
| 16073 | case Builtin::BI__builtin_clrsbll: { |
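    // clrsb counts the redundant leading sign bits; e.g. for a 32-bit int,
    // __builtin_clrsb(0) == 31, __builtin_clrsb(-1) == 31, and
    // __builtin_clrsb(1) == 30, which is what the bit-width minus
    // significant-bits computation below produces.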
| 16074 | APSInt Val; |
| 16075 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16076 | return false; |
| 16077 | |
| 16078 | return Success(Value: Val.getBitWidth() - Val.getSignificantBits(), E); |
| 16079 | } |
| 16080 | |
| 16081 | case Builtin::BI__builtin_clz: |
| 16082 | case Builtin::BI__builtin_clzl: |
| 16083 | case Builtin::BI__builtin_clzll: |
| 16084 | case Builtin::BI__builtin_clzs: |
| 16085 | case Builtin::BI__builtin_clzg: |
| 16086 | case Builtin::BI__builtin_elementwise_clzg: |
| 16087 | case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes |
| 16088 | case Builtin::BI__lzcnt: |
| 16089 | case Builtin::BI__lzcnt64: { |
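    // For illustration: __builtin_clz(1u) folds to 31 for a 32-bit unsigned
    // int, and the type-generic __builtin_clzg(0u, 32) folds to its fallback
    // argument 32 when the value is zero.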
| 16090 | APSInt Val; |
| 16091 | if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) { |
| 16092 | APValue Vec; |
| 16093 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info)) |
| 16094 | return false; |
| 16095 | Val = ConvertBoolVectorToInt(Val: Vec); |
| 16096 | } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) { |
| 16097 | return false; |
| 16098 | } |
| 16099 | |
| 16100 | std::optional<APSInt> Fallback; |
| 16101 | if ((BuiltinOp == Builtin::BI__builtin_clzg || |
| 16102 | BuiltinOp == Builtin::BI__builtin_elementwise_clzg) && |
| 16103 | E->getNumArgs() > 1) { |
| 16104 | APSInt FallbackTemp; |
| 16105 | if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: FallbackTemp, Info)) |
| 16106 | return false; |
| 16107 | Fallback = FallbackTemp; |
| 16108 | } |
| 16109 | |
| 16110 | if (!Val) { |
| 16111 | if (Fallback) |
| 16112 | return Success(SI: *Fallback, E); |
| 16113 | |
| 16114 | // When the argument is 0, the result of GCC builtins is undefined, |
| 16115 | // whereas for Microsoft intrinsics, the result is the bit-width of the |
| 16116 | // argument. |
| 16117 | bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 && |
| 16118 | BuiltinOp != Builtin::BI__lzcnt && |
| 16119 | BuiltinOp != Builtin::BI__lzcnt64; |
| 16120 | |
| 16121 | if (BuiltinOp == Builtin::BI__builtin_elementwise_clzg) { |
| 16122 | Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero) |
| 16123 | << /*IsTrailing=*/false; |
| 16124 | } |
| 16125 | |
| 16126 | if (ZeroIsUndefined) |
| 16127 | return Error(E); |
| 16128 | } |
| 16129 | |
| 16130 | return Success(Value: Val.countl_zero(), E); |
| 16131 | } |
| 16132 | |
| 16133 | case Builtin::BI__builtin_constant_p: { |
| 16134 | const Expr *Arg = E->getArg(Arg: 0); |
| 16135 | if (EvaluateBuiltinConstantP(Info, Arg)) |
| 16136 | return Success(Value: true, E); |
| 16137 | if (Info.InConstantContext || Arg->HasSideEffects(Ctx: Info.Ctx)) { |
| 16138 | // Outside a constant context, eagerly evaluate to false in the presence |
| 16139 | // of side-effects in order to avoid -Wunsequenced false-positives in |
| 16140 | // a branch on __builtin_constant_p(expr). |
| 16141 | return Success(Value: false, E); |
| 16142 | } |
| 16143 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 16144 | return false; |
| 16145 | } |
| 16146 | |
| 16147 | case Builtin::BI__noop: |
| 16148 | // __noop always evaluates successfully and returns 0. |
| 16149 | return Success(Value: 0, E); |
| 16150 | |
| 16151 | case Builtin::BI__builtin_is_constant_evaluated: { |
| 16152 | const auto *Callee = Info.CurrentCall->getCallee(); |
| 16153 | if (Info.InConstantContext && !Info.CheckingPotentialConstantExpression && |
| 16154 | (Info.CallStackDepth == 1 || |
| 16155 | (Info.CallStackDepth == 2 && Callee->isInStdNamespace() && |
| 16156 | Callee->getIdentifier() && |
| 16157 | Callee->getIdentifier()->isStr(Str: "is_constant_evaluated" )))) { |
| 16158 | // FIXME: Find a better way to avoid duplicated diagnostics. |
| 16159 | if (Info.EvalStatus.Diag) |
| 16160 | Info.report(Loc: (Info.CallStackDepth == 1) |
| 16161 | ? E->getExprLoc() |
| 16162 | : Info.CurrentCall->getCallRange().getBegin(), |
| 16163 | DiagId: diag::warn_is_constant_evaluated_always_true_constexpr) |
| 16164 | << (Info.CallStackDepth == 1 ? "__builtin_is_constant_evaluated" |
| 16165 | : "std::is_constant_evaluated" ); |
| 16166 | } |
| 16167 | |
| 16168 | return Success(Value: Info.InConstantContext, E); |
| 16169 | } |
| 16170 | |
| 16171 | case Builtin::BI__builtin_is_within_lifetime: |
| 16172 | if (auto result = EvaluateBuiltinIsWithinLifetime(*this, E)) |
| 16173 | return Success(Value: *result, E); |
| 16174 | return false; |
| 16175 | |
| 16176 | case Builtin::BI__builtin_ctz: |
| 16177 | case Builtin::BI__builtin_ctzl: |
| 16178 | case Builtin::BI__builtin_ctzll: |
| 16179 | case Builtin::BI__builtin_ctzs: |
| 16180 | case Builtin::BI__builtin_ctzg: |
| 16181 | case Builtin::BI__builtin_elementwise_ctzg: { |
| 16182 | APSInt Val; |
| 16183 | if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) { |
| 16184 | APValue Vec; |
| 16185 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info)) |
| 16186 | return false; |
| 16187 | Val = ConvertBoolVectorToInt(Val: Vec); |
| 16188 | } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) { |
| 16189 | return false; |
| 16190 | } |
| 16191 | |
| 16192 | std::optional<APSInt> Fallback; |
| 16193 | if ((BuiltinOp == Builtin::BI__builtin_ctzg || |
| 16194 | BuiltinOp == Builtin::BI__builtin_elementwise_ctzg) && |
| 16195 | E->getNumArgs() > 1) { |
| 16196 | APSInt FallbackTemp; |
| 16197 | if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: FallbackTemp, Info)) |
| 16198 | return false; |
| 16199 | Fallback = FallbackTemp; |
| 16200 | } |
| 16201 | |
| 16202 | if (!Val) { |
| 16203 | if (Fallback) |
| 16204 | return Success(SI: *Fallback, E); |
| 16205 | |
| 16206 | if (BuiltinOp == Builtin::BI__builtin_elementwise_ctzg) { |
| 16207 | Info.FFDiag(E, DiagId: diag::note_constexpr_countzeroes_zero) |
| 16208 | << /*IsTrailing=*/true; |
| 16209 | } |
| 16210 | return Error(E); |
| 16211 | } |
| 16212 | |
| 16213 | return Success(Value: Val.countr_zero(), E); |
| 16214 | } |
| 16215 | |
| 16216 | case Builtin::BI__builtin_eh_return_data_regno: { |
| 16217 | int Operand = E->getArg(Arg: 0)->EvaluateKnownConstInt(Ctx: Info.Ctx).getZExtValue(); |
| 16218 | Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(RegNo: Operand); |
| 16219 | return Success(Value: Operand, E); |
| 16220 | } |
| 16221 | |
| 16222 | case Builtin::BI__builtin_elementwise_abs: { |
| 16223 | APSInt Val; |
| 16224 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16225 | return false; |
| 16226 | |
| 16227 | return Success(I: Val.abs(), E); |
| 16228 | } |
| 16229 | |
| 16230 | case Builtin::BI__builtin_expect: |
| 16231 | case Builtin::BI__builtin_expect_with_probability: |
| 16232 | return Visit(S: E->getArg(Arg: 0)); |
| 16233 | |
| 16234 | case Builtin::BI__builtin_ptrauth_string_discriminator: { |
| 16235 | const auto *Literal = |
| 16236 | cast<StringLiteral>(Val: E->getArg(Arg: 0)->IgnoreParenImpCasts()); |
| 16237 | uint64_t Result = getPointerAuthStableSipHash(S: Literal->getString()); |
| 16238 | return Success(Value: Result, E); |
| 16239 | } |
| 16240 | |
| 16241 | case Builtin::BI__builtin_infer_alloc_token: { |
| 16242 | // If we fail to infer a type, this fails to be a constant expression; this |
| 16243 | // can be checked with __builtin_constant_p(...). |
| 16244 | QualType AllocType = infer_alloc::inferPossibleType(E, Ctx: Info.Ctx, CastE: nullptr); |
| 16245 | if (AllocType.isNull()) |
| 16246 | return Error( |
| 16247 | E, D: diag::note_constexpr_infer_alloc_token_type_inference_failed); |
| 16248 | auto ATMD = infer_alloc::getAllocTokenMetadata(T: AllocType, Ctx: Info.Ctx); |
| 16249 | if (!ATMD) |
| 16250 | return Error(E, D: diag::note_constexpr_infer_alloc_token_no_metadata); |
| 16251 | auto Mode = |
| 16252 | Info.getLangOpts().AllocTokenMode.value_or(u: llvm::DefaultAllocTokenMode); |
| 16253 | uint64_t BitWidth = Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType()); |
| 16254 | auto MaxTokensOpt = Info.getLangOpts().AllocTokenMax; |
| 16255 | uint64_t MaxTokens = |
| 16256 | MaxTokensOpt.value_or(u: 0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth)); |
| 16257 | auto MaybeToken = llvm::getAllocToken(Mode, Metadata: *ATMD, MaxTokens); |
| 16258 | if (!MaybeToken) |
| 16259 | return Error(E, D: diag::note_constexpr_infer_alloc_token_stateful_mode); |
| 16260 | return Success(I: llvm::APInt(BitWidth, *MaybeToken), E); |
| 16261 | } |
| 16262 | |
| 16263 | case Builtin::BI__builtin_ffs: |
| 16264 | case Builtin::BI__builtin_ffsl: |
| 16265 | case Builtin::BI__builtin_ffsll: { |
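    // ffs returns one plus the index of the least significant set bit, or 0
    // for a zero argument; e.g. __builtin_ffs(0) == 0, __builtin_ffs(1) == 1,
    // and __builtin_ffs(8) == 4.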
| 16266 | APSInt Val; |
| 16267 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16268 | return false; |
| 16269 | |
| 16270 | unsigned N = Val.countr_zero(); |
| 16271 | return Success(Value: N == Val.getBitWidth() ? 0 : N + 1, E); |
| 16272 | } |
| 16273 | |
| 16274 | case Builtin::BI__builtin_fpclassify: { |
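    // __builtin_fpclassify(nan_expr, inf_expr, normal_expr, subnormal_expr,
    // zero_expr, x) classifies its sixth argument and evaluates whichever of
    // the first five arguments corresponds to that class; the switch below
    // just selects the argument index.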
| 16275 | APFloat Val(0.0); |
| 16276 | if (!EvaluateFloat(E: E->getArg(Arg: 5), Result&: Val, Info)) |
| 16277 | return false; |
| 16278 | unsigned Arg; |
| 16279 | switch (Val.getCategory()) { |
| 16280 | case APFloat::fcNaN: Arg = 0; break; |
| 16281 | case APFloat::fcInfinity: Arg = 1; break; |
| 16282 | case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break; |
| 16283 | case APFloat::fcZero: Arg = 4; break; |
| 16284 | } |
| 16285 | return Visit(S: E->getArg(Arg)); |
| 16286 | } |
| 16287 | |
| 16288 | case Builtin::BI__builtin_isinf_sign: { |
| 16289 | APFloat Val(0.0); |
| 16290 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16291 | Success(Value: Val.isInfinity() ? (Val.isNegative() ? -1 : 1) : 0, E); |
| 16292 | } |
| 16293 | |
| 16294 | case Builtin::BI__builtin_isinf: { |
| 16295 | APFloat Val(0.0); |
| 16296 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16297 | Success(Value: Val.isInfinity() ? 1 : 0, E); |
| 16298 | } |
| 16299 | |
| 16300 | case Builtin::BI__builtin_isfinite: { |
| 16301 | APFloat Val(0.0); |
| 16302 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16303 | Success(Value: Val.isFinite() ? 1 : 0, E); |
| 16304 | } |
| 16305 | |
| 16306 | case Builtin::BI__builtin_isnan: { |
| 16307 | APFloat Val(0.0); |
| 16308 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16309 | Success(Value: Val.isNaN() ? 1 : 0, E); |
| 16310 | } |
| 16311 | |
| 16312 | case Builtin::BI__builtin_isnormal: { |
| 16313 | APFloat Val(0.0); |
| 16314 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16315 | Success(Value: Val.isNormal() ? 1 : 0, E); |
| 16316 | } |
| 16317 | |
| 16318 | case Builtin::BI__builtin_issubnormal: { |
| 16319 | APFloat Val(0.0); |
| 16320 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16321 | Success(Value: Val.isDenormal() ? 1 : 0, E); |
| 16322 | } |
| 16323 | |
| 16324 | case Builtin::BI__builtin_iszero: { |
| 16325 | APFloat Val(0.0); |
| 16326 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16327 | Success(Value: Val.isZero() ? 1 : 0, E); |
| 16328 | } |
| 16329 | |
| 16330 | case Builtin::BI__builtin_signbit: |
| 16331 | case Builtin::BI__builtin_signbitf: |
| 16332 | case Builtin::BI__builtin_signbitl: { |
| 16333 | APFloat Val(0.0); |
| 16334 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16335 | Success(Value: Val.isNegative() ? 1 : 0, E); |
| 16336 | } |
| 16337 | |
| 16338 | case Builtin::BI__builtin_isgreater: |
| 16339 | case Builtin::BI__builtin_isgreaterequal: |
| 16340 | case Builtin::BI__builtin_isless: |
| 16341 | case Builtin::BI__builtin_islessequal: |
| 16342 | case Builtin::BI__builtin_islessgreater: |
| 16343 | case Builtin::BI__builtin_isunordered: { |
| 16344 | APFloat LHS(0.0); |
| 16345 | APFloat RHS(0.0); |
| 16346 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16347 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 16348 | return false; |
| 16349 | |
| 16350 | return Success( |
| 16351 | Value: [&] { |
| 16352 | switch (BuiltinOp) { |
| 16353 | case Builtin::BI__builtin_isgreater: |
| 16354 | return LHS > RHS; |
| 16355 | case Builtin::BI__builtin_isgreaterequal: |
| 16356 | return LHS >= RHS; |
| 16357 | case Builtin::BI__builtin_isless: |
| 16358 | return LHS < RHS; |
| 16359 | case Builtin::BI__builtin_islessequal: |
| 16360 | return LHS <= RHS; |
| 16361 | case Builtin::BI__builtin_islessgreater: { |
| 16362 | APFloat::cmpResult cmp = LHS.compare(RHS); |
| 16363 | return cmp == APFloat::cmpResult::cmpLessThan || |
| 16364 | cmp == APFloat::cmpResult::cmpGreaterThan; |
| 16365 | } |
| 16366 | case Builtin::BI__builtin_isunordered: |
| 16367 | return LHS.compare(RHS) == APFloat::cmpResult::cmpUnordered; |
| 16368 | default: |
| 16369 | llvm_unreachable("Unexpected builtin ID: Should be a floating " |
| 16370 | "point comparison function" ); |
| 16371 | } |
| 16372 | }() |
| 16373 | ? 1 |
| 16374 | : 0, |
| 16375 | E); |
| 16376 | } |
| 16377 | |
| 16378 | case Builtin::BI__builtin_issignaling: { |
| 16379 | APFloat Val(0.0); |
| 16380 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16381 | Success(Value: Val.isSignaling() ? 1 : 0, E); |
| 16382 | } |
| 16383 | |
| 16384 | case Builtin::BI__builtin_isfpclass: { |
| 16385 | APSInt MaskVal; |
| 16386 | if (!EvaluateInteger(E: E->getArg(Arg: 1), Result&: MaskVal, Info)) |
| 16387 | return false; |
| 16388 | unsigned Test = static_cast<llvm::FPClassTest>(MaskVal.getZExtValue()); |
| 16389 | APFloat Val(0.0); |
| 16390 | return EvaluateFloat(E: E->getArg(Arg: 0), Result&: Val, Info) && |
| 16391 | Success(Value: (Val.classify() & Test) ? 1 : 0, E); |
| 16392 | } |
| 16393 | |
| 16394 | case Builtin::BI__builtin_parity: |
| 16395 | case Builtin::BI__builtin_parityl: |
| 16396 | case Builtin::BI__builtin_parityll: { |
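    // Parity is 1 when an odd number of bits are set; e.g.
    // __builtin_parity(0x7) == 1 and __builtin_parity(0x3) == 0.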
| 16397 | APSInt Val; |
| 16398 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16399 | return false; |
| 16400 | |
| 16401 | return Success(Value: Val.popcount() % 2, E); |
| 16402 | } |
| 16403 | |
| 16404 | case Builtin::BI__builtin_abs: |
| 16405 | case Builtin::BI__builtin_labs: |
| 16406 | case Builtin::BI__builtin_llabs: { |
| 16407 | APSInt Val; |
| 16408 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 16409 | return false; |
| 16410 | if (Val == APSInt(APInt::getSignedMinValue(numBits: Val.getBitWidth()), |
| 16411 | /*IsUnsigned=*/false)) |
| 16412 | return false; |
| 16413 | if (Val.isNegative()) |
| 16414 | Val.negate(); |
| 16415 | return Success(SI: Val, E); |
| 16416 | } |
| 16417 | |
| 16418 | case Builtin::BI__builtin_popcount: |
| 16419 | case Builtin::BI__builtin_popcountl: |
| 16420 | case Builtin::BI__builtin_popcountll: |
| 16421 | case Builtin::BI__builtin_popcountg: |
| 16422 | case Builtin::BI__builtin_elementwise_popcount: |
| 16423 | case Builtin::BI__popcnt16: // Microsoft variants of popcount |
| 16424 | case Builtin::BI__popcnt: |
| 16425 | case Builtin::BI__popcnt64: { |
| 16426 | APSInt Val; |
| 16427 | if (E->getArg(Arg: 0)->getType()->isExtVectorBoolType()) { |
| 16428 | APValue Vec; |
| 16429 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info)) |
| 16430 | return false; |
| 16431 | Val = ConvertBoolVectorToInt(Val: Vec); |
| 16432 | } else if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) { |
| 16433 | return false; |
| 16434 | } |
| 16435 | |
| 16436 | return Success(Value: Val.popcount(), E); |
| 16437 | } |
| 16438 | |
| 16439 | case Builtin::BI__builtin_rotateleft8: |
| 16440 | case Builtin::BI__builtin_rotateleft16: |
| 16441 | case Builtin::BI__builtin_rotateleft32: |
| 16442 | case Builtin::BI__builtin_rotateleft64: |
| 16443 | case Builtin::BI__builtin_rotateright8: |
| 16444 | case Builtin::BI__builtin_rotateright16: |
| 16445 | case Builtin::BI__builtin_rotateright32: |
| 16446 | case Builtin::BI__builtin_rotateright64: |
| 16447 | case Builtin::BI__builtin_stdc_rotate_left: |
| 16448 | case Builtin::BI__builtin_stdc_rotate_right: |
| 16449 | case Builtin::BI_rotl8: // Microsoft variants of rotate left |
| 16450 | case Builtin::BI_rotl16: |
| 16451 | case Builtin::BI_rotl: |
| 16452 | case Builtin::BI_lrotl: |
| 16453 | case Builtin::BI_rotl64: |
| 16454 | case Builtin::BI_rotr8: // Microsoft variants of rotate right |
| 16455 | case Builtin::BI_rotr16: |
| 16456 | case Builtin::BI_rotr: |
| 16457 | case Builtin::BI_lrotr: |
| 16458 | case Builtin::BI_rotr64: { |
| 16459 | APSInt Value, Amount; |
| 16460 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Value, Info) || |
| 16461 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Amount, Info)) |
| 16462 | return false; |
| 16463 | |
| 16464 | Amount = NormalizeRotateAmount(Value, Amount); |
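    // NormalizeRotateAmount presumably reduces the amount modulo the bit
    // width of Value (see its definition earlier in this file), so that e.g.
    // rotating a 32-bit value by 33 behaves like rotating it by 1.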
| 16465 | |
| 16466 | switch (BuiltinOp) { |
| 16467 | case Builtin::BI__builtin_rotateright8: |
| 16468 | case Builtin::BI__builtin_rotateright16: |
| 16469 | case Builtin::BI__builtin_rotateright32: |
| 16470 | case Builtin::BI__builtin_rotateright64: |
| 16471 | case Builtin::BI__builtin_stdc_rotate_right: |
| 16472 | case Builtin::BI_rotr8: |
| 16473 | case Builtin::BI_rotr16: |
| 16474 | case Builtin::BI_rotr: |
| 16475 | case Builtin::BI_lrotr: |
| 16476 | case Builtin::BI_rotr64: |
| 16477 | return Success( |
| 16478 | SI: APSInt(Value.rotr(rotateAmt: Amount.getZExtValue()), Value.isUnsigned()), E); |
| 16479 | default: |
| 16480 | return Success( |
| 16481 | SI: APSInt(Value.rotl(rotateAmt: Amount.getZExtValue()), Value.isUnsigned()), E); |
| 16482 | } |
| 16483 | } |
| 16484 | |
| 16485 | case Builtin::BI__builtin_elementwise_add_sat: { |
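    // Saturating addition clamps to the representable range instead of
    // wrapping; e.g. for signed 8-bit operands add_sat(100, 100) == 127, and
    // for unsigned 8-bit operands add_sat(200, 100) == 255.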
| 16486 | APSInt LHS, RHS; |
| 16487 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16488 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 16489 | return false; |
| 16490 | |
| 16491 | APInt Result = LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS); |
| 16492 | return Success(SI: APSInt(Result, !LHS.isSigned()), E); |
| 16493 | } |
| 16494 | case Builtin::BI__builtin_elementwise_sub_sat: { |
| 16495 | APSInt LHS, RHS; |
| 16496 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16497 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 16498 | return false; |
| 16499 | |
| 16500 | APInt Result = LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS); |
| 16501 | return Success(SI: APSInt(Result, !LHS.isSigned()), E); |
| 16502 | } |
| 16503 | case Builtin::BI__builtin_elementwise_max: { |
| 16504 | APSInt LHS, RHS; |
| 16505 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16506 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 16507 | return false; |
| 16508 | |
| 16509 | APInt Result = std::max(a: LHS, b: RHS); |
| 16510 | return Success(SI: APSInt(Result, !LHS.isSigned()), E); |
| 16511 | } |
| 16512 | case Builtin::BI__builtin_elementwise_min: { |
| 16513 | APSInt LHS, RHS; |
| 16514 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16515 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 16516 | return false; |
| 16517 | |
| 16518 | APInt Result = std::min(a: LHS, b: RHS); |
| 16519 | return Success(SI: APSInt(Result, !LHS.isSigned()), E); |
| 16520 | } |
| 16521 | case Builtin::BI__builtin_elementwise_fshl: |
| 16522 | case Builtin::BI__builtin_elementwise_fshr: { |
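    // Funnel shifts operate on the double-width concatenation Hi:Lo; e.g. for
    // 8-bit operands, fshl(0xAB, 0xCD, 3) == 0x5E (the high byte of
    // 0xABCD << 3) and fshr(0xAB, 0xCD, 3) == 0x79 (the low byte of
    // 0xABCD >> 3).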
| 16523 | APSInt Hi, Lo, Shift; |
| 16524 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Hi, Info) || |
| 16525 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Lo, Info) || |
| 16526 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Shift, Info)) |
| 16527 | return false; |
| 16528 | |
| 16529 | switch (BuiltinOp) { |
| 16530 | case Builtin::BI__builtin_elementwise_fshl: { |
| 16531 | APSInt Result(llvm::APIntOps::fshl(Hi, Lo, Shift), Hi.isUnsigned()); |
| 16532 | return Success(SI: Result, E); |
| 16533 | } |
| 16534 | case Builtin::BI__builtin_elementwise_fshr: { |
| 16535 | APSInt Result(llvm::APIntOps::fshr(Hi, Lo, Shift), Hi.isUnsigned()); |
| 16536 | return Success(SI: Result, E); |
| 16537 | } |
| 16538 | } |
| 16539 | llvm_unreachable("Fully covered switch above" ); |
| 16540 | } |
| 16541 | case Builtin::BIstrlen: |
| 16542 | case Builtin::BIwcslen: |
    // A call to strlen or wcslen is not a constant expression.
| 16544 | if (Info.getLangOpts().CPlusPlus11) |
| 16545 | Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function) |
| 16546 | << /*isConstexpr*/ 0 << /*isConstructor*/ 0 |
| 16547 | << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp); |
| 16548 | else |
| 16549 | Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 16550 | [[fallthrough]]; |
| 16551 | case Builtin::BI__builtin_strlen: |
| 16552 | case Builtin::BI__builtin_wcslen: { |
| 16553 | // As an extension, we support __builtin_strlen() as a constant expression, |
| 16554 | // and support folding strlen() to a constant. |
| 16555 | uint64_t StrLen; |
| 16556 | if (EvaluateBuiltinStrLen(E: E->getArg(Arg: 0), Result&: StrLen, Info)) |
| 16557 | return Success(Value: StrLen, E); |
| 16558 | return false; |
| 16559 | } |
| 16560 | |
| 16561 | case Builtin::BIstrcmp: |
| 16562 | case Builtin::BIwcscmp: |
| 16563 | case Builtin::BIstrncmp: |
| 16564 | case Builtin::BIwcsncmp: |
| 16565 | case Builtin::BImemcmp: |
| 16566 | case Builtin::BIbcmp: |
| 16567 | case Builtin::BIwmemcmp: |
    // A call to these comparison functions is not a constant expression.
| 16569 | if (Info.getLangOpts().CPlusPlus11) |
| 16570 | Info.CCEDiag(E, DiagId: diag::note_constexpr_invalid_function) |
| 16571 | << /*isConstexpr*/ 0 << /*isConstructor*/ 0 |
| 16572 | << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp); |
| 16573 | else |
| 16574 | Info.CCEDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 16575 | [[fallthrough]]; |
| 16576 | case Builtin::BI__builtin_strcmp: |
| 16577 | case Builtin::BI__builtin_wcscmp: |
| 16578 | case Builtin::BI__builtin_strncmp: |
| 16579 | case Builtin::BI__builtin_wcsncmp: |
| 16580 | case Builtin::BI__builtin_memcmp: |
| 16581 | case Builtin::BI__builtin_bcmp: |
| 16582 | case Builtin::BI__builtin_wmemcmp: { |
| 16583 | LValue String1, String2; |
| 16584 | if (!EvaluatePointer(E: E->getArg(Arg: 0), Result&: String1, Info) || |
| 16585 | !EvaluatePointer(E: E->getArg(Arg: 1), Result&: String2, Info)) |
| 16586 | return false; |
| 16587 | |
| 16588 | uint64_t MaxLength = uint64_t(-1); |
| 16589 | if (BuiltinOp != Builtin::BIstrcmp && |
| 16590 | BuiltinOp != Builtin::BIwcscmp && |
| 16591 | BuiltinOp != Builtin::BI__builtin_strcmp && |
| 16592 | BuiltinOp != Builtin::BI__builtin_wcscmp) { |
| 16593 | APSInt N; |
| 16594 | if (!EvaluateInteger(E: E->getArg(Arg: 2), Result&: N, Info)) |
| 16595 | return false; |
| 16596 | MaxLength = N.getZExtValue(); |
| 16597 | } |
| 16598 | |
| 16599 | // Empty substrings compare equal by definition. |
| 16600 | if (MaxLength == 0u) |
| 16601 | return Success(Value: 0, E); |
| 16602 | |
| 16603 | if (!String1.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) || |
| 16604 | !String2.checkNullPointerForFoldAccess(Info, E, AK: AK_Read) || |
| 16605 | String1.Designator.Invalid || String2.Designator.Invalid) |
| 16606 | return false; |
| 16607 | |
| 16608 | QualType CharTy1 = String1.Designator.getType(Ctx&: Info.Ctx); |
| 16609 | QualType CharTy2 = String2.Designator.getType(Ctx&: Info.Ctx); |
| 16610 | |
| 16611 | bool IsRawByte = BuiltinOp == Builtin::BImemcmp || |
| 16612 | BuiltinOp == Builtin::BIbcmp || |
| 16613 | BuiltinOp == Builtin::BI__builtin_memcmp || |
| 16614 | BuiltinOp == Builtin::BI__builtin_bcmp; |
| 16615 | |
| 16616 | assert(IsRawByte || |
| 16617 | (Info.Ctx.hasSameUnqualifiedType( |
| 16618 | CharTy1, E->getArg(0)->getType()->getPointeeType()) && |
| 16619 | Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2))); |
| 16620 | |
| 16621 | // For memcmp, allow comparing any arrays of '[[un]signed] char' or |
| 16622 | // 'char8_t', but no other types. |
| 16623 | if (IsRawByte && |
| 16624 | !(isOneByteCharacterType(T: CharTy1) && isOneByteCharacterType(T: CharTy2))) { |
| 16625 | // FIXME: Consider using our bit_cast implementation to support this. |
| 16626 | Info.FFDiag(E, DiagId: diag::note_constexpr_memcmp_unsupported) |
| 16627 | << Info.Ctx.BuiltinInfo.getQuotedName(ID: BuiltinOp) << CharTy1 |
| 16628 | << CharTy2; |
| 16629 | return false; |
| 16630 | } |
| 16631 | |
| 16632 | const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) { |
| 16633 | return handleLValueToRValueConversion(Info, Conv: E, Type: CharTy1, LVal: String1, RVal&: Char1) && |
| 16634 | handleLValueToRValueConversion(Info, Conv: E, Type: CharTy2, LVal: String2, RVal&: Char2) && |
| 16635 | Char1.isInt() && Char2.isInt(); |
| 16636 | }; |
| 16637 | const auto &AdvanceElems = [&] { |
| 16638 | return HandleLValueArrayAdjustment(Info, E, LVal&: String1, EltTy: CharTy1, Adjustment: 1) && |
| 16639 | HandleLValueArrayAdjustment(Info, E, LVal&: String2, EltTy: CharTy2, Adjustment: 1); |
| 16640 | }; |
| 16641 | |
| 16642 | bool StopAtNull = |
| 16643 | (BuiltinOp != Builtin::BImemcmp && BuiltinOp != Builtin::BIbcmp && |
| 16644 | BuiltinOp != Builtin::BIwmemcmp && |
| 16645 | BuiltinOp != Builtin::BI__builtin_memcmp && |
| 16646 | BuiltinOp != Builtin::BI__builtin_bcmp && |
| 16647 | BuiltinOp != Builtin::BI__builtin_wmemcmp); |
| 16648 | bool IsWide = BuiltinOp == Builtin::BIwcscmp || |
| 16649 | BuiltinOp == Builtin::BIwcsncmp || |
| 16650 | BuiltinOp == Builtin::BIwmemcmp || |
| 16651 | BuiltinOp == Builtin::BI__builtin_wcscmp || |
| 16652 | BuiltinOp == Builtin::BI__builtin_wcsncmp || |
| 16653 | BuiltinOp == Builtin::BI__builtin_wmemcmp; |
| 16654 | |
| 16655 | for (; MaxLength; --MaxLength) { |
| 16656 | APValue Char1, Char2; |
| 16657 | if (!ReadCurElems(Char1, Char2)) |
| 16658 | return false; |
| 16659 | if (Char1.getInt().ne(RHS: Char2.getInt())) { |
| 16660 | if (IsWide) // wmemcmp compares with wchar_t signedness. |
| 16661 | return Success(Value: Char1.getInt() < Char2.getInt() ? -1 : 1, E); |
| 16662 | // memcmp always compares unsigned chars. |
| 16663 | return Success(Value: Char1.getInt().ult(RHS: Char2.getInt()) ? -1 : 1, E); |
| 16664 | } |
| 16665 | if (StopAtNull && !Char1.getInt()) |
| 16666 | return Success(Value: 0, E); |
| 16667 | assert(!(StopAtNull && !Char2.getInt())); |
| 16668 | if (!AdvanceElems()) |
| 16669 | return false; |
| 16670 | } |
| 16671 | // We hit the strncmp / memcmp limit. |
| 16672 | return Success(Value: 0, E); |
| 16673 | } |
| 16674 | |
| 16675 | case Builtin::BI__atomic_always_lock_free: |
| 16676 | case Builtin::BI__atomic_is_lock_free: |
| 16677 | case Builtin::BI__c11_atomic_is_lock_free: { |
| 16678 | APSInt SizeVal; |
| 16679 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: SizeVal, Info)) |
| 16680 | return false; |
| 16681 | |
| 16682 | // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power |
| 16683 | // of two less than or equal to the maximum inline atomic width, we know it |
| 16684 | // is lock-free. If the size isn't a power of two, or greater than the |
| 16685 | // maximum alignment where we promote atomics, we know it is not lock-free |
| 16686 | // (at least not in the sense of atomic_is_lock_free). Otherwise, |
| 16687 | // the answer can only be determined at runtime; for example, 16-byte |
| 16688 | // atomics have lock-free implementations on some, but not all, |
| 16689 | // x86-64 processors. |
| 16690 | |
| 16691 | // Check power-of-two. |
| 16692 | CharUnits Size = CharUnits::fromQuantity(Quantity: SizeVal.getZExtValue()); |
| 16693 | if (Size.isPowerOfTwo()) { |
| 16694 | // Check against inlining width. |
| 16695 | unsigned InlineWidthBits = |
| 16696 | Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth(); |
| 16697 | if (Size <= Info.Ctx.toCharUnitsFromBits(BitSize: InlineWidthBits)) { |
| 16698 | if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free || |
| 16699 | Size == CharUnits::One()) |
| 16700 | return Success(Value: 1, E); |
| 16701 | |
| 16702 | // If the pointer argument can be evaluated to a compile-time constant |
| 16703 | // integer (or nullptr), check if that value is appropriately aligned. |
| 16704 | const Expr *PtrArg = E->getArg(Arg: 1); |
| 16705 | Expr::EvalResult ExprResult; |
| 16706 | APSInt IntResult; |
| 16707 | if (PtrArg->EvaluateAsRValue(Result&: ExprResult, Ctx: Info.Ctx) && |
| 16708 | ExprResult.Val.toIntegralConstant(Result&: IntResult, SrcTy: PtrArg->getType(), |
| 16709 | Ctx: Info.Ctx) && |
| 16710 | IntResult.isAligned(A: Size.getAsAlign())) |
| 16711 | return Success(Value: 1, E); |
| 16712 | |
| 16713 | // Otherwise, check the type's alignment against Size.
| 16714 | if (auto *ICE = dyn_cast<ImplicitCastExpr>(Val: PtrArg)) { |
| 16715 | // Drop the potential implicit-cast to 'const volatile void*', getting |
| 16716 | // the underlying type. |
| 16717 | if (ICE->getCastKind() == CK_BitCast) |
| 16718 | PtrArg = ICE->getSubExpr(); |
| 16719 | } |
| 16720 | |
| 16721 | if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) { |
| 16722 | QualType PointeeType = PtrTy->getPointeeType(); |
| 16723 | if (!PointeeType->isIncompleteType() && |
| 16724 | Info.Ctx.getTypeAlignInChars(T: PointeeType) >= Size) { |
| 16725 | // OK, we will inline operations on this object. |
| 16726 | return Success(Value: 1, E); |
| 16727 | } |
| 16728 | } |
| 16729 | } |
| 16730 | } |
| 16731 | |
| 16732 | return BuiltinOp == Builtin::BI__atomic_always_lock_free ? |
| 16733 | Success(Value: 0, E) : Error(E); |
| 16734 | } |
| 16735 | case Builtin::BI__builtin_addcb: |
| 16736 | case Builtin::BI__builtin_addcs: |
| 16737 | case Builtin::BI__builtin_addc: |
| 16738 | case Builtin::BI__builtin_addcl: |
| 16739 | case Builtin::BI__builtin_addcll: |
| 16740 | case Builtin::BI__builtin_subcb: |
| 16741 | case Builtin::BI__builtin_subcs: |
| 16742 | case Builtin::BI__builtin_subc: |
| 16743 | case Builtin::BI__builtin_subcl: |
| 16744 | case Builtin::BI__builtin_subcll: { |
| 16745 | LValue CarryOutLValue; |
| 16746 | APSInt LHS, RHS, CarryIn, CarryOut, Result; |
| 16747 | QualType ResultType = E->getArg(Arg: 0)->getType(); |
| 16748 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16749 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info) || |
| 16750 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: CarryIn, Info) || |
| 16751 | !EvaluatePointer(E: E->getArg(Arg: 3), Result&: CarryOutLValue, Info)) |
| 16752 | return false; |
| 16753 | // Copy the number of bits and sign. |
| 16754 | Result = LHS; |
| 16755 | CarryOut = LHS; |
| 16756 | |
| 16757 | bool FirstOverflowed = false; |
| 16758 | bool SecondOverflowed = false; |
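|  | // Perform the operation in two unsigned steps (LHS op RHS, then op CarryIn);
|  | // overflow from either step becomes the carry (or borrow) out.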
| 16759 | switch (BuiltinOp) { |
| 16760 | default: |
| 16761 | llvm_unreachable("Invalid value for BuiltinOp");
| 16762 | case Builtin::BI__builtin_addcb: |
| 16763 | case Builtin::BI__builtin_addcs: |
| 16764 | case Builtin::BI__builtin_addc: |
| 16765 | case Builtin::BI__builtin_addcl: |
| 16766 | case Builtin::BI__builtin_addcll: |
| 16767 | Result = |
| 16768 | LHS.uadd_ov(RHS, Overflow&: FirstOverflowed).uadd_ov(RHS: CarryIn, Overflow&: SecondOverflowed); |
| 16769 | break; |
| 16770 | case Builtin::BI__builtin_subcb: |
| 16771 | case Builtin::BI__builtin_subcs: |
| 16772 | case Builtin::BI__builtin_subc: |
| 16773 | case Builtin::BI__builtin_subcl: |
| 16774 | case Builtin::BI__builtin_subcll: |
| 16775 | Result = |
| 16776 | LHS.usub_ov(RHS, Overflow&: FirstOverflowed).usub_ov(RHS: CarryIn, Overflow&: SecondOverflowed); |
| 16777 | break; |
| 16778 | } |
| 16779 | |
| 16780 | // Both operations may overflow. CGBuiltin combines the two carries with an
| 16781 | // OR, so doing the same here is consistent with the generated code.
| 16782 | CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed); |
| 16783 | APValue APV{CarryOut}; |
| 16784 | if (!handleAssignment(Info, E, LVal: CarryOutLValue, LValType: ResultType, Val&: APV)) |
| 16785 | return false; |
| 16786 | return Success(SI: Result, E); |
| 16787 | } |
| 16788 | case Builtin::BI__builtin_add_overflow: |
| 16789 | case Builtin::BI__builtin_sub_overflow: |
| 16790 | case Builtin::BI__builtin_mul_overflow: |
| 16791 | case Builtin::BI__builtin_sadd_overflow: |
| 16792 | case Builtin::BI__builtin_uadd_overflow: |
| 16793 | case Builtin::BI__builtin_uaddl_overflow: |
| 16794 | case Builtin::BI__builtin_uaddll_overflow: |
| 16795 | case Builtin::BI__builtin_usub_overflow: |
| 16796 | case Builtin::BI__builtin_usubl_overflow: |
| 16797 | case Builtin::BI__builtin_usubll_overflow: |
| 16798 | case Builtin::BI__builtin_umul_overflow: |
| 16799 | case Builtin::BI__builtin_umull_overflow: |
| 16800 | case Builtin::BI__builtin_umulll_overflow: |
| 16801 | case Builtin::BI__builtin_saddl_overflow: |
| 16802 | case Builtin::BI__builtin_saddll_overflow: |
| 16803 | case Builtin::BI__builtin_ssub_overflow: |
| 16804 | case Builtin::BI__builtin_ssubl_overflow: |
| 16805 | case Builtin::BI__builtin_ssubll_overflow: |
| 16806 | case Builtin::BI__builtin_smul_overflow: |
| 16807 | case Builtin::BI__builtin_smull_overflow: |
| 16808 | case Builtin::BI__builtin_smulll_overflow: { |
| 16809 | LValue ResultLValue; |
| 16810 | APSInt LHS, RHS; |
| 16811 | |
| 16812 | QualType ResultType = E->getArg(Arg: 2)->getType()->getPointeeType(); |
| 16813 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 16814 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: RHS, Info) || |
| 16815 | !EvaluatePointer(E: E->getArg(Arg: 2), Result&: ResultLValue, Info)) |
| 16816 | return false; |
| 16817 | |
| 16818 | APSInt Result; |
| 16819 | bool DidOverflow = false; |
| 16820 | |
| 16821 | // If the types don't have to match, enlarge all 3 to the largest of them. |
| 16822 | if (BuiltinOp == Builtin::BI__builtin_add_overflow || |
| 16823 | BuiltinOp == Builtin::BI__builtin_sub_overflow || |
| 16824 | BuiltinOp == Builtin::BI__builtin_mul_overflow) { |
| 16825 | bool IsSigned = LHS.isSigned() || RHS.isSigned() || |
| 16826 | ResultType->isSignedIntegerOrEnumerationType(); |
| 16827 | bool AllSigned = LHS.isSigned() && RHS.isSigned() && |
| 16828 | ResultType->isSignedIntegerOrEnumerationType(); |
| 16829 | uint64_t LHSSize = LHS.getBitWidth(); |
| 16830 | uint64_t RHSSize = RHS.getBitWidth(); |
| 16831 | uint64_t ResultSize = Info.Ctx.getTypeSize(T: ResultType); |
| 16832 | uint64_t MaxBits = std::max(a: std::max(a: LHSSize, b: RHSSize), b: ResultSize); |
| 16833 | |
| 16834 | // Add an additional bit if the operands do not all agree on signedness. We
| 16835 | // could do this ONLY if there is a signed and an unsigned that both have |
| 16836 | // MaxBits, but the code to check that is pretty nasty. The issue will be |
| 16837 | // caught in the shrink-to-result later anyway. |
| 16838 | if (IsSigned && !AllSigned) |
| 16839 | ++MaxBits; |
| 16840 | |
| 16841 | LHS = APSInt(LHS.extOrTrunc(width: MaxBits), !IsSigned); |
| 16842 | RHS = APSInt(RHS.extOrTrunc(width: MaxBits), !IsSigned); |
| 16843 | Result = APSInt(MaxBits, !IsSigned); |
| 16844 | } |
| 16845 | |
| 16846 | // Perform the arithmetic, tracking whether it overflowed.
| 16847 | switch (BuiltinOp) { |
| 16848 | default: |
| 16849 | llvm_unreachable("Invalid value for BuiltinOp");
| 16850 | case Builtin::BI__builtin_add_overflow: |
| 16851 | case Builtin::BI__builtin_sadd_overflow: |
| 16852 | case Builtin::BI__builtin_saddl_overflow: |
| 16853 | case Builtin::BI__builtin_saddll_overflow: |
| 16854 | case Builtin::BI__builtin_uadd_overflow: |
| 16855 | case Builtin::BI__builtin_uaddl_overflow: |
| 16856 | case Builtin::BI__builtin_uaddll_overflow: |
| 16857 | Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow&: DidOverflow) |
| 16858 | : LHS.uadd_ov(RHS, Overflow&: DidOverflow); |
| 16859 | break; |
| 16860 | case Builtin::BI__builtin_sub_overflow: |
| 16861 | case Builtin::BI__builtin_ssub_overflow: |
| 16862 | case Builtin::BI__builtin_ssubl_overflow: |
| 16863 | case Builtin::BI__builtin_ssubll_overflow: |
| 16864 | case Builtin::BI__builtin_usub_overflow: |
| 16865 | case Builtin::BI__builtin_usubl_overflow: |
| 16866 | case Builtin::BI__builtin_usubll_overflow: |
| 16867 | Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow&: DidOverflow) |
| 16868 | : LHS.usub_ov(RHS, Overflow&: DidOverflow); |
| 16869 | break; |
| 16870 | case Builtin::BI__builtin_mul_overflow: |
| 16871 | case Builtin::BI__builtin_smul_overflow: |
| 16872 | case Builtin::BI__builtin_smull_overflow: |
| 16873 | case Builtin::BI__builtin_smulll_overflow: |
| 16874 | case Builtin::BI__builtin_umul_overflow: |
| 16875 | case Builtin::BI__builtin_umull_overflow: |
| 16876 | case Builtin::BI__builtin_umulll_overflow: |
| 16877 | Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow&: DidOverflow) |
| 16878 | : LHS.umul_ov(RHS, Overflow&: DidOverflow); |
| 16879 | break; |
| 16880 | } |
| 16881 | |
| 16882 | // In the case where multiple sizes are allowed, truncate and see if |
| 16883 | // the values are the same. |
| 16884 | if (BuiltinOp == Builtin::BI__builtin_add_overflow || |
| 16885 | BuiltinOp == Builtin::BI__builtin_sub_overflow || |
| 16886 | BuiltinOp == Builtin::BI__builtin_mul_overflow) { |
| 16887 | // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead, which
| 16888 | // behaves like a TruncOrSelf whenever the requested width is <= the current
| 16889 | // width. We previously set Result to be at least as wide as the result
| 16890 | // type, so getTypeSize(ResultType) <= Result.BitWidth and extOrTrunc
| 16891 | // behaves exactly like TruncOrSelf here.
| 16892 | APSInt Temp = Result.extOrTrunc(width: Info.Ctx.getTypeSize(T: ResultType)); |
| 16893 | Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType()); |
| 16894 | |
| 16895 | if (!APSInt::isSameValue(I1: Temp, I2: Result)) |
| 16896 | DidOverflow = true; |
| 16897 | Result = Temp; |
| 16898 | } |
| 16899 | |
| 16900 | APValue APV{Result}; |
| 16901 | if (!handleAssignment(Info, E, LVal: ResultLValue, LValType: ResultType, Val&: APV)) |
| 16902 | return false; |
| 16903 | return Success(Value: DidOverflow, E); |
| 16904 | } |
| 16905 | |
| 16906 | case Builtin::BI__builtin_reduce_add: |
| 16907 | case Builtin::BI__builtin_reduce_mul: |
| 16908 | case Builtin::BI__builtin_reduce_and: |
| 16909 | case Builtin::BI__builtin_reduce_or: |
| 16910 | case Builtin::BI__builtin_reduce_xor: |
| 16911 | case Builtin::BI__builtin_reduce_min: |
| 16912 | case Builtin::BI__builtin_reduce_max: { |
| 16913 | APValue Source; |
| 16914 | if (!EvaluateAsRValue(Info, E: E->getArg(Arg: 0), Result&: Source)) |
| 16915 | return false; |
| 16916 | |
| 16917 | unsigned SourceLen = Source.getVectorLength(); |
| 16918 | APSInt Reduced = Source.getVectorElt(I: 0).getInt(); |
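|  | // Fold the vector elements left to right, starting from element 0. Additions
|  | // and multiplications go through CheckedIntArithmetic so overflow is detected.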
| 16919 | for (unsigned EltNum = 1; EltNum < SourceLen; ++EltNum) { |
| 16920 | switch (BuiltinOp) { |
| 16921 | default: |
| 16922 | return false; |
| 16923 | case Builtin::BI__builtin_reduce_add: { |
| 16924 | if (!CheckedIntArithmetic( |
| 16925 | Info, E, LHS: Reduced, RHS: Source.getVectorElt(I: EltNum).getInt(), |
| 16926 | BitWidth: Reduced.getBitWidth() + 1, Op: std::plus<APSInt>(), Result&: Reduced)) |
| 16927 | return false; |
| 16928 | break; |
| 16929 | } |
| 16930 | case Builtin::BI__builtin_reduce_mul: { |
| 16931 | if (!CheckedIntArithmetic( |
| 16932 | Info, E, LHS: Reduced, RHS: Source.getVectorElt(I: EltNum).getInt(), |
| 16933 | BitWidth: Reduced.getBitWidth() * 2, Op: std::multiplies<APSInt>(), Result&: Reduced)) |
| 16934 | return false; |
| 16935 | break; |
| 16936 | } |
| 16937 | case Builtin::BI__builtin_reduce_and: { |
| 16938 | Reduced &= Source.getVectorElt(I: EltNum).getInt(); |
| 16939 | break; |
| 16940 | } |
| 16941 | case Builtin::BI__builtin_reduce_or: { |
| 16942 | Reduced |= Source.getVectorElt(I: EltNum).getInt(); |
| 16943 | break; |
| 16944 | } |
| 16945 | case Builtin::BI__builtin_reduce_xor: { |
| 16946 | Reduced ^= Source.getVectorElt(I: EltNum).getInt(); |
| 16947 | break; |
| 16948 | } |
| 16949 | case Builtin::BI__builtin_reduce_min: { |
| 16950 | Reduced = std::min(a: Reduced, b: Source.getVectorElt(I: EltNum).getInt()); |
| 16951 | break; |
| 16952 | } |
| 16953 | case Builtin::BI__builtin_reduce_max: { |
| 16954 | Reduced = std::max(a: Reduced, b: Source.getVectorElt(I: EltNum).getInt()); |
| 16955 | break; |
| 16956 | } |
| 16957 | } |
| 16958 | } |
| 16959 | |
| 16960 | return Success(SI: Reduced, E); |
| 16961 | } |
| 16962 | |
| 16963 | case clang::X86::BI__builtin_ia32_addcarryx_u32: |
| 16964 | case clang::X86::BI__builtin_ia32_addcarryx_u64: |
| 16965 | case clang::X86::BI__builtin_ia32_subborrow_u32: |
| 16966 | case clang::X86::BI__builtin_ia32_subborrow_u64: { |
| 16967 | LValue ResultLValue; |
| 16968 | APSInt CarryIn, LHS, RHS; |
| 16969 | QualType ResultType = E->getArg(Arg: 3)->getType()->getPointeeType(); |
| 16970 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: CarryIn, Info) || |
| 16971 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: LHS, Info) || |
| 16972 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: RHS, Info) || |
| 16973 | !EvaluatePointer(E: E->getArg(Arg: 3), Result&: ResultLValue, Info)) |
| 16974 | return false; |
| 16975 | |
| 16976 | bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 || |
| 16977 | BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64; |
| 16978 | |
| 16979 | unsigned BitWidth = LHS.getBitWidth(); |
| 16980 | unsigned CarryInBit = CarryIn.ugt(RHS: 0) ? 1 : 0; |
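|  | // Widen by one bit so the carry (or borrow) out of the operation lands in the
|  | // extra top bit of the extended result.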
| 16981 | APInt ExResult = |
| 16982 | IsAdd |
| 16983 | ? (LHS.zext(width: BitWidth + 1) + (RHS.zext(width: BitWidth + 1) + CarryInBit)) |
| 16984 | : (LHS.zext(width: BitWidth + 1) - (RHS.zext(width: BitWidth + 1) + CarryInBit)); |
| 16985 | |
| 16986 | APInt Result = ExResult.extractBits(numBits: BitWidth, bitPosition: 0); |
| 16987 | uint64_t CarryOut = ExResult.extractBitsAsZExtValue(numBits: 1, bitPosition: BitWidth); |
| 16988 | |
| 16989 | APValue APV{APSInt(Result, /*isUnsigned=*/true)}; |
| 16990 | if (!handleAssignment(Info, E, LVal: ResultLValue, LValType: ResultType, Val&: APV)) |
| 16991 | return false; |
| 16992 | return Success(Value: CarryOut, E); |
| 16993 | } |
| 16994 | |
| 16995 | case clang::X86::BI__builtin_ia32_movmskps: |
| 16996 | case clang::X86::BI__builtin_ia32_movmskpd: |
| 16997 | case clang::X86::BI__builtin_ia32_pmovmskb128: |
| 16998 | case clang::X86::BI__builtin_ia32_pmovmskb256: |
| 16999 | case clang::X86::BI__builtin_ia32_movmskps256: |
| 17000 | case clang::X86::BI__builtin_ia32_movmskpd256: { |
| 17001 | APValue Source; |
| 17002 | if (!Evaluate(Result&: Source, Info, E: E->getArg(Arg: 0))) |
| 17003 | return false; |
| 17004 | unsigned SourceLen = Source.getVectorLength(); |
| 17005 | const VectorType *VT = E->getArg(Arg: 0)->getType()->castAs<VectorType>(); |
| 17006 | QualType ElemQT = VT->getElementType(); |
| 17007 | unsigned ResultLen = Info.Ctx.getTypeSize( |
| 17008 | T: E->getCallReturnType(Ctx: Info.Ctx)); // Always 32-bit integer. |
| 17009 | APInt Result(ResultLen, 0); |
| 17010 | |
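|  | // movmsk collects the sign bit (MSB) of each element into the corresponding
|  | // bit of the integer result.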
| 17011 | for (unsigned I = 0; I != SourceLen; ++I) { |
| 17012 | APInt Elem; |
| 17013 | if (ElemQT->isIntegerType()) { |
| 17014 | Elem = Source.getVectorElt(I).getInt(); |
| 17015 | } else if (ElemQT->isRealFloatingType()) { |
| 17016 | Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt(); |
| 17017 | } else { |
| 17018 | return false; |
| 17019 | } |
| 17020 | Result.setBitVal(BitPosition: I, BitValue: Elem.isNegative()); |
| 17021 | } |
| 17022 | return Success(I: Result, E); |
| 17023 | } |
| 17024 | |
| 17025 | case clang::X86::BI__builtin_ia32_bextr_u32: |
| 17026 | case clang::X86::BI__builtin_ia32_bextr_u64: |
| 17027 | case clang::X86::BI__builtin_ia32_bextri_u32: |
| 17028 | case clang::X86::BI__builtin_ia32_bextri_u64: { |
| 17029 | APSInt Val, Idx; |
| 17030 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) || |
| 17031 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Idx, Info)) |
| 17032 | return false; |
| 17033 | |
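|  | // The control operand packs the starting bit position in bits [7:0] and the
|  | // field length in bits [15:8].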
| 17034 | unsigned BitWidth = Val.getBitWidth(); |
| 17035 | uint64_t Shift = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0); |
| 17036 | uint64_t Length = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 8); |
| 17037 | Length = Length > BitWidth ? BitWidth : Length; |
| 17038 | |
| 17039 | // Handle out of bounds cases. |
| 17040 | if (Length == 0 || Shift >= BitWidth) |
| 17041 | return Success(Value: 0, E); |
| 17042 | |
| 17043 | uint64_t Result = Val.getZExtValue() >> Shift; |
| 17044 | Result &= llvm::maskTrailingOnes<uint64_t>(N: Length); |
| 17045 | return Success(Value: Result, E); |
| 17046 | } |
| 17047 | |
| 17048 | case clang::X86::BI__builtin_ia32_bzhi_si: |
| 17049 | case clang::X86::BI__builtin_ia32_bzhi_di: { |
| 17050 | APSInt Val, Idx; |
| 17051 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) || |
| 17052 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Idx, Info)) |
| 17053 | return false; |
| 17054 | |
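|  | // bzhi zeroes all bits at positions >= Index; an index of at least the bit
|  | // width leaves the value unchanged.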
| 17055 | unsigned BitWidth = Val.getBitWidth(); |
| 17056 | unsigned Index = Idx.extractBitsAsZExtValue(numBits: 8, bitPosition: 0); |
| 17057 | if (Index < BitWidth) |
| 17058 | Val.clearHighBits(hiBits: BitWidth - Index); |
| 17059 | return Success(SI: Val, E); |
| 17060 | } |
| 17061 | |
| 17062 | case clang::X86::BI__builtin_ia32_ktestcqi: |
| 17063 | case clang::X86::BI__builtin_ia32_ktestchi: |
| 17064 | case clang::X86::BI__builtin_ia32_ktestcsi: |
| 17065 | case clang::X86::BI__builtin_ia32_ktestcdi: { |
| 17066 | APSInt A, B; |
| 17067 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) || |
| 17068 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info)) |
| 17069 | return false; |
| 17070 | |
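|  | // The result is 1 exactly when every bit set in B is also set in A
|  | // (i.e. ~A & B == 0).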
| 17071 | return Success(Value: (~A & B) == 0, E); |
| 17072 | } |
| 17073 | |
| 17074 | case clang::X86::BI__builtin_ia32_ktestzqi: |
| 17075 | case clang::X86::BI__builtin_ia32_ktestzhi: |
| 17076 | case clang::X86::BI__builtin_ia32_ktestzsi: |
| 17077 | case clang::X86::BI__builtin_ia32_ktestzdi: { |
| 17078 | APSInt A, B; |
| 17079 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) || |
| 17080 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info)) |
| 17081 | return false; |
| 17082 | |
| 17083 | return Success(Value: (A & B) == 0, E); |
| 17084 | } |
| 17085 | |
| 17086 | case clang::X86::BI__builtin_ia32_kortestcqi: |
| 17087 | case clang::X86::BI__builtin_ia32_kortestchi: |
| 17088 | case clang::X86::BI__builtin_ia32_kortestcsi: |
| 17089 | case clang::X86::BI__builtin_ia32_kortestcdi: { |
| 17090 | APSInt A, B; |
| 17091 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) || |
| 17092 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info)) |
| 17093 | return false; |
| 17094 | |
| 17095 | return Success(Value: ~(A | B) == 0, E); |
| 17096 | } |
| 17097 | |
| 17098 | case clang::X86::BI__builtin_ia32_kortestzqi: |
| 17099 | case clang::X86::BI__builtin_ia32_kortestzhi: |
| 17100 | case clang::X86::BI__builtin_ia32_kortestzsi: |
| 17101 | case clang::X86::BI__builtin_ia32_kortestzdi: { |
| 17102 | APSInt A, B; |
| 17103 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) || |
| 17104 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info)) |
| 17105 | return false; |
| 17106 | |
| 17107 | return Success(Value: (A | B) == 0, E); |
| 17108 | } |
| 17109 | |
| 17110 | case clang::X86::BI__builtin_ia32_kunpckhi: |
| 17111 | case clang::X86::BI__builtin_ia32_kunpckdi: |
| 17112 | case clang::X86::BI__builtin_ia32_kunpcksi: { |
| 17113 | APSInt A, B; |
| 17114 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: A, Info) || |
| 17115 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: B, Info)) |
| 17116 | return false; |
| 17117 | |
| 17118 | // Generic kunpack: extract lower half of each operand and concatenate |
| 17119 | // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0] |
| 17120 | unsigned BW = A.getBitWidth(); |
| 17121 | APSInt Result(A.trunc(width: BW / 2).concat(NewLSB: B.trunc(width: BW / 2)), A.isUnsigned()); |
| 17122 | return Success(SI: Result, E); |
| 17123 | } |
| 17124 | |
| 17125 | case clang::X86::BI__builtin_ia32_lzcnt_u16: |
| 17126 | case clang::X86::BI__builtin_ia32_lzcnt_u32: |
| 17127 | case clang::X86::BI__builtin_ia32_lzcnt_u64: { |
| 17128 | APSInt Val; |
| 17129 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 17130 | return false; |
| 17131 | return Success(Value: Val.countLeadingZeros(), E); |
| 17132 | } |
| 17133 | |
| 17134 | case clang::X86::BI__builtin_ia32_tzcnt_u16: |
| 17135 | case clang::X86::BI__builtin_ia32_tzcnt_u32: |
| 17136 | case clang::X86::BI__builtin_ia32_tzcnt_u64: { |
| 17137 | APSInt Val; |
| 17138 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 17139 | return false; |
| 17140 | return Success(Value: Val.countTrailingZeros(), E); |
| 17141 | } |
| 17142 | |
| 17143 | case clang::X86::BI__builtin_ia32_pdep_si: |
| 17144 | case clang::X86::BI__builtin_ia32_pdep_di: { |
| 17145 | APSInt Val, Msk; |
| 17146 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) || |
| 17147 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Msk, Info)) |
| 17148 | return false; |
| 17149 | |
| 17150 | unsigned BitWidth = Val.getBitWidth(); |
| 17151 | APInt Result = APInt::getZero(numBits: BitWidth); |
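|  | // pdep deposits successive low-order bits of Val into the bit positions that
|  | // are set in Msk.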
| 17152 | for (unsigned I = 0, P = 0; I != BitWidth; ++I) |
| 17153 | if (Msk[I]) |
| 17154 | Result.setBitVal(BitPosition: I, BitValue: Val[P++]); |
| 17155 | return Success(I: Result, E); |
| 17156 | } |
| 17157 | |
| 17158 | case clang::X86::BI__builtin_ia32_pext_si: |
| 17159 | case clang::X86::BI__builtin_ia32_pext_di: { |
| 17160 | APSInt Val, Msk; |
| 17161 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info) || |
| 17162 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: Msk, Info)) |
| 17163 | return false; |
| 17164 | |
| 17165 | unsigned BitWidth = Val.getBitWidth(); |
| 17166 | APInt Result = APInt::getZero(numBits: BitWidth); |
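|  | // pext gathers the bits of Val selected by Msk and packs them into the
|  | // low-order bits of the result.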
| 17167 | for (unsigned I = 0, P = 0; I != BitWidth; ++I) |
| 17168 | if (Msk[I]) |
| 17169 | Result.setBitVal(BitPosition: P++, BitValue: Val[I]); |
| 17170 | return Success(I: Result, E); |
| 17171 | } |
| 17172 | case X86::BI__builtin_ia32_ptestz128: |
| 17173 | case X86::BI__builtin_ia32_ptestz256: |
| 17174 | case X86::BI__builtin_ia32_vtestzps: |
| 17175 | case X86::BI__builtin_ia32_vtestzps256: |
| 17176 | case X86::BI__builtin_ia32_vtestzpd: |
| 17177 | case X86::BI__builtin_ia32_vtestzpd256: { |
| 17178 | return EvalTestOp( |
| 17179 | [](const APInt &A, const APInt &B) { return (A & B) == 0; }); |
| 17180 | } |
| 17181 | case X86::BI__builtin_ia32_ptestc128: |
| 17182 | case X86::BI__builtin_ia32_ptestc256: |
| 17183 | case X86::BI__builtin_ia32_vtestcps: |
| 17184 | case X86::BI__builtin_ia32_vtestcps256: |
| 17185 | case X86::BI__builtin_ia32_vtestcpd: |
| 17186 | case X86::BI__builtin_ia32_vtestcpd256: { |
| 17187 | return EvalTestOp( |
| 17188 | [](const APInt &A, const APInt &B) { return (~A & B) == 0; }); |
| 17189 | } |
| 17190 | case X86::BI__builtin_ia32_ptestnzc128: |
| 17191 | case X86::BI__builtin_ia32_ptestnzc256: |
| 17192 | case X86::BI__builtin_ia32_vtestnzcps: |
| 17193 | case X86::BI__builtin_ia32_vtestnzcps256: |
| 17194 | case X86::BI__builtin_ia32_vtestnzcpd: |
| 17195 | case X86::BI__builtin_ia32_vtestnzcpd256: { |
| 17196 | return EvalTestOp([](const APInt &A, const APInt &B) { |
| 17197 | return ((A & B) != 0) && ((~A & B) != 0); |
| 17198 | }); |
| 17199 | } |
| 17200 | case X86::BI__builtin_ia32_kandqi: |
| 17201 | case X86::BI__builtin_ia32_kandhi: |
| 17202 | case X86::BI__builtin_ia32_kandsi: |
| 17203 | case X86::BI__builtin_ia32_kanddi: { |
| 17204 | return HandleMaskBinOp( |
| 17205 | [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; }); |
| 17206 | } |
| 17207 | |
| 17208 | case X86::BI__builtin_ia32_kandnqi: |
| 17209 | case X86::BI__builtin_ia32_kandnhi: |
| 17210 | case X86::BI__builtin_ia32_kandnsi: |
| 17211 | case X86::BI__builtin_ia32_kandndi: { |
| 17212 | return HandleMaskBinOp( |
| 17213 | [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; }); |
| 17214 | } |
| 17215 | |
| 17216 | case X86::BI__builtin_ia32_korqi: |
| 17217 | case X86::BI__builtin_ia32_korhi: |
| 17218 | case X86::BI__builtin_ia32_korsi: |
| 17219 | case X86::BI__builtin_ia32_kordi: { |
| 17220 | return HandleMaskBinOp( |
| 17221 | [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; }); |
| 17222 | } |
| 17223 | |
| 17224 | case X86::BI__builtin_ia32_kxnorqi: |
| 17225 | case X86::BI__builtin_ia32_kxnorhi: |
| 17226 | case X86::BI__builtin_ia32_kxnorsi: |
| 17227 | case X86::BI__builtin_ia32_kxnordi: { |
| 17228 | return HandleMaskBinOp( |
| 17229 | [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); }); |
| 17230 | } |
| 17231 | |
| 17232 | case X86::BI__builtin_ia32_kxorqi: |
| 17233 | case X86::BI__builtin_ia32_kxorhi: |
| 17234 | case X86::BI__builtin_ia32_kxorsi: |
| 17235 | case X86::BI__builtin_ia32_kxordi: { |
| 17236 | return HandleMaskBinOp( |
| 17237 | [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; }); |
| 17238 | } |
| 17239 | |
| 17240 | case X86::BI__builtin_ia32_knotqi: |
| 17241 | case X86::BI__builtin_ia32_knothi: |
| 17242 | case X86::BI__builtin_ia32_knotsi: |
| 17243 | case X86::BI__builtin_ia32_knotdi: { |
| 17244 | APSInt Val; |
| 17245 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 17246 | return false; |
| 17247 | APSInt Result = ~Val; |
| 17248 | return Success(V: APValue(Result), E); |
| 17249 | } |
| 17250 | |
| 17251 | case X86::BI__builtin_ia32_kaddqi: |
| 17252 | case X86::BI__builtin_ia32_kaddhi: |
| 17253 | case X86::BI__builtin_ia32_kaddsi: |
| 17254 | case X86::BI__builtin_ia32_kadddi: { |
| 17255 | return HandleMaskBinOp( |
| 17256 | [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; }); |
| 17257 | } |
| 17258 | |
| 17259 | case X86::BI__builtin_ia32_kmovb: |
| 17260 | case X86::BI__builtin_ia32_kmovw: |
| 17261 | case X86::BI__builtin_ia32_kmovd: |
| 17262 | case X86::BI__builtin_ia32_kmovq: { |
| 17263 | APSInt Val; |
| 17264 | if (!EvaluateInteger(E: E->getArg(Arg: 0), Result&: Val, Info)) |
| 17265 | return false; |
| 17266 | return Success(SI: Val, E); |
| 17267 | } |
| 17268 | |
| 17269 | case X86::BI__builtin_ia32_kshiftliqi: |
| 17270 | case X86::BI__builtin_ia32_kshiftlihi: |
| 17271 | case X86::BI__builtin_ia32_kshiftlisi: |
| 17272 | case X86::BI__builtin_ia32_kshiftlidi: { |
| 17273 | return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) { |
| 17274 | unsigned Amt = RHS.getZExtValue() & 0xFF; |
| 17275 | if (Amt >= LHS.getBitWidth()) |
| 17276 | return APSInt(APInt::getZero(numBits: LHS.getBitWidth()), LHS.isUnsigned()); |
| 17277 | return APSInt(LHS.shl(shiftAmt: Amt), LHS.isUnsigned()); |
| 17278 | }); |
| 17279 | } |
| 17280 | |
| 17281 | case X86::BI__builtin_ia32_kshiftriqi: |
| 17282 | case X86::BI__builtin_ia32_kshiftrihi: |
| 17283 | case X86::BI__builtin_ia32_kshiftrisi: |
| 17284 | case X86::BI__builtin_ia32_kshiftridi: { |
| 17285 | return HandleMaskBinOp([](const APSInt &LHS, const APSInt &RHS) { |
| 17286 | unsigned Amt = RHS.getZExtValue() & 0xFF; |
| 17287 | if (Amt >= LHS.getBitWidth()) |
| 17288 | return APSInt(APInt::getZero(numBits: LHS.getBitWidth()), LHS.isUnsigned()); |
| 17289 | return APSInt(LHS.lshr(shiftAmt: Amt), LHS.isUnsigned()); |
| 17290 | }); |
| 17291 | } |
| 17292 | |
| 17293 | case clang::X86::BI__builtin_ia32_vec_ext_v4hi: |
| 17294 | case clang::X86::BI__builtin_ia32_vec_ext_v16qi: |
| 17295 | case clang::X86::BI__builtin_ia32_vec_ext_v8hi: |
| 17296 | case clang::X86::BI__builtin_ia32_vec_ext_v4si: |
| 17297 | case clang::X86::BI__builtin_ia32_vec_ext_v2di: |
| 17298 | case clang::X86::BI__builtin_ia32_vec_ext_v32qi: |
| 17299 | case clang::X86::BI__builtin_ia32_vec_ext_v16hi: |
| 17300 | case clang::X86::BI__builtin_ia32_vec_ext_v8si: |
| 17301 | case clang::X86::BI__builtin_ia32_vec_ext_v4di: { |
| 17302 | APValue Vec; |
| 17303 | APSInt IdxAPS; |
| 17304 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info) || |
| 17305 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: IdxAPS, Info)) |
| 17306 | return false; |
| 17307 | unsigned N = Vec.getVectorLength(); |
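|  | // The index is masked to the vector length (a power of two), so out-of-range
|  | // indices wrap around.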
| 17308 | unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1)); |
| 17309 | return Success(SI: Vec.getVectorElt(I: Idx).getInt(), E); |
| 17310 | } |
| 17311 | |
| 17312 | case clang::X86::BI__builtin_ia32_cvtb2mask128: |
| 17313 | case clang::X86::BI__builtin_ia32_cvtb2mask256: |
| 17314 | case clang::X86::BI__builtin_ia32_cvtb2mask512: |
| 17315 | case clang::X86::BI__builtin_ia32_cvtw2mask128: |
| 17316 | case clang::X86::BI__builtin_ia32_cvtw2mask256: |
| 17317 | case clang::X86::BI__builtin_ia32_cvtw2mask512: |
| 17318 | case clang::X86::BI__builtin_ia32_cvtd2mask128: |
| 17319 | case clang::X86::BI__builtin_ia32_cvtd2mask256: |
| 17320 | case clang::X86::BI__builtin_ia32_cvtd2mask512: |
| 17321 | case clang::X86::BI__builtin_ia32_cvtq2mask128: |
| 17322 | case clang::X86::BI__builtin_ia32_cvtq2mask256: |
| 17323 | case clang::X86::BI__builtin_ia32_cvtq2mask512: { |
| 17324 | assert(E->getNumArgs() == 1); |
| 17325 | APValue Vec; |
| 17326 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info)) |
| 17327 | return false; |
| 17328 | |
| 17329 | unsigned VectorLen = Vec.getVectorLength(); |
| 17330 | unsigned RetWidth = Info.Ctx.getIntWidth(T: E->getType()); |
| 17331 | llvm::APInt Bits(RetWidth, 0); |
| 17332 | |
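|  | // Each bit of the result mask is the sign bit (MSB) of the corresponding
|  | // vector element.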
| 17333 | for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) { |
| 17334 | const APSInt &A = Vec.getVectorElt(I: ElemNum).getInt(); |
| 17335 | unsigned MSB = A[A.getBitWidth() - 1]; |
| 17336 | Bits.setBitVal(BitPosition: ElemNum, BitValue: MSB); |
| 17337 | } |
| 17338 | |
| 17339 | APSInt RetMask(Bits, /*isUnsigned=*/true); |
| 17340 | return Success(V: APValue(RetMask), E); |
| 17341 | } |
| 17342 | |
| 17343 | case clang::X86::BI__builtin_ia32_cmpb128_mask: |
| 17344 | case clang::X86::BI__builtin_ia32_cmpw128_mask: |
| 17345 | case clang::X86::BI__builtin_ia32_cmpd128_mask: |
| 17346 | case clang::X86::BI__builtin_ia32_cmpq128_mask: |
| 17347 | case clang::X86::BI__builtin_ia32_cmpb256_mask: |
| 17348 | case clang::X86::BI__builtin_ia32_cmpw256_mask: |
| 17349 | case clang::X86::BI__builtin_ia32_cmpd256_mask: |
| 17350 | case clang::X86::BI__builtin_ia32_cmpq256_mask: |
| 17351 | case clang::X86::BI__builtin_ia32_cmpb512_mask: |
| 17352 | case clang::X86::BI__builtin_ia32_cmpw512_mask: |
| 17353 | case clang::X86::BI__builtin_ia32_cmpd512_mask: |
| 17354 | case clang::X86::BI__builtin_ia32_cmpq512_mask: |
| 17355 | case clang::X86::BI__builtin_ia32_ucmpb128_mask: |
| 17356 | case clang::X86::BI__builtin_ia32_ucmpw128_mask: |
| 17357 | case clang::X86::BI__builtin_ia32_ucmpd128_mask: |
| 17358 | case clang::X86::BI__builtin_ia32_ucmpq128_mask: |
| 17359 | case clang::X86::BI__builtin_ia32_ucmpb256_mask: |
| 17360 | case clang::X86::BI__builtin_ia32_ucmpw256_mask: |
| 17361 | case clang::X86::BI__builtin_ia32_ucmpd256_mask: |
| 17362 | case clang::X86::BI__builtin_ia32_ucmpq256_mask: |
| 17363 | case clang::X86::BI__builtin_ia32_ucmpb512_mask: |
| 17364 | case clang::X86::BI__builtin_ia32_ucmpw512_mask: |
| 17365 | case clang::X86::BI__builtin_ia32_ucmpd512_mask: |
| 17366 | case clang::X86::BI__builtin_ia32_ucmpq512_mask: { |
| 17367 | assert(E->getNumArgs() == 4); |
| 17368 | |
| 17369 | bool IsUnsigned = |
| 17370 | (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask && |
| 17371 | BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpw512_mask); |
| 17372 | |
| 17373 | APValue LHS, RHS; |
| 17374 | APSInt Mask, Opcode; |
| 17375 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: LHS, Info) || |
| 17376 | !EvaluateVector(E: E->getArg(Arg: 1), Result&: RHS, Info) || |
| 17377 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: Opcode, Info) || |
| 17378 | !EvaluateInteger(E: E->getArg(Arg: 3), Result&: Mask, Info)) |
| 17379 | return false; |
| 17380 | |
| 17381 | assert(LHS.getVectorLength() == RHS.getVectorLength()); |
| 17382 | |
| 17383 | unsigned VectorLen = LHS.getVectorLength(); |
| 17384 | unsigned RetWidth = Mask.getBitWidth(); |
| 17385 | |
| 17386 | APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true); |
| 17387 | |
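|  | // Each result bit is the element-wise comparison outcome ANDed with the
|  | // corresponding bit of the incoming write mask.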
| 17388 | for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) { |
| 17389 | const APSInt &A = LHS.getVectorElt(I: ElemNum).getInt(); |
| 17390 | const APSInt &B = RHS.getVectorElt(I: ElemNum).getInt(); |
| 17391 | bool Result = false; |
| 17392 | |
| 17393 | switch (Opcode.getExtValue() & 0x7) { |
| 17394 | case 0: // _MM_CMPINT_EQ |
| 17395 | Result = (A == B); |
| 17396 | break; |
| 17397 | case 1: // _MM_CMPINT_LT |
| 17398 | Result = IsUnsigned ? A.ult(RHS: B) : A.slt(RHS: B); |
| 17399 | break; |
| 17400 | case 2: // _MM_CMPINT_LE |
| 17401 | Result = IsUnsigned ? A.ule(RHS: B) : A.sle(RHS: B); |
| 17402 | break; |
| 17403 | case 3: // _MM_CMPINT_FALSE |
| 17404 | Result = false; |
| 17405 | break; |
| 17406 | case 4: // _MM_CMPINT_NE |
| 17407 | Result = (A != B); |
| 17408 | break; |
| 17409 | case 5: // _MM_CMPINT_NLT (>=) |
| 17410 | Result = IsUnsigned ? A.uge(RHS: B) : A.sge(RHS: B); |
| 17411 | break; |
| 17412 | case 6: // _MM_CMPINT_NLE (>) |
| 17413 | Result = IsUnsigned ? A.ugt(RHS: B) : A.sgt(RHS: B); |
| 17414 | break; |
| 17415 | case 7: // _MM_CMPINT_TRUE |
| 17416 | Result = true; |
| 17417 | break; |
| 17418 | } |
| 17419 | |
| 17420 | RetMask.setBitVal(BitPosition: ElemNum, BitValue: Mask[ElemNum] && Result); |
| 17421 | } |
| 17422 | |
| 17423 | return Success(V: APValue(RetMask), E); |
| 17424 | } |
| 17425 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
| 17426 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
| 17427 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { |
| 17428 | assert(E->getNumArgs() == 3); |
| 17429 | |
| 17430 | APValue Source, ShuffleMask; |
| 17431 | APSInt ZeroMask; |
| 17432 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Source, Info) || |
| 17433 | !EvaluateVector(E: E->getArg(Arg: 1), Result&: ShuffleMask, Info) || |
| 17434 | !EvaluateInteger(E: E->getArg(Arg: 2), Result&: ZeroMask, Info)) |
| 17435 | return false; |
| 17436 | |
| 17437 | assert(Source.getVectorLength() == ShuffleMask.getVectorLength()); |
| 17438 | assert(ZeroMask.getBitWidth() == Source.getVectorLength()); |
| 17439 | |
| 17440 | unsigned NumBytesInQWord = 8; |
| 17441 | unsigned NumBitsInByte = 8; |
| 17442 | unsigned NumBytes = Source.getVectorLength(); |
| 17443 | unsigned NumQWords = NumBytes / NumBytesInQWord; |
| 17444 | unsigned RetWidth = ZeroMask.getBitWidth(); |
| 17445 | APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true); |
| 17446 | |
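|  | // For each byte lane, the low 6 bits of the shuffle-mask byte select a bit
|  | // within the containing 64-bit source qword; that bit is copied to the result
|  | // only where the zero mask is set.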
| 17447 | for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) { |
| 17448 | APInt SourceQWord(64, 0); |
| 17449 | for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { |
| 17450 | uint64_t Byte = Source.getVectorElt(I: QWordId * NumBytesInQWord + ByteIdx) |
| 17451 | .getInt() |
| 17452 | .getZExtValue(); |
| 17453 | SourceQWord.insertBits(SubBits: APInt(8, Byte & 0xFF), bitPosition: ByteIdx * NumBitsInByte); |
| 17454 | } |
| 17455 | |
| 17456 | for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) { |
| 17457 | unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx; |
| 17458 | unsigned M = |
| 17459 | ShuffleMask.getVectorElt(I: SelIdx).getInt().getZExtValue() & 0x3F; |
| 17460 | if (ZeroMask[SelIdx]) { |
| 17461 | RetMask.setBitVal(BitPosition: SelIdx, BitValue: SourceQWord[M]); |
| 17462 | } |
| 17463 | } |
| 17464 | } |
| 17465 | return Success(V: APValue(RetMask), E); |
| 17466 | } |
| 17467 | } |
| 17468 | } |
| 17469 | |
| 17470 | /// Determine whether this is a pointer past the end of the complete |
| 17471 | /// object referred to by the lvalue. |
| 17472 | static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx, |
| 17473 | const LValue &LV) { |
| 17474 | // A null pointer can be viewed as being "past the end" but we don't |
| 17475 | // choose to look at it that way here. |
| 17476 | if (!LV.getLValueBase()) |
| 17477 | return false; |
| 17478 | |
| 17479 | // If the designator is valid and refers to a subobject, we're not pointing |
| 17480 | // past the end. |
| 17481 | if (!LV.getLValueDesignator().Invalid && |
| 17482 | !LV.getLValueDesignator().isOnePastTheEnd()) |
| 17483 | return false; |
| 17484 | |
| 17485 | // A pointer to an incomplete type might be past-the-end if the type's size
| 17486 | // is zero; we cannot tell, so conservatively report that it is.
| 17487 | QualType Ty = getType(B: LV.getLValueBase()); |
| 17488 | if (Ty->isIncompleteType()) |
| 17489 | return true; |
| 17490 | |
| 17491 | // Can't be past the end of an invalid object. |
| 17492 | if (LV.getLValueDesignator().Invalid) |
| 17493 | return false; |
| 17494 | |
| 17495 | // We're a past-the-end pointer if we point to the byte after the object, |
| 17496 | // no matter what our type or path is. |
| 17497 | auto Size = Ctx.getTypeSizeInChars(T: Ty); |
| 17498 | return LV.getLValueOffset() == Size; |
| 17499 | } |
| 17500 | |
| 17501 | namespace { |
| 17502 | |
| 17503 | /// Data recursive integer evaluator of certain binary operators. |
| 17504 | /// |
| 17505 | /// We use a data recursive algorithm for binary operators so that we are able |
| 17506 | /// to handle extreme cases of chained binary operators without causing stack |
| 17507 | /// overflow. |
| 17508 | class DataRecursiveIntBinOpEvaluator { |
| 17509 | struct EvalResult { |
| 17510 | APValue Val; |
| 17511 | bool Failed = false; |
| 17512 | |
| 17513 | EvalResult() = default; |
| 17514 | |
| 17515 | void swap(EvalResult &RHS) { |
| 17516 | Val.swap(RHS&: RHS.Val); |
| 17517 | Failed = RHS.Failed; |
| 17518 | RHS.Failed = false; |
| 17519 | } |
| 17520 | }; |
| 17521 | |
| 17522 | struct Job { |
| 17523 | const Expr *E; |
| 17524 | EvalResult LHSResult; // meaningful only for binary operator expressions.
| 17525 | enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind; |
| 17526 | |
| 17527 | Job() = default; |
| 17528 | Job(Job &&) = default; |
| 17529 | |
| 17530 | void startSpeculativeEval(EvalInfo &Info) { |
| 17531 | SpecEvalRAII = SpeculativeEvaluationRAII(Info); |
| 17532 | } |
| 17533 | |
| 17534 | private: |
| 17535 | SpeculativeEvaluationRAII SpecEvalRAII; |
| 17536 | }; |
| 17537 | |
| 17538 | SmallVector<Job, 16> Queue; |
| 17539 | |
| 17540 | IntExprEvaluator &IntEval; |
| 17541 | EvalInfo &Info; |
| 17542 | APValue &FinalResult; |
| 17543 | |
| 17544 | public: |
| 17545 | DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result) |
| 17546 | : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { } |
| 17547 | |
| 17548 | /// True if \param E is a binary operator that we are going to handle |
| 17549 | /// data recursively. |
| 17550 | /// We handle binary operators that are comma, logical, or that have operands |
| 17551 | /// with integral or enumeration type. |
| 17552 | static bool shouldEnqueue(const BinaryOperator *E) { |
| 17553 | return E->getOpcode() == BO_Comma || E->isLogicalOp() || |
| 17554 | (E->isPRValue() && E->getType()->isIntegralOrEnumerationType() && |
| 17555 | E->getLHS()->getType()->isIntegralOrEnumerationType() && |
| 17556 | E->getRHS()->getType()->isIntegralOrEnumerationType()); |
| 17557 | } |
| 17558 | |
| 17559 | bool Traverse(const BinaryOperator *E) { |
| 17560 | enqueue(E); |
| 17561 | EvalResult PrevResult; |
| 17562 | while (!Queue.empty()) |
| 17563 | process(Result&: PrevResult); |
| 17564 | |
| 17565 | if (PrevResult.Failed) return false; |
| 17566 | |
| 17567 | FinalResult.swap(RHS&: PrevResult.Val); |
| 17568 | return true; |
| 17569 | } |
| 17570 | |
| 17571 | private: |
| 17572 | bool Success(uint64_t Value, const Expr *E, APValue &Result) { |
| 17573 | return IntEval.Success(Value, E, Result); |
| 17574 | } |
| 17575 | bool Success(const APSInt &Value, const Expr *E, APValue &Result) { |
| 17576 | return IntEval.Success(SI: Value, E, Result); |
| 17577 | } |
| 17578 | bool Error(const Expr *E) { |
| 17579 | return IntEval.Error(E); |
| 17580 | } |
| 17581 | bool Error(const Expr *E, diag::kind D) { |
| 17582 | return IntEval.Error(E, D); |
| 17583 | } |
| 17584 | |
| 17585 | OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) { |
| 17586 | return Info.CCEDiag(E, DiagId: D); |
| 17587 | } |
| 17588 | |
| 17589 | // Returns true if visiting the RHS is necessary, false otherwise. |
| 17590 | bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E, |
| 17591 | bool &SuppressRHSDiags); |
| 17592 | |
| 17593 | bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult, |
| 17594 | const BinaryOperator *E, APValue &Result); |
| 17595 | |
| 17596 | void EvaluateExpr(const Expr *E, EvalResult &Result) { |
| 17597 | Result.Failed = !Evaluate(Result&: Result.Val, Info, E); |
| 17598 | if (Result.Failed) |
| 17599 | Result.Val = APValue(); |
| 17600 | } |
| 17601 | |
| 17602 | void process(EvalResult &Result); |
| 17603 | |
| 17604 | void enqueue(const Expr *E) { |
| 17605 | E = E->IgnoreParens(); |
| 17606 | Queue.resize(N: Queue.size()+1); |
| 17607 | Queue.back().E = E; |
| 17608 | Queue.back().Kind = Job::AnyExprKind; |
| 17609 | } |
| 17610 | }; |
| 17611 | |
| 17612 | } |
| 17613 | |
| 17614 | bool DataRecursiveIntBinOpEvaluator:: |
| 17615 | VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E, |
| 17616 | bool &SuppressRHSDiags) { |
| 17617 | if (E->getOpcode() == BO_Comma) { |
| 17618 | // Ignore LHS but note if we could not evaluate it. |
| 17619 | if (LHSResult.Failed) |
| 17620 | return Info.noteSideEffect(); |
| 17621 | return true; |
| 17622 | } |
| 17623 | |
| 17624 | if (E->isLogicalOp()) { |
| 17625 | bool LHSAsBool; |
| 17626 | if (!LHSResult.Failed && HandleConversionToBool(Val: LHSResult.Val, Result&: LHSAsBool)) { |
| 17627 | // We were able to evaluate the LHS, see if we can get away with not |
| 17628 | // evaluating the RHS: 0 && X -> 0, 1 || X -> 1 |
| 17629 | if (LHSAsBool == (E->getOpcode() == BO_LOr)) { |
| 17630 | Success(Value: LHSAsBool, E, Result&: LHSResult.Val); |
| 17631 | return false; // Ignore RHS |
| 17632 | } |
| 17633 | } else { |
| 17634 | LHSResult.Failed = true; |
| 17635 | |
| 17636 | // Since we weren't able to evaluate the left hand side, it |
| 17637 | // might have had side effects. |
| 17638 | if (!Info.noteSideEffect()) |
| 17639 | return false; |
| 17640 | |
| 17641 | // We can't evaluate the LHS; however, sometimes the result |
| 17642 | // is determined by the RHS: X && 0 -> 0, X || 1 -> 1. |
| 17643 | // Don't ignore RHS and suppress diagnostics from this arm. |
| 17644 | SuppressRHSDiags = true; |
| 17645 | } |
| 17646 | |
| 17647 | return true; |
| 17648 | } |
| 17649 | |
| 17650 | assert(E->getLHS()->getType()->isIntegralOrEnumerationType() && |
| 17651 | E->getRHS()->getType()->isIntegralOrEnumerationType()); |
| 17652 | |
| 17653 | if (LHSResult.Failed && !Info.noteFailure()) |
| 17654 | return false; // Ignore RHS.
| 17655 | |
| 17656 | return true; |
| 17657 | } |
| 17658 | |
| 17659 | static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index, |
| 17660 | bool IsSub) { |
| 17661 | // Compute the new offset in the appropriate width, wrapping at 64 bits. |
| 17662 | // FIXME: When compiling for a 32-bit target, we should use 32-bit |
| 17663 | // offsets. |
| 17664 | assert(!LVal.hasLValuePath() && "have designator for integer lvalue");
| 17665 | CharUnits &Offset = LVal.getLValueOffset(); |
| 17666 | uint64_t Offset64 = Offset.getQuantity(); |
| 17667 | uint64_t Index64 = Index.extOrTrunc(width: 64).getZExtValue(); |
| 17668 | Offset = CharUnits::fromQuantity(Quantity: IsSub ? Offset64 - Index64 |
| 17669 | : Offset64 + Index64); |
| 17670 | } |
| 17671 | |
| 17672 | bool DataRecursiveIntBinOpEvaluator:: |
| 17673 | VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult, |
| 17674 | const BinaryOperator *E, APValue &Result) { |
| 17675 | if (E->getOpcode() == BO_Comma) { |
| 17676 | if (RHSResult.Failed) |
| 17677 | return false; |
| 17678 | Result = RHSResult.Val; |
| 17679 | return true; |
| 17680 | } |
| 17681 | |
| 17682 | if (E->isLogicalOp()) { |
| 17683 | bool lhsResult, rhsResult; |
| 17684 | bool LHSIsOK = HandleConversionToBool(Val: LHSResult.Val, Result&: lhsResult); |
| 17685 | bool RHSIsOK = HandleConversionToBool(Val: RHSResult.Val, Result&: rhsResult); |
| 17686 | |
| 17687 | if (LHSIsOK) { |
| 17688 | if (RHSIsOK) { |
| 17689 | if (E->getOpcode() == BO_LOr) |
| 17690 | return Success(Value: lhsResult || rhsResult, E, Result); |
| 17691 | else |
| 17692 | return Success(Value: lhsResult && rhsResult, E, Result); |
| 17693 | } |
| 17694 | } else { |
| 17695 | if (RHSIsOK) { |
| 17696 | // We can't evaluate the LHS; however, sometimes the result |
| 17697 | // is determined by the RHS: X && 0 -> 0, X || 1 -> 1. |
| 17698 | if (rhsResult == (E->getOpcode() == BO_LOr)) |
| 17699 | return Success(Value: rhsResult, E, Result); |
| 17700 | } |
| 17701 | } |
| 17702 | |
| 17703 | return false; |
| 17704 | } |
| 17705 | |
| 17706 | assert(E->getLHS()->getType()->isIntegralOrEnumerationType() && |
| 17707 | E->getRHS()->getType()->isIntegralOrEnumerationType()); |
| 17708 | |
| 17709 | if (LHSResult.Failed || RHSResult.Failed) |
| 17710 | return false; |
| 17711 | |
| 17712 | const APValue &LHSVal = LHSResult.Val; |
| 17713 | const APValue &RHSVal = RHSResult.Val; |
| 17714 | |
| 17715 | // Handle cases like (unsigned long)&a + 4. |
| 17716 | if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) { |
| 17717 | Result = LHSVal; |
| 17718 | addOrSubLValueAsInteger(LVal&: Result, Index: RHSVal.getInt(), IsSub: E->getOpcode() == BO_Sub); |
| 17719 | return true; |
| 17720 | } |
| 17721 | |
| 17722 | // Handle cases like 4 + (unsigned long)&a |
| 17723 | if (E->getOpcode() == BO_Add && |
| 17724 | RHSVal.isLValue() && LHSVal.isInt()) { |
| 17725 | Result = RHSVal; |
| 17726 | addOrSubLValueAsInteger(LVal&: Result, Index: LHSVal.getInt(), /*IsSub*/false); |
| 17727 | return true; |
| 17728 | } |
| 17729 | |
| 17730 | if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) { |
| 17731 | // Handle (intptr_t)&&A - (intptr_t)&&B. |
| 17732 | if (!LHSVal.getLValueOffset().isZero() || |
| 17733 | !RHSVal.getLValueOffset().isZero()) |
| 17734 | return false; |
| 17735 | const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>(); |
| 17736 | const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>(); |
| 17737 | if (!LHSExpr || !RHSExpr) |
| 17738 | return false; |
| 17739 | const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: LHSExpr); |
| 17740 | const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: RHSExpr); |
| 17741 | if (!LHSAddrExpr || !RHSAddrExpr) |
| 17742 | return false; |
| 17743 | // Make sure both labels come from the same function. |
| 17744 | if (LHSAddrExpr->getLabel()->getDeclContext() != |
| 17745 | RHSAddrExpr->getLabel()->getDeclContext()) |
| 17746 | return false; |
| 17747 | Result = APValue(LHSAddrExpr, RHSAddrExpr); |
| 17748 | return true; |
| 17749 | } |
| 17750 | |
| 17751 | // All the remaining cases expect both operands to be integers.
| 17752 | if (!LHSVal.isInt() || !RHSVal.isInt()) |
| 17753 | return Error(E); |
| 17754 | |
| 17755 | // Set up the width and signedness manually, in case it can't be deduced |
| 17756 | // from the operation we're performing. |
| 17757 | // FIXME: Don't do this in the cases where we can deduce it. |
| 17758 | APSInt Value(Info.Ctx.getIntWidth(T: E->getType()), |
| 17759 | E->getType()->isUnsignedIntegerOrEnumerationType()); |
| 17760 | if (!handleIntIntBinOp(Info, E, LHS: LHSVal.getInt(), Opcode: E->getOpcode(), |
| 17761 | RHS: RHSVal.getInt(), Result&: Value)) |
| 17762 | return false; |
| 17763 | return Success(Value, E, Result); |
| 17764 | } |
| 17765 | |
| 17766 | void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) { |
| 17767 | Job &job = Queue.back(); |
| 17768 | |
| 17769 | switch (job.Kind) { |
| 17770 | case Job::AnyExprKind: { |
| 17771 | if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(Val: job.E)) { |
| 17772 | if (shouldEnqueue(E: Bop)) { |
| 17773 | job.Kind = Job::BinOpKind; |
| 17774 | enqueue(E: Bop->getLHS()); |
| 17775 | return; |
| 17776 | } |
| 17777 | } |
| 17778 | |
| 17779 | EvaluateExpr(E: job.E, Result); |
| 17780 | Queue.pop_back(); |
| 17781 | return; |
| 17782 | } |
| 17783 | |
| 17784 | case Job::BinOpKind: { |
| 17785 | const BinaryOperator *Bop = cast<BinaryOperator>(Val: job.E); |
| 17786 | bool SuppressRHSDiags = false; |
| 17787 | if (!VisitBinOpLHSOnly(LHSResult&: Result, E: Bop, SuppressRHSDiags)) { |
| 17788 | Queue.pop_back(); |
| 17789 | return; |
| 17790 | } |
| 17791 | if (SuppressRHSDiags) |
| 17792 | job.startSpeculativeEval(Info); |
| 17793 | job.LHSResult.swap(RHS&: Result); |
| 17794 | job.Kind = Job::BinOpVisitedLHSKind; |
| 17795 | enqueue(E: Bop->getRHS()); |
| 17796 | return; |
| 17797 | } |
| 17798 | |
| 17799 | case Job::BinOpVisitedLHSKind: { |
| 17800 | const BinaryOperator *Bop = cast<BinaryOperator>(Val: job.E); |
| 17801 | EvalResult RHS; |
| 17802 | RHS.swap(RHS&: Result); |
| 17803 | Result.Failed = !VisitBinOp(LHSResult: job.LHSResult, RHSResult: RHS, E: Bop, Result&: Result.Val); |
| 17804 | Queue.pop_back(); |
| 17805 | return; |
| 17806 | } |
| 17807 | } |
| 17808 | |
| 17809 | llvm_unreachable("Invalid Job::Kind!");
| 17810 | } |
| 17811 | |
| 17812 | namespace { |
| 17813 | enum class CmpResult { |
| 17814 | Unequal, |
| 17815 | Less, |
| 17816 | Equal, |
| 17817 | Greater, |
| 17818 | Unordered, |
| 17819 | }; |
| 17820 | } |
| 17821 | |
| 17822 | template <class SuccessCB, class AfterCB> |
| 17823 | static bool |
| 17824 | EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E, |
| 17825 | SuccessCB &&Success, AfterCB &&DoAfter) { |
| 17826 | assert(!E->isValueDependent()); |
| 17827 | assert(E->isComparisonOp() && "expected comparison operator");
| 17828 | assert((E->getOpcode() == BO_Cmp || |
| 17829 | E->getType()->isIntegralOrEnumerationType()) && |
| 17830 | "unsupported binary expression evaluation");
| 17831 | auto Error = [&](const Expr *E) { |
| 17832 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 17833 | return false; |
| 17834 | }; |
| 17835 | |
| 17836 | bool IsRelational = E->isRelationalOp() || E->getOpcode() == BO_Cmp; |
| 17837 | bool IsEquality = E->isEqualityOp(); |
| 17838 | |
| 17839 | QualType LHSTy = E->getLHS()->getType(); |
| 17840 | QualType RHSTy = E->getRHS()->getType(); |
| 17841 | |
| 17842 | if (LHSTy->isIntegralOrEnumerationType() && |
| 17843 | RHSTy->isIntegralOrEnumerationType()) { |
| 17844 | APSInt LHS, RHS; |
| 17845 | bool LHSOK = EvaluateInteger(E: E->getLHS(), Result&: LHS, Info); |
| 17846 | if (!LHSOK && !Info.noteFailure()) |
| 17847 | return false; |
| 17848 | if (!EvaluateInteger(E: E->getRHS(), Result&: RHS, Info) || !LHSOK) |
| 17849 | return false; |
| 17850 | if (LHS < RHS) |
| 17851 | return Success(CmpResult::Less, E); |
| 17852 | if (LHS > RHS) |
| 17853 | return Success(CmpResult::Greater, E); |
| 17854 | return Success(CmpResult::Equal, E); |
| 17855 | } |
| 17856 | |
| 17857 | if (LHSTy->isFixedPointType() || RHSTy->isFixedPointType()) { |
| 17858 | APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(Ty: LHSTy)); |
| 17859 | APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(Ty: RHSTy)); |
| 17860 | |
| 17861 | bool LHSOK = EvaluateFixedPointOrInteger(E: E->getLHS(), Result&: LHSFX, Info); |
| 17862 | if (!LHSOK && !Info.noteFailure()) |
| 17863 | return false; |
| 17864 | if (!EvaluateFixedPointOrInteger(E: E->getRHS(), Result&: RHSFX, Info) || !LHSOK) |
| 17865 | return false; |
| 17866 | if (LHSFX < RHSFX) |
| 17867 | return Success(CmpResult::Less, E); |
| 17868 | if (LHSFX > RHSFX) |
| 17869 | return Success(CmpResult::Greater, E); |
| 17870 | return Success(CmpResult::Equal, E); |
| 17871 | } |
| 17872 | |
| 17873 | if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) { |
| 17874 | ComplexValue LHS, RHS; |
| 17875 | bool LHSOK; |
| 17876 | if (E->isAssignmentOp()) { |
| 17877 | LValue LV; |
| 17878 | EvaluateLValue(E: E->getLHS(), Result&: LV, Info); |
| 17879 | LHSOK = false; |
| 17880 | } else if (LHSTy->isRealFloatingType()) { |
| 17881 | LHSOK = EvaluateFloat(E: E->getLHS(), Result&: LHS.FloatReal, Info); |
| 17882 | if (LHSOK) { |
| 17883 | LHS.makeComplexFloat(); |
| 17884 | LHS.FloatImag = APFloat(LHS.FloatReal.getSemantics()); |
| 17885 | } |
| 17886 | } else { |
| 17887 | LHSOK = EvaluateComplex(E: E->getLHS(), Res&: LHS, Info); |
| 17888 | } |
| 17889 | if (!LHSOK && !Info.noteFailure()) |
| 17890 | return false; |
| 17891 | |
| 17892 | if (E->getRHS()->getType()->isRealFloatingType()) { |
| 17893 | if (!EvaluateFloat(E: E->getRHS(), Result&: RHS.FloatReal, Info) || !LHSOK) |
| 17894 | return false; |
| 17895 | RHS.makeComplexFloat(); |
| 17896 | RHS.FloatImag = APFloat(RHS.FloatReal.getSemantics()); |
| 17897 | } else if (!EvaluateComplex(E: E->getRHS(), Res&: RHS, Info) || !LHSOK) |
| 17898 | return false; |
| 17899 | |
| 17900 | if (LHS.isComplexFloat()) { |
| 17901 | APFloat::cmpResult CR_r = |
| 17902 | LHS.getComplexFloatReal().compare(RHS: RHS.getComplexFloatReal()); |
| 17903 | APFloat::cmpResult CR_i = |
| 17904 | LHS.getComplexFloatImag().compare(RHS: RHS.getComplexFloatImag()); |
| 17905 | bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual; |
| 17906 | return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E); |
| 17907 | } else { |
| 17908 | assert(IsEquality && "invalid complex comparison" ); |
| 17909 | bool IsEqual = LHS.getComplexIntReal() == RHS.getComplexIntReal() && |
| 17910 | LHS.getComplexIntImag() == RHS.getComplexIntImag(); |
| 17911 | return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E); |
| 17912 | } |
| 17913 | } |
| 17914 | |
| 17915 | if (LHSTy->isRealFloatingType() && |
| 17916 | RHSTy->isRealFloatingType()) { |
| 17917 | APFloat RHS(0.0), LHS(0.0); |
| 17918 | |
| 17919 | bool LHSOK = EvaluateFloat(E: E->getRHS(), Result&: RHS, Info); |
| 17920 | if (!LHSOK && !Info.noteFailure()) |
| 17921 | return false; |
| 17922 | |
| 17923 | if (!EvaluateFloat(E: E->getLHS(), Result&: LHS, Info) || !LHSOK) |
| 17924 | return false; |
| 17925 | |
| 17926 | assert(E->isComparisonOp() && "Invalid binary operator!" ); |
| 17927 | llvm::APFloatBase::cmpResult APFloatCmpResult = LHS.compare(RHS); |
| 17928 | if (!Info.InConstantContext && |
| 17929 | APFloatCmpResult == APFloat::cmpUnordered && |
| 17930 | E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()).isFPConstrained()) { |
| 17931 | // Note: Compares may raise invalid in some cases involving NaN or sNaN. |
| 17932 | Info.FFDiag(E, DiagId: diag::note_constexpr_float_arithmetic_strict); |
| 17933 | return false; |
| 17934 | } |
| 17935 | auto GetCmpRes = [&]() { |
| 17936 | switch (APFloatCmpResult) { |
| 17937 | case APFloat::cmpEqual: |
| 17938 | return CmpResult::Equal; |
| 17939 | case APFloat::cmpLessThan: |
| 17940 | return CmpResult::Less; |
| 17941 | case APFloat::cmpGreaterThan: |
| 17942 | return CmpResult::Greater; |
| 17943 | case APFloat::cmpUnordered: |
| 17944 | return CmpResult::Unordered; |
| 17945 | } |
| 17946 | llvm_unreachable("Unrecognised APFloat::cmpResult enum" ); |
| 17947 | }; |
| 17948 | return Success(GetCmpRes(), E); |
| 17949 | } |
| 17950 | |
| 17951 | if (LHSTy->isPointerType() && RHSTy->isPointerType()) { |
| 17952 | LValue LHSValue, RHSValue; |
| 17953 | |
| 17954 | bool LHSOK = EvaluatePointer(E: E->getLHS(), Result&: LHSValue, Info); |
| 17955 | if (!LHSOK && !Info.noteFailure()) |
| 17956 | return false; |
| 17957 | |
| 17958 | if (!EvaluatePointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK) |
| 17959 | return false; |
| 17960 | |
| 17961 | // Reject differing bases from the normal codepath; we special-case |
| 17962 | // comparisons to null. |
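// E.g. given int a, b; the operands of &a < &b have different bases, so
// the comparison is handled by the special cases below.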
| 17963 | if (!HasSameBase(A: LHSValue, B: RHSValue)) { |
// Bail out early if we're checking for a potential constant expression.
| 17965 | // Otherwise, prefer to diagnose other issues. |
| 17966 | if (Info.checkingPotentialConstantExpression() && |
| 17967 | (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown)) |
| 17968 | return false; |
| 17969 | auto DiagComparison = [&] (unsigned DiagID, bool Reversed = false) { |
| 17970 | std::string LHS = LHSValue.toString(Ctx&: Info.Ctx, T: E->getLHS()->getType()); |
| 17971 | std::string RHS = RHSValue.toString(Ctx&: Info.Ctx, T: E->getRHS()->getType()); |
| 17972 | Info.FFDiag(E, DiagId: DiagID) |
| 17973 | << (Reversed ? RHS : LHS) << (Reversed ? LHS : RHS); |
| 17974 | return false; |
| 17975 | }; |
| 17976 | // Inequalities and subtractions between unrelated pointers have |
| 17977 | // unspecified or undefined behavior. |
| 17978 | if (!IsEquality) |
| 17979 | return DiagComparison( |
| 17980 | diag::note_constexpr_pointer_comparison_unspecified); |
| 17981 | // A constant address may compare equal to the address of a symbol. |
| 17982 | // The one exception is that address of an object cannot compare equal |
| 17983 | // to a null pointer constant. |
| 17984 | // TODO: Should we restrict this to actual null pointers, and exclude the |
| 17985 | // case of zero cast to pointer type? |
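// E.g. (int *)0x1234 == &obj compares a constant address against a
// symbol; only a genuine null pointer is known to be unequal to &obj.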
| 17986 | if ((!LHSValue.Base && !LHSValue.Offset.isZero()) || |
| 17987 | (!RHSValue.Base && !RHSValue.Offset.isZero())) |
| 17988 | return DiagComparison(diag::note_constexpr_pointer_constant_comparison, |
| 17989 | !RHSValue.Base); |
| 17990 | // C++2c [intro.object]/10: |
| 17991 | // Two objects [...] may have the same address if [...] they are both |
| 17992 | // potentially non-unique objects. |
| 17993 | // C++2c [intro.object]/9: |
| 17994 | // An object is potentially non-unique if it is a string literal object, |
| 17995 | // the backing array of an initializer list, or a subobject thereof. |
| 17996 | // |
| 17997 | // This makes the comparison result unspecified, so it's not a constant |
| 17998 | // expression. |
| 17999 | // |
| 18000 | // TODO: Do we need to handle the initializer list case here? |
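// E.g. "foo" == "foo" is not a constant expression: the two literal
// objects are potentially non-unique and may or may not share storage.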
| 18001 | if (ArePotentiallyOverlappingStringLiterals(Info, LHS: LHSValue, RHS: RHSValue)) |
| 18002 | return DiagComparison(diag::note_constexpr_literal_comparison); |
| 18003 | if (IsOpaqueConstantCall(LVal: LHSValue) || IsOpaqueConstantCall(LVal: RHSValue)) |
| 18004 | return DiagComparison(diag::note_constexpr_opaque_call_comparison, |
| 18005 | !IsOpaqueConstantCall(LVal: LHSValue)); |
| 18006 | // We can't tell whether weak symbols will end up pointing to the same |
| 18007 | // object. |
| 18008 | if (IsWeakLValue(Value: LHSValue) || IsWeakLValue(Value: RHSValue)) |
| 18009 | return DiagComparison(diag::note_constexpr_pointer_weak_comparison, |
| 18010 | !IsWeakLValue(Value: LHSValue)); |
| 18011 | // We can't compare the address of the start of one object with the |
| 18012 | // past-the-end address of another object, per C++ DR1652. |
| 18013 | if (LHSValue.Base && LHSValue.Offset.isZero() && |
| 18014 | isOnePastTheEndOfCompleteObject(Ctx: Info.Ctx, LV: RHSValue)) |
| 18015 | return DiagComparison(diag::note_constexpr_pointer_comparison_past_end, |
| 18016 | true); |
| 18017 | if (RHSValue.Base && RHSValue.Offset.isZero() && |
| 18018 | isOnePastTheEndOfCompleteObject(Ctx: Info.Ctx, LV: LHSValue)) |
| 18019 | return DiagComparison(diag::note_constexpr_pointer_comparison_past_end, |
| 18020 | false); |
| 18021 | // We can't tell whether an object is at the same address as another |
// zero-sized object.
| 18023 | if ((RHSValue.Base && isZeroSized(Value: LHSValue)) || |
| 18024 | (LHSValue.Base && isZeroSized(Value: RHSValue))) |
| 18025 | return DiagComparison( |
| 18026 | diag::note_constexpr_pointer_comparison_zero_sized); |
| 18027 | if (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown) |
| 18028 | return DiagComparison( |
| 18029 | diag::note_constexpr_pointer_comparison_unspecified); |
| 18030 | // FIXME: Verify both variables are live. |
| 18031 | return Success(CmpResult::Unequal, E); |
| 18032 | } |
| 18033 | |
| 18034 | const CharUnits &LHSOffset = LHSValue.getLValueOffset(); |
| 18035 | const CharUnits &RHSOffset = RHSValue.getLValueOffset(); |
| 18036 | |
| 18037 | SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator(); |
| 18038 | SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator(); |
| 18039 | |
| 18040 | // C++11 [expr.rel]p2: |
| 18041 | // - If two pointers point to non-static data members of the same object, |
// or to subobjects or array elements of such members, recursively, the
| 18043 | // pointer to the later declared member compares greater provided the |
| 18044 | // two members have the same access control and provided their class is |
| 18045 | // not a union. |
| 18046 | // [...] |
| 18047 | // - Otherwise pointer comparisons are unspecified. |
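// E.g. in struct S { public: int a; private: int b; }, a relational
// comparison of pointers to S::a and S::b is unspecified because the two
// members have different access control.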
| 18048 | if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) { |
| 18049 | bool WasArrayIndex; |
| 18050 | unsigned Mismatch = FindDesignatorMismatch( |
| 18051 | ObjType: LHSValue.Base.isNull() ? QualType() |
| 18052 | : getType(B: LHSValue.Base).getNonReferenceType(), |
| 18053 | A: LHSDesignator, B: RHSDesignator, WasArrayIndex); |
| 18054 | // At the point where the designators diverge, the comparison has a |
| 18055 | // specified value if: |
| 18056 | // - we are comparing array indices |
| 18057 | // - we are comparing fields of a union, or fields with the same access |
| 18058 | // Otherwise, the result is unspecified and thus the comparison is not a |
| 18059 | // constant expression. |
| 18060 | if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() && |
| 18061 | Mismatch < RHSDesignator.Entries.size()) { |
| 18062 | const FieldDecl *LF = getAsField(E: LHSDesignator.Entries[Mismatch]); |
| 18063 | const FieldDecl *RF = getAsField(E: RHSDesignator.Entries[Mismatch]); |
| 18064 | if (!LF && !RF) |
| 18065 | Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_classes); |
| 18066 | else if (!LF) |
| 18067 | Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_field) |
| 18068 | << getAsBaseClass(E: LHSDesignator.Entries[Mismatch]) |
| 18069 | << RF->getParent() << RF; |
| 18070 | else if (!RF) |
| 18071 | Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_comparison_base_field) |
| 18072 | << getAsBaseClass(E: RHSDesignator.Entries[Mismatch]) |
| 18073 | << LF->getParent() << LF; |
| 18074 | else if (!LF->getParent()->isUnion() && |
| 18075 | LF->getAccess() != RF->getAccess()) |
| 18076 | Info.CCEDiag(E, |
| 18077 | DiagId: diag::note_constexpr_pointer_comparison_differing_access) |
| 18078 | << LF << LF->getAccess() << RF << RF->getAccess() |
| 18079 | << LF->getParent(); |
| 18080 | } |
| 18081 | } |
| 18082 | |
| 18083 | // The comparison here must be unsigned, and performed with the same |
| 18084 | // width as the pointer. |
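// E.g. on a 32-bit target the offsets are reduced to 32 bits before the
// unsigned comparison below.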
| 18085 | unsigned PtrSize = Info.Ctx.getTypeSize(T: LHSTy); |
| 18086 | uint64_t CompareLHS = LHSOffset.getQuantity(); |
| 18087 | uint64_t CompareRHS = RHSOffset.getQuantity(); |
| 18088 | assert(PtrSize <= 64 && "Unexpected pointer width" ); |
| 18089 | uint64_t Mask = ~0ULL >> (64 - PtrSize); |
| 18090 | CompareLHS &= Mask; |
| 18091 | CompareRHS &= Mask; |
| 18092 | |
| 18093 | // If there is a base and this is a relational operator, we can only |
| 18094 | // compare pointers within the object in question; otherwise, the result |
| 18095 | // depends on where the object is located in memory. |
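// Note that a one-past-the-end pointer (offset equal to the object size)
// is still comparable; only offsets strictly beyond the object are
// rejected.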
| 18096 | if (!LHSValue.Base.isNull() && IsRelational) { |
| 18097 | QualType BaseTy = getType(B: LHSValue.Base).getNonReferenceType(); |
| 18098 | if (BaseTy->isIncompleteType()) |
| 18099 | return Error(E); |
| 18100 | CharUnits Size = Info.Ctx.getTypeSizeInChars(T: BaseTy); |
| 18101 | uint64_t OffsetLimit = Size.getQuantity(); |
| 18102 | if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit) |
| 18103 | return Error(E); |
| 18104 | } |
| 18105 | |
| 18106 | if (CompareLHS < CompareRHS) |
| 18107 | return Success(CmpResult::Less, E); |
| 18108 | if (CompareLHS > CompareRHS) |
| 18109 | return Success(CmpResult::Greater, E); |
| 18110 | return Success(CmpResult::Equal, E); |
| 18111 | } |
| 18112 | |
| 18113 | if (LHSTy->isMemberPointerType()) { |
| 18114 | assert(IsEquality && "unexpected member pointer operation" ); |
| 18115 | assert(RHSTy->isMemberPointerType() && "invalid comparison" ); |
| 18116 | |
| 18117 | MemberPtr LHSValue, RHSValue; |
| 18118 | |
| 18119 | bool LHSOK = EvaluateMemberPointer(E: E->getLHS(), Result&: LHSValue, Info); |
| 18120 | if (!LHSOK && !Info.noteFailure()) |
| 18121 | return false; |
| 18122 | |
| 18123 | if (!EvaluateMemberPointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK) |
| 18124 | return false; |
| 18125 | |
| 18126 | // If either operand is a pointer to a weak function, the comparison is not |
| 18127 | // constant. |
| 18128 | if (LHSValue.getDecl() && LHSValue.getDecl()->isWeak()) { |
| 18129 | Info.FFDiag(E, DiagId: diag::note_constexpr_mem_pointer_weak_comparison) |
| 18130 | << LHSValue.getDecl(); |
| 18131 | return false; |
| 18132 | } |
| 18133 | if (RHSValue.getDecl() && RHSValue.getDecl()->isWeak()) { |
| 18134 | Info.FFDiag(E, DiagId: diag::note_constexpr_mem_pointer_weak_comparison) |
| 18135 | << RHSValue.getDecl(); |
| 18136 | return false; |
| 18137 | } |
| 18138 | |
| 18139 | // C++11 [expr.eq]p2: |
| 18140 | // If both operands are null, they compare equal. Otherwise if only one is |
| 18141 | // null, they compare unequal. |
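// E.g. &S::x == nullptr evaluates to false, while a null member pointer
// compared against nullptr evaluates to true.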
| 18142 | if (!LHSValue.getDecl() || !RHSValue.getDecl()) { |
| 18143 | bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl(); |
| 18144 | return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E); |
| 18145 | } |
| 18146 | |
| 18147 | // Otherwise if either is a pointer to a virtual member function, the |
| 18148 | // result is unspecified. |
| 18149 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: LHSValue.getDecl())) |
| 18150 | if (MD->isVirtual()) |
| 18151 | Info.CCEDiag(E, DiagId: diag::note_constexpr_compare_virtual_mem_ptr) << MD; |
| 18152 | if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: RHSValue.getDecl())) |
| 18153 | if (MD->isVirtual()) |
| 18154 | Info.CCEDiag(E, DiagId: diag::note_constexpr_compare_virtual_mem_ptr) << MD; |
| 18155 | |
| 18156 | // Otherwise they compare equal if and only if they would refer to the |
| 18157 | // same member of the same most derived object or the same subobject if |
| 18158 | // they were dereferenced with a hypothetical object of the associated |
| 18159 | // class type. |
| 18160 | bool Equal = LHSValue == RHSValue; |
| 18161 | return Success(Equal ? CmpResult::Equal : CmpResult::Unequal, E); |
| 18162 | } |
| 18163 | |
| 18164 | if (LHSTy->isNullPtrType()) { |
| 18165 | assert(E->isComparisonOp() && "unexpected nullptr operation" ); |
| 18166 | assert(RHSTy->isNullPtrType() && "missing pointer conversion" ); |
| 18167 | // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t |
// are compared, the result is true if the operator is <=, >= or ==, and
| 18169 | // false otherwise. |
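// E.g. nullptr == nullptr and nullptr <= nullptr are true, while
// nullptr != nullptr is false; both operands are still evaluated below.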
| 18170 | LValue Res; |
| 18171 | if (!EvaluatePointer(E: E->getLHS(), Result&: Res, Info) || |
| 18172 | !EvaluatePointer(E: E->getRHS(), Result&: Res, Info)) |
| 18173 | return false; |
| 18174 | return Success(CmpResult::Equal, E); |
| 18175 | } |
| 18176 | |
| 18177 | return DoAfter(); |
| 18178 | } |
| 18179 | |
| 18180 | bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) { |
| 18181 | if (!CheckLiteralType(Info, E)) |
| 18182 | return false; |
| 18183 | |
| 18184 | auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) { |
| 18185 | ComparisonCategoryResult CCR; |
| 18186 | switch (CR) { |
| 18187 | case CmpResult::Unequal: |
| 18188 | llvm_unreachable("should never produce Unequal for three-way comparison" ); |
| 18189 | case CmpResult::Less: |
| 18190 | CCR = ComparisonCategoryResult::Less; |
| 18191 | break; |
| 18192 | case CmpResult::Equal: |
| 18193 | CCR = ComparisonCategoryResult::Equal; |
| 18194 | break; |
| 18195 | case CmpResult::Greater: |
| 18196 | CCR = ComparisonCategoryResult::Greater; |
| 18197 | break; |
| 18198 | case CmpResult::Unordered: |
| 18199 | CCR = ComparisonCategoryResult::Unordered; |
| 18200 | break; |
| 18201 | } |
// Evaluation succeeded. Look up the information for the comparison category
| 18203 | // type and fetch the VarDecl for the result. |
| 18204 | const ComparisonCategoryInfo &CmpInfo = |
| 18205 | Info.Ctx.CompCategories.getInfoForType(Ty: E->getType()); |
| 18206 | const VarDecl *VD = CmpInfo.getValueInfo(ValueKind: CmpInfo.makeWeakResult(Res: CCR))->VD; |
| 18207 | // Check and evaluate the result as a constant expression. |
| 18208 | LValue LV; |
| 18209 | LV.set(B: VD); |
| 18210 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: LV, RVal&: Result)) |
| 18211 | return false; |
| 18212 | return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result, |
| 18213 | Kind: ConstantExprKind::Normal); |
| 18214 | }; |
| 18215 | return EvaluateComparisonBinaryOperator(Info, E, Success&: OnSuccess, DoAfter: [&]() { |
| 18216 | return ExprEvaluatorBaseTy::VisitBinCmp(S: E); |
| 18217 | }); |
| 18218 | } |
| 18219 | |
| 18220 | bool RecordExprEvaluator::VisitCXXParenListInitExpr( |
| 18221 | const CXXParenListInitExpr *E) { |
| 18222 | return VisitCXXParenListOrInitListExpr(ExprToVisit: E, Args: E->getInitExprs()); |
| 18223 | } |
| 18224 | |
| 18225 | bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { |
| 18226 | // We don't support assignment in C. C++ assignments don't get here because |
| 18227 | // assignment is an lvalue in C++. |
| 18228 | if (E->isAssignmentOp()) { |
| 18229 | Error(E); |
| 18230 | if (!Info.noteFailure()) |
| 18231 | return false; |
| 18232 | } |
| 18233 | |
| 18234 | if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E)) |
| 18235 | return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E); |
| 18236 | |
| 18237 | assert((!E->getLHS()->getType()->isIntegralOrEnumerationType() || |
| 18238 | !E->getRHS()->getType()->isIntegralOrEnumerationType()) && |
| 18239 | "DataRecursiveIntBinOpEvaluator should have handled integral types" ); |
| 18240 | |
| 18241 | if (E->isComparisonOp()) { |
| 18242 | // Evaluate builtin binary comparisons by evaluating them as three-way |
| 18243 | // comparisons and then translating the result. |
| 18244 | auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) { |
| 18245 | assert((CR != CmpResult::Unequal || E->isEqualityOp()) && |
| 18246 | "should only produce Unequal for equality comparisons" ); |
| 18247 | bool IsEqual = CR == CmpResult::Equal, |
| 18248 | IsLess = CR == CmpResult::Less, |
| 18249 | IsGreater = CR == CmpResult::Greater; |
| 18250 | auto Op = E->getOpcode(); |
| 18251 | switch (Op) { |
| 18252 | default: |
| 18253 | llvm_unreachable("unsupported binary operator" ); |
| 18254 | case BO_EQ: |
| 18255 | case BO_NE: |
| 18256 | return Success(Value: IsEqual == (Op == BO_EQ), E); |
| 18257 | case BO_LT: |
| 18258 | return Success(Value: IsLess, E); |
| 18259 | case BO_GT: |
| 18260 | return Success(Value: IsGreater, E); |
| 18261 | case BO_LE: |
| 18262 | return Success(Value: IsEqual || IsLess, E); |
| 18263 | case BO_GE: |
| 18264 | return Success(Value: IsEqual || IsGreater, E); |
| 18265 | } |
| 18266 | }; |
| 18267 | return EvaluateComparisonBinaryOperator(Info, E, Success&: OnSuccess, DoAfter: [&]() { |
| 18268 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 18269 | }); |
| 18270 | } |
| 18271 | |
| 18272 | QualType LHSTy = E->getLHS()->getType(); |
| 18273 | QualType RHSTy = E->getRHS()->getType(); |
| 18274 | |
| 18275 | if (LHSTy->isPointerType() && RHSTy->isPointerType() && |
| 18276 | E->getOpcode() == BO_Sub) { |
| 18277 | LValue LHSValue, RHSValue; |
| 18278 | |
| 18279 | bool LHSOK = EvaluatePointer(E: E->getLHS(), Result&: LHSValue, Info); |
| 18280 | if (!LHSOK && !Info.noteFailure()) |
| 18281 | return false; |
| 18282 | |
| 18283 | if (!EvaluatePointer(E: E->getRHS(), Result&: RHSValue, Info) || !LHSOK) |
| 18284 | return false; |
| 18285 | |
| 18286 | // Reject differing bases from the normal codepath; we special-case |
| 18287 | // comparisons to null. |
| 18288 | if (!HasSameBase(A: LHSValue, B: RHSValue)) { |
| 18289 | if (Info.checkingPotentialConstantExpression() && |
| 18290 | (LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown)) |
| 18291 | return false; |
| 18292 | |
| 18293 | const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr *>(); |
| 18294 | const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr *>(); |
| 18295 | |
| 18296 | auto DiagArith = [&](unsigned DiagID) { |
| 18297 | std::string LHS = LHSValue.toString(Ctx&: Info.Ctx, T: E->getLHS()->getType()); |
| 18298 | std::string RHS = RHSValue.toString(Ctx&: Info.Ctx, T: E->getRHS()->getType()); |
| 18299 | Info.FFDiag(E, DiagId: DiagID) << LHS << RHS; |
| 18300 | if (LHSExpr && LHSExpr == RHSExpr) |
| 18301 | Info.Note(Loc: LHSExpr->getExprLoc(), |
| 18302 | DiagId: diag::note_constexpr_repeated_literal_eval) |
| 18303 | << LHSExpr->getSourceRange(); |
| 18304 | return false; |
| 18305 | }; |
| 18306 | |
| 18307 | if (!LHSExpr || !RHSExpr) |
| 18308 | return DiagArith(diag::note_constexpr_pointer_arith_unspecified); |
| 18309 | |
| 18310 | if (ArePotentiallyOverlappingStringLiterals(Info, LHS: LHSValue, RHS: RHSValue)) |
| 18311 | return DiagArith(diag::note_constexpr_literal_arith); |
| 18312 | |
| 18313 | const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: LHSExpr); |
| 18314 | const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(Val: RHSExpr); |
| 18315 | if (!LHSAddrExpr || !RHSAddrExpr) |
| 18316 | return Error(E); |
| 18317 | // Make sure both labels come from the same function. |
| 18318 | if (LHSAddrExpr->getLabel()->getDeclContext() != |
| 18319 | RHSAddrExpr->getLabel()->getDeclContext()) |
| 18320 | return Error(E); |
| 18321 | return Success(V: APValue(LHSAddrExpr, RHSAddrExpr), E); |
| 18322 | } |
| 18323 | const CharUnits &LHSOffset = LHSValue.getLValueOffset(); |
| 18324 | const CharUnits &RHSOffset = RHSValue.getLValueOffset(); |
| 18325 | |
| 18326 | SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator(); |
| 18327 | SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator(); |
| 18328 | |
| 18329 | // C++11 [expr.add]p6: |
| 18330 | // Unless both pointers point to elements of the same array object, or |
| 18331 | // one past the last element of the array object, the behavior is |
| 18332 | // undefined. |
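// E.g. given struct { int x[2]; int y[2]; } s, the pointers in
// &s.y[0] - &s.x[0] share a base but belong to different arrays, so the
// subtraction is diagnosed here.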
| 18333 | if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && |
| 18334 | !AreElementsOfSameArray(ObjType: getType(B: LHSValue.Base), A: LHSDesignator, |
| 18335 | B: RHSDesignator)) |
| 18336 | Info.CCEDiag(E, DiagId: diag::note_constexpr_pointer_subtraction_not_same_array); |
| 18337 | |
| 18338 | QualType Type = E->getLHS()->getType(); |
| 18339 | QualType ElementType = Type->castAs<PointerType>()->getPointeeType(); |
| 18340 | |
| 18341 | CharUnits ElementSize; |
| 18342 | if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: ElementType, Size&: ElementSize)) |
| 18343 | return false; |
| 18344 | |
| 18345 | // As an extension, a type may have zero size (empty struct or union in |
| 18346 | // C, array of zero length). Pointer subtraction in such cases has |
| 18347 | // undefined behavior, so is not constant. |
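// E.g. in C with the GNU empty-struct extension, struct E {} a[2]; gives
// the elements a size of zero, so &a[1] - &a[0] is rejected here.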
| 18348 | if (ElementSize.isZero()) { |
| 18349 | Info.FFDiag(E, DiagId: diag::note_constexpr_pointer_subtraction_zero_size) |
| 18350 | << ElementType; |
| 18351 | return false; |
| 18352 | } |
| 18353 | |
| 18354 | // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime, |
| 18355 | // and produce incorrect results when it overflows. Such behavior |
| 18356 | // appears to be non-conforming, but is common, so perhaps we should |
| 18357 | // assume the standard intended for such cases to be undefined behavior |
| 18358 | // and check for them. |
| 18359 | |
| 18360 | // Compute (LHSOffset - RHSOffset) / Size carefully, checking for |
| 18361 | // overflow in the final conversion to ptrdiff_t. |
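// 65 bits are enough to hold any difference of two signed 64-bit offsets
// without wrapping; the truncation below then detects overflow of the
// ptrdiff_t-typed result.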
| 18362 | APSInt LHS(llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false); |
| 18363 | APSInt RHS(llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false); |
| 18364 | APSInt ElemSize(llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), |
| 18365 | false); |
| 18366 | APSInt TrueResult = (LHS - RHS) / ElemSize; |
| 18367 | APSInt Result = TrueResult.trunc(width: Info.Ctx.getIntWidth(T: E->getType())); |
| 18368 | |
| 18369 | if (Result.extend(width: 65) != TrueResult && |
| 18370 | !HandleOverflow(Info, E, SrcValue: TrueResult, DestType: E->getType())) |
| 18371 | return false; |
| 18372 | return Success(SI: Result, E); |
| 18373 | } |
| 18374 | |
| 18375 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 18376 | } |
| 18377 | |
| 18378 | /// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with |
| 18379 | /// a result as the expression's type. |
| 18380 | bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( |
| 18381 | const UnaryExprOrTypeTraitExpr *E) { |
| 18382 | switch(E->getKind()) { |
| 18383 | case UETT_PreferredAlignOf: |
| 18384 | case UETT_AlignOf: { |
| 18385 | if (E->isArgumentType()) |
| 18386 | return Success( |
| 18387 | Size: GetAlignOfType(Ctx: Info.Ctx, T: E->getArgumentType(), ExprKind: E->getKind()), E); |
| 18388 | else |
| 18389 | return Success( |
| 18390 | Size: GetAlignOfExpr(Ctx: Info.Ctx, E: E->getArgumentExpr(), ExprKind: E->getKind()), E); |
| 18391 | } |
| 18392 | |
| 18393 | case UETT_PtrAuthTypeDiscriminator: { |
| 18394 | if (E->getArgumentType()->isDependentType()) |
| 18395 | return false; |
| 18396 | return Success( |
| 18397 | Value: Info.Ctx.getPointerAuthTypeDiscriminator(T: E->getArgumentType()), E); |
| 18398 | } |
| 18399 | case UETT_VecStep: { |
| 18400 | QualType Ty = E->getTypeOfArgument(); |
| 18401 | |
| 18402 | if (Ty->isVectorType()) { |
| 18403 | unsigned n = Ty->castAs<VectorType>()->getNumElements(); |
| 18404 | |
| 18405 | // The vec_step built-in functions that take a 3-component |
| 18406 | // vector return 4. (OpenCL 1.1 spec 6.11.12) |
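// E.g. vec_step(float3) evaluates to 4, while vec_step(float2) is 2 and
// vec_step applied to a scalar type is 1.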
| 18407 | if (n == 3) |
| 18408 | n = 4; |
| 18409 | |
| 18410 | return Success(Value: n, E); |
| 18411 | } else |
| 18412 | return Success(Value: 1, E); |
| 18413 | } |
| 18414 | |
| 18415 | case UETT_DataSizeOf: |
| 18416 | case UETT_SizeOf: { |
| 18417 | QualType SrcTy = E->getTypeOfArgument(); |
| 18418 | // C++ [expr.sizeof]p2: "When applied to a reference or a reference type, |
| 18419 | // the result is the size of the referenced type." |
| 18420 | if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>()) |
| 18421 | SrcTy = Ref->getPointeeType(); |
| 18422 | |
| 18423 | CharUnits Sizeof; |
| 18424 | if (!HandleSizeof(Info, Loc: E->getExprLoc(), Type: SrcTy, Size&: Sizeof, |
| 18425 | SOT: E->getKind() == UETT_DataSizeOf ? SizeOfType::DataSizeOf |
| 18426 | : SizeOfType::SizeOf)) { |
| 18427 | return false; |
| 18428 | } |
| 18429 | return Success(Size: Sizeof, E); |
| 18430 | } |
| 18431 | case UETT_OpenMPRequiredSimdAlign: |
| 18432 | assert(E->isArgumentType()); |
| 18433 | return Success( |
| 18434 | Value: Info.Ctx.toCharUnitsFromBits( |
| 18435 | BitSize: Info.Ctx.getOpenMPDefaultSimdAlign(T: E->getArgumentType())) |
| 18436 | .getQuantity(), |
| 18437 | E); |
| 18438 | case UETT_VectorElements: { |
| 18439 | QualType Ty = E->getTypeOfArgument(); |
| 18440 | // If the vector has a fixed size, we can determine the number of elements |
| 18441 | // at compile time. |
| 18442 | if (const auto *VT = Ty->getAs<VectorType>()) |
| 18443 | return Success(Value: VT->getNumElements(), E); |
| 18444 | |
| 18445 | assert(Ty->isSizelessVectorType()); |
| 18446 | if (Info.InConstantContext) |
| 18447 | Info.CCEDiag(E, DiagId: diag::note_constexpr_non_const_vectorelements) |
| 18448 | << E->getSourceRange(); |
| 18449 | |
| 18450 | return false; |
| 18451 | } |
| 18452 | case UETT_CountOf: { |
| 18453 | QualType Ty = E->getTypeOfArgument(); |
| 18454 | assert(Ty->isArrayType()); |
| 18455 | |
| 18456 | // We don't need to worry about array element qualifiers, so getting the |
| 18457 | // unsafe array type is fine. |
| 18458 | if (const auto *CAT = |
| 18459 | dyn_cast<ConstantArrayType>(Val: Ty->getAsArrayTypeUnsafe())) { |
| 18460 | return Success(I: CAT->getSize(), E); |
| 18461 | } |
| 18462 | |
| 18463 | assert(!Ty->isConstantSizeType()); |
| 18464 | |
// If it's a variable-length array type, we need to check whether it is a
// multidimensional array. If so, we check whether the size expression of
// the VLA is a constant; if it is, we can return that value.
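// E.g. _Countof(int[n][10]) asks for the outer bound n, which is only a
// constant expression if n itself folds to an integer constant.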
| 18468 | const auto *VAT = Info.Ctx.getAsVariableArrayType(T: Ty); |
| 18469 | assert(VAT); |
| 18470 | if (VAT->getElementType()->isArrayType()) { |
// Variable array size expression could be missing (e.g. int a[*][10]). In
| 18472 | // that case, it can't be a constant expression. |
| 18473 | if (!VAT->getSizeExpr()) { |
| 18474 | Info.FFDiag(Loc: E->getBeginLoc()); |
| 18475 | return false; |
| 18476 | } |
| 18477 | |
| 18478 | std::optional<APSInt> Res = |
| 18479 | VAT->getSizeExpr()->getIntegerConstantExpr(Ctx: Info.Ctx); |
| 18480 | if (Res) { |
| 18481 | // The resulting value always has type size_t, so we need to make the |
| 18482 | // returned APInt have the correct sign and bit-width. |
| 18483 | APInt Val{ |
| 18484 | static_cast<unsigned>(Info.Ctx.getTypeSize(T: Info.Ctx.getSizeType())), |
| 18485 | Res->getZExtValue()}; |
| 18486 | return Success(I: Val, E); |
| 18487 | } |
| 18488 | } |
| 18489 | |
| 18490 | // Definitely a variable-length type, which is not an ICE. |
| 18491 | // FIXME: Better diagnostic. |
| 18492 | Info.FFDiag(Loc: E->getBeginLoc()); |
| 18493 | return false; |
| 18494 | } |
| 18495 | } |
| 18496 | |
| 18497 | llvm_unreachable("unknown expr/type trait" ); |
| 18498 | } |
| 18499 | |
| 18500 | bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) { |
| 18501 | CharUnits Result; |
| 18502 | unsigned n = OOE->getNumComponents(); |
| 18503 | if (n == 0) |
| 18504 | return Error(E: OOE); |
| 18505 | QualType CurrentType = OOE->getTypeSourceInfo()->getType(); |
| 18506 | for (unsigned i = 0; i != n; ++i) { |
| 18507 | OffsetOfNode ON = OOE->getComponent(Idx: i); |
| 18508 | switch (ON.getKind()) { |
| 18509 | case OffsetOfNode::Array: { |
| 18510 | const Expr *Idx = OOE->getIndexExpr(Idx: ON.getArrayExprIndex()); |
| 18511 | APSInt IdxResult; |
| 18512 | if (!EvaluateInteger(E: Idx, Result&: IdxResult, Info)) |
| 18513 | return false; |
| 18514 | const ArrayType *AT = Info.Ctx.getAsArrayType(T: CurrentType); |
| 18515 | if (!AT) |
| 18516 | return Error(E: OOE); |
| 18517 | CurrentType = AT->getElementType(); |
| 18518 | CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(T: CurrentType); |
| 18519 | Result += IdxResult.getSExtValue() * ElementSize; |
| 18520 | break; |
| 18521 | } |
| 18522 | |
| 18523 | case OffsetOfNode::Field: { |
| 18524 | FieldDecl *MemberDecl = ON.getField(); |
| 18525 | const auto *RD = CurrentType->getAsRecordDecl(); |
| 18526 | if (!RD) |
| 18527 | return Error(E: OOE); |
| 18528 | if (RD->isInvalidDecl()) return false; |
| 18529 | const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(D: RD); |
| 18530 | unsigned i = MemberDecl->getFieldIndex(); |
| 18531 | assert(i < RL.getFieldCount() && "offsetof field in wrong type" ); |
| 18532 | Result += Info.Ctx.toCharUnitsFromBits(BitSize: RL.getFieldOffset(FieldNo: i)); |
| 18533 | CurrentType = MemberDecl->getType().getNonReferenceType(); |
| 18534 | break; |
| 18535 | } |
| 18536 | |
| 18537 | case OffsetOfNode::Identifier: |
| 18538 | llvm_unreachable("dependent __builtin_offsetof" ); |
| 18539 | |
| 18540 | case OffsetOfNode::Base: { |
| 18541 | CXXBaseSpecifier *BaseSpec = ON.getBase(); |
| 18542 | if (BaseSpec->isVirtual()) |
| 18543 | return Error(E: OOE); |
| 18544 | |
| 18545 | // Find the layout of the class whose base we are looking into. |
| 18546 | const auto *RD = CurrentType->getAsCXXRecordDecl(); |
| 18547 | if (!RD) |
| 18548 | return Error(E: OOE); |
| 18549 | if (RD->isInvalidDecl()) return false; |
| 18550 | const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(D: RD); |
| 18551 | |
| 18552 | // Find the base class itself. |
| 18553 | CurrentType = BaseSpec->getType(); |
| 18554 | const auto *BaseRD = CurrentType->getAsCXXRecordDecl(); |
| 18555 | if (!BaseRD) |
| 18556 | return Error(E: OOE); |
| 18557 | |
| 18558 | // Add the offset to the base. |
| 18559 | Result += RL.getBaseClassOffset(Base: BaseRD); |
| 18560 | break; |
| 18561 | } |
| 18562 | } |
| 18563 | } |
| 18564 | return Success(Size: Result, E: OOE); |
| 18565 | } |
| 18566 | |
| 18567 | bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { |
| 18568 | switch (E->getOpcode()) { |
| 18569 | default: |
| 18570 | // Address, indirect, pre/post inc/dec, etc are not valid constant exprs. |
| 18571 | // See C99 6.6p3. |
| 18572 | return Error(E); |
| 18573 | case UO_Extension: |
| 18574 | // FIXME: Should extension allow i-c-e extension expressions in its scope? |
| 18575 | // If so, we could clear the diagnostic ID. |
| 18576 | return Visit(S: E->getSubExpr()); |
| 18577 | case UO_Plus: |
| 18578 | // The result is just the value. |
| 18579 | return Visit(S: E->getSubExpr()); |
| 18580 | case UO_Minus: { |
| 18581 | if (!Visit(S: E->getSubExpr())) |
| 18582 | return false; |
| 18583 | if (!Result.isInt()) return Error(E); |
| 18584 | const APSInt &Value = Result.getInt(); |
| 18585 | if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) { |
| 18586 | if (Info.checkingForUndefinedBehavior()) |
| 18587 | Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(), |
| 18588 | DiagID: diag::warn_integer_constant_overflow) |
| 18589 | << toString(I: Value, Radix: 10, Signed: Value.isSigned(), /*formatAsCLiteral=*/false, |
| 18590 | /*UpperCase=*/true, /*InsertSeparators=*/true) |
| 18591 | << E->getType() << E->getSourceRange(); |
| 18592 | |
| 18593 | if (!HandleOverflow(Info, E, SrcValue: -Value.extend(width: Value.getBitWidth() + 1), |
| 18594 | DestType: E->getType())) |
| 18595 | return false; |
| 18596 | } |
| 18597 | return Success(SI: -Value, E); |
| 18598 | } |
| 18599 | case UO_Not: { |
| 18600 | if (!Visit(S: E->getSubExpr())) |
| 18601 | return false; |
| 18602 | if (!Result.isInt()) return Error(E); |
| 18603 | return Success(SI: ~Result.getInt(), E); |
| 18604 | } |
| 18605 | case UO_LNot: { |
| 18606 | bool bres; |
| 18607 | if (!EvaluateAsBooleanCondition(E: E->getSubExpr(), Result&: bres, Info)) |
| 18608 | return false; |
| 18609 | return Success(Value: !bres, E); |
| 18610 | } |
| 18611 | } |
| 18612 | } |
| 18613 | |
| 18614 | /// HandleCast - This is used to evaluate implicit or explicit casts where the |
| 18615 | /// result type is integer. |
| 18616 | bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 18617 | const Expr *SubExpr = E->getSubExpr(); |
| 18618 | QualType DestType = E->getType(); |
| 18619 | QualType SrcType = SubExpr->getType(); |
| 18620 | |
| 18621 | switch (E->getCastKind()) { |
| 18622 | case CK_BaseToDerived: |
| 18623 | case CK_DerivedToBase: |
| 18624 | case CK_UncheckedDerivedToBase: |
| 18625 | case CK_Dynamic: |
| 18626 | case CK_ToUnion: |
| 18627 | case CK_ArrayToPointerDecay: |
| 18628 | case CK_FunctionToPointerDecay: |
| 18629 | case CK_NullToPointer: |
| 18630 | case CK_NullToMemberPointer: |
| 18631 | case CK_BaseToDerivedMemberPointer: |
| 18632 | case CK_DerivedToBaseMemberPointer: |
| 18633 | case CK_ReinterpretMemberPointer: |
| 18634 | case CK_ConstructorConversion: |
| 18635 | case CK_IntegralToPointer: |
| 18636 | case CK_ToVoid: |
| 18637 | case CK_VectorSplat: |
| 18638 | case CK_IntegralToFloating: |
| 18639 | case CK_FloatingCast: |
| 18640 | case CK_CPointerToObjCPointerCast: |
| 18641 | case CK_BlockPointerToObjCPointerCast: |
| 18642 | case CK_AnyPointerToBlockPointerCast: |
| 18643 | case CK_ObjCObjectLValueCast: |
| 18644 | case CK_FloatingRealToComplex: |
| 18645 | case CK_FloatingComplexToReal: |
| 18646 | case CK_FloatingComplexCast: |
| 18647 | case CK_FloatingComplexToIntegralComplex: |
| 18648 | case CK_IntegralRealToComplex: |
| 18649 | case CK_IntegralComplexCast: |
| 18650 | case CK_IntegralComplexToFloatingComplex: |
| 18651 | case CK_BuiltinFnToFnPtr: |
| 18652 | case CK_ZeroToOCLOpaqueType: |
| 18653 | case CK_NonAtomicToAtomic: |
| 18654 | case CK_AddressSpaceConversion: |
| 18655 | case CK_IntToOCLSampler: |
| 18656 | case CK_FloatingToFixedPoint: |
| 18657 | case CK_FixedPointToFloating: |
| 18658 | case CK_FixedPointCast: |
| 18659 | case CK_IntegralToFixedPoint: |
| 18660 | case CK_MatrixCast: |
| 18661 | case CK_HLSLAggregateSplatCast: |
| 18662 | llvm_unreachable("invalid cast kind for integral value" ); |
| 18663 | |
| 18664 | case CK_BitCast: |
| 18665 | case CK_Dependent: |
| 18666 | case CK_LValueBitCast: |
| 18667 | case CK_ARCProduceObject: |
| 18668 | case CK_ARCConsumeObject: |
| 18669 | case CK_ARCReclaimReturnedObject: |
| 18670 | case CK_ARCExtendBlockObject: |
| 18671 | case CK_CopyAndAutoreleaseBlockObject: |
| 18672 | return Error(E); |
| 18673 | |
| 18674 | case CK_UserDefinedConversion: |
| 18675 | case CK_LValueToRValue: |
| 18676 | case CK_AtomicToNonAtomic: |
| 18677 | case CK_NoOp: |
| 18678 | case CK_LValueToRValueBitCast: |
| 18679 | case CK_HLSLArrayRValue: |
| 18680 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 18681 | |
| 18682 | case CK_MemberPointerToBoolean: |
| 18683 | case CK_PointerToBoolean: |
| 18684 | case CK_IntegralToBoolean: |
| 18685 | case CK_FloatingToBoolean: |
| 18686 | case CK_BooleanToSignedIntegral: |
| 18687 | case CK_FloatingComplexToBoolean: |
| 18688 | case CK_IntegralComplexToBoolean: { |
| 18689 | bool BoolResult; |
| 18690 | if (!EvaluateAsBooleanCondition(E: SubExpr, Result&: BoolResult, Info)) |
| 18691 | return false; |
| 18692 | uint64_t IntResult = BoolResult; |
| 18693 | if (BoolResult && E->getCastKind() == CK_BooleanToSignedIntegral) |
| 18694 | IntResult = (uint64_t)-1; |
| 18695 | return Success(Value: IntResult, E); |
| 18696 | } |
| 18697 | |
| 18698 | case CK_FixedPointToIntegral: { |
| 18699 | APFixedPoint Src(Info.Ctx.getFixedPointSemantics(Ty: SrcType)); |
| 18700 | if (!EvaluateFixedPoint(E: SubExpr, Result&: Src, Info)) |
| 18701 | return false; |
| 18702 | bool Overflowed; |
| 18703 | llvm::APSInt Result = Src.convertToInt( |
| 18704 | DstWidth: Info.Ctx.getIntWidth(T: DestType), |
| 18705 | DstSign: DestType->isSignedIntegerOrEnumerationType(), Overflow: &Overflowed); |
| 18706 | if (Overflowed && !HandleOverflow(Info, E, SrcValue: Result, DestType)) |
| 18707 | return false; |
| 18708 | return Success(SI: Result, E); |
| 18709 | } |
| 18710 | |
| 18711 | case CK_FixedPointToBoolean: { |
| 18712 | // Unsigned padding does not affect this. |
| 18713 | APValue Val; |
| 18714 | if (!Evaluate(Result&: Val, Info, E: SubExpr)) |
| 18715 | return false; |
| 18716 | return Success(Value: Val.getFixedPoint().getBoolValue(), E); |
| 18717 | } |
| 18718 | |
| 18719 | case CK_IntegralCast: { |
| 18720 | if (!Visit(S: SubExpr)) |
| 18721 | return false; |
| 18722 | |
| 18723 | if (!Result.isInt()) { |
// Allow casts of address-of-label differences if they are no-ops
// or narrowing, provided the result is at least 32 bits wide.
| 18726 | // (The narrowing case isn't actually guaranteed to |
| 18727 | // be constant-evaluatable except in some narrow cases which are hard |
| 18728 | // to detect here. We let it through on the assumption the user knows |
| 18729 | // what they are doing.) |
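// E.g. (int)(&&lbl1 - &&lbl2), using the GNU labels-as-values extension,
// is let through when int is at least 32 bits wide.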
| 18730 | if (Result.isAddrLabelDiff()) { |
| 18731 | unsigned DestBits = Info.Ctx.getTypeSize(T: DestType); |
| 18732 | return DestBits >= 32 && DestBits <= Info.Ctx.getTypeSize(T: SrcType); |
| 18733 | } |
| 18734 | // Only allow casts of lvalues if they are lossless. |
| 18735 | return Info.Ctx.getTypeSize(T: DestType) == Info.Ctx.getTypeSize(T: SrcType); |
| 18736 | } |
| 18737 | |
| 18738 | if (Info.Ctx.getLangOpts().CPlusPlus && DestType->isEnumeralType()) { |
| 18739 | const auto *ED = DestType->getAsEnumDecl(); |
| 18740 | // Check that the value is within the range of the enumeration values. |
| 18741 | // |
// This corresponds to [expr.static.cast]p10 which says:
| 18743 | // A value of integral or enumeration type can be explicitly converted |
| 18744 | // to a complete enumeration type ... If the enumeration type does not |
| 18745 | // have a fixed underlying type, the value is unchanged if the original |
| 18746 | // value is within the range of the enumeration values ([dcl.enum]), and |
| 18747 | // otherwise, the behavior is undefined. |
| 18748 | // |
| 18749 | // This was resolved as part of DR2338 which has CD5 status. |
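// E.g. for enum E { A = 0, B = 2 }; (no fixed underlying type) the
// representable range is [0, 3], so casting 4 to E is diagnosed.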
| 18750 | if (!ED->isFixed()) { |
| 18751 | llvm::APInt Min; |
| 18752 | llvm::APInt Max; |
| 18753 | |
| 18754 | ED->getValueRange(Max, Min); |
| 18755 | --Max; |
| 18756 | |
| 18757 | if (ED->getNumNegativeBits() && |
| 18758 | (Max.slt(RHS: Result.getInt().getSExtValue()) || |
| 18759 | Min.sgt(RHS: Result.getInt().getSExtValue()))) |
| 18760 | Info.CCEDiag(E, DiagId: diag::note_constexpr_unscoped_enum_out_of_range) |
| 18761 | << llvm::toString(I: Result.getInt(), Radix: 10) << Min.getSExtValue() |
| 18762 | << Max.getSExtValue() << ED; |
| 18763 | else if (!ED->getNumNegativeBits() && |
| 18764 | Max.ult(RHS: Result.getInt().getZExtValue())) |
| 18765 | Info.CCEDiag(E, DiagId: diag::note_constexpr_unscoped_enum_out_of_range) |
| 18766 | << llvm::toString(I: Result.getInt(), Radix: 10) << Min.getZExtValue() |
| 18767 | << Max.getZExtValue() << ED; |
| 18768 | } |
| 18769 | } |
| 18770 | |
| 18771 | return Success(SI: HandleIntToIntCast(Info, E, DestType, SrcType, |
| 18772 | Value: Result.getInt()), E); |
| 18773 | } |
| 18774 | |
| 18775 | case CK_PointerToIntegral: { |
| 18776 | CCEDiag(E, D: diag::note_constexpr_invalid_cast) |
| 18777 | << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret |
| 18778 | << Info.Ctx.getLangOpts().CPlusPlus << E->getSourceRange(); |
| 18779 | |
| 18780 | LValue LV; |
| 18781 | if (!EvaluatePointer(E: SubExpr, Result&: LV, Info)) |
| 18782 | return false; |
| 18783 | |
| 18784 | if (LV.getLValueBase()) { |
| 18785 | // Only allow based lvalue casts if they are lossless. |
| 18786 | // FIXME: Allow a larger integer size than the pointer size, and allow |
| 18787 | // narrowing back down to pointer width in subsequent integral casts. |
| 18788 | // FIXME: Check integer type's active bits, not its type size. |
| 18789 | if (Info.Ctx.getTypeSize(T: DestType) != Info.Ctx.getTypeSize(T: SrcType)) |
| 18790 | return Error(E); |
| 18791 | |
| 18792 | LV.Designator.setInvalid(); |
| 18793 | LV.moveInto(V&: Result); |
| 18794 | return true; |
| 18795 | } |
| 18796 | |
| 18797 | APSInt AsInt; |
| 18798 | APValue V; |
| 18799 | LV.moveInto(V); |
| 18800 | if (!V.toIntegralConstant(Result&: AsInt, SrcTy: SrcType, Ctx: Info.Ctx)) |
| 18801 | llvm_unreachable("Can't cast this!" ); |
| 18802 | |
| 18803 | return Success(SI: HandleIntToIntCast(Info, E, DestType, SrcType, Value: AsInt), E); |
| 18804 | } |
| 18805 | |
| 18806 | case CK_IntegralComplexToReal: { |
| 18807 | ComplexValue C; |
| 18808 | if (!EvaluateComplex(E: SubExpr, Res&: C, Info)) |
| 18809 | return false; |
| 18810 | return Success(SI: C.getComplexIntReal(), E); |
| 18811 | } |
| 18812 | |
| 18813 | case CK_FloatingToIntegral: { |
| 18814 | APFloat F(0.0); |
| 18815 | if (!EvaluateFloat(E: SubExpr, Result&: F, Info)) |
| 18816 | return false; |
| 18817 | |
| 18818 | APSInt Value; |
| 18819 | if (!HandleFloatToIntCast(Info, E, SrcType, Value: F, DestType, Result&: Value)) |
| 18820 | return false; |
| 18821 | return Success(SI: Value, E); |
| 18822 | } |
| 18823 | case CK_HLSLVectorTruncation: { |
| 18824 | APValue Val; |
| 18825 | if (!EvaluateVector(E: SubExpr, Result&: Val, Info)) |
| 18826 | return Error(E); |
| 18827 | return Success(V: Val.getVectorElt(I: 0), E); |
| 18828 | } |
| 18829 | case CK_HLSLMatrixTruncation: { |
| 18830 | // TODO: See #168935. Add matrix truncation support to expr constant. |
| 18831 | return Error(E); |
| 18832 | } |
| 18833 | case CK_HLSLElementwiseCast: { |
| 18834 | SmallVector<APValue> SrcVals; |
| 18835 | SmallVector<QualType> SrcTypes; |
| 18836 | |
| 18837 | if (!hlslElementwiseCastHelper(Info, E: SubExpr, DestTy: DestType, SrcVals, SrcTypes)) |
| 18838 | return false; |
| 18839 | |
// Cast our single element.
| 18841 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 18842 | APValue ResultVal; |
| 18843 | if (!handleScalarCast(Info, FPO, E, SourceTy: SrcTypes[0], DestTy: DestType, Original: SrcVals[0], |
| 18844 | Result&: ResultVal)) |
| 18845 | return false; |
| 18846 | return Success(V: ResultVal, E); |
| 18847 | } |
| 18848 | } |
| 18849 | |
| 18850 | llvm_unreachable("unknown cast resulting in integral value" ); |
| 18851 | } |
| 18852 | |
| 18853 | bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { |
| 18854 | if (E->getSubExpr()->getType()->isAnyComplexType()) { |
| 18855 | ComplexValue LV; |
| 18856 | if (!EvaluateComplex(E: E->getSubExpr(), Res&: LV, Info)) |
| 18857 | return false; |
| 18858 | if (!LV.isComplexInt()) |
| 18859 | return Error(E); |
| 18860 | return Success(SI: LV.getComplexIntReal(), E); |
| 18861 | } |
| 18862 | |
| 18863 | return Visit(S: E->getSubExpr()); |
| 18864 | } |
| 18865 | |
| 18866 | bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { |
| 18867 | if (E->getSubExpr()->getType()->isComplexIntegerType()) { |
| 18868 | ComplexValue LV; |
| 18869 | if (!EvaluateComplex(E: E->getSubExpr(), Res&: LV, Info)) |
| 18870 | return false; |
| 18871 | if (!LV.isComplexInt()) |
| 18872 | return Error(E); |
| 18873 | return Success(SI: LV.getComplexIntImag(), E); |
| 18874 | } |
| 18875 | |
| 18876 | VisitIgnoredValue(E: E->getSubExpr()); |
| 18877 | return Success(Value: 0, E); |
| 18878 | } |
| 18879 | |
| 18880 | bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) { |
| 18881 | return Success(Value: E->getPackLength(), E); |
| 18882 | } |
| 18883 | |
| 18884 | bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) { |
| 18885 | return Success(Value: E->getValue(), E); |
| 18886 | } |
| 18887 | |
| 18888 | bool IntExprEvaluator::VisitConceptSpecializationExpr( |
| 18889 | const ConceptSpecializationExpr *E) { |
| 18890 | return Success(Value: E->isSatisfied(), E); |
| 18891 | } |
| 18892 | |
| 18893 | bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) { |
| 18894 | return Success(Value: E->isSatisfied(), E); |
| 18895 | } |
| 18896 | |
| 18897 | bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { |
| 18898 | switch (E->getOpcode()) { |
| 18899 | default: |
| 18900 | // Invalid unary operators |
| 18901 | return Error(E); |
| 18902 | case UO_Plus: |
| 18903 | // The result is just the value. |
| 18904 | return Visit(S: E->getSubExpr()); |
| 18905 | case UO_Minus: { |
| 18906 | if (!Visit(S: E->getSubExpr())) return false; |
| 18907 | if (!Result.isFixedPoint()) |
| 18908 | return Error(E); |
| 18909 | bool Overflowed; |
| 18910 | APFixedPoint Negated = Result.getFixedPoint().negate(Overflow: &Overflowed); |
| 18911 | if (Overflowed && !HandleOverflow(Info, E, SrcValue: Negated, DestType: E->getType())) |
| 18912 | return false; |
| 18913 | return Success(V: Negated, E); |
| 18914 | } |
| 18915 | case UO_LNot: { |
| 18916 | bool bres; |
| 18917 | if (!EvaluateAsBooleanCondition(E: E->getSubExpr(), Result&: bres, Info)) |
| 18918 | return false; |
| 18919 | return Success(Value: !bres, E); |
| 18920 | } |
| 18921 | } |
| 18922 | } |
| 18923 | |
| 18924 | bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 18925 | const Expr *SubExpr = E->getSubExpr(); |
| 18926 | QualType DestType = E->getType(); |
| 18927 | assert(DestType->isFixedPointType() && |
| 18928 | "Expected destination type to be a fixed point type" ); |
| 18929 | auto DestFXSema = Info.Ctx.getFixedPointSemantics(Ty: DestType); |
| 18930 | |
| 18931 | switch (E->getCastKind()) { |
| 18932 | case CK_FixedPointCast: { |
| 18933 | APFixedPoint Src(Info.Ctx.getFixedPointSemantics(Ty: SubExpr->getType())); |
| 18934 | if (!EvaluateFixedPoint(E: SubExpr, Result&: Src, Info)) |
| 18935 | return false; |
| 18936 | bool Overflowed; |
| 18937 | APFixedPoint Result = Src.convert(DstSema: DestFXSema, Overflow: &Overflowed); |
| 18938 | if (Overflowed) { |
| 18939 | if (Info.checkingForUndefinedBehavior()) |
| 18940 | Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(), |
| 18941 | DiagID: diag::warn_fixedpoint_constant_overflow) |
| 18942 | << Result.toString() << E->getType(); |
| 18943 | if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType())) |
| 18944 | return false; |
| 18945 | } |
| 18946 | return Success(V: Result, E); |
| 18947 | } |
| 18948 | case CK_IntegralToFixedPoint: { |
| 18949 | APSInt Src; |
| 18950 | if (!EvaluateInteger(E: SubExpr, Result&: Src, Info)) |
| 18951 | return false; |
| 18952 | |
| 18953 | bool Overflowed; |
| 18954 | APFixedPoint IntResult = APFixedPoint::getFromIntValue( |
| 18955 | Value: Src, DstFXSema: Info.Ctx.getFixedPointSemantics(Ty: DestType), Overflow: &Overflowed); |
| 18956 | |
| 18957 | if (Overflowed) { |
| 18958 | if (Info.checkingForUndefinedBehavior()) |
| 18959 | Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(), |
| 18960 | DiagID: diag::warn_fixedpoint_constant_overflow) |
| 18961 | << IntResult.toString() << E->getType(); |
| 18962 | if (!HandleOverflow(Info, E, SrcValue: IntResult, DestType: E->getType())) |
| 18963 | return false; |
| 18964 | } |
| 18965 | |
| 18966 | return Success(V: IntResult, E); |
| 18967 | } |
| 18968 | case CK_FloatingToFixedPoint: { |
| 18969 | APFloat Src(0.0); |
| 18970 | if (!EvaluateFloat(E: SubExpr, Result&: Src, Info)) |
| 18971 | return false; |
| 18972 | |
| 18973 | bool Overflowed; |
| 18974 | APFixedPoint Result = APFixedPoint::getFromFloatValue( |
| 18975 | Value: Src, DstFXSema: Info.Ctx.getFixedPointSemantics(Ty: DestType), Overflow: &Overflowed); |
| 18976 | |
| 18977 | if (Overflowed) { |
| 18978 | if (Info.checkingForUndefinedBehavior()) |
| 18979 | Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(), |
| 18980 | DiagID: diag::warn_fixedpoint_constant_overflow) |
| 18981 | << Result.toString() << E->getType(); |
| 18982 | if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType())) |
| 18983 | return false; |
| 18984 | } |
| 18985 | |
| 18986 | return Success(V: Result, E); |
| 18987 | } |
| 18988 | case CK_NoOp: |
| 18989 | case CK_LValueToRValue: |
| 18990 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 18991 | default: |
| 18992 | return Error(E); |
| 18993 | } |
| 18994 | } |
| 18995 | |
| 18996 | bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { |
| 18997 | if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) |
| 18998 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 18999 | |
| 19000 | const Expr *LHS = E->getLHS(); |
| 19001 | const Expr *RHS = E->getRHS(); |
| 19002 | FixedPointSemantics ResultFXSema = |
| 19003 | Info.Ctx.getFixedPointSemantics(Ty: E->getType()); |
| 19004 | |
| 19005 | APFixedPoint LHSFX(Info.Ctx.getFixedPointSemantics(Ty: LHS->getType())); |
| 19006 | if (!EvaluateFixedPointOrInteger(E: LHS, Result&: LHSFX, Info)) |
| 19007 | return false; |
| 19008 | APFixedPoint RHSFX(Info.Ctx.getFixedPointSemantics(Ty: RHS->getType())); |
| 19009 | if (!EvaluateFixedPointOrInteger(E: RHS, Result&: RHSFX, Info)) |
| 19010 | return false; |
| 19011 | |
| 19012 | bool OpOverflow = false, ConversionOverflow = false; |
| 19013 | APFixedPoint Result(LHSFX.getSemantics()); |
| 19014 | switch (E->getOpcode()) { |
| 19015 | case BO_Add: { |
| 19016 | Result = LHSFX.add(Other: RHSFX, Overflow: &OpOverflow) |
| 19017 | .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow); |
| 19018 | break; |
| 19019 | } |
| 19020 | case BO_Sub: { |
| 19021 | Result = LHSFX.sub(Other: RHSFX, Overflow: &OpOverflow) |
| 19022 | .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow); |
| 19023 | break; |
| 19024 | } |
| 19025 | case BO_Mul: { |
| 19026 | Result = LHSFX.mul(Other: RHSFX, Overflow: &OpOverflow) |
| 19027 | .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow); |
| 19028 | break; |
| 19029 | } |
| 19030 | case BO_Div: { |
| 19031 | if (RHSFX.getValue() == 0) { |
| 19032 | Info.FFDiag(E, DiagId: diag::note_expr_divide_by_zero); |
| 19033 | return false; |
| 19034 | } |
| 19035 | Result = LHSFX.div(Other: RHSFX, Overflow: &OpOverflow) |
| 19036 | .convert(DstSema: ResultFXSema, Overflow: &ConversionOverflow); |
| 19037 | break; |
| 19038 | } |
| 19039 | case BO_Shl: |
| 19040 | case BO_Shr: { |
| 19041 | FixedPointSemantics LHSSema = LHSFX.getSemantics(); |
| 19042 | llvm::APSInt RHSVal = RHSFX.getValue(); |
| 19043 | |
| 19044 | unsigned ShiftBW = |
| 19045 | LHSSema.getWidth() - (unsigned)LHSSema.hasUnsignedPadding(); |
| 19046 | unsigned Amt = RHSVal.getLimitedValue(Limit: ShiftBW - 1); |
| 19047 | // Embedded-C 4.1.6.2.2: |
| 19048 | // The right operand must be nonnegative and less than the total number |
| 19049 | // of (nonpadding) bits of the fixed-point operand ... |
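// E.g. for a 16-bit short _Accum (a typical layout), a shift amount of 16
// or a negative amount is diagnosed, and the amount actually used is
// clamped to at most ShiftBW - 1.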
| 19050 | if (RHSVal.isNegative()) |
| 19051 | Info.CCEDiag(E, DiagId: diag::note_constexpr_negative_shift) << RHSVal; |
| 19052 | else if (Amt != RHSVal) |
| 19053 | Info.CCEDiag(E, DiagId: diag::note_constexpr_large_shift) |
| 19054 | << RHSVal << E->getType() << ShiftBW; |
| 19055 | |
| 19056 | if (E->getOpcode() == BO_Shl) |
| 19057 | Result = LHSFX.shl(Amt, Overflow: &OpOverflow); |
| 19058 | else |
| 19059 | Result = LHSFX.shr(Amt, Overflow: &OpOverflow); |
| 19060 | break; |
| 19061 | } |
| 19062 | default: |
| 19063 | return false; |
| 19064 | } |
| 19065 | if (OpOverflow || ConversionOverflow) { |
| 19066 | if (Info.checkingForUndefinedBehavior()) |
| 19067 | Info.Ctx.getDiagnostics().Report(Loc: E->getExprLoc(), |
| 19068 | DiagID: diag::warn_fixedpoint_constant_overflow) |
| 19069 | << Result.toString() << E->getType(); |
| 19070 | if (!HandleOverflow(Info, E, SrcValue: Result, DestType: E->getType())) |
| 19071 | return false; |
| 19072 | } |
| 19073 | return Success(V: Result, E); |
| 19074 | } |
| 19075 | |
| 19076 | //===----------------------------------------------------------------------===// |
| 19077 | // Float Evaluation |
| 19078 | //===----------------------------------------------------------------------===// |
| 19079 | |
| 19080 | namespace { |
| 19081 | class FloatExprEvaluator |
| 19082 | : public ExprEvaluatorBase<FloatExprEvaluator> { |
| 19083 | APFloat &Result; |
| 19084 | public: |
| 19085 | FloatExprEvaluator(EvalInfo &info, APFloat &result) |
| 19086 | : ExprEvaluatorBaseTy(info), Result(result) {} |
| 19087 | |
| 19088 | bool Success(const APValue &V, const Expr *e) { |
| 19089 | Result = V.getFloat(); |
| 19090 | return true; |
| 19091 | } |
| 19092 | |
| 19093 | bool ZeroInitialization(const Expr *E) { |
| 19094 | Result = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: E->getType())); |
| 19095 | return true; |
| 19096 | } |
| 19097 | |
| 19098 | bool VisitCallExpr(const CallExpr *E); |
| 19099 | |
| 19100 | bool VisitUnaryOperator(const UnaryOperator *E); |
| 19101 | bool VisitBinaryOperator(const BinaryOperator *E); |
| 19102 | bool VisitFloatingLiteral(const FloatingLiteral *E); |
| 19103 | bool VisitCastExpr(const CastExpr *E); |
| 19104 | |
| 19105 | bool VisitUnaryReal(const UnaryOperator *E); |
| 19106 | bool VisitUnaryImag(const UnaryOperator *E); |
| 19107 | |
| 19108 | // FIXME: Missing: array subscript of vector, member of vector |
| 19109 | }; |
| 19110 | } // end anonymous namespace |
| 19111 | |
| 19112 | static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) { |
| 19113 | assert(!E->isValueDependent()); |
| 19114 | assert(E->isPRValue() && E->getType()->isRealFloatingType()); |
| 19115 | return FloatExprEvaluator(Info, Result).Visit(S: E); |
| 19116 | } |
| 19117 | |
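|       | // Evaluate the payload argument of __builtin_nan* / __builtin_nans*: the
|       | // string literal, if empty or parseable as an integer, supplies the NaN
|       | // payload for the destination type's semantics.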
| 19118 | static bool TryEvaluateBuiltinNaN(const ASTContext &Context, |
| 19119 | QualType ResultTy, |
| 19120 | const Expr *Arg, |
| 19121 | bool SNaN, |
| 19122 | llvm::APFloat &Result) { |
| 19123 | const StringLiteral *S = dyn_cast<StringLiteral>(Val: Arg->IgnoreParenCasts()); |
| 19124 | if (!S) return false; |
| 19125 | |
| 19126 | const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: ResultTy); |
| 19127 | |
| 19128 | llvm::APInt fill; |
| 19129 | |
| 19130 | // Treat empty strings as if they were zero. |
| 19131 | if (S->getString().empty()) |
| 19132 | fill = llvm::APInt(32, 0); |
| 19133 | else if (S->getString().getAsInteger(Radix: 0, Result&: fill)) |
| 19134 | return false; |
| 19135 | |
| 19136 | if (Context.getTargetInfo().isNan2008()) { |
| 19137 | if (SNaN) |
| 19138 | Result = llvm::APFloat::getSNaN(Sem, Negative: false, payload: &fill); |
| 19139 | else |
| 19140 | Result = llvm::APFloat::getQNaN(Sem, Negative: false, payload: &fill); |
| 19141 | } else { |
| 19142 | // Prior to IEEE 754-2008, architectures were allowed to choose whether |
| 19143 | // the first bit of their significand was set for qNaN or sNaN. MIPS chose |
| 19144 | // a different encoding from what became the standard in 2008, and for pre-
| 19145 | // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
| 19146 | // sNaN. This is now known as "legacy NaN" encoding. |
| 19147 | if (SNaN) |
| 19148 | Result = llvm::APFloat::getQNaN(Sem, Negative: false, payload: &fill); |
| 19149 | else |
| 19150 | Result = llvm::APFloat::getSNaN(Sem, Negative: false, payload: &fill); |
| 19151 | } |
| 19152 | |
| 19153 | return true; |
| 19154 | } |
| 19155 | |
| 19156 | bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) { |
| 19157 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 19158 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 19159 | |
| 19160 | switch (E->getBuiltinCallee()) { |
| 19161 | default: |
| 19162 | return false; |
| 19163 | |
| 19164 | case Builtin::BI__builtin_huge_val: |
| 19165 | case Builtin::BI__builtin_huge_valf: |
| 19166 | case Builtin::BI__builtin_huge_vall: |
| 19167 | case Builtin::BI__builtin_huge_valf16: |
| 19168 | case Builtin::BI__builtin_huge_valf128: |
| 19169 | case Builtin::BI__builtin_inf: |
| 19170 | case Builtin::BI__builtin_inff: |
| 19171 | case Builtin::BI__builtin_infl: |
| 19172 | case Builtin::BI__builtin_inff16: |
| 19173 | case Builtin::BI__builtin_inff128: { |
| 19174 | const llvm::fltSemantics &Sem = |
| 19175 | Info.Ctx.getFloatTypeSemantics(T: E->getType()); |
| 19176 | Result = llvm::APFloat::getInf(Sem); |
| 19177 | return true; |
| 19178 | } |
| 19179 | |
| 19180 | case Builtin::BI__builtin_nans: |
| 19181 | case Builtin::BI__builtin_nansf: |
| 19182 | case Builtin::BI__builtin_nansl: |
| 19183 | case Builtin::BI__builtin_nansf16: |
| 19184 | case Builtin::BI__builtin_nansf128: |
| 19185 | if (!TryEvaluateBuiltinNaN(Context: Info.Ctx, ResultTy: E->getType(), Arg: E->getArg(Arg: 0), |
| 19186 | SNaN: true, Result)) |
| 19187 | return Error(E); |
| 19188 | return true; |
| 19189 | |
| 19190 | case Builtin::BI__builtin_nan: |
| 19191 | case Builtin::BI__builtin_nanf: |
| 19192 | case Builtin::BI__builtin_nanl: |
| 19193 | case Builtin::BI__builtin_nanf16: |
| 19194 | case Builtin::BI__builtin_nanf128: |
| 19195 | // If this is __builtin_nan("..."), turn it into a NaN; otherwise we
| 19196 | // can't constant fold it.
| 19197 | if (!TryEvaluateBuiltinNaN(Context: Info.Ctx, ResultTy: E->getType(), Arg: E->getArg(Arg: 0), |
| 19198 | SNaN: false, Result)) |
| 19199 | return Error(E); |
| 19200 | return true; |
| 19201 | |
| 19202 | case Builtin::BI__builtin_elementwise_abs: |
| 19203 | case Builtin::BI__builtin_fabs: |
| 19204 | case Builtin::BI__builtin_fabsf: |
| 19205 | case Builtin::BI__builtin_fabsl: |
| 19206 | case Builtin::BI__builtin_fabsf128: |
| 19207 | // The C standard says "fabs raises no floating-point exceptions, |
| 19208 | // even if x is a signaling NaN. The returned value is independent of |
| 19209 | // the current rounding direction mode." Therefore constant folding can |
| 19210 | // proceed without regard to the floating point settings. |
| 19211 | // Reference: WG14 N2478 F.10.4.3.
| 19212 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info)) |
| 19213 | return false; |
| 19214 | |
| 19215 | if (Result.isNegative()) |
| 19216 | Result.changeSign(); |
| 19217 | return true; |
| 19218 | |
| 19219 | case Builtin::BI__arithmetic_fence: |
| 19220 | return EvaluateFloat(E: E->getArg(Arg: 0), Result, Info); |
| 19221 | |
| 19222 | // FIXME: Builtin::BI__builtin_powi |
| 19223 | // FIXME: Builtin::BI__builtin_powif |
| 19224 | // FIXME: Builtin::BI__builtin_powil |
| 19225 | |
| 19226 | case Builtin::BI__builtin_copysign: |
| 19227 | case Builtin::BI__builtin_copysignf: |
| 19228 | case Builtin::BI__builtin_copysignl: |
| 19229 | case Builtin::BI__builtin_copysignf128: { |
| 19230 | APFloat RHS(0.); |
| 19231 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) || |
| 19232 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 19233 | return false; |
| 19234 | Result.copySign(RHS); |
| 19235 | return true; |
| 19236 | } |
| 19237 | |
| 19238 | case Builtin::BI__builtin_fmax: |
| 19239 | case Builtin::BI__builtin_fmaxf: |
| 19240 | case Builtin::BI__builtin_fmaxl: |
| 19241 | case Builtin::BI__builtin_fmaxf16: |
| 19242 | case Builtin::BI__builtin_fmaxf128: { |
| 19243 | APFloat RHS(0.); |
| 19244 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) || |
| 19245 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 19246 | return false; |
| 19247 | Result = maxnum(A: Result, B: RHS); |
| 19248 | return true; |
| 19249 | } |
| 19250 | |
| 19251 | case Builtin::BI__builtin_fmin: |
| 19252 | case Builtin::BI__builtin_fminf: |
| 19253 | case Builtin::BI__builtin_fminl: |
| 19254 | case Builtin::BI__builtin_fminf16: |
| 19255 | case Builtin::BI__builtin_fminf128: { |
| 19256 | APFloat RHS(0.); |
| 19257 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) || |
| 19258 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 19259 | return false; |
| 19260 | Result = minnum(A: Result, B: RHS); |
| 19261 | return true; |
| 19262 | } |
| 19263 | |
| 19264 | case Builtin::BI__builtin_fmaximum_num: |
| 19265 | case Builtin::BI__builtin_fmaximum_numf: |
| 19266 | case Builtin::BI__builtin_fmaximum_numl: |
| 19267 | case Builtin::BI__builtin_fmaximum_numf16: |
| 19268 | case Builtin::BI__builtin_fmaximum_numf128: { |
| 19269 | APFloat RHS(0.); |
| 19270 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) || |
| 19271 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 19272 | return false; |
| 19273 | Result = maximumnum(A: Result, B: RHS); |
| 19274 | return true; |
| 19275 | } |
| 19276 | |
| 19277 | case Builtin::BI__builtin_fminimum_num: |
| 19278 | case Builtin::BI__builtin_fminimum_numf: |
| 19279 | case Builtin::BI__builtin_fminimum_numl: |
| 19280 | case Builtin::BI__builtin_fminimum_numf16: |
| 19281 | case Builtin::BI__builtin_fminimum_numf128: { |
| 19282 | APFloat RHS(0.); |
| 19283 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) || |
| 19284 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: RHS, Info)) |
| 19285 | return false; |
| 19286 | Result = minimumnum(A: Result, B: RHS); |
| 19287 | return true; |
| 19288 | } |
| 19289 | |
| 19290 | case Builtin::BI__builtin_elementwise_fma: { |
| 19291 | if (!E->getArg(Arg: 0)->isPRValue() || !E->getArg(Arg: 1)->isPRValue() || |
| 19292 | !E->getArg(Arg: 2)->isPRValue()) { |
| 19293 | return false; |
| 19294 | } |
| 19295 | APFloat SourceY(0.), SourceZ(0.); |
| 19296 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result, Info) || |
| 19297 | !EvaluateFloat(E: E->getArg(Arg: 1), Result&: SourceY, Info) || |
| 19298 | !EvaluateFloat(E: E->getArg(Arg: 2), Result&: SourceZ, Info)) |
| 19299 | return false; |
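|       | // Perform the fused multiply-add under the rounding mode currently in
|       | // effect for this expression; the APFloat operation status is ignored.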
| 19300 | llvm::RoundingMode RM = getActiveRoundingMode(Info&: getEvalInfo(), E); |
| 19301 | (void)Result.fusedMultiplyAdd(Multiplicand: SourceY, Addend: SourceZ, RM); |
| 19302 | return true; |
| 19303 | } |
| 19304 | |
| 19305 | case clang::X86::BI__builtin_ia32_vec_ext_v4sf: { |
| 19306 | APValue Vec; |
| 19307 | APSInt IdxAPS; |
| 19308 | if (!EvaluateVector(E: E->getArg(Arg: 0), Result&: Vec, Info) || |
| 19309 | !EvaluateInteger(E: E->getArg(Arg: 1), Result&: IdxAPS, Info)) |
| 19310 | return false; |
| 19311 | unsigned N = Vec.getVectorLength(); |
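|       | // The vector length is a power of two, so masking the index with (N - 1)
|       | // reduces it modulo the number of elements and keeps the access in bounds.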
| 19312 | unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1)); |
| 19313 | return Success(V: Vec.getVectorElt(I: Idx), e: E); |
| 19314 | } |
| 19315 | } |
| 19316 | } |
| 19317 | |
| 19318 | bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { |
| 19319 | if (E->getSubExpr()->getType()->isAnyComplexType()) { |
| 19320 | ComplexValue CV; |
| 19321 | if (!EvaluateComplex(E: E->getSubExpr(), Res&: CV, Info)) |
| 19322 | return false; |
| 19323 | Result = CV.FloatReal; |
| 19324 | return true; |
| 19325 | } |
| 19326 | |
| 19327 | return Visit(S: E->getSubExpr()); |
| 19328 | } |
| 19329 | |
| 19330 | bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) { |
| 19331 | if (E->getSubExpr()->getType()->isAnyComplexType()) { |
| 19332 | ComplexValue CV; |
| 19333 | if (!EvaluateComplex(E: E->getSubExpr(), Res&: CV, Info)) |
| 19334 | return false; |
| 19335 | Result = CV.FloatImag; |
| 19336 | return true; |
| 19337 | } |
| 19338 | |
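|       | // The operand is real, so __imag__ produces a zero of the result type's
|       | // semantics; the operand is still visited for side-effect checking.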
| 19339 | VisitIgnoredValue(E: E->getSubExpr()); |
| 19340 | const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(T: E->getType()); |
| 19341 | Result = llvm::APFloat::getZero(Sem); |
| 19342 | return true; |
| 19343 | } |
| 19344 | |
| 19345 | bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { |
| 19346 | switch (E->getOpcode()) { |
| 19347 | default: return Error(E); |
| 19348 | case UO_Plus: |
| 19349 | return EvaluateFloat(E: E->getSubExpr(), Result, Info); |
| 19350 | case UO_Minus: |
| 19351 | // Per the C standard, WG14 N2478 F.3 p4:
| 19352 | // "the unary - raises no floating point exceptions,
| 19353 | // even if the operand is signaling."
| 19354 | if (!EvaluateFloat(E: E->getSubExpr(), Result, Info)) |
| 19355 | return false; |
| 19356 | Result.changeSign(); |
| 19357 | return true; |
| 19358 | } |
| 19359 | } |
| 19360 | |
| 19361 | bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { |
| 19362 | if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) |
| 19363 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 19364 | |
| 19365 | APFloat RHS(0.0); |
| 19366 | bool LHSOK = EvaluateFloat(E: E->getLHS(), Result, Info); |
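|       | // If the LHS failed, keep evaluating the RHS when we are only gathering
|       | // notes, so diagnostics are as complete as possible.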
| 19367 | if (!LHSOK && !Info.noteFailure()) |
| 19368 | return false; |
| 19369 | return EvaluateFloat(E: E->getRHS(), Result&: RHS, Info) && LHSOK && |
| 19370 | handleFloatFloatBinOp(Info, E, LHS&: Result, Opcode: E->getOpcode(), RHS); |
| 19371 | } |
| 19372 | |
| 19373 | bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) { |
| 19374 | Result = E->getValue(); |
| 19375 | return true; |
| 19376 | } |
| 19377 | |
| 19378 | bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 19379 | const Expr* SubExpr = E->getSubExpr(); |
| 19380 | |
| 19381 | switch (E->getCastKind()) { |
| 19382 | default: |
| 19383 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 19384 | |
| 19385 | case CK_HLSLAggregateSplatCast: |
| 19386 | llvm_unreachable("invalid cast kind for floating value" ); |
| 19387 | |
| 19388 | case CK_IntegralToFloating: { |
| 19389 | APSInt IntResult; |
| 19390 | const FPOptions FPO = E->getFPFeaturesInEffect( |
| 19391 | LO: Info.Ctx.getLangOpts()); |
| 19392 | return EvaluateInteger(E: SubExpr, Result&: IntResult, Info) && |
| 19393 | HandleIntToFloatCast(Info, E, FPO, SrcType: SubExpr->getType(), |
| 19394 | Value: IntResult, DestType: E->getType(), Result); |
| 19395 | } |
| 19396 | |
| 19397 | case CK_FixedPointToFloating: { |
| 19398 | APFixedPoint FixResult(Info.Ctx.getFixedPointSemantics(Ty: SubExpr->getType())); |
| 19399 | if (!EvaluateFixedPoint(E: SubExpr, Result&: FixResult, Info)) |
| 19400 | return false; |
| 19401 | Result = |
| 19402 | FixResult.convertToFloat(FloatSema: Info.Ctx.getFloatTypeSemantics(T: E->getType())); |
| 19403 | return true; |
| 19404 | } |
| 19405 | |
| 19406 | case CK_FloatingCast: { |
| 19407 | if (!Visit(S: SubExpr)) |
| 19408 | return false; |
| 19409 | return HandleFloatToFloatCast(Info, E, SrcType: SubExpr->getType(), DestType: E->getType(), |
| 19410 | Result); |
| 19411 | } |
| 19412 | |
| 19413 | case CK_FloatingComplexToReal: { |
| 19414 | ComplexValue V; |
| 19415 | if (!EvaluateComplex(E: SubExpr, Res&: V, Info)) |
| 19416 | return false; |
| 19417 | Result = V.getComplexFloatReal(); |
| 19418 | return true; |
| 19419 | } |
| 19420 | case CK_HLSLVectorTruncation: { |
| 19421 | APValue Val; |
| 19422 | if (!EvaluateVector(E: SubExpr, Result&: Val, Info)) |
| 19423 | return Error(E); |
| 19424 | return Success(V: Val.getVectorElt(I: 0), e: E); |
| 19425 | } |
| 19426 | case CK_HLSLMatrixTruncation: { |
| 19427 | // TODO: See #168935. Add matrix truncation support to expr constant. |
| 19428 | return Error(E); |
| 19429 | } |
| 19430 | case CK_HLSLElementwiseCast: { |
| 19431 | SmallVector<APValue> SrcVals; |
| 19432 | SmallVector<QualType> SrcTypes; |
| 19433 | |
| 19434 | if (!hlslElementwiseCastHelper(Info, E: SubExpr, DestTy: E->getType(), SrcVals, |
| 19435 | SrcTypes)) |
| 19436 | return false; |
| 19438 | |
| 19439 | // Cast our single source element to the destination type.
| 19440 | const FPOptions FPO = E->getFPFeaturesInEffect(LO: Info.Ctx.getLangOpts()); |
| 19441 | APValue ResultVal; |
| 19442 | if (!handleScalarCast(Info, FPO, E, SourceTy: SrcTypes[0], DestTy: E->getType(), Original: SrcVals[0], |
| 19443 | Result&: ResultVal)) |
| 19444 | return false; |
| 19445 | return Success(V: ResultVal, e: E); |
| 19446 | } |
| 19447 | } |
| 19448 | } |
| 19449 | |
| 19450 | //===----------------------------------------------------------------------===// |
| 19451 | // Complex Evaluation (for float and integer) |
| 19452 | //===----------------------------------------------------------------------===// |
| 19453 | |
| 19454 | namespace { |
| 19455 | class ComplexExprEvaluator |
| 19456 | : public ExprEvaluatorBase<ComplexExprEvaluator> { |
| 19457 | ComplexValue &Result; |
| 19458 | |
| 19459 | public: |
| 19460 | ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result) |
| 19461 | : ExprEvaluatorBaseTy(info), Result(Result) {} |
| 19462 | |
| 19463 | bool Success(const APValue &V, const Expr *e) { |
| 19464 | Result.setFrom(V); |
| 19465 | return true; |
| 19466 | } |
| 19467 | |
| 19468 | bool ZeroInitialization(const Expr *E); |
| 19469 | |
| 19470 | //===--------------------------------------------------------------------===// |
| 19471 | // Visitor Methods |
| 19472 | //===--------------------------------------------------------------------===// |
| 19473 | |
| 19474 | bool VisitImaginaryLiteral(const ImaginaryLiteral *E); |
| 19475 | bool VisitCastExpr(const CastExpr *E); |
| 19476 | bool VisitBinaryOperator(const BinaryOperator *E); |
| 19477 | bool VisitUnaryOperator(const UnaryOperator *E); |
| 19478 | bool VisitInitListExpr(const InitListExpr *E); |
| 19479 | bool VisitCallExpr(const CallExpr *E); |
| 19480 | }; |
| 19481 | } // end anonymous namespace |
| 19482 | |
| 19483 | static bool EvaluateComplex(const Expr *E, ComplexValue &Result, |
| 19484 | EvalInfo &Info) { |
| 19485 | assert(!E->isValueDependent()); |
| 19486 | assert(E->isPRValue() && E->getType()->isAnyComplexType()); |
| 19487 | return ComplexExprEvaluator(Info, Result).Visit(S: E); |
| 19488 | } |
| 19489 | |
| 19490 | bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) { |
| 19491 | QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType(); |
| 19492 | if (ElemTy->isRealFloatingType()) { |
| 19493 | Result.makeComplexFloat(); |
| 19494 | APFloat Zero = APFloat::getZero(Sem: Info.Ctx.getFloatTypeSemantics(T: ElemTy)); |
| 19495 | Result.FloatReal = Zero; |
| 19496 | Result.FloatImag = Zero; |
| 19497 | } else { |
| 19498 | Result.makeComplexInt(); |
| 19499 | APSInt Zero = Info.Ctx.MakeIntValue(Value: 0, Type: ElemTy); |
| 19500 | Result.IntReal = Zero; |
| 19501 | Result.IntImag = Zero; |
| 19502 | } |
| 19503 | return true; |
| 19504 | } |
| 19505 | |
| 19506 | bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) { |
| 19507 | const Expr* SubExpr = E->getSubExpr(); |
| 19508 | |
| 19509 | if (SubExpr->getType()->isRealFloatingType()) { |
| 19510 | Result.makeComplexFloat(); |
| 19511 | APFloat &Imag = Result.FloatImag; |
| 19512 | if (!EvaluateFloat(E: SubExpr, Result&: Imag, Info)) |
| 19513 | return false; |
| 19514 | |
| 19515 | Result.FloatReal = APFloat(Imag.getSemantics()); |
| 19516 | return true; |
| 19517 | } else { |
| 19518 | assert(SubExpr->getType()->isIntegerType() && |
| 19519 | "Unexpected imaginary literal." ); |
| 19520 | |
| 19521 | Result.makeComplexInt(); |
| 19522 | APSInt &Imag = Result.IntImag; |
| 19523 | if (!EvaluateInteger(E: SubExpr, Result&: Imag, Info)) |
| 19524 | return false; |
| 19525 | |
| 19526 | Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned()); |
| 19527 | return true; |
| 19528 | } |
| 19529 | } |
| 19530 | |
| 19531 | bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { |
| 19532 | |
| 19533 | switch (E->getCastKind()) { |
| 19534 | case CK_BitCast: |
| 19535 | case CK_BaseToDerived: |
| 19536 | case CK_DerivedToBase: |
| 19537 | case CK_UncheckedDerivedToBase: |
| 19538 | case CK_Dynamic: |
| 19539 | case CK_ToUnion: |
| 19540 | case CK_ArrayToPointerDecay: |
| 19541 | case CK_FunctionToPointerDecay: |
| 19542 | case CK_NullToPointer: |
| 19543 | case CK_NullToMemberPointer: |
| 19544 | case CK_BaseToDerivedMemberPointer: |
| 19545 | case CK_DerivedToBaseMemberPointer: |
| 19546 | case CK_MemberPointerToBoolean: |
| 19547 | case CK_ReinterpretMemberPointer: |
| 19548 | case CK_ConstructorConversion: |
| 19549 | case CK_IntegralToPointer: |
| 19550 | case CK_PointerToIntegral: |
| 19551 | case CK_PointerToBoolean: |
| 19552 | case CK_ToVoid: |
| 19553 | case CK_VectorSplat: |
| 19554 | case CK_IntegralCast: |
| 19555 | case CK_BooleanToSignedIntegral: |
| 19556 | case CK_IntegralToBoolean: |
| 19557 | case CK_IntegralToFloating: |
| 19558 | case CK_FloatingToIntegral: |
| 19559 | case CK_FloatingToBoolean: |
| 19560 | case CK_FloatingCast: |
| 19561 | case CK_CPointerToObjCPointerCast: |
| 19562 | case CK_BlockPointerToObjCPointerCast: |
| 19563 | case CK_AnyPointerToBlockPointerCast: |
| 19564 | case CK_ObjCObjectLValueCast: |
| 19565 | case CK_FloatingComplexToReal: |
| 19566 | case CK_FloatingComplexToBoolean: |
| 19567 | case CK_IntegralComplexToReal: |
| 19568 | case CK_IntegralComplexToBoolean: |
| 19569 | case CK_ARCProduceObject: |
| 19570 | case CK_ARCConsumeObject: |
| 19571 | case CK_ARCReclaimReturnedObject: |
| 19572 | case CK_ARCExtendBlockObject: |
| 19573 | case CK_CopyAndAutoreleaseBlockObject: |
| 19574 | case CK_BuiltinFnToFnPtr: |
| 19575 | case CK_ZeroToOCLOpaqueType: |
| 19576 | case CK_NonAtomicToAtomic: |
| 19577 | case CK_AddressSpaceConversion: |
| 19578 | case CK_IntToOCLSampler: |
| 19579 | case CK_FloatingToFixedPoint: |
| 19580 | case CK_FixedPointToFloating: |
| 19581 | case CK_FixedPointCast: |
| 19582 | case CK_FixedPointToBoolean: |
| 19583 | case CK_FixedPointToIntegral: |
| 19584 | case CK_IntegralToFixedPoint: |
| 19585 | case CK_MatrixCast: |
| 19586 | case CK_HLSLVectorTruncation: |
| 19587 | case CK_HLSLMatrixTruncation: |
| 19588 | case CK_HLSLElementwiseCast: |
| 19589 | case CK_HLSLAggregateSplatCast: |
| 19590 | llvm_unreachable("invalid cast kind for complex value" ); |
| 19591 | |
| 19592 | case CK_LValueToRValue: |
| 19593 | case CK_AtomicToNonAtomic: |
| 19594 | case CK_NoOp: |
| 19595 | case CK_LValueToRValueBitCast: |
| 19596 | case CK_HLSLArrayRValue: |
| 19597 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 19598 | |
| 19599 | case CK_Dependent: |
| 19600 | case CK_LValueBitCast: |
| 19601 | case CK_UserDefinedConversion: |
| 19602 | return Error(E); |
| 19603 | |
| 19604 | case CK_FloatingRealToComplex: { |
| 19605 | APFloat &Real = Result.FloatReal; |
| 19606 | if (!EvaluateFloat(E: E->getSubExpr(), Result&: Real, Info)) |
| 19607 | return false; |
| 19608 | |
| 19609 | Result.makeComplexFloat(); |
| 19610 | Result.FloatImag = APFloat(Real.getSemantics()); |
| 19611 | return true; |
| 19612 | } |
| 19613 | |
| 19614 | case CK_FloatingComplexCast: { |
| 19615 | if (!Visit(S: E->getSubExpr())) |
| 19616 | return false; |
| 19617 | |
| 19618 | QualType To = E->getType()->castAs<ComplexType>()->getElementType(); |
| 19619 | QualType From |
| 19620 | = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); |
| 19621 | |
| 19622 | return HandleFloatToFloatCast(Info, E, SrcType: From, DestType: To, Result&: Result.FloatReal) && |
| 19623 | HandleFloatToFloatCast(Info, E, SrcType: From, DestType: To, Result&: Result.FloatImag); |
| 19624 | } |
| 19625 | |
| 19626 | case CK_FloatingComplexToIntegralComplex: { |
| 19627 | if (!Visit(S: E->getSubExpr())) |
| 19628 | return false; |
| 19629 | |
| 19630 | QualType To = E->getType()->castAs<ComplexType>()->getElementType(); |
| 19631 | QualType From |
| 19632 | = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); |
| 19633 | Result.makeComplexInt(); |
| 19634 | return HandleFloatToIntCast(Info, E, SrcType: From, Value: Result.FloatReal, |
| 19635 | DestType: To, Result&: Result.IntReal) && |
| 19636 | HandleFloatToIntCast(Info, E, SrcType: From, Value: Result.FloatImag, |
| 19637 | DestType: To, Result&: Result.IntImag); |
| 19638 | } |
| 19639 | |
| 19640 | case CK_IntegralRealToComplex: { |
| 19641 | APSInt &Real = Result.IntReal; |
| 19642 | if (!EvaluateInteger(E: E->getSubExpr(), Result&: Real, Info)) |
| 19643 | return false; |
| 19644 | |
| 19645 | Result.makeComplexInt(); |
| 19646 | Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned()); |
| 19647 | return true; |
| 19648 | } |
| 19649 | |
| 19650 | case CK_IntegralComplexCast: { |
| 19651 | if (!Visit(S: E->getSubExpr())) |
| 19652 | return false; |
| 19653 | |
| 19654 | QualType To = E->getType()->castAs<ComplexType>()->getElementType(); |
| 19655 | QualType From |
| 19656 | = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); |
| 19657 | |
| 19658 | Result.IntReal = HandleIntToIntCast(Info, E, DestType: To, SrcType: From, Value: Result.IntReal); |
| 19659 | Result.IntImag = HandleIntToIntCast(Info, E, DestType: To, SrcType: From, Value: Result.IntImag); |
| 19660 | return true; |
| 19661 | } |
| 19662 | |
| 19663 | case CK_IntegralComplexToFloatingComplex: { |
| 19664 | if (!Visit(S: E->getSubExpr())) |
| 19665 | return false; |
| 19666 | |
| 19667 | const FPOptions FPO = E->getFPFeaturesInEffect( |
| 19668 | LO: Info.Ctx.getLangOpts()); |
| 19669 | QualType To = E->getType()->castAs<ComplexType>()->getElementType(); |
| 19670 | QualType From |
| 19671 | = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType(); |
| 19672 | Result.makeComplexFloat(); |
| 19673 | return HandleIntToFloatCast(Info, E, FPO, SrcType: From, Value: Result.IntReal, |
| 19674 | DestType: To, Result&: Result.FloatReal) && |
| 19675 | HandleIntToFloatCast(Info, E, FPO, SrcType: From, Value: Result.IntImag, |
| 19676 | DestType: To, Result&: Result.FloatImag); |
| 19677 | } |
| 19678 | } |
| 19679 | |
| 19680 | llvm_unreachable("unknown cast resulting in complex value" ); |
| 19681 | } |
| 19682 | |
| 19683 | uint8_t GFNIMultiplicativeInverse(uint8_t Byte) { |
| 19684 | // Lookup Table for Multiplicative Inverse in GF(2^8) |
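|       | // The field is GF(2^8) with reduction polynomial x^8 + x^4 + x^3 + x + 1
|       | // (0x11B), matching GFNIMul below; the inverse of 0 is defined as 0.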
| 19685 | const uint8_t GFInv[256] = { |
| 19686 | 0x00, 0x01, 0x8d, 0xf6, 0xcb, 0x52, 0x7b, 0xd1, 0xe8, 0x4f, 0x29, 0xc0, |
| 19687 | 0xb0, 0xe1, 0xe5, 0xc7, 0x74, 0xb4, 0xaa, 0x4b, 0x99, 0x2b, 0x60, 0x5f, |
| 19688 | 0x58, 0x3f, 0xfd, 0xcc, 0xff, 0x40, 0xee, 0xb2, 0x3a, 0x6e, 0x5a, 0xf1, |
| 19689 | 0x55, 0x4d, 0xa8, 0xc9, 0xc1, 0x0a, 0x98, 0x15, 0x30, 0x44, 0xa2, 0xc2, |
| 19690 | 0x2c, 0x45, 0x92, 0x6c, 0xf3, 0x39, 0x66, 0x42, 0xf2, 0x35, 0x20, 0x6f, |
| 19691 | 0x77, 0xbb, 0x59, 0x19, 0x1d, 0xfe, 0x37, 0x67, 0x2d, 0x31, 0xf5, 0x69, |
| 19692 | 0xa7, 0x64, 0xab, 0x13, 0x54, 0x25, 0xe9, 0x09, 0xed, 0x5c, 0x05, 0xca, |
| 19693 | 0x4c, 0x24, 0x87, 0xbf, 0x18, 0x3e, 0x22, 0xf0, 0x51, 0xec, 0x61, 0x17, |
| 19694 | 0x16, 0x5e, 0xaf, 0xd3, 0x49, 0xa6, 0x36, 0x43, 0xf4, 0x47, 0x91, 0xdf, |
| 19695 | 0x33, 0x93, 0x21, 0x3b, 0x79, 0xb7, 0x97, 0x85, 0x10, 0xb5, 0xba, 0x3c, |
| 19696 | 0xb6, 0x70, 0xd0, 0x06, 0xa1, 0xfa, 0x81, 0x82, 0x83, 0x7e, 0x7f, 0x80, |
| 19697 | 0x96, 0x73, 0xbe, 0x56, 0x9b, 0x9e, 0x95, 0xd9, 0xf7, 0x02, 0xb9, 0xa4, |
| 19698 | 0xde, 0x6a, 0x32, 0x6d, 0xd8, 0x8a, 0x84, 0x72, 0x2a, 0x14, 0x9f, 0x88, |
| 19699 | 0xf9, 0xdc, 0x89, 0x9a, 0xfb, 0x7c, 0x2e, 0xc3, 0x8f, 0xb8, 0x65, 0x48, |
| 19700 | 0x26, 0xc8, 0x12, 0x4a, 0xce, 0xe7, 0xd2, 0x62, 0x0c, 0xe0, 0x1f, 0xef, |
| 19701 | 0x11, 0x75, 0x78, 0x71, 0xa5, 0x8e, 0x76, 0x3d, 0xbd, 0xbc, 0x86, 0x57, |
| 19702 | 0x0b, 0x28, 0x2f, 0xa3, 0xda, 0xd4, 0xe4, 0x0f, 0xa9, 0x27, 0x53, 0x04, |
| 19703 | 0x1b, 0xfc, 0xac, 0xe6, 0x7a, 0x07, 0xae, 0x63, 0xc5, 0xdb, 0xe2, 0xea, |
| 19704 | 0x94, 0x8b, 0xc4, 0xd5, 0x9d, 0xf8, 0x90, 0x6b, 0xb1, 0x0d, 0xd6, 0xeb, |
| 19705 | 0xc6, 0x0e, 0xcf, 0xad, 0x08, 0x4e, 0xd7, 0xe3, 0x5d, 0x50, 0x1e, 0xb3, |
| 19706 | 0x5b, 0x23, 0x38, 0x34, 0x68, 0x46, 0x03, 0x8c, 0xdd, 0x9c, 0x7d, 0xa0, |
| 19707 | 0xcd, 0x1a, 0x41, 0x1c}; |
| 19708 | |
| 19709 | return GFInv[Byte]; |
| 19710 | } |
| 19711 | |
| 19712 | uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm, |
| 19713 | bool Inverse) { |
| 19714 | unsigned NumBitsInByte = 8; |
| 19715 | // Computing the affine transformation |
| 19716 | uint8_t RetByte = 0; |
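|       | // Bit BitIdx of the result is the GF(2) dot product (parity of the AND) of
|       | // byte (7 - BitIdx) of the matrix AQword with the input byte (or its
|       | // GF(2^8) inverse), XORed with bit BitIdx of the immediate.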
| 19717 | for (uint32_t BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) { |
| 19718 | uint8_t AByte = |
| 19719 | AQword.lshr(shiftAmt: (7 - static_cast<int32_t>(BitIdx)) * NumBitsInByte) |
| 19720 | .getLoBits(numBits: 8) |
| 19721 | .getZExtValue(); |
| 19722 | uint8_t Product; |
| 19723 | if (Inverse) { |
| 19724 | Product = AByte & GFNIMultiplicativeInverse(Byte: XByte); |
| 19725 | } else { |
| 19726 | Product = AByte & XByte; |
| 19727 | } |
| 19728 | uint8_t Parity = 0; |
| 19729 | |
| 19730 | // Dot product in GF(2) uses XOR instead of addition |
| 19731 | for (unsigned PBitIdx = 0; PBitIdx != NumBitsInByte; ++PBitIdx) { |
| 19732 | Parity = Parity ^ ((Product >> PBitIdx) & 0x1); |
| 19733 | } |
| 19734 | |
| 19735 | uint8_t Temp = Imm[BitIdx] ? 1 : 0; |
| 19736 | RetByte |= (Temp ^ Parity) << BitIdx; |
| 19737 | } |
| 19738 | return RetByte; |
| 19739 | } |
| 19740 | |
| 19741 | uint8_t GFNIMul(uint8_t AByte, uint8_t BByte) { |
| 19742 | // Multiply two GF(2) polynomials of degree at most 7; each byte holds the
| 19743 | // coefficients of a polynomial
| 19744 | // a7*x^7 + a6*x^6 + a5*x^5 + a4*x^4 + a3*x^3 + a2*x^2 + a1*x + a0.
| 19745 | uint16_t TWord = 0; |
| 19746 | unsigned NumBitsInByte = 8; |
| 19747 | for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) { |
| 19748 | if ((BByte >> BitIdx) & 0x1) { |
| 19749 | TWord = TWord ^ (AByte << BitIdx); |
| 19750 | } |
| 19751 | } |
| 19752 | |
| 19753 | // The product of two degree-7 polynomials can have degree up to 14, so it
| 19754 | // has to be reduced back to degree at most 7.
| 19755 | // The reduction polynomial is x^8 + x^4 + x^3 + x + 1, i.e. 0x11B.
| 19757 | for (int32_t BitIdx = 14; BitIdx > 7; --BitIdx) { |
| 19758 | if ((TWord >> BitIdx) & 0x1) { |
| 19759 | TWord = TWord ^ (0x11B << (BitIdx - 8)); |
| 19760 | } |
| 19761 | } |
| 19762 | return (TWord & 0xFF); |
| 19763 | } |
| 19764 | |
| 19765 | void HandleComplexComplexMul(APFloat A, APFloat B, APFloat C, APFloat D, |
| 19766 | APFloat &ResR, APFloat &ResI) { |
| 19767 | // This is an implementation of complex multiplication according to the |
| 19768 | // constraints laid out in C11 Annex G. The implementation uses the |
| 19769 | // following naming scheme: |
| 19770 | // (a + ib) * (c + id) |
| 19771 | |
| 19772 | APFloat AC = A * C; |
| 19773 | APFloat BD = B * D; |
| 19774 | APFloat AD = A * D; |
| 19775 | APFloat BC = B * C; |
| 19776 | ResR = AC - BD; |
| 19777 | ResI = AD + BC; |
| 19778 | if (ResR.isNaN() && ResI.isNaN()) { |
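|       | // Both parts are NaN: attempt the C11 Annex G recovery, replacing each
|       | // infinity with a signed one, its finite partner with a signed zero, and
|       | // NaNs on the other side with signed zeros, before rescaling by infinity.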
| 19779 | bool Recalc = false; |
| 19780 | if (A.isInfinity() || B.isInfinity()) { |
| 19781 | A = APFloat::copySign(Value: APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), |
| 19782 | Sign: A); |
| 19783 | B = APFloat::copySign(Value: APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), |
| 19784 | Sign: B); |
| 19785 | if (C.isNaN()) |
| 19786 | C = APFloat::copySign(Value: APFloat(C.getSemantics()), Sign: C); |
| 19787 | if (D.isNaN()) |
| 19788 | D = APFloat::copySign(Value: APFloat(D.getSemantics()), Sign: D); |
| 19789 | Recalc = true; |
| 19790 | } |
| 19791 | if (C.isInfinity() || D.isInfinity()) { |
| 19792 | C = APFloat::copySign(Value: APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), |
| 19793 | Sign: C); |
| 19794 | D = APFloat::copySign(Value: APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), |
| 19795 | Sign: D); |
| 19796 | if (A.isNaN()) |
| 19797 | A = APFloat::copySign(Value: APFloat(A.getSemantics()), Sign: A); |
| 19798 | if (B.isNaN()) |
| 19799 | B = APFloat::copySign(Value: APFloat(B.getSemantics()), Sign: B); |
| 19800 | Recalc = true; |
| 19801 | } |
| 19802 | if (!Recalc && (AC.isInfinity() || BD.isInfinity() || AD.isInfinity() || |
| 19803 | BC.isInfinity())) { |
| 19804 | if (A.isNaN()) |
| 19805 | A = APFloat::copySign(Value: APFloat(A.getSemantics()), Sign: A); |
| 19806 | if (B.isNaN()) |
| 19807 | B = APFloat::copySign(Value: APFloat(B.getSemantics()), Sign: B); |
| 19808 | if (C.isNaN()) |
| 19809 | C = APFloat::copySign(Value: APFloat(C.getSemantics()), Sign: C); |
| 19810 | if (D.isNaN()) |
| 19811 | D = APFloat::copySign(Value: APFloat(D.getSemantics()), Sign: D); |
| 19812 | Recalc = true; |
| 19813 | } |
| 19814 | if (Recalc) { |
| 19815 | ResR = APFloat::getInf(Sem: A.getSemantics()) * (A * C - B * D); |
| 19816 | ResI = APFloat::getInf(Sem: A.getSemantics()) * (A * D + B * C); |
| 19817 | } |
| 19818 | } |
| 19819 | } |
| 19820 | |
| 19821 | void HandleComplexComplexDiv(APFloat A, APFloat B, APFloat C, APFloat D, |
| 19822 | APFloat &ResR, APFloat &ResI) { |
| 19823 | // This is an implementation of complex division according to the |
| 19824 | // constraints laid out in C11 Annex G. The implementation uses the |
| 19825 | // following naming scheme: |
| 19826 | // (a + ib) / (c + id) |
| 19827 | |
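|       | // Scale the denominator by a power of two (ilogb/scalbn) before forming
|       | // C*C + D*D to reduce the risk of overflow or underflow; the scaling is
|       | // undone when the quotients are formed.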
| 19828 | int DenomLogB = 0; |
| 19829 | APFloat MaxCD = maxnum(A: abs(X: C), B: abs(X: D)); |
| 19830 | if (MaxCD.isFinite()) { |
| 19831 | DenomLogB = ilogb(Arg: MaxCD); |
| 19832 | C = scalbn(X: C, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven); |
| 19833 | D = scalbn(X: D, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven); |
| 19834 | } |
| 19835 | APFloat Denom = C * C + D * D; |
| 19836 | ResR = |
| 19837 | scalbn(X: (A * C + B * D) / Denom, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven); |
| 19838 | ResI = |
| 19839 | scalbn(X: (B * C - A * D) / Denom, Exp: -DenomLogB, RM: APFloat::rmNearestTiesToEven); |
| 19840 | if (ResR.isNaN() && ResI.isNaN()) { |
| 19841 | if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) { |
| 19842 | ResR = APFloat::getInf(Sem: ResR.getSemantics(), Negative: C.isNegative()) * A; |
| 19843 | ResI = APFloat::getInf(Sem: ResR.getSemantics(), Negative: C.isNegative()) * B; |
| 19844 | } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() && |
| 19845 | D.isFinite()) { |
| 19846 | A = APFloat::copySign(Value: APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), |
| 19847 | Sign: A); |
| 19848 | B = APFloat::copySign(Value: APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), |
| 19849 | Sign: B); |
| 19850 | ResR = APFloat::getInf(Sem: ResR.getSemantics()) * (A * C + B * D); |
| 19851 | ResI = APFloat::getInf(Sem: ResI.getSemantics()) * (B * C - A * D); |
| 19852 | } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) { |
| 19853 | C = APFloat::copySign(Value: APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), |
| 19854 | Sign: C); |
| 19855 | D = APFloat::copySign(Value: APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), |
| 19856 | Sign: D); |
| 19857 | ResR = APFloat::getZero(Sem: ResR.getSemantics()) * (A * C + B * D); |
| 19858 | ResI = APFloat::getZero(Sem: ResI.getSemantics()) * (B * C - A * D); |
| 19859 | } |
| 19860 | } |
| 19861 | } |
| 19862 | |
| 19863 | APSInt NormalizeRotateAmount(const APSInt &Value, const APSInt &Amount) { |
| 19864 | // Normalize shift amount to [0, BitWidth) range to match runtime behavior |
| 19865 | APSInt NormAmt = Amount; |
| 19866 | unsigned BitWidth = Value.getBitWidth(); |
| 19867 | unsigned AmtBitWidth = NormAmt.getBitWidth(); |
| 19868 | if (BitWidth == 1) { |
| 19869 | // Rotating a 1-bit value is always a no-op |
| 19870 | NormAmt = APSInt(APInt(AmtBitWidth, 0), NormAmt.isUnsigned()); |
| 19871 | } else if (BitWidth == 2) { |
| 19872 | // For 2-bit values: rotation amount is 0 or 1 based on |
| 19873 | // whether the amount is even or odd. We can't use srem here because |
| 19874 | // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic. |
| 19875 | NormAmt = |
| 19876 | APSInt(APInt(AmtBitWidth, NormAmt[0] ? 1 : 0), NormAmt.isUnsigned()); |
| 19877 | } else { |
| 19878 | APInt Divisor; |
| 19879 | if (AmtBitWidth > BitWidth) { |
| 19880 | Divisor = llvm::APInt(AmtBitWidth, BitWidth); |
| 19881 | } else { |
| 19882 | Divisor = llvm::APInt(BitWidth, BitWidth); |
| 19883 | if (AmtBitWidth < BitWidth) { |
| 19884 | NormAmt = NormAmt.extend(width: BitWidth); |
| 19885 | } |
| 19886 | } |
| 19887 | |
| 19888 | // Normalize to [0, BitWidth) |
| 19889 | if (NormAmt.isSigned()) { |
| 19890 | NormAmt = APSInt(NormAmt.srem(RHS: Divisor), /*isUnsigned=*/false); |
| 19891 | if (NormAmt.isNegative()) { |
| 19892 | APSInt SignedDivisor(Divisor, /*isUnsigned=*/false); |
| 19893 | NormAmt += SignedDivisor; |
| 19894 | } |
| 19895 | } else { |
| 19896 | NormAmt = APSInt(NormAmt.urem(RHS: Divisor), /*isUnsigned=*/true); |
| 19897 | } |
| 19898 | } |
| 19899 | |
| 19900 | return NormAmt; |
| 19901 | } |
| 19902 | |
| 19903 | bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { |
| 19904 | if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) |
| 19905 | return ExprEvaluatorBaseTy::VisitBinaryOperator(E); |
| 19906 | |
| 19907 | // Track whether the LHS or RHS is real at the type system level. When this is |
| 19908 | // the case, we can simplify our evaluation strategy.
| 19909 | bool LHSReal = false, RHSReal = false; |
| 19910 | |
| 19911 | bool LHSOK; |
| 19912 | if (E->getLHS()->getType()->isRealFloatingType()) { |
| 19913 | LHSReal = true; |
| 19914 | APFloat &Real = Result.FloatReal; |
| 19915 | LHSOK = EvaluateFloat(E: E->getLHS(), Result&: Real, Info); |
| 19916 | if (LHSOK) { |
| 19917 | Result.makeComplexFloat(); |
| 19918 | Result.FloatImag = APFloat(Real.getSemantics()); |
| 19919 | } |
| 19920 | } else { |
| 19921 | LHSOK = Visit(S: E->getLHS()); |
| 19922 | } |
| 19923 | if (!LHSOK && !Info.noteFailure()) |
| 19924 | return false; |
| 19925 | |
| 19926 | ComplexValue RHS; |
| 19927 | if (E->getRHS()->getType()->isRealFloatingType()) { |
| 19928 | RHSReal = true; |
| 19929 | APFloat &Real = RHS.FloatReal; |
| 19930 | if (!EvaluateFloat(E: E->getRHS(), Result&: Real, Info) || !LHSOK) |
| 19931 | return false; |
| 19932 | RHS.makeComplexFloat(); |
| 19933 | RHS.FloatImag = APFloat(Real.getSemantics()); |
| 19934 | } else if (!EvaluateComplex(E: E->getRHS(), Result&: RHS, Info) || !LHSOK) |
| 19935 | return false; |
| 19936 | |
| 19937 | assert(!(LHSReal && RHSReal) && |
| 19938 | "Cannot have both operands of a complex operation be real." ); |
| 19939 | switch (E->getOpcode()) { |
| 19940 | default: return Error(E); |
| 19941 | case BO_Add: |
| 19942 | if (Result.isComplexFloat()) { |
| 19943 | Result.getComplexFloatReal().add(RHS: RHS.getComplexFloatReal(), |
| 19944 | RM: APFloat::rmNearestTiesToEven); |
| 19945 | if (LHSReal) |
| 19946 | Result.getComplexFloatImag() = RHS.getComplexFloatImag(); |
| 19947 | else if (!RHSReal) |
| 19948 | Result.getComplexFloatImag().add(RHS: RHS.getComplexFloatImag(), |
| 19949 | RM: APFloat::rmNearestTiesToEven); |
| 19950 | } else { |
| 19951 | Result.getComplexIntReal() += RHS.getComplexIntReal(); |
| 19952 | Result.getComplexIntImag() += RHS.getComplexIntImag(); |
| 19953 | } |
| 19954 | break; |
| 19955 | case BO_Sub: |
| 19956 | if (Result.isComplexFloat()) { |
| 19957 | Result.getComplexFloatReal().subtract(RHS: RHS.getComplexFloatReal(), |
| 19958 | RM: APFloat::rmNearestTiesToEven); |
| 19959 | if (LHSReal) { |
| 19960 | Result.getComplexFloatImag() = RHS.getComplexFloatImag(); |
| 19961 | Result.getComplexFloatImag().changeSign(); |
| 19962 | } else if (!RHSReal) { |
| 19963 | Result.getComplexFloatImag().subtract(RHS: RHS.getComplexFloatImag(), |
| 19964 | RM: APFloat::rmNearestTiesToEven); |
| 19965 | } |
| 19966 | } else { |
| 19967 | Result.getComplexIntReal() -= RHS.getComplexIntReal(); |
| 19968 | Result.getComplexIntImag() -= RHS.getComplexIntImag(); |
| 19969 | } |
| 19970 | break; |
| 19971 | case BO_Mul: |
| 19972 | if (Result.isComplexFloat()) { |
| 19973 | // This is an implementation of complex multiplication according to the |
| 19974 | // constraints laid out in C11 Annex G. The implementation uses the |
| 19975 | // following naming scheme: |
| 19976 | // (a + ib) * (c + id) |
| 19977 | ComplexValue LHS = Result; |
| 19978 | APFloat &A = LHS.getComplexFloatReal(); |
| 19979 | APFloat &B = LHS.getComplexFloatImag(); |
| 19980 | APFloat &C = RHS.getComplexFloatReal(); |
| 19981 | APFloat &D = RHS.getComplexFloatImag(); |
| 19982 | APFloat &ResR = Result.getComplexFloatReal(); |
| 19983 | APFloat &ResI = Result.getComplexFloatImag(); |
| 19984 | if (LHSReal) { |
| 19985 | assert(!RHSReal && "Cannot have two real operands for a complex op!");
| 19986 | ResR = A; |
| 19987 | ResI = A; |
| 19988 | // ResR = A * C; |
| 19989 | // ResI = A * D; |
| 19990 | if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Mul, RHS: C) || |
| 19991 | !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Mul, RHS: D)) |
| 19992 | return false; |
| 19993 | } else if (RHSReal) { |
| 19994 | // ResR = C * A; |
| 19995 | // ResI = C * B; |
| 19996 | ResR = C; |
| 19997 | ResI = C; |
| 19998 | if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Mul, RHS: A) || |
| 19999 | !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Mul, RHS: B)) |
| 20000 | return false; |
| 20001 | } else { |
| 20002 | HandleComplexComplexMul(A, B, C, D, ResR, ResI); |
| 20003 | } |
| 20004 | } else { |
| 20005 | ComplexValue LHS = Result; |
| 20006 | Result.getComplexIntReal() = |
| 20007 | (LHS.getComplexIntReal() * RHS.getComplexIntReal() - |
| 20008 | LHS.getComplexIntImag() * RHS.getComplexIntImag()); |
| 20009 | Result.getComplexIntImag() = |
| 20010 | (LHS.getComplexIntReal() * RHS.getComplexIntImag() + |
| 20011 | LHS.getComplexIntImag() * RHS.getComplexIntReal()); |
| 20012 | } |
| 20013 | break; |
| 20014 | case BO_Div: |
| 20015 | if (Result.isComplexFloat()) { |
| 20016 | // This is an implementation of complex division according to the |
| 20017 | // constraints laid out in C11 Annex G. The implementation uses the |
| 20018 | // following naming scheme: |
| 20019 | // (a + ib) / (c + id) |
| 20020 | ComplexValue LHS = Result; |
| 20021 | APFloat &A = LHS.getComplexFloatReal(); |
| 20022 | APFloat &B = LHS.getComplexFloatImag(); |
| 20023 | APFloat &C = RHS.getComplexFloatReal(); |
| 20024 | APFloat &D = RHS.getComplexFloatImag(); |
| 20025 | APFloat &ResR = Result.getComplexFloatReal(); |
| 20026 | APFloat &ResI = Result.getComplexFloatImag(); |
| 20027 | if (RHSReal) { |
| 20028 | ResR = A; |
| 20029 | ResI = B; |
| 20030 | // ResR = A / C; |
| 20031 | // ResI = B / C; |
| 20032 | if (!handleFloatFloatBinOp(Info, E, LHS&: ResR, Opcode: BO_Div, RHS: C) || |
| 20033 | !handleFloatFloatBinOp(Info, E, LHS&: ResI, Opcode: BO_Div, RHS: C)) |
| 20034 | return false; |
| 20035 | } else { |
| 20036 | if (LHSReal) { |
| 20037 | // No real optimizations we can do here; stub out the imaginary part with zero.
| 20038 | B = APFloat::getZero(Sem: A.getSemantics()); |
| 20039 | } |
| 20040 | HandleComplexComplexDiv(A, B, C, D, ResR, ResI); |
| 20041 | } |
| 20042 | } else { |
| 20043 | ComplexValue LHS = Result; |
| 20044 | APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() + |
| 20045 | RHS.getComplexIntImag() * RHS.getComplexIntImag(); |
| 20046 | if (Den.isZero()) |
| 20047 | return Error(E, D: diag::note_expr_divide_by_zero); |
| 20048 | |
| 20049 | Result.getComplexIntReal() = |
| 20050 | (LHS.getComplexIntReal() * RHS.getComplexIntReal() + |
| 20051 | LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den; |
| 20052 | Result.getComplexIntImag() = |
| 20053 | (LHS.getComplexIntImag() * RHS.getComplexIntReal() - |
| 20054 | LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den; |
| 20055 | } |
| 20056 | break; |
| 20057 | } |
| 20058 | |
| 20059 | return true; |
| 20060 | } |
| 20061 | |
| 20062 | bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { |
| 20063 | // Get the operand value into 'Result'. |
| 20064 | if (!Visit(S: E->getSubExpr())) |
| 20065 | return false; |
| 20066 | |
| 20067 | switch (E->getOpcode()) { |
| 20068 | default: |
| 20069 | return Error(E); |
| 20070 | case UO_Extension: |
| 20071 | return true; |
| 20072 | case UO_Plus: |
| 20073 | // The result is always just the subexpr. |
| 20074 | return true; |
| 20075 | case UO_Minus: |
| 20076 | if (Result.isComplexFloat()) { |
| 20077 | Result.getComplexFloatReal().changeSign(); |
| 20078 | Result.getComplexFloatImag().changeSign(); |
| 20079 | } |
| 20080 | else { |
| 20081 | Result.getComplexIntReal() = -Result.getComplexIntReal(); |
| 20082 | Result.getComplexIntImag() = -Result.getComplexIntImag(); |
| 20083 | } |
| 20084 | return true; |
| 20085 | case UO_Not: |
| 20086 | if (Result.isComplexFloat()) |
| 20087 | Result.getComplexFloatImag().changeSign(); |
| 20088 | else |
| 20089 | Result.getComplexIntImag() = -Result.getComplexIntImag(); |
| 20090 | return true; |
| 20091 | } |
| 20092 | } |
| 20093 | |
| 20094 | bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) { |
| 20095 | if (E->getNumInits() == 2) { |
| 20096 | if (E->getType()->isComplexType()) { |
| 20097 | Result.makeComplexFloat(); |
| 20098 | if (!EvaluateFloat(E: E->getInit(Init: 0), Result&: Result.FloatReal, Info)) |
| 20099 | return false; |
| 20100 | if (!EvaluateFloat(E: E->getInit(Init: 1), Result&: Result.FloatImag, Info)) |
| 20101 | return false; |
| 20102 | } else { |
| 20103 | Result.makeComplexInt(); |
| 20104 | if (!EvaluateInteger(E: E->getInit(Init: 0), Result&: Result.IntReal, Info)) |
| 20105 | return false; |
| 20106 | if (!EvaluateInteger(E: E->getInit(Init: 1), Result&: Result.IntImag, Info)) |
| 20107 | return false; |
| 20108 | } |
| 20109 | return true; |
| 20110 | } |
| 20111 | return ExprEvaluatorBaseTy::VisitInitListExpr(E); |
| 20112 | } |
| 20113 | |
| 20114 | bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) { |
| 20115 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 20116 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 20117 | |
| 20118 | switch (E->getBuiltinCallee()) { |
| 20119 | case Builtin::BI__builtin_complex: |
| 20120 | Result.makeComplexFloat(); |
| 20121 | if (!EvaluateFloat(E: E->getArg(Arg: 0), Result&: Result.FloatReal, Info)) |
| 20122 | return false; |
| 20123 | if (!EvaluateFloat(E: E->getArg(Arg: 1), Result&: Result.FloatImag, Info)) |
| 20124 | return false; |
| 20125 | return true; |
| 20126 | |
| 20127 | default: |
| 20128 | return false; |
| 20129 | } |
| 20130 | } |
| 20131 | |
| 20132 | //===----------------------------------------------------------------------===// |
| 20133 | // Atomic expression evaluation, essentially just handling the NonAtomicToAtomic |
| 20134 | // implicit conversion. |
| 20135 | //===----------------------------------------------------------------------===// |
| 20136 | |
| 20137 | namespace { |
| 20138 | class AtomicExprEvaluator : |
| 20139 | public ExprEvaluatorBase<AtomicExprEvaluator> { |
| 20140 | const LValue *This; |
| 20141 | APValue &Result; |
| 20142 | public: |
| 20143 | AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result) |
| 20144 | : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {} |
| 20145 | |
| 20146 | bool Success(const APValue &V, const Expr *E) { |
| 20147 | Result = V; |
| 20148 | return true; |
| 20149 | } |
| 20150 | |
| 20151 | bool ZeroInitialization(const Expr *E) { |
| 20152 | ImplicitValueInitExpr VIE( |
| 20153 | E->getType()->castAs<AtomicType>()->getValueType()); |
| 20154 | // For atomic-qualified class (and array) types in C++, initialize the |
| 20155 | // _Atomic-wrapped subobject directly, in-place. |
| 20156 | return This ? EvaluateInPlace(Result, Info, This: *This, E: &VIE) |
| 20157 | : Evaluate(Result, Info, E: &VIE); |
| 20158 | } |
| 20159 | |
| 20160 | bool VisitCastExpr(const CastExpr *E) { |
| 20161 | switch (E->getCastKind()) { |
| 20162 | default: |
| 20163 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 20164 | case CK_NullToPointer: |
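|       | // A null pointer constant converted to an atomic pointer type: evaluate
|       | // the operand only for side effects, then zero-initialize the result.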
| 20165 | VisitIgnoredValue(E: E->getSubExpr()); |
| 20166 | return ZeroInitialization(E); |
| 20167 | case CK_NonAtomicToAtomic: |
| 20168 | return This ? EvaluateInPlace(Result, Info, This: *This, E: E->getSubExpr()) |
| 20169 | : Evaluate(Result, Info, E: E->getSubExpr()); |
| 20170 | } |
| 20171 | } |
| 20172 | }; |
| 20173 | } // end anonymous namespace |
| 20174 | |
| 20175 | static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result, |
| 20176 | EvalInfo &Info) { |
| 20177 | assert(!E->isValueDependent()); |
| 20178 | assert(E->isPRValue() && E->getType()->isAtomicType()); |
| 20179 | return AtomicExprEvaluator(Info, This, Result).Visit(S: E); |
| 20180 | } |
| 20181 | |
| 20182 | //===----------------------------------------------------------------------===// |
| 20183 | // Void expression evaluation, primarily for a cast to void on the LHS of a |
| 20184 | // comma operator |
| 20185 | //===----------------------------------------------------------------------===// |
| 20186 | |
| 20187 | namespace { |
| 20188 | class VoidExprEvaluator |
| 20189 | : public ExprEvaluatorBase<VoidExprEvaluator> { |
| 20190 | public: |
| 20191 | VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {} |
| 20192 | |
| 20193 | bool Success(const APValue &V, const Expr *e) { return true; } |
| 20194 | |
| 20195 | bool ZeroInitialization(const Expr *E) { return true; } |
| 20196 | |
| 20197 | bool VisitCastExpr(const CastExpr *E) { |
| 20198 | switch (E->getCastKind()) { |
| 20199 | default: |
| 20200 | return ExprEvaluatorBaseTy::VisitCastExpr(E); |
| 20201 | case CK_ToVoid: |
| 20202 | VisitIgnoredValue(E: E->getSubExpr()); |
| 20203 | return true; |
| 20204 | } |
| 20205 | } |
| 20206 | |
| 20207 | bool VisitCallExpr(const CallExpr *E) { |
| 20208 | if (!IsConstantEvaluatedBuiltinCall(E)) |
| 20209 | return ExprEvaluatorBaseTy::VisitCallExpr(E); |
| 20210 | |
| 20211 | switch (E->getBuiltinCallee()) { |
| 20212 | case Builtin::BI__assume: |
| 20213 | case Builtin::BI__builtin_assume: |
| 20214 | // The argument is not evaluated! |
| 20215 | return true; |
| 20216 | |
| 20217 | case Builtin::BI__builtin_operator_delete: |
| 20218 | return HandleOperatorDeleteCall(Info, E); |
| 20219 | |
| 20220 | default: |
| 20221 | return false; |
| 20222 | } |
| 20223 | } |
| 20224 | |
| 20225 | bool VisitCXXDeleteExpr(const CXXDeleteExpr *E); |
| 20226 | }; |
| 20227 | } // end anonymous namespace |
| 20228 | |
| 20229 | bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) { |
| 20230 | // We cannot speculatively evaluate a delete expression. |
| 20231 | if (Info.SpeculativeEvaluationDepth) |
| 20232 | return false; |
| 20233 | |
| 20234 | FunctionDecl *OperatorDelete = E->getOperatorDelete(); |
| 20235 | if (!OperatorDelete |
| 20236 | ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) { |
| 20237 | Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable) |
| 20238 | << isa<CXXMethodDecl>(Val: OperatorDelete) << OperatorDelete; |
| 20239 | return false; |
| 20240 | } |
| 20241 | |
| 20242 | const Expr *Arg = E->getArgument(); |
| 20243 | |
| 20244 | LValue Pointer; |
| 20245 | if (!EvaluatePointer(E: Arg, Result&: Pointer, Info)) |
| 20246 | return false; |
| 20247 | if (Pointer.Designator.Invalid) |
| 20248 | return false; |
| 20249 | |
| 20250 | // Deleting a null pointer has no effect. |
| 20251 | if (Pointer.isNullPointer()) { |
| 20252 | // This is the only case where we need to produce an extension warning: |
| 20253 | // the only other way we can succeed is if we find a dynamic allocation, |
| 20254 | // and we will have warned when we allocated it in that case. |
| 20255 | if (!Info.getLangOpts().CPlusPlus20) |
| 20256 | Info.CCEDiag(E, DiagId: diag::note_constexpr_new); |
| 20257 | return true; |
| 20258 | } |
| 20259 | |
| 20260 | std::optional<DynAlloc *> Alloc = CheckDeleteKind( |
| 20261 | Info, E, Pointer, DeallocKind: E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New); |
| 20262 | if (!Alloc) |
| 20263 | return false; |
| 20264 | QualType AllocType = Pointer.Base.getDynamicAllocType(); |
| 20265 | |
| 20266 | // For the non-array case, the designator must be empty if the static type |
| 20267 | // does not have a virtual destructor. |
| 20268 | if (!E->isArrayForm() && Pointer.Designator.Entries.size() != 0 && |
| 20269 | !hasVirtualDestructor(T: Arg->getType()->getPointeeType())) { |
| 20270 | Info.FFDiag(E, DiagId: diag::note_constexpr_delete_base_nonvirt_dtor) |
| 20271 | << Arg->getType()->getPointeeType() << AllocType; |
| 20272 | return false; |
| 20273 | } |
| 20274 | |
| 20275 | // For a class type with a virtual destructor, the selected operator delete |
| 20276 | // is the one looked up when building the destructor. |
| 20277 | if (!E->isArrayForm() && !E->isGlobalDelete()) { |
| 20278 | const FunctionDecl *VirtualDelete = getVirtualOperatorDelete(T: AllocType); |
| 20279 | if (VirtualDelete && |
| 20280 | !VirtualDelete |
| 20281 | ->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) { |
| 20282 | Info.FFDiag(E, DiagId: diag::note_constexpr_new_non_replaceable) |
| 20283 | << isa<CXXMethodDecl>(Val: VirtualDelete) << VirtualDelete; |
| 20284 | return false; |
| 20285 | } |
| 20286 | } |
| 20287 | |
| 20288 | if (!HandleDestruction(Info, Loc: E->getExprLoc(), LVBase: Pointer.getLValueBase(), |
| 20289 | Value&: (*Alloc)->Value, T: AllocType)) |
| 20290 | return false; |
| 20291 | |
| 20292 | if (!Info.HeapAllocs.erase(x: Pointer.Base.dyn_cast<DynamicAllocLValue>())) { |
| 20293 | // The element was already erased. This means the destructor call also |
| 20294 | // deleted the object. |
| 20295 | // FIXME: This probably results in undefined behavior before we get this |
| 20296 | // far, and should be diagnosed elsewhere first. |
| 20297 | Info.FFDiag(E, DiagId: diag::note_constexpr_double_delete); |
| 20298 | return false; |
| 20299 | } |
| 20300 | |
| 20301 | return true; |
| 20302 | } |
| 20303 | |
| 20304 | static bool EvaluateVoid(const Expr *E, EvalInfo &Info) { |
| 20305 | assert(!E->isValueDependent()); |
| 20306 | assert(E->isPRValue() && E->getType()->isVoidType()); |
| 20307 | return VoidExprEvaluator(Info).Visit(S: E); |
| 20308 | } |
| 20309 | |
| 20310 | //===----------------------------------------------------------------------===// |
| 20311 | // Top level Expr::EvaluateAsRValue method. |
| 20312 | //===----------------------------------------------------------------------===// |
| 20313 | |
| 20314 | static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) { |
| 20315 | assert(!E->isValueDependent()); |
| 20316 | // In C, function designators are not lvalues, but we evaluate them as if they |
| 20317 | // are. |
| 20318 | QualType T = E->getType(); |
| 20319 | if (E->isGLValue() || T->isFunctionType()) { |
| 20320 | LValue LV; |
| 20321 | if (!EvaluateLValue(E, Result&: LV, Info)) |
| 20322 | return false; |
| 20323 | LV.moveInto(V&: Result); |
| 20324 | } else if (T->isVectorType()) { |
| 20325 | if (!EvaluateVector(E, Result, Info)) |
| 20326 | return false; |
| 20327 | } else if (T->isIntegralOrEnumerationType()) { |
| 20328 | if (!IntExprEvaluator(Info, Result).Visit(S: E)) |
| 20329 | return false; |
| 20330 | } else if (T->hasPointerRepresentation()) { |
| 20331 | LValue LV; |
| 20332 | if (!EvaluatePointer(E, Result&: LV, Info)) |
| 20333 | return false; |
| 20334 | LV.moveInto(V&: Result); |
| 20335 | } else if (T->isRealFloatingType()) { |
| 20336 | llvm::APFloat F(0.0); |
| 20337 | if (!EvaluateFloat(E, Result&: F, Info)) |
| 20338 | return false; |
| 20339 | Result = APValue(F); |
| 20340 | } else if (T->isAnyComplexType()) { |
| 20341 | ComplexValue C; |
| 20342 | if (!EvaluateComplex(E, Result&: C, Info)) |
| 20343 | return false; |
| 20344 | C.moveInto(v&: Result); |
| 20345 | } else if (T->isFixedPointType()) { |
| 20346 | if (!FixedPointExprEvaluator(Info, Result).Visit(S: E)) return false; |
| 20347 | } else if (T->isMemberPointerType()) { |
| 20348 | MemberPtr P; |
| 20349 | if (!EvaluateMemberPointer(E, Result&: P, Info)) |
| 20350 | return false; |
| 20351 | P.moveInto(V&: Result); |
| 20352 | return true; |
| 20353 | } else if (T->isArrayType()) { |
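|       | // Evaluate the array prvalue in place into a temporary created for the
|       | // full-expression, then copy the value out as the result.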
| 20354 | LValue LV; |
| 20355 | APValue &Value = |
| 20356 | Info.CurrentCall->createTemporary(Key: E, T, Scope: ScopeKind::FullExpression, LV); |
| 20357 | if (!EvaluateArray(E, This: LV, Result&: Value, Info)) |
| 20358 | return false; |
| 20359 | Result = Value; |
| 20360 | } else if (T->isRecordType()) { |
| 20361 | LValue LV; |
| 20362 | APValue &Value = |
| 20363 | Info.CurrentCall->createTemporary(Key: E, T, Scope: ScopeKind::FullExpression, LV); |
| 20364 | if (!EvaluateRecord(E, This: LV, Result&: Value, Info)) |
| 20365 | return false; |
| 20366 | Result = Value; |
| 20367 | } else if (T->isVoidType()) { |
| 20368 | if (!Info.getLangOpts().CPlusPlus11) |
| 20369 | Info.CCEDiag(E, DiagId: diag::note_constexpr_nonliteral) |
| 20370 | << E->getType(); |
| 20371 | if (!EvaluateVoid(E, Info)) |
| 20372 | return false; |
| 20373 | } else if (T->isAtomicType()) { |
| 20374 | QualType Unqual = T.getAtomicUnqualifiedType(); |
| 20375 | if (Unqual->isArrayType() || Unqual->isRecordType()) { |
| 20376 | LValue LV; |
| 20377 | APValue &Value = Info.CurrentCall->createTemporary( |
| 20378 | Key: E, T: Unqual, Scope: ScopeKind::FullExpression, LV); |
| 20379 | if (!EvaluateAtomic(E, This: &LV, Result&: Value, Info)) |
| 20380 | return false; |
| 20381 | Result = Value; |
| 20382 | } else { |
| 20383 | if (!EvaluateAtomic(E, This: nullptr, Result, Info)) |
| 20384 | return false; |
| 20385 | } |
| 20386 | } else if (Info.getLangOpts().CPlusPlus11) { |
| 20387 | Info.FFDiag(E, DiagId: diag::note_constexpr_nonliteral) << E->getType(); |
| 20388 | return false; |
| 20389 | } else { |
| 20390 | Info.FFDiag(E, DiagId: diag::note_invalid_subexpr_in_const_expr); |
| 20391 | return false; |
| 20392 | } |
| 20393 | |
| 20394 | return true; |
| 20395 | } |
| 20396 | |
| 20397 | /// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some |
| 20398 | /// cases, the in-place evaluation is essential, since later initializers for |
| 20399 | /// an object can indirectly refer to subobjects which were initialized earlier. |
| 20400 | static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This, |
| 20401 | const Expr *E, bool AllowNonLiteralTypes) { |
| 20402 | assert(!E->isValueDependent()); |
| 20403 | |
| 20404 | // Normally expressions passed to EvaluateInPlace have a type, but not when |
| 20405 | // a VarDecl initializer is evaluated before the untyped ParenListExpr is |
| 20406 | // replaced with a CXXConstructExpr. This can happen in LLDB. |
| 20407 | if (E->getType().isNull()) |
| 20408 | return false; |
| 20409 | |
| 20410 | if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, This: &This)) |
| 20411 | return false; |
| 20412 | |
| 20413 | if (E->isPRValue()) { |
| 20414 | // Evaluate arrays and record types in-place, so that later initializers can |
| 20415 | // refer to earlier-initialized members of the object. |
| 20416 | QualType T = E->getType(); |
| 20417 | if (T->isArrayType()) |
| 20418 | return EvaluateArray(E, This, Result, Info); |
| 20419 | else if (T->isRecordType()) |
| 20420 | return EvaluateRecord(E, This, Result, Info); |
| 20421 | else if (T->isAtomicType()) { |
| 20422 | QualType Unqual = T.getAtomicUnqualifiedType(); |
| 20423 | if (Unqual->isArrayType() || Unqual->isRecordType()) |
| 20424 | return EvaluateAtomic(E, This: &This, Result, Info); |
| 20425 | } |
| 20426 | } |
| 20427 | |
| 20428 | // For any other type, in-place evaluation is unimportant. |
| 20429 | return Evaluate(Result, Info, E); |
| 20430 | } |
| 20431 | |
| 20432 | /// EvaluateAsRValue - Try to evaluate this expression, performing an implicit |
| 20433 | /// lvalue-to-rvalue cast if it is an lvalue. |
| 20434 | static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) { |
| 20435 | assert(!E->isValueDependent()); |
| 20436 | |
| 20437 | if (E->getType().isNull()) |
| 20438 | return false; |
| 20439 | |
| 20440 | if (!CheckLiteralType(Info, E)) |
| 20441 | return false; |
| 20442 | |
| 20443 | if (Info.EnableNewConstInterp) { |
| 20444 | if (!Info.Ctx.getInterpContext().evaluateAsRValue(Parent&: Info, E, Result)) |
| 20445 | return false; |
| 20446 | return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result, |
| 20447 | Kind: ConstantExprKind::Normal); |
| 20448 | } |
| 20449 | |
| 20450 | if (!::Evaluate(Result, Info, E)) |
| 20451 | return false; |
| 20452 | |
| 20453 | // Implicit lvalue-to-rvalue cast. |
| 20454 | if (E->isGLValue()) { |
| 20455 | LValue LV; |
| 20456 | LV.setFrom(Ctx: Info.Ctx, V: Result); |
| 20457 | if (!handleLValueToRValueConversion(Info, Conv: E, Type: E->getType(), LVal: LV, RVal&: Result)) |
| 20458 | return false; |
| 20459 | } |
| 20460 | |
| 20461 | // Check this core constant expression is a constant expression. |
| 20462 | return CheckConstantExpression(Info, DiagLoc: E->getExprLoc(), Type: E->getType(), Value: Result, |
| 20463 | Kind: ConstantExprKind::Normal) && |
| 20464 | CheckMemoryLeaks(Info); |
| 20465 | } |
| 20466 | |
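|  | /// Fast-path evaluation for a handful of trivially constant expression forms
|  | /// (integer, boolean, floating-point and character literals, and ConstantExprs
|  | /// that already carry a value). Returns true if the fast path could settle the
|  | /// question, with IsConst saying whether a constant value was produced in
|  | /// Result; returns false if the caller must fall back to a full evaluation.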
| 20467 | static bool FastEvaluateAsRValue(const Expr *Exp, APValue &Result, |
| 20468 | const ASTContext &Ctx, bool &IsConst) { |
| 20469 | // Fast-path evaluations of integer literals, since we sometimes see files |
| 20470 | // containing vast quantities of these. |
| 20471 | if (const auto *L = dyn_cast<IntegerLiteral>(Val: Exp)) { |
| 20472 | Result = |
| 20473 | APValue(APSInt(L->getValue(), L->getType()->isUnsignedIntegerType())); |
| 20474 | IsConst = true; |
| 20475 | return true; |
| 20476 | } |
| 20477 | |
| 20478 | if (const auto *L = dyn_cast<CXXBoolLiteralExpr>(Val: Exp)) { |
| 20479 | Result = APValue(APSInt(APInt(1, L->getValue()))); |
| 20480 | IsConst = true; |
| 20481 | return true; |
| 20482 | } |
| 20483 | |
| 20484 | if (const auto *FL = dyn_cast<FloatingLiteral>(Val: Exp)) { |
| 20485 | Result = APValue(FL->getValue()); |
| 20486 | IsConst = true; |
| 20487 | return true; |
| 20488 | } |
| 20489 | |
| 20490 | if (const auto *L = dyn_cast<CharacterLiteral>(Val: Exp)) { |
| 20491 | Result = APValue(Ctx.MakeIntValue(Value: L->getValue(), Type: L->getType())); |
| 20492 | IsConst = true; |
| 20493 | return true; |
| 20494 | } |
| 20495 | |
| 20496 | if (const auto *CE = dyn_cast<ConstantExpr>(Val: Exp)) { |
| 20497 | if (CE->hasAPValueResult()) { |
| 20498 | APValue APV = CE->getAPValueResult(); |
| 20499 | if (!APV.isLValue()) { |
| 20500 | Result = std::move(APV); |
| 20501 | IsConst = true; |
| 20502 | return true; |
| 20503 | } |
| 20504 | } |
| 20505 | |
| 20506 | // The SubExpr is usually just an IntegerLiteral. |
| 20507 | return FastEvaluateAsRValue(Exp: CE->getSubExpr(), Result, Ctx, IsConst); |
| 20508 | } |
| 20509 | |
| 20510 | // This case should be rare, but we need to check it before the type-based
| 20511 | // checks below.
| 20512 | if (Exp->getType().isNull()) { |
| 20513 | IsConst = false; |
| 20514 | return true; |
| 20515 | } |
| 20516 | |
| 20517 | return false; |
| 20518 | } |
| 20519 | |
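|  | /// Report whether the evaluation encountered side effects (or undefined
|  | /// behavior) that the given SideEffectsKind does not permit.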
| 20520 | static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result, |
| 20521 | Expr::SideEffectsKind SEK) { |
| 20522 | return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) || |
| 20523 | (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior); |
| 20524 | } |
| 20525 | |
| 20526 | static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result, |
| 20527 | const ASTContext &Ctx, EvalInfo &Info) { |
| 20528 | assert(!E->isValueDependent()); |
| 20529 | bool IsConst; |
| 20530 | if (FastEvaluateAsRValue(Exp: E, Result&: Result.Val, Ctx, IsConst)) |
| 20531 | return IsConst; |
| 20532 | |
| 20533 | return EvaluateAsRValue(Info, E, Result&: Result.Val); |
| 20534 | } |
| 20535 | |
| 20536 | static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult, |
| 20537 | const ASTContext &Ctx, |
| 20538 | Expr::SideEffectsKind AllowSideEffects, |
| 20539 | EvalInfo &Info) { |
| 20540 | assert(!E->isValueDependent()); |
| 20541 | if (!E->getType()->isIntegralOrEnumerationType()) |
| 20542 | return false; |
| 20543 | |
| 20544 | if (!::EvaluateAsRValue(E, Result&: ExprResult, Ctx, Info) || |
| 20545 | !ExprResult.Val.isInt() || |
| 20546 | hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects)) |
| 20547 | return false; |
| 20548 | |
| 20549 | return true; |
| 20550 | } |
| 20551 | |
| 20552 | static bool EvaluateAsFixedPoint(const Expr *E, Expr::EvalResult &ExprResult, |
| 20553 | const ASTContext &Ctx, |
| 20554 | Expr::SideEffectsKind AllowSideEffects, |
| 20555 | EvalInfo &Info) { |
| 20556 | assert(!E->isValueDependent()); |
| 20557 | if (!E->getType()->isFixedPointType()) |
| 20558 | return false; |
| 20559 | |
| 20560 | if (!::EvaluateAsRValue(E, Result&: ExprResult, Ctx, Info)) |
| 20561 | return false; |
| 20562 | |
| 20563 | if (!ExprResult.Val.isFixedPoint() || |
| 20564 | hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects)) |
| 20565 | return false; |
| 20566 | |
| 20567 | return true; |
| 20568 | } |
| 20569 | |
| 20570 | /// EvaluateAsRValue - Return true if this is a constant which we can fold using |
| 20571 | /// any crazy technique (that has nothing to do with language standards) that |
| 20572 | /// we want to. If this function returns true, it returns the folded constant |
| 20573 | /// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion |
| 20574 | /// will be applied to the result. |
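|  | ///
|  | /// A minimal usage sketch (illustrative only):
|  | ///   Expr::EvalResult ER;
|  | ///   if (E->EvaluateAsRValue(ER, Ctx)) {
|  | ///     // The folded value is in ER.Val; ER.HasSideEffects records whether
|  | ///     // unmodeled side effects were encountered along the way.
|  | ///   }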
| 20575 | bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, |
| 20576 | bool InConstantContext) const { |
| 20577 | assert(!isValueDependent() && |
| 20578 | "Expression evaluator can't be called on a dependent expression." ); |
| 20579 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsRValue" ); |
| 20580 | EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects); |
| 20581 | Info.InConstantContext = InConstantContext; |
| 20582 | return ::EvaluateAsRValue(E: this, Result, Ctx, Info); |
| 20583 | } |
| 20584 | |
| 20585 | bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx, |
| 20586 | bool InConstantContext) const { |
| 20587 | assert(!isValueDependent() && |
| 20588 | "Expression evaluator can't be called on a dependent expression." ); |
| 20589 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsBooleanCondition" ); |
| 20590 | EvalResult Scratch; |
| 20591 | return EvaluateAsRValue(Result&: Scratch, Ctx, InConstantContext) && |
| 20592 | HandleConversionToBool(Val: Scratch.Val, Result); |
| 20593 | } |
| 20594 | |
| 20595 | bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, |
| 20596 | SideEffectsKind AllowSideEffects, |
| 20597 | bool InConstantContext) const { |
| 20598 | assert(!isValueDependent() && |
| 20599 | "Expression evaluator can't be called on a dependent expression." ); |
| 20600 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsInt" ); |
| 20601 | EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects); |
| 20602 | Info.InConstantContext = InConstantContext; |
| 20603 | return ::EvaluateAsInt(E: this, ExprResult&: Result, Ctx, AllowSideEffects, Info); |
| 20604 | } |
| 20605 | |
| 20606 | bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx, |
| 20607 | SideEffectsKind AllowSideEffects, |
| 20608 | bool InConstantContext) const { |
| 20609 | assert(!isValueDependent() && |
| 20610 | "Expression evaluator can't be called on a dependent expression." ); |
| 20611 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFixedPoint" ); |
| 20612 | EvalInfo Info(Ctx, Result, EvaluationMode::IgnoreSideEffects); |
| 20613 | Info.InConstantContext = InConstantContext; |
| 20614 | return ::EvaluateAsFixedPoint(E: this, ExprResult&: Result, Ctx, AllowSideEffects, Info); |
| 20615 | } |
| 20616 | |
| 20617 | bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx, |
| 20618 | SideEffectsKind AllowSideEffects, |
| 20619 | bool InConstantContext) const { |
| 20620 | assert(!isValueDependent() && |
| 20621 | "Expression evaluator can't be called on a dependent expression." ); |
| 20622 | |
| 20623 | if (!getType()->isRealFloatingType()) |
| 20624 | return false; |
| 20625 | |
| 20626 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFloat" ); |
| 20627 | EvalResult ExprResult; |
| 20628 | if (!EvaluateAsRValue(Result&: ExprResult, Ctx, InConstantContext) || |
| 20629 | !ExprResult.Val.isFloat() || |
| 20630 | hasUnacceptableSideEffect(Result&: ExprResult, SEK: AllowSideEffects)) |
| 20631 | return false; |
| 20632 | |
| 20633 | Result = ExprResult.Val.getFloat(); |
| 20634 | return true; |
| 20635 | } |
| 20636 | |
| 20637 | bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, |
| 20638 | bool InConstantContext) const { |
| 20639 | assert(!isValueDependent() && |
| 20640 | "Expression evaluator can't be called on a dependent expression." ); |
| 20641 | |
| 20642 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsLValue" ); |
| 20643 | EvalInfo Info(Ctx, Result, EvaluationMode::ConstantFold); |
| 20644 | Info.InConstantContext = InConstantContext; |
| 20645 | LValue LV; |
| 20646 | CheckedTemporaries CheckedTemps; |
| 20647 | |
| 20648 | if (Info.EnableNewConstInterp) { |
| 20649 | if (!Info.Ctx.getInterpContext().evaluate(Parent&: Info, E: this, Result&: Result.Val, |
| 20650 | Kind: ConstantExprKind::Normal)) |
| 20651 | return false; |
| 20652 | |
| 20653 | LV.setFrom(Ctx, V: Result.Val); |
| 20654 | return CheckLValueConstantExpression( |
| 20655 | Info, Loc: getExprLoc(), Type: Ctx.getLValueReferenceType(T: getType()), LVal: LV, |
| 20656 | Kind: ConstantExprKind::Normal, CheckedTemps); |
| 20657 | } |
| 20658 | |
| 20659 | if (!EvaluateLValue(E: this, Result&: LV, Info) || !Info.discardCleanups() || |
| 20660 | Result.HasSideEffects || |
| 20661 | !CheckLValueConstantExpression(Info, Loc: getExprLoc(), |
| 20662 | Type: Ctx.getLValueReferenceType(T: getType()), LVal: LV, |
| 20663 | Kind: ConstantExprKind::Normal, CheckedTemps)) |
| 20664 | return false; |
| 20665 | |
| 20666 | LV.moveInto(V&: Result.Val); |
| 20667 | return true; |
| 20668 | } |
| 20669 | |
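|  | /// Evaluate the destruction of an already-evaluated object value. When
|  | /// IsConstantDestruction is set, the destructor invocation must itself be a
|  | /// constant expression (as required, e.g., for class-type template arguments
|  | /// and constexpr variables); otherwise we only try to fold it.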
| 20670 | static bool EvaluateDestruction(const ASTContext &Ctx, APValue::LValueBase Base, |
| 20671 | APValue DestroyedValue, QualType Type, |
| 20672 | SourceLocation Loc, Expr::EvalStatus &EStatus, |
| 20673 | bool IsConstantDestruction) { |
| 20674 | EvalInfo Info(Ctx, EStatus, |
| 20675 | IsConstantDestruction ? EvaluationMode::ConstantExpression |
| 20676 | : EvaluationMode::ConstantFold); |
| 20677 | Info.setEvaluatingDecl(Base, Value&: DestroyedValue, |
| 20678 | EDK: EvalInfo::EvaluatingDeclKind::Dtor); |
| 20679 | Info.InConstantContext = IsConstantDestruction; |
| 20680 | |
| 20681 | LValue LVal; |
| 20682 | LVal.set(B: Base); |
| 20683 | |
| 20684 | if (!HandleDestruction(Info, Loc, LVBase: Base, Value&: DestroyedValue, T: Type) || |
| 20685 | EStatus.HasSideEffects) |
| 20686 | return false; |
| 20687 | |
| 20688 | if (!Info.discardCleanups()) |
| 20689 | llvm_unreachable("Unhandled cleanup; missing full expression marker?" ); |
| 20690 | |
| 20691 | return true; |
| 20692 | } |
| 20693 | |
| 20694 | bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx, |
| 20695 | ConstantExprKind Kind) const { |
| 20696 | assert(!isValueDependent() && |
| 20697 | "Expression evaluator can't be called on a dependent expression." ); |
| 20698 | bool IsConst; |
| 20699 | if (FastEvaluateAsRValue(Exp: this, Result&: Result.Val, Ctx, IsConst) && |
| 20700 | Result.Val.hasValue()) |
| 20701 | return true; |
| 20702 | |
| 20703 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsConstantExpr" ); |
| 20704 | EvaluationMode EM = EvaluationMode::ConstantExpression; |
| 20705 | EvalInfo Info(Ctx, Result, EM); |
| 20706 | Info.InConstantContext = true; |
| 20707 | |
| 20708 | if (Info.EnableNewConstInterp) { |
| 20709 | if (!Info.Ctx.getInterpContext().evaluate(Parent&: Info, E: this, Result&: Result.Val, Kind)) |
| 20710 | return false; |
| 20711 | return CheckConstantExpression(Info, DiagLoc: getExprLoc(), |
| 20712 | Type: getStorageType(Ctx, E: this), Value: Result.Val, Kind); |
| 20713 | } |
| 20714 | |
| 20715 | // The type of the object we're initializing is 'const T' for a class NTTP. |
| 20716 | QualType T = getType(); |
| 20717 | if (Kind == ConstantExprKind::ClassTemplateArgument) |
| 20718 | T.addConst(); |
| 20719 | |
| 20720 | // If we're evaluating a prvalue, fake up a MaterializeTemporaryExpr to |
| 20721 | // represent the result of the evaluation. CheckConstantExpression ensures |
| 20722 | // this doesn't escape. |
| 20723 | MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true); |
| 20724 | APValue::LValueBase Base(&BaseMTE); |
| 20725 | Info.setEvaluatingDecl(Base, Value&: Result.Val); |
| 20726 | |
| 20727 | LValue LVal; |
| 20728 | LVal.set(B: Base); |
| 20729 | // C++23 [intro.execution]/p5 |
| 20730 | // A full-expression is [...] a constant-expression |
| 20731 | // So we need to make sure temporary objects are destroyed after having
| 20732 | // evaluated the expression (per C++23 [class.temporary]/p4).
| 20733 | FullExpressionRAII Scope(Info); |
| 20734 | if (!::EvaluateInPlace(Result&: Result.Val, Info, This: LVal, E: this) || |
| 20735 | Result.HasSideEffects || !Scope.destroy()) |
| 20736 | return false; |
| 20737 | |
| 20738 | if (!Info.discardCleanups()) |
| 20739 | llvm_unreachable("Unhandled cleanup; missing full expression marker?" ); |
| 20740 | |
| 20741 | if (!CheckConstantExpression(Info, DiagLoc: getExprLoc(), Type: getStorageType(Ctx, E: this), |
| 20742 | Value: Result.Val, Kind)) |
| 20743 | return false; |
| 20744 | if (!CheckMemoryLeaks(Info)) |
| 20745 | return false; |
| 20746 | |
| 20747 | // If this is a class template argument, it's required to have constant |
| 20748 | // destruction too. |
| 20749 | if (Kind == ConstantExprKind::ClassTemplateArgument && |
| 20750 | (!EvaluateDestruction(Ctx, Base, DestroyedValue: Result.Val, Type: T, Loc: getBeginLoc(), EStatus&: Result, |
| 20751 | IsConstantDestruction: true) || |
| 20752 | Result.HasSideEffects)) { |
| 20753 | // FIXME: Prefix a note to indicate that the problem is lack of constant |
| 20754 | // destruction. |
| 20755 | return false; |
| 20756 | } |
| 20757 | |
| 20758 | return true; |
| 20759 | } |
| 20760 | |
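|  | // Evaluate this expression as the initializer for the variable VD, writing the
|  | // result into Value. For constant initialization in C++ or C23 the initializer
|  | // must be a full constant expression; otherwise we only attempt to constant
|  | // fold it.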
| 20761 | bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx, |
| 20762 | const VarDecl *VD, |
| 20763 | SmallVectorImpl<PartialDiagnosticAt> &Notes, |
| 20764 | bool IsConstantInitialization) const { |
| 20765 | assert(!isValueDependent() && |
| 20766 | "Expression evaluator can't be called on a dependent expression." ); |
| 20767 | assert(VD && "Need a valid VarDecl" ); |
| 20768 | |
| 20769 | llvm::TimeTraceScope TimeScope("EvaluateAsInitializer" , [&] { |
| 20770 | std::string Name; |
| 20771 | llvm::raw_string_ostream OS(Name); |
| 20772 | VD->printQualifiedName(OS); |
| 20773 | return Name; |
| 20774 | }); |
| 20775 | |
| 20776 | Expr::EvalStatus EStatus; |
| 20777 | EStatus.Diag = &Notes; |
| 20778 | |
| 20779 | EvalInfo Info(Ctx, EStatus, |
| 20780 | (IsConstantInitialization && |
| 20781 | (Ctx.getLangOpts().CPlusPlus || Ctx.getLangOpts().C23)) |
| 20782 | ? EvaluationMode::ConstantExpression |
| 20783 | : EvaluationMode::ConstantFold); |
| 20784 | Info.setEvaluatingDecl(Base: VD, Value); |
| 20785 | Info.InConstantContext = IsConstantInitialization; |
| 20786 | |
| 20787 | SourceLocation DeclLoc = VD->getLocation(); |
| 20788 | QualType DeclTy = VD->getType(); |
| 20789 | |
| 20790 | if (Info.EnableNewConstInterp) { |
| 20791 | auto &InterpCtx = const_cast<ASTContext &>(Ctx).getInterpContext(); |
| 20792 | if (!InterpCtx.evaluateAsInitializer(Parent&: Info, VD, Init: this, Result&: Value)) |
| 20793 | return false; |
| 20794 | |
| 20795 | return CheckConstantExpression(Info, DiagLoc: DeclLoc, Type: DeclTy, Value, |
| 20796 | Kind: ConstantExprKind::Normal); |
| 20797 | } else { |
| 20798 | LValue LVal; |
| 20799 | LVal.set(B: VD); |
| 20800 | |
| 20801 | { |
| 20802 | // C++23 [intro.execution]/p5 |
| 20803 | // A full-expression is ... an init-declarator ([dcl.decl]) or a |
| 20804 | // mem-initializer. |
| 20805 | // So we need to make sure temporary objects are destroyed after having |
| 20806 | // evaluated the expression (per C++23 [class.temporary]/p4). |
| 20807 | // |
| 20808 | // FIXME: Otherwise this may break test/Modules/pr68702.cpp because the |
| 20809 | // serialization code calls ParmVarDecl::getDefaultArg() which strips the |
| 20810 | // outermost FullExpr, such as ExprWithCleanups. |
| 20811 | FullExpressionRAII Scope(Info); |
| 20812 | if (!EvaluateInPlace(Result&: Value, Info, This: LVal, E: this, |
| 20813 | /*AllowNonLiteralTypes=*/true) || |
| 20814 | EStatus.HasSideEffects) |
| 20815 | return false; |
| 20816 | } |
| 20817 | |
| 20818 | // At this point, any lifetime-extended temporaries are completely |
| 20819 | // initialized. |
| 20820 | Info.performLifetimeExtension(); |
| 20821 | |
| 20822 | if (!Info.discardCleanups()) |
| 20823 | llvm_unreachable("Unhandled cleanup; missing full expression marker?" ); |
| 20824 | } |
| 20825 | |
| 20826 | return CheckConstantExpression(Info, DiagLoc: DeclLoc, Type: DeclTy, Value, |
| 20827 | Kind: ConstantExprKind::Normal) && |
| 20828 | CheckMemoryLeaks(Info); |
| 20829 | } |
| 20830 | |
| 20831 | bool VarDecl::evaluateDestruction( |
| 20832 | SmallVectorImpl<PartialDiagnosticAt> &Notes) const { |
| 20833 | Expr::EvalStatus EStatus; |
| 20834 | EStatus.Diag = &Notes; |
| 20835 | |
| 20836 | // Only treat the destruction as constant destruction if the variable formally
| 20837 | // has constant initialization (or is usable in a constant expression).
| 20838 | bool IsConstantDestruction = hasConstantInitialization(); |
| 20839 | |
| 20840 | // Make a copy of the value for the destructor to mutate, if we know it. |
| 20841 | // Otherwise, treat the value as default-initialized; if the destructor works |
| 20842 | // anyway, then the destruction is constant (and must be essentially empty). |
| 20843 | APValue DestroyedValue; |
| 20844 | if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent()) |
| 20845 | DestroyedValue = *getEvaluatedValue(); |
| 20846 | else if (!handleDefaultInitValue(T: getType(), Result&: DestroyedValue)) |
| 20847 | return false; |
| 20848 | |
| 20849 | if (!EvaluateDestruction(Ctx: getASTContext(), Base: this, DestroyedValue: std::move(DestroyedValue), |
| 20850 | Type: getType(), Loc: getLocation(), EStatus, |
| 20851 | IsConstantDestruction) || |
| 20852 | EStatus.HasSideEffects) |
| 20853 | return false; |
| 20854 | |
| 20855 | ensureEvaluatedStmt()->HasConstantDestruction = true; |
| 20856 | return true; |
| 20857 | } |
| 20858 | |
| 20859 | /// isEvaluatable - Call EvaluateAsRValue to see if this expression can be |
| 20860 | /// constant folded, but discard the result. |
| 20861 | bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const { |
| 20862 | assert(!isValueDependent() && |
| 20863 | "Expression evaluator can't be called on a dependent expression." ); |
| 20864 | |
| 20865 | EvalResult Result; |
| 20866 | return EvaluateAsRValue(Result, Ctx, /* in constant context */ InConstantContext: true) && |
| 20867 | !hasUnacceptableSideEffect(Result, SEK); |
| 20868 | } |
| 20869 | |
| 20870 | APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const { |
| 20871 | assert(!isValueDependent() && |
| 20872 | "Expression evaluator can't be called on a dependent expression." ); |
| 20873 | |
| 20874 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstInt" ); |
| 20875 | EvalResult EVResult; |
| 20876 | EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects); |
| 20877 | Info.InConstantContext = true; |
| 20878 | |
| 20879 | bool Result = ::EvaluateAsRValue(E: this, Result&: EVResult, Ctx, Info); |
| 20880 | (void)Result; |
| 20881 | assert(Result && "Could not evaluate expression" ); |
| 20882 | assert(EVResult.Val.isInt() && "Expression did not evaluate to integer" ); |
| 20883 | |
| 20884 | return EVResult.Val.getInt(); |
| 20885 | } |
| 20886 | |
| 20887 | APSInt Expr::EvaluateKnownConstIntCheckOverflow( |
| 20888 | const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const { |
| 20889 | assert(!isValueDependent() && |
| 20890 | "Expression evaluator can't be called on a dependent expression." ); |
| 20891 | |
| 20892 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstIntCheckOverflow" ); |
| 20893 | EvalResult EVResult; |
| 20894 | EVResult.Diag = Diag; |
| 20895 | EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects); |
| 20896 | Info.InConstantContext = true; |
| 20897 | Info.CheckingForUndefinedBehavior = true; |
| 20898 | |
| 20899 | bool Result = ::EvaluateAsRValue(Info, E: this, Result&: EVResult.Val); |
| 20900 | (void)Result; |
| 20901 | assert(Result && "Could not evaluate expression" ); |
| 20902 | assert(EVResult.Val.isInt() && "Expression did not evaluate to integer" ); |
| 20903 | |
| 20904 | return EVResult.Val.getInt(); |
| 20905 | } |
| 20906 | |
| 20907 | void Expr::EvaluateForOverflow(const ASTContext &Ctx) const { |
| 20908 | assert(!isValueDependent() && |
| 20909 | "Expression evaluator can't be called on a dependent expression." ); |
| 20910 | |
| 20911 | ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateForOverflow" ); |
| 20912 | bool IsConst; |
| 20913 | EvalResult EVResult; |
| 20914 | if (!FastEvaluateAsRValue(Exp: this, Result&: EVResult.Val, Ctx, IsConst)) { |
| 20915 | EvalInfo Info(Ctx, EVResult, EvaluationMode::IgnoreSideEffects); |
| 20916 | Info.CheckingForUndefinedBehavior = true; |
| 20917 | (void)::EvaluateAsRValue(Info, E: this, Result&: EVResult.Val); |
| 20918 | } |
| 20919 | } |
| 20920 | |
| 20921 | bool Expr::EvalResult::isGlobalLValue() const { |
| 20922 | assert(Val.isLValue()); |
| 20923 | return IsGlobalLValue(B: Val.getLValueBase()); |
| 20924 | } |
| 20925 | |
| 20926 | /// isIntegerConstantExpr - This recursive routine tests whether an expression
| 20927 | /// is an integer constant expression.
| 20928 | 
| 20929 | /// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
| 20930 | /// comma, etc.
| 20931 | |
| 20932 | // CheckICE - This function does the fundamental ICE checking: the returned |
| 20933 | // ICEDiag contains an ICEKind indicating whether the expression is an ICE. |
| 20934 | // |
| 20935 | // Note that to reduce code duplication, this helper does no evaluation |
| 20936 | // itself; the caller checks whether the expression is evaluatable, and |
| 20937 | // in the rare cases where CheckICE actually cares about the evaluated |
| 20938 | // value, it calls into Evaluate. |
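|  | //
|  | // For example, in C99 '0 ? (1, 2) : 3' is still an ICE: the comma expression
|  | // '(1, 2)' is not an ICE on its own, but it is never evaluated, so it does
|  | // not disqualify the enclosing conditional.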
| 20939 | |
| 20940 | namespace { |
| 20941 | |
| 20942 | enum ICEKind { |
| 20943 | /// This expression is an ICE. |
| 20944 | IK_ICE, |
| 20945 | /// This expression is not an ICE, but if it isn't evaluated, it's |
| 20946 | /// a legal subexpression for an ICE. This return value is used to handle |
| 20947 | /// the comma operator in C99 mode, and non-constant subexpressions. |
| 20948 | IK_ICEIfUnevaluated, |
| 20949 | /// This expression is not an ICE, and is not a legal subexpression for one. |
| 20950 | IK_NotICE |
| 20951 | }; |
| 20952 | |
| 20953 | struct ICEDiag { |
| 20954 | ICEKind Kind; |
| 20955 | SourceLocation Loc; |
| 20956 | |
| 20957 | ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {} |
| 20958 | }; |
| 20959 | |
| 20960 | } |
| 20961 | |
| 20962 | static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); } |
| 20963 | |
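|  | // Return the worse of the two results, relying on ICEKind being ordered from
|  | // best (IK_ICE) to worst (IK_NotICE).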
| 20964 | static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; } |
| 20965 | |
| 20966 | static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) { |
| 20967 | Expr::EvalResult EVResult; |
| 20968 | Expr::EvalStatus Status; |
| 20969 | EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression); |
| 20970 | |
| 20971 | Info.InConstantContext = true; |
| 20972 | if (!::EvaluateAsRValue(E, Result&: EVResult, Ctx, Info) || EVResult.HasSideEffects || |
| 20973 | !EVResult.Val.isInt()) |
| 20974 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 20975 | |
| 20976 | return NoDiag(); |
| 20977 | } |
| 20978 | |
| 20979 | static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { |
| 20980 | assert(!E->isValueDependent() && "Should not see value dependent exprs!" ); |
| 20981 | if (!E->getType()->isIntegralOrEnumerationType()) |
| 20982 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 20983 | |
| 20984 | switch (E->getStmtClass()) { |
| 20985 | #define ABSTRACT_STMT(Node) |
| 20986 | #define STMT(Node, Base) case Expr::Node##Class: |
| 20987 | #define EXPR(Node, Base) |
| 20988 | #include "clang/AST/StmtNodes.inc" |
| 20989 | case Expr::PredefinedExprClass: |
| 20990 | case Expr::FloatingLiteralClass: |
| 20991 | case Expr::ImaginaryLiteralClass: |
| 20992 | case Expr::StringLiteralClass: |
| 20993 | case Expr::ArraySubscriptExprClass: |
| 20994 | case Expr::MatrixSingleSubscriptExprClass: |
| 20995 | case Expr::MatrixSubscriptExprClass: |
| 20996 | case Expr::ArraySectionExprClass: |
| 20997 | case Expr::OMPArrayShapingExprClass: |
| 20998 | case Expr::OMPIteratorExprClass: |
| 20999 | case Expr::MemberExprClass: |
| 21000 | case Expr::CompoundAssignOperatorClass: |
| 21001 | case Expr::CompoundLiteralExprClass: |
| 21002 | case Expr::ExtVectorElementExprClass: |
| 21003 | case Expr::DesignatedInitExprClass: |
| 21004 | case Expr::ArrayInitLoopExprClass: |
| 21005 | case Expr::ArrayInitIndexExprClass: |
| 21006 | case Expr::NoInitExprClass: |
| 21007 | case Expr::DesignatedInitUpdateExprClass: |
| 21008 | case Expr::ImplicitValueInitExprClass: |
| 21009 | case Expr::ParenListExprClass: |
| 21010 | case Expr::VAArgExprClass: |
| 21011 | case Expr::AddrLabelExprClass: |
| 21012 | case Expr::StmtExprClass: |
| 21013 | case Expr::CXXMemberCallExprClass: |
| 21014 | case Expr::CUDAKernelCallExprClass: |
| 21015 | case Expr::CXXAddrspaceCastExprClass: |
| 21016 | case Expr::CXXDynamicCastExprClass: |
| 21017 | case Expr::CXXTypeidExprClass: |
| 21018 | case Expr::CXXUuidofExprClass: |
| 21019 | case Expr::MSPropertyRefExprClass: |
| 21020 | case Expr::MSPropertySubscriptExprClass: |
| 21021 | case Expr::CXXNullPtrLiteralExprClass: |
| 21022 | case Expr::UserDefinedLiteralClass: |
| 21023 | case Expr::CXXThisExprClass: |
| 21024 | case Expr::CXXThrowExprClass: |
| 21025 | case Expr::CXXNewExprClass: |
| 21026 | case Expr::CXXDeleteExprClass: |
| 21027 | case Expr::CXXPseudoDestructorExprClass: |
| 21028 | case Expr::UnresolvedLookupExprClass: |
| 21029 | case Expr::RecoveryExprClass: |
| 21030 | case Expr::DependentScopeDeclRefExprClass: |
| 21031 | case Expr::CXXConstructExprClass: |
| 21032 | case Expr::CXXInheritedCtorInitExprClass: |
| 21033 | case Expr::CXXStdInitializerListExprClass: |
| 21034 | case Expr::CXXBindTemporaryExprClass: |
| 21035 | case Expr::ExprWithCleanupsClass: |
| 21036 | case Expr::CXXTemporaryObjectExprClass: |
| 21037 | case Expr::CXXUnresolvedConstructExprClass: |
| 21038 | case Expr::CXXDependentScopeMemberExprClass: |
| 21039 | case Expr::UnresolvedMemberExprClass: |
| 21040 | case Expr::ObjCStringLiteralClass: |
| 21041 | case Expr::ObjCBoxedExprClass: |
| 21042 | case Expr::ObjCArrayLiteralClass: |
| 21043 | case Expr::ObjCDictionaryLiteralClass: |
| 21044 | case Expr::ObjCEncodeExprClass: |
| 21045 | case Expr::ObjCMessageExprClass: |
| 21046 | case Expr::ObjCSelectorExprClass: |
| 21047 | case Expr::ObjCProtocolExprClass: |
| 21048 | case Expr::ObjCIvarRefExprClass: |
| 21049 | case Expr::ObjCPropertyRefExprClass: |
| 21050 | case Expr::ObjCSubscriptRefExprClass: |
| 21051 | case Expr::ObjCIsaExprClass: |
| 21052 | case Expr::ObjCAvailabilityCheckExprClass: |
| 21053 | case Expr::ShuffleVectorExprClass: |
| 21054 | case Expr::ConvertVectorExprClass: |
| 21055 | case Expr::BlockExprClass: |
| 21056 | case Expr::NoStmtClass: |
| 21057 | case Expr::OpaqueValueExprClass: |
| 21058 | case Expr::PackExpansionExprClass: |
| 21059 | case Expr::SubstNonTypeTemplateParmPackExprClass: |
| 21060 | case Expr::FunctionParmPackExprClass: |
| 21061 | case Expr::AsTypeExprClass: |
| 21062 | case Expr::ObjCIndirectCopyRestoreExprClass: |
| 21063 | case Expr::MaterializeTemporaryExprClass: |
| 21064 | case Expr::PseudoObjectExprClass: |
| 21065 | case Expr::AtomicExprClass: |
| 21066 | case Expr::LambdaExprClass: |
| 21067 | case Expr::CXXFoldExprClass: |
| 21068 | case Expr::CoawaitExprClass: |
| 21069 | case Expr::DependentCoawaitExprClass: |
| 21070 | case Expr::CoyieldExprClass: |
| 21071 | case Expr::SYCLUniqueStableNameExprClass: |
| 21072 | case Expr::CXXParenListInitExprClass: |
| 21073 | case Expr::HLSLOutArgExprClass: |
| 21074 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21075 | |
| 21076 | case Expr::InitListExprClass: { |
| 21077 | // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the |
| 21078 | // form "T x = { a };" is equivalent to "T x = a;". |
| 21079 | // Unless we're initializing a reference, T is a scalar as it is known to be |
| 21080 | // of integral or enumeration type. |
| 21081 | if (E->isPRValue()) |
| 21082 | if (cast<InitListExpr>(Val: E)->getNumInits() == 1) |
| 21083 | return CheckICE(E: cast<InitListExpr>(Val: E)->getInit(Init: 0), Ctx); |
| 21084 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21085 | } |
| 21086 | |
| 21087 | case Expr::SizeOfPackExprClass: |
| 21088 | case Expr::GNUNullExprClass: |
| 21089 | case Expr::SourceLocExprClass: |
| 21090 | case Expr::EmbedExprClass: |
| 21091 | case Expr::OpenACCAsteriskSizeExprClass: |
| 21092 | return NoDiag(); |
| 21093 | |
| 21094 | case Expr::PackIndexingExprClass: |
| 21095 | return CheckICE(E: cast<PackIndexingExpr>(Val: E)->getSelectedExpr(), Ctx); |
| 21096 | |
| 21097 | case Expr::SubstNonTypeTemplateParmExprClass: |
| 21098 | return |
| 21099 | CheckICE(E: cast<SubstNonTypeTemplateParmExpr>(Val: E)->getReplacement(), Ctx); |
| 21100 | |
| 21101 | case Expr::ConstantExprClass: |
| 21102 | return CheckICE(E: cast<ConstantExpr>(Val: E)->getSubExpr(), Ctx); |
| 21103 | |
| 21104 | case Expr::ParenExprClass: |
| 21105 | return CheckICE(E: cast<ParenExpr>(Val: E)->getSubExpr(), Ctx); |
| 21106 | case Expr::GenericSelectionExprClass: |
| 21107 | return CheckICE(E: cast<GenericSelectionExpr>(Val: E)->getResultExpr(), Ctx); |
| 21108 | case Expr::IntegerLiteralClass: |
| 21109 | case Expr::FixedPointLiteralClass: |
| 21110 | case Expr::CharacterLiteralClass: |
| 21111 | case Expr::ObjCBoolLiteralExprClass: |
| 21112 | case Expr::CXXBoolLiteralExprClass: |
| 21113 | case Expr::CXXScalarValueInitExprClass: |
| 21114 | case Expr::TypeTraitExprClass: |
| 21115 | case Expr::ConceptSpecializationExprClass: |
| 21116 | case Expr::RequiresExprClass: |
| 21117 | case Expr::ArrayTypeTraitExprClass: |
| 21118 | case Expr::ExpressionTraitExprClass: |
| 21119 | case Expr::CXXNoexceptExprClass: |
| 21120 | return NoDiag(); |
| 21121 | case Expr::CallExprClass: |
| 21122 | case Expr::CXXOperatorCallExprClass: { |
| 21123 | // C99 6.6/3 allows function calls within unevaluated subexpressions of |
| 21124 | // constant expressions, but they can never be ICEs because an ICE cannot |
| 21125 | // contain an operand of (pointer to) function type. |
| 21126 | const CallExpr *CE = cast<CallExpr>(Val: E); |
| 21127 | if (CE->getBuiltinCallee()) |
| 21128 | return CheckEvalInICE(E, Ctx); |
| 21129 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21130 | } |
| 21131 | case Expr::CXXRewrittenBinaryOperatorClass: |
| 21132 | return CheckICE(E: cast<CXXRewrittenBinaryOperator>(Val: E)->getSemanticForm(), |
| 21133 | Ctx); |
| 21134 | case Expr::DeclRefExprClass: { |
| 21135 | const NamedDecl *D = cast<DeclRefExpr>(Val: E)->getDecl(); |
| 21136 | if (isa<EnumConstantDecl>(Val: D)) |
| 21137 | return NoDiag(); |
| 21138 | |
| 21139 | // C++ and OpenCL (FIXME: spec reference?) allow reading const-qualified |
| 21140 | // integer variables in constant expressions: |
| 21141 | // |
| 21142 | // C++ 7.1.5.1p2 |
| 21143 | // A variable of non-volatile const-qualified integral or enumeration |
| 21144 | // type initialized by an ICE can be used in ICEs. |
| 21145 | // |
| 21146 | // We sometimes use CheckICE to check the C++98 rules in C++11 mode. In |
| 21147 | // that mode, use of reference variables should not be allowed. |
| 21148 | const VarDecl *VD = dyn_cast<VarDecl>(Val: D); |
| 21149 | if (VD && VD->isUsableInConstantExpressions(C: Ctx) && |
| 21150 | !VD->getType()->isReferenceType()) |
| 21151 | return NoDiag(); |
| 21152 | |
| 21153 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21154 | } |
| 21155 | case Expr::UnaryOperatorClass: { |
| 21156 | const UnaryOperator *Exp = cast<UnaryOperator>(Val: E); |
| 21157 | switch (Exp->getOpcode()) { |
| 21158 | case UO_PostInc: |
| 21159 | case UO_PostDec: |
| 21160 | case UO_PreInc: |
| 21161 | case UO_PreDec: |
| 21162 | case UO_AddrOf: |
| 21163 | case UO_Deref: |
| 21164 | case UO_Coawait: |
| 21165 | // C99 6.6/3 allows increment and decrement within unevaluated |
| 21166 | // subexpressions of constant expressions, but they can never be ICEs |
| 21167 | // because an ICE cannot contain an lvalue operand. |
| 21168 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21169 | case UO_Extension: |
| 21170 | case UO_LNot: |
| 21171 | case UO_Plus: |
| 21172 | case UO_Minus: |
| 21173 | case UO_Not: |
| 21174 | case UO_Real: |
| 21175 | case UO_Imag: |
| 21176 | return CheckICE(E: Exp->getSubExpr(), Ctx); |
| 21177 | } |
| 21178 | llvm_unreachable("invalid unary operator class" ); |
| 21179 | } |
| 21180 | case Expr::OffsetOfExprClass: { |
| 21181 | // Note that per C99, offsetof must be an ICE. And AFAIK, using |
| 21182 | // EvaluateAsRValue matches the proposed gcc behavior for cases like |
| 21183 | // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect |
| 21184 | // compliance: we should warn earlier for offsetof expressions with |
| 21185 | // array subscripts that aren't ICEs, and if the array subscripts |
| 21186 | // are ICEs, the value of the offsetof must be an integer constant. |
| 21187 | return CheckEvalInICE(E, Ctx); |
| 21188 | } |
| 21189 | case Expr::UnaryExprOrTypeTraitExprClass: { |
| 21190 | const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(Val: E); |
| 21191 | if ((Exp->getKind() == UETT_SizeOf) && |
| 21192 | Exp->getTypeOfArgument()->isVariableArrayType()) |
| 21193 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21194 | if (Exp->getKind() == UETT_CountOf) { |
| 21195 | QualType ArgTy = Exp->getTypeOfArgument(); |
| 21196 | if (ArgTy->isVariableArrayType()) { |
| 21197 | // We need to check whether the array is multidimensional. If it is,
| 21198 | // then we want to check the size expression manually to see whether |
| 21199 | // it is an ICE or not. |
| 21200 | const auto *VAT = Ctx.getAsVariableArrayType(T: ArgTy); |
| 21201 | if (VAT->getElementType()->isArrayType()) |
| 21202 | // Variable array size expression could be missing (e.g. int a[*][10]) |
| 21203 | // In that case, it can't be a constant expression. |
| 21204 | return VAT->getSizeExpr() ? CheckICE(E: VAT->getSizeExpr(), Ctx) |
| 21205 | : ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21206 | |
| 21207 | // Otherwise, this is a regular VLA, which is definitely not an ICE. |
| 21208 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21209 | } |
| 21210 | } |
| 21211 | return NoDiag(); |
| 21212 | } |
| 21213 | case Expr::BinaryOperatorClass: { |
| 21214 | const BinaryOperator *Exp = cast<BinaryOperator>(Val: E); |
| 21215 | switch (Exp->getOpcode()) { |
| 21216 | case BO_PtrMemD: |
| 21217 | case BO_PtrMemI: |
| 21218 | case BO_Assign: |
| 21219 | case BO_MulAssign: |
| 21220 | case BO_DivAssign: |
| 21221 | case BO_RemAssign: |
| 21222 | case BO_AddAssign: |
| 21223 | case BO_SubAssign: |
| 21224 | case BO_ShlAssign: |
| 21225 | case BO_ShrAssign: |
| 21226 | case BO_AndAssign: |
| 21227 | case BO_XorAssign: |
| 21228 | case BO_OrAssign: |
| 21229 | // C99 6.6/3 allows assignments within unevaluated subexpressions of |
| 21230 | // constant expressions, but they can never be ICEs because an ICE cannot |
| 21231 | // contain an lvalue operand. |
| 21232 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21233 | |
| 21234 | case BO_Mul: |
| 21235 | case BO_Div: |
| 21236 | case BO_Rem: |
| 21237 | case BO_Add: |
| 21238 | case BO_Sub: |
| 21239 | case BO_Shl: |
| 21240 | case BO_Shr: |
| 21241 | case BO_LT: |
| 21242 | case BO_GT: |
| 21243 | case BO_LE: |
| 21244 | case BO_GE: |
| 21245 | case BO_EQ: |
| 21246 | case BO_NE: |
| 21247 | case BO_And: |
| 21248 | case BO_Xor: |
| 21249 | case BO_Or: |
| 21250 | case BO_Comma: |
| 21251 | case BO_Cmp: { |
| 21252 | ICEDiag LHSResult = CheckICE(E: Exp->getLHS(), Ctx); |
| 21253 | ICEDiag RHSResult = CheckICE(E: Exp->getRHS(), Ctx); |
| 21254 | if (Exp->getOpcode() == BO_Div || |
| 21255 | Exp->getOpcode() == BO_Rem) { |
| 21256 | // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure |
| 21257 | // we don't evaluate one. |
| 21258 | if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) { |
| 21259 | llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx); |
| 21260 | if (REval == 0) |
| 21261 | return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc()); |
| 21262 | if (REval.isSigned() && REval.isAllOnes()) { |
| 21263 | llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx); |
| 21264 | if (LEval.isMinSignedValue()) |
| 21265 | return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc()); |
| 21266 | } |
| 21267 | } |
| 21268 | } |
| 21269 | if (Exp->getOpcode() == BO_Comma) { |
| 21270 | if (Ctx.getLangOpts().C99) { |
| 21271 | // C99 6.6p3 introduces a strange edge case: comma can be in an ICE |
| 21272 | // if it isn't evaluated. |
| 21273 | if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) |
| 21274 | return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc()); |
| 21275 | } else { |
| 21276 | // In both C89 and C++, commas in ICEs are illegal. |
| 21277 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21278 | } |
| 21279 | } |
| 21280 | return Worst(A: LHSResult, B: RHSResult); |
| 21281 | } |
| 21282 | case BO_LAnd: |
| 21283 | case BO_LOr: { |
| 21284 | ICEDiag LHSResult = CheckICE(E: Exp->getLHS(), Ctx); |
| 21285 | ICEDiag RHSResult = CheckICE(E: Exp->getRHS(), Ctx); |
| 21286 | if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICEIfUnevaluated) { |
| 21287 | // Rare case where the RHS has a comma "side-effect"; we need |
| 21288 | // to actually check the condition to see whether the side |
| 21289 | // with the comma is evaluated. |
| 21290 | if ((Exp->getOpcode() == BO_LAnd) != |
| 21291 | (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0)) |
| 21292 | return RHSResult; |
| 21293 | return NoDiag(); |
| 21294 | } |
| 21295 | |
| 21296 | return Worst(A: LHSResult, B: RHSResult); |
| 21297 | } |
| 21298 | } |
| 21299 | llvm_unreachable("invalid binary operator kind" ); |
| 21300 | } |
| 21301 | case Expr::ImplicitCastExprClass: |
| 21302 | case Expr::CStyleCastExprClass: |
| 21303 | case Expr::CXXFunctionalCastExprClass: |
| 21304 | case Expr::CXXStaticCastExprClass: |
| 21305 | case Expr::CXXReinterpretCastExprClass: |
| 21306 | case Expr::CXXConstCastExprClass: |
| 21307 | case Expr::ObjCBridgedCastExprClass: { |
| 21308 | const Expr *SubExpr = cast<CastExpr>(Val: E)->getSubExpr(); |
| 21309 | if (isa<ExplicitCastExpr>(Val: E)) { |
| 21310 | if (const FloatingLiteral *FL |
| 21311 | = dyn_cast<FloatingLiteral>(Val: SubExpr->IgnoreParenImpCasts())) { |
| 21312 | unsigned DestWidth = Ctx.getIntWidth(T: E->getType()); |
| 21313 | bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType(); |
| 21314 | APSInt IgnoredVal(DestWidth, !DestSigned); |
| 21315 | bool Ignored; |
| 21316 | // If the value does not fit in the destination type, the behavior is |
| 21317 | // undefined, so we are not required to treat it as a constant |
| 21318 | // expression. |
| 21319 | if (FL->getValue().convertToInteger(Result&: IgnoredVal, |
| 21320 | RM: llvm::APFloat::rmTowardZero, |
| 21321 | IsExact: &Ignored) & APFloat::opInvalidOp) |
| 21322 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21323 | return NoDiag(); |
| 21324 | } |
| 21325 | } |
| 21326 | switch (cast<CastExpr>(Val: E)->getCastKind()) { |
| 21327 | case CK_LValueToRValue: |
| 21328 | case CK_AtomicToNonAtomic: |
| 21329 | case CK_NonAtomicToAtomic: |
| 21330 | case CK_NoOp: |
| 21331 | case CK_IntegralToBoolean: |
| 21332 | case CK_IntegralCast: |
| 21333 | return CheckICE(E: SubExpr, Ctx); |
| 21334 | default: |
| 21335 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21336 | } |
| 21337 | } |
| 21338 | case Expr::BinaryConditionalOperatorClass: { |
| 21339 | const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(Val: E); |
| 21340 | ICEDiag CommonResult = CheckICE(E: Exp->getCommon(), Ctx); |
| 21341 | if (CommonResult.Kind == IK_NotICE) return CommonResult; |
| 21342 | ICEDiag FalseResult = CheckICE(E: Exp->getFalseExpr(), Ctx); |
| 21343 | if (FalseResult.Kind == IK_NotICE) return FalseResult; |
| 21344 | if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult; |
| 21345 | if (FalseResult.Kind == IK_ICEIfUnevaluated && |
| 21346 | Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag(); |
| 21347 | return FalseResult; |
| 21348 | } |
| 21349 | case Expr::ConditionalOperatorClass: { |
| 21350 | const ConditionalOperator *Exp = cast<ConditionalOperator>(Val: E); |
| 21351 | // If the condition (ignoring parens) is a __builtin_constant_p call, |
| 21352 | // then only the true side is actually considered in an integer constant |
| 21353 | // expression, and it is fully evaluated. This is an important GNU |
| 21354 | // extension. See GCC PR38377 for discussion. |
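|  | // For example, '__builtin_constant_p(e) ? e : f()' can be accepted as an ICE
|  | // when 'e' folds to an integer constant, even though the call in the false
|  | // arm would normally disqualify the expression.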
| 21355 | if (const CallExpr *CallCE |
| 21356 | = dyn_cast<CallExpr>(Val: Exp->getCond()->IgnoreParenCasts())) |
| 21357 | if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p) |
| 21358 | return CheckEvalInICE(E, Ctx); |
| 21359 | ICEDiag CondResult = CheckICE(E: Exp->getCond(), Ctx); |
| 21360 | if (CondResult.Kind == IK_NotICE) |
| 21361 | return CondResult; |
| 21362 | |
| 21363 | ICEDiag TrueResult = CheckICE(E: Exp->getTrueExpr(), Ctx); |
| 21364 | ICEDiag FalseResult = CheckICE(E: Exp->getFalseExpr(), Ctx); |
| 21365 | |
| 21366 | if (TrueResult.Kind == IK_NotICE) |
| 21367 | return TrueResult; |
| 21368 | if (FalseResult.Kind == IK_NotICE) |
| 21369 | return FalseResult; |
| 21370 | if (CondResult.Kind == IK_ICEIfUnevaluated) |
| 21371 | return CondResult; |
| 21372 | if (TrueResult.Kind == IK_ICE && FalseResult.Kind == IK_ICE) |
| 21373 | return NoDiag(); |
| 21374 | // Rare case where the diagnostics depend on which side is evaluated.
| 21375 | // Note that if we get here, CondResult is IK_ICE, and at least one of
| 21376 | // TrueResult and FalseResult is IK_ICEIfUnevaluated.
| 21377 | if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0) |
| 21378 | return FalseResult; |
| 21379 | return TrueResult; |
| 21380 | } |
| 21381 | case Expr::CXXDefaultArgExprClass: |
| 21382 | return CheckICE(E: cast<CXXDefaultArgExpr>(Val: E)->getExpr(), Ctx); |
| 21383 | case Expr::CXXDefaultInitExprClass: |
| 21384 | return CheckICE(E: cast<CXXDefaultInitExpr>(Val: E)->getExpr(), Ctx); |
| 21385 | case Expr::ChooseExprClass: { |
| 21386 | return CheckICE(E: cast<ChooseExpr>(Val: E)->getChosenSubExpr(), Ctx); |
| 21387 | } |
| 21388 | case Expr::BuiltinBitCastExprClass: { |
| 21389 | if (!checkBitCastConstexprEligibility(Info: nullptr, Ctx, BCE: cast<CastExpr>(Val: E))) |
| 21390 | return ICEDiag(IK_NotICE, E->getBeginLoc()); |
| 21391 | return CheckICE(E: cast<CastExpr>(Val: E)->getSubExpr(), Ctx); |
| 21392 | } |
| 21393 | } |
| 21394 | |
| 21395 | llvm_unreachable("Invalid StmtClass!" ); |
| 21396 | } |
| 21397 | |
| 21398 | /// Evaluate an expression as a C++11 integral constant expression. |
| 21399 | static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx, |
| 21400 | const Expr *E, |
| 21401 | llvm::APSInt *Value) { |
| 21402 | if (!E->getType()->isIntegralOrUnscopedEnumerationType()) |
| 21403 | return false; |
| 21404 | |
| 21405 | APValue Result; |
| 21406 | if (!E->isCXX11ConstantExpr(Ctx, Result: &Result)) |
| 21407 | return false; |
| 21408 | |
| 21409 | if (!Result.isInt()) |
| 21410 | return false; |
| 21411 | |
| 21412 | if (Value) *Value = Result.getInt(); |
| 21413 | return true; |
| 21414 | } |
| 21415 | |
| 21416 | bool Expr::isIntegerConstantExpr(const ASTContext &Ctx) const { |
| 21417 | assert(!isValueDependent() && |
| 21418 | "Expression evaluator can't be called on a dependent expression." ); |
| 21419 | |
| 21420 | ExprTimeTraceScope TimeScope(this, Ctx, "isIntegerConstantExpr" ); |
| 21421 | |
| 21422 | if (Ctx.getLangOpts().CPlusPlus11) |
| 21423 | return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, E: this, Value: nullptr); |
| 21424 | |
| 21425 | ICEDiag D = CheckICE(E: this, Ctx); |
| 21426 | if (D.Kind != IK_ICE) |
| 21427 | return false; |
| 21428 | return true; |
| 21429 | } |
| 21430 | |
| 21431 | std::optional<llvm::APSInt> |
| 21432 | Expr::getIntegerConstantExpr(const ASTContext &Ctx) const { |
| 21433 | if (isValueDependent()) { |
| 21434 | // Expression evaluator can't succeed on a dependent expression. |
| 21435 | return std::nullopt; |
| 21436 | } |
| 21437 | |
| 21438 | if (Ctx.getLangOpts().CPlusPlus11) { |
| 21439 | APSInt Value; |
| 21440 | if (EvaluateCPlusPlus11IntegralConstantExpr(Ctx, E: this, Value: &Value)) |
| 21441 | return Value; |
| 21442 | return std::nullopt; |
| 21443 | } |
| 21444 | |
| 21445 | if (!isIntegerConstantExpr(Ctx)) |
| 21446 | return std::nullopt; |
| 21447 | |
| 21448 | // The only possible side-effects here are due to UB discovered in the |
| 21449 | // evaluation (for instance, INT_MAX + 1). In such a case, we are still |
| 21450 | // required to treat the expression as an ICE, so we produce the folded |
| 21451 | // value. |
| 21452 | EvalResult ExprResult; |
| 21453 | Expr::EvalStatus Status; |
| 21454 | EvalInfo Info(Ctx, Status, EvaluationMode::IgnoreSideEffects); |
| 21455 | Info.InConstantContext = true; |
| 21456 | |
| 21457 | if (!::EvaluateAsInt(E: this, ExprResult, Ctx, AllowSideEffects: SE_AllowSideEffects, Info)) |
| 21458 | llvm_unreachable("ICE cannot be evaluated!" ); |
| 21459 | |
| 21460 | return ExprResult.Val.getInt(); |
| 21461 | } |
| 21462 | |
| 21463 | bool Expr::isCXX98IntegralConstantExpr(const ASTContext &Ctx) const { |
| 21464 | assert(!isValueDependent() && |
| 21465 | "Expression evaluator can't be called on a dependent expression." ); |
| 21466 | |
| 21467 | return CheckICE(E: this, Ctx).Kind == IK_ICE; |
| 21468 | } |
| 21469 | |
| 21470 | bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result) const { |
| 21471 | assert(!isValueDependent() && |
| 21472 | "Expression evaluator can't be called on a dependent expression." ); |
| 21473 | |
| 21474 | // We support this checking in C++98 mode in order to diagnose compatibility |
| 21475 | // issues. |
| 21476 | assert(Ctx.getLangOpts().CPlusPlus); |
| 21477 | |
| 21478 | bool IsConst; |
| 21479 | APValue Scratch; |
| 21480 | if (FastEvaluateAsRValue(Exp: this, Result&: Scratch, Ctx, IsConst) && Scratch.hasValue()) { |
| 21481 | if (Result) |
| 21482 | *Result = Scratch; |
| 21483 | return true; |
| 21484 | } |
| 21485 | |
| 21486 | // Build evaluation settings. |
| 21487 | Expr::EvalStatus Status; |
| 21488 | SmallVector<PartialDiagnosticAt, 8> Diags; |
| 21489 | Status.Diag = &Diags; |
| 21490 | EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression); |
| 21491 | |
| 21492 | bool IsConstExpr = |
| 21493 | ::EvaluateAsRValue(Info, E: this, Result&: Result ? *Result : Scratch) && |
| 21494 | // FIXME: We don't produce a diagnostic for this, but the callers that |
| 21495 | // call us on arbitrary full-expressions should generally not care. |
| 21496 | Info.discardCleanups() && !Status.HasSideEffects; |
| 21497 | |
| 21498 | return IsConstExpr && Diags.empty(); |
| 21499 | } |
| 21500 | |
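|  | // Evaluate this expression as if it appeared in a call to Callee, with the
|  | // given argument expressions bound to Callee's parameters (and This, if
|  | // provided, bound to the implicit object parameter). This is used, for
|  | // example, when evaluating enable_if conditions attached to Callee.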
| 21501 | bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx, |
| 21502 | const FunctionDecl *Callee, |
| 21503 | ArrayRef<const Expr*> Args, |
| 21504 | const Expr *This) const { |
| 21505 | assert(!isValueDependent() && |
| 21506 | "Expression evaluator can't be called on a dependent expression." ); |
| 21507 | |
| 21508 | llvm::TimeTraceScope TimeScope("EvaluateWithSubstitution" , [&] { |
| 21509 | std::string Name; |
| 21510 | llvm::raw_string_ostream OS(Name); |
| 21511 | Callee->getNameForDiagnostic(OS, Policy: Ctx.getPrintingPolicy(), |
| 21512 | /*Qualified=*/true); |
| 21513 | return Name; |
| 21514 | }); |
| 21515 | |
| 21516 | Expr::EvalStatus Status; |
| 21517 | EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpressionUnevaluated); |
| 21518 | Info.InConstantContext = true; |
| 21519 | |
| 21520 | LValue ThisVal; |
| 21521 | const LValue *ThisPtr = nullptr; |
| 21522 | if (This) { |
| 21523 | #ifndef NDEBUG |
| 21524 | auto *MD = dyn_cast<CXXMethodDecl>(Callee); |
| 21525 | assert(MD && "Don't provide `this` for non-methods." ); |
| 21526 | assert(MD->isImplicitObjectMemberFunction() && |
| 21527 | "Don't provide `this` for methods without an implicit object." ); |
| 21528 | #endif |
| 21529 | if (!This->isValueDependent() && |
| 21530 | EvaluateObjectArgument(Info, Object: This, This&: ThisVal) && |
| 21531 | !Info.EvalStatus.HasSideEffects) |
| 21532 | ThisPtr = &ThisVal; |
| 21533 | |
| 21534 | // Ignore any side-effects from a failed evaluation. This is safe because |
| 21535 | // they can't interfere with any other argument evaluation. |
| 21536 | Info.EvalStatus.HasSideEffects = false; |
| 21537 | } |
| 21538 | |
| 21539 | CallRef Call = Info.CurrentCall->createCall(Callee); |
| 21540 | for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end(); |
| 21541 | I != E; ++I) { |
| 21542 | unsigned Idx = I - Args.begin(); |
| 21543 | if (Idx >= Callee->getNumParams()) |
| 21544 | break; |
| 21545 | const ParmVarDecl *PVD = Callee->getParamDecl(i: Idx); |
| 21546 | if ((*I)->isValueDependent() || |
| 21547 | !EvaluateCallArg(PVD, Arg: *I, Call, Info) || |
| 21548 | Info.EvalStatus.HasSideEffects) { |
| 21549 | // If evaluation fails, throw away the argument entirely. |
| 21550 | if (APValue *Slot = Info.getParamSlot(Call, PVD)) |
| 21551 | *Slot = APValue(); |
| 21552 | } |
| 21553 | |
| 21554 | // Ignore any side-effects from a failed evaluation. This is safe because |
| 21555 | // they can't interfere with any other argument evaluation. |
| 21556 | Info.EvalStatus.HasSideEffects = false; |
| 21557 | } |
| 21558 | |
| 21559 | // Parameter cleanups happen in the caller and are not part of this |
| 21560 | // evaluation. |
| 21561 | Info.discardCleanups(); |
| 21562 | Info.EvalStatus.HasSideEffects = false; |
| 21563 | |
| 21564 | // Build fake call to Callee. |
| 21565 | CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, This, |
| 21566 | Call); |
| 21567 | // FIXME: Missing ExprWithCleanups in enable_if conditions? |
| 21568 | FullExpressionRAII Scope(Info); |
| 21569 | return Evaluate(Result&: Value, Info, E: this) && Scope.destroy() && |
| 21570 | !Info.EvalStatus.HasSideEffects; |
| 21571 | } |
| 21572 | |
| 21573 | bool Expr::isPotentialConstantExpr(const FunctionDecl *FD, |
| 21574 | SmallVectorImpl< |
| 21575 | PartialDiagnosticAt> &Diags) { |
| 21576 | // FIXME: It would be useful to check constexpr function templates, but at the |
| 21577 | // moment the constant expression evaluator cannot cope with the non-rigorous |
| 21578 | // ASTs which we build for dependent expressions. |
| 21579 | if (FD->isDependentContext()) |
| 21580 | return true; |
| 21581 | |
| 21582 | llvm::TimeTraceScope TimeScope("isPotentialConstantExpr" , [&] { |
| 21583 | std::string Name; |
| 21584 | llvm::raw_string_ostream OS(Name); |
| 21585 | FD->getNameForDiagnostic(OS, Policy: FD->getASTContext().getPrintingPolicy(), |
| 21586 | /*Qualified=*/true); |
| 21587 | return Name; |
| 21588 | }); |
| 21589 | |
| 21590 | Expr::EvalStatus Status; |
| 21591 | Status.Diag = &Diags; |
| 21592 | |
| 21593 | EvalInfo Info(FD->getASTContext(), Status, |
| 21594 | EvaluationMode::ConstantExpression); |
| 21595 | Info.InConstantContext = true; |
| 21596 | Info.CheckingPotentialConstantExpression = true; |
| 21597 | |
| 21598 | // The constexpr VM attempts to compile all methods to bytecode here. |
| 21599 | if (Info.EnableNewConstInterp) { |
| 21600 | Info.Ctx.getInterpContext().isPotentialConstantExpr(Parent&: Info, FD); |
| 21601 | return Diags.empty(); |
| 21602 | } |
| 21603 | |
| 21604 | const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Val: FD); |
| 21605 | const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr; |
| 21606 | |
| 21607 | // Fabricate an arbitrary expression on the stack and pretend that it |
| 21608 | // is a temporary being used as the 'this' pointer. |
| 21609 | LValue This; |
| 21610 | ImplicitValueInitExpr VIE(RD ? Info.Ctx.getCanonicalTagType(TD: RD) |
| 21611 | : Info.Ctx.IntTy); |
| 21612 | This.set(B: {&VIE, Info.CurrentCall->Index}); |
| 21613 | |
| 21614 | ArrayRef<const Expr*> Args; |
| 21615 | |
| 21616 | APValue Scratch; |
| 21617 | if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Val: FD)) { |
| 21618 | // Evaluate the call as a constant initializer, to allow the construction |
| 21619 | // of objects of non-literal types. |
| 21620 | Info.setEvaluatingDecl(Base: This.getLValueBase(), Value&: Scratch); |
| 21621 | HandleConstructorCall(E: &VIE, This, Args, Definition: CD, Info, Result&: Scratch); |
| 21622 | } else { |
| 21623 | SourceLocation Loc = FD->getLocation(); |
| 21624 | HandleFunctionCall( |
| 21625 | CallLoc: Loc, Callee: FD, ObjectArg: (MD && MD->isImplicitObjectMemberFunction()) ? &This : nullptr, |
| 21626 | E: &VIE, Args, Call: CallRef(), Body: FD->getBody(), Info, Result&: Scratch, |
| 21627 | /*ResultSlot=*/nullptr); |
| 21628 | } |
| 21629 | |
| 21630 | return Diags.empty(); |
| 21631 | } |
| 21632 | |
bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
                                              const FunctionDecl *FD,
                                              SmallVectorImpl<
                                                  PartialDiagnosticAt> &Diags) {
  assert(!E->isValueDependent() &&
         "Expression evaluator can't be called on a dependent expression.");

  Expr::EvalStatus Status;
  Status.Diag = &Diags;

  EvalInfo Info(FD->getASTContext(), Status,
                EvaluationMode::ConstantExpressionUnevaluated);
  Info.InConstantContext = true;
  Info.CheckingPotentialConstantExpression = true;

  if (Info.EnableNewConstInterp) {
    Info.Ctx.getInterpContext().isPotentialConstantExprUnevaluated(Info, E, FD);
    return Diags.empty();
  }

  // Fabricate a call stack frame to give the arguments a plausible cover story.
  CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr,
                       /*CallExpr=*/nullptr, CallRef());

  APValue ResultScratch;
  Evaluate(ResultScratch, Info, E);
  return Diags.empty();
}

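// Try to fold __builtin_object_size for this pointer-typed expression. 'Type'
// is the builtin's "type" argument selecting the evaluation mode; on success
// the computed size in bytes is stored in Result.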
bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
                                 unsigned Type) const {
  if (!getType()->isPointerType())
    return false;

  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
  if (Info.EnableNewConstInterp) {
    return Info.Ctx.getInterpContext().tryEvaluateObjectSize(Info, this, Type,
                                                             Result);
  }
  return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
}

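// Compute the length of the nul-terminated string that E points to, as for
// __builtin_strlen. If StringResult is non-null, the string's characters
// (excluding the terminator) are also copied into it.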
static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
                                  EvalInfo &Info, std::string *StringResult) {
  if (!E->getType()->hasPointerRepresentation() || !E->isPRValue())
    return false;

  LValue String;

  if (!EvaluatePointer(E, String, Info))
    return false;

  QualType CharTy = E->getType()->getPointeeType();

  // Fast path: if it's a string literal, search the string value.
  if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
          String.getLValueBase().dyn_cast<const Expr *>())) {
    StringRef Str = S->getBytes();
    int64_t Off = String.Offset.getQuantity();
    if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
        S->getCharByteWidth() == 1 &&
        // FIXME: Add fast-path for wchar_t too.
        Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) {
      Str = Str.substr(Off);

      StringRef::size_type Pos = Str.find(0);
      if (Pos != StringRef::npos)
        Str = Str.substr(0, Pos);

      Result = Str.size();
      if (StringResult)
        *StringResult = Str;
      return true;
    }

    // Fall through to slow path.
  }

  // Slow path: scan the bytes of the string looking for the terminating 0.
  for (uint64_t Strlen = 0; /**/; ++Strlen) {
    APValue Char;
    if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
        !Char.isInt())
      return false;
    if (!Char.getInt()) {
      Result = Strlen;
      return true;
    } else if (StringResult)
      StringResult->push_back(Char.getInt().getExtValue());
    if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
      return false;
  }
}

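// Try to evaluate this expression as a nul-terminated string, returning its
// contents, or std::nullopt if it cannot be constant-folded.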
std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const {
  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);
  uint64_t Result;
  std::string StringResult;

  if (Info.EnableNewConstInterp) {
    if (!Info.Ctx.getInterpContext().evaluateString(Info, this, StringResult))
      return std::nullopt;
    return StringResult;
  }

  if (EvaluateBuiltinStrLen(this, Result, Info, &StringResult))
    return StringResult;
  return std::nullopt;
}

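// Shared implementation for the EvaluateCharRangeAsString overloads below:
// evaluate SizeExpression to get a character count, then read that many
// characters starting at PtrExpression into Result, which is either a
// std::string or an APValue array depending on T.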
template <typename T>
static bool EvaluateCharRangeAsStringImpl(const Expr *, T &Result,
                                          const Expr *SizeExpression,
                                          const Expr *PtrExpression,
                                          ASTContext &Ctx,
                                          Expr::EvalResult &Status) {
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
  Info.InConstantContext = true;

  if (Info.EnableNewConstInterp)
    return Info.Ctx.getInterpContext().evaluateCharRange(Info, SizeExpression,
                                                         PtrExpression, Result);

  LValue String;
  FullExpressionRAII Scope(Info);
  APSInt SizeValue;
  if (!::EvaluateInteger(SizeExpression, SizeValue, Info))
    return false;

  uint64_t Size = SizeValue.getZExtValue();

  // FIXME: better protect against invalid or excessive sizes
  if constexpr (std::is_same_v<APValue, T>)
    Result = APValue(APValue::UninitArray{}, Size, Size);
  else {
    if (Size < Result.max_size())
      Result.reserve(Size);
  }
  if (!::EvaluatePointer(PtrExpression, String, Info))
    return false;

  QualType CharTy = PtrExpression->getType()->getPointeeType();
  for (uint64_t I = 0; I < Size; ++I) {
    APValue Char;
    if (!handleLValueToRValueConversion(Info, PtrExpression, CharTy, String,
                                        Char))
      return false;

    if constexpr (std::is_same_v<APValue, T>) {
      Result.getArrayInitializedElt(I) = std::move(Char);
    } else {
      APSInt C = Char.getInt();

      assert(C.getBitWidth() <= 8 &&
             "string element not representable in char");

      Result.push_back(static_cast<char>(C.getExtValue()));
    }

    if (!HandleLValueArrayAdjustment(Info, PtrExpression, String, CharTy, 1))
      return false;
  }

  return Scope.destroy() && CheckMemoryLeaks(Info);
}

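// Used, for example, to materialize a user-provided (size, data) character
// range, such as the message object of a static_assert declaration, into a
// host-side string.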
bool Expr::EvaluateCharRangeAsString(std::string &Result,
                                     const Expr *SizeExpression,
                                     const Expr *PtrExpression, ASTContext &Ctx,
                                     EvalResult &Status) const {
  return EvaluateCharRangeAsStringImpl(this, Result, SizeExpression,
                                       PtrExpression, Ctx, Status);
}

bool Expr::EvaluateCharRangeAsString(APValue &Result,
                                     const Expr *SizeExpression,
                                     const Expr *PtrExpression, ASTContext &Ctx,
                                     EvalResult &Status) const {
  return EvaluateCharRangeAsStringImpl(this, Result, SizeExpression,
                                       PtrExpression, Ctx, Status);
}

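// Try to constant-fold the length of the nul-terminated string this
// expression points to, as for __builtin_strlen.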
bool Expr::tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const {
  Expr::EvalStatus Status;
  EvalInfo Info(Ctx, Status, EvaluationMode::ConstantFold);

  if (Info.EnableNewConstInterp)
    return Info.Ctx.getInterpContext().evaluateStrlen(Info, this, Result);

  return EvaluateBuiltinStrLen(this, Result, Info);
}

namespace {
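// Subobject handler for __builtin_is_within_lifetime: successfully reaching
// any subobject means the designated object is within its lifetime, so found()
// always reports true, while failed() propagates failure as std::nullopt.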
struct IsWithinLifetimeHandler {
  EvalInfo &Info;
  static constexpr AccessKinds AccessKind = AccessKinds::AK_IsWithinLifetime;
  using result_type = std::optional<bool>;
  std::optional<bool> failed() { return std::nullopt; }
  template <typename T>
  std::optional<bool> found(T &Subobj, QualType SubobjType) {
    return true;
  }
};

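// Evaluate a call to __builtin_is_within_lifetime (C++2c [meta.const.eval]):
// given a pointer constant expression, report whether the pointed-to object is
// within its lifetime. Returns std::nullopt if the call is not a valid
// constant expression.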
std::optional<bool> EvaluateBuiltinIsWithinLifetime(IntExprEvaluator &IEE,
                                                    const CallExpr *E) {
  EvalInfo &Info = IEE.Info;
  // Sometimes this is called during some sorts of constant folding / early
  // evaluation. These are meant for non-constant expressions and are not
  // necessary since this consteval builtin will never be evaluated at runtime.
  // Just fail to evaluate when not in a constant context.
  if (!Info.InConstantContext)
    return std::nullopt;
  assert(E->getBuiltinCallee() == Builtin::BI__builtin_is_within_lifetime);
  const Expr *Arg = E->getArg(0);
  if (Arg->isValueDependent())
    return std::nullopt;
  LValue Val;
  if (!EvaluatePointer(Arg, Val, Info))
    return std::nullopt;

  if (Val.allowConstexprUnknown())
    return true;

  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = Info.CurrentCall->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
    }
    Info.CCEDiag(CalledFromStd ? Info.CurrentCall->getCallRange().getBegin()
                               : E->getExprLoc(),
                 diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return std::nullopt;
  };
  // C++2c [meta.const.eval]p4:
  // During the evaluation of an expression E as a core constant expression, a
  // call to this function is ill-formed unless p points to an object that is
  // usable in constant expressions or whose complete object's lifetime began
  // within E.

  // Make sure the pointer actually points to an object: a null pointer does
  // not point to an object.
  if (Val.isNullPointer() || Val.getLValueBase().isNull())
    return Error(0);
  QualType T = Val.getLValueBase().getType();
  assert(!T->isFunctionType() &&
         "Pointers to functions should have been typed as function pointers "
         "which would have been rejected earlier");
  assert(T->isObjectType());
  // A one-past-the-end pointer refers to a hypothetical array element, which
  // is not an object.
  if (Val.getLValueDesignator().isOnePastTheEnd())
    return Error(1);
  assert(Val.getLValueDesignator().isValidSubobject() &&
         "Unchecked case for valid subobject");
  // All other ill-formed values should have failed EvaluatePointer, so the
  // object should be a pointer to an object that is usable in a constant
  // expression or whose complete lifetime began within the expression
  CompleteObject CO =
      findCompleteObject(Info, E, AccessKinds::AK_IsWithinLifetime, Val, T);
  // The lifetime hasn't begun yet if we are still evaluating the
  // initializer ([basic.life]p(1.2))
  if (Info.EvaluatingDeclValue && CO.Value == Info.EvaluatingDeclValue)
    return Error(2);

  if (!CO)
    return false;
  IsWithinLifetimeHandler handler{Info};
  return findSubobject(Info, E, CO, Val.getLValueDesignator(), handler);
}
} // namespace
