//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpBuiltinBitCast.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
  switch (ID) {
  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
  case Builtin::BI__builtin_launder:
    return true;
  default:
    return false;
  }
  return false;
}

static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}

static APSInt popToAPSInt(InterpStack &Stk, PrimType T) {
  INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
}

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);

  if (T == PT_IntAPS) {
    auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
    Result.copy(Val);
    S.Stk.push<IntegralAP<true>>(Result);
    return;
  }

  if (T == PT_IntAP) {
    auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
    Result.copy(Val);
    S.Stk.push<IntegralAP<false>>(Result);
    return;
  }

  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
                          const APSInt &Value) {
  if (ValueT == PT_IntAPS) {
    Dest.deref<IntegralAP<true>>() =
        S.allocAP<IntegralAP<true>>(Value.getBitWidth());
    Dest.deref<IntegralAP<true>>().copy(Value);
  } else if (ValueT == PT_IntAP) {
    Dest.deref<IntegralAP<false>>() =
        S.allocAP<IntegralAP<false>>(Value.getBitWidth());
    Dest.deref<IntegralAP<false>>().copy(Value);
  } else {
    INT_TYPE_SWITCH_NO_BOOL(
        ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
  }
}

static QualType getElemType(const Pointer &P) {
  const Descriptor *Desc = P.getFieldDesc();
  QualType T = Desc->getType();
  if (Desc->isPrimitive())
    return T;
  if (T->isPointerType())
    return T->getAs<PointerType>()->getPointeeType();
  if (Desc->isArray())
    return Desc->getElemQualType();
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return AT->getElementType();
  return T;
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  if (!S.diagnosing())
    return;

  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << S.getASTContext().BuiltinInfo.getQuotedName(ID);
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that is potentially the one for
  // std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
    if (Caller && isStdCall(Frame->getCallee())) {
      const Expr *E = Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      S.report(Call->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << Call->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}

// __builtin_assume(int)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
  return true;
}

static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  uint64_t Limit = ~static_cast<uint64_t>(0);
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
      ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
    Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
                .getZExtValue();

  const Pointer &B = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
      ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Limit == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
                ID == Builtin::BI__builtin_wcscmp ||
                ID == Builtin::BI__builtin_wcsncmp;
  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  assert(getElemType(A).getTypePtr() == getElemType(B).getTypePtr());
  PrimType ElemT = *S.getContext().classify(getElemType(A));

  auto returnResult = [&](int V) -> bool {
    pushInteger(S, V, Call->getType());
    return true;
  };

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {
    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }

    if (IsWide) {
      INT_TYPE_SWITCH(ElemT, {
        T CA = PA.deref<T>();
        T CB = PB.deref<T>();
        if (CA > CB)
          return returnResult(1);
        else if (CA < CB)
          return returnResult(-1);
        else if (CA.isZero() || CB.isZero())
          return returnResult(0);
      });
      continue;
    }

    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB)
      return returnResult(1);
    else if (CA < CB)
      return returnResult(-1);
    if (CA == 0 || CB == 0)
      return returnResult(0);
  }

  return returnResult(0);
}
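
// Illustrative semantics for the strcmp/strncmp handler above (editor's
// sketch, assuming the usual builtin behavior): the interpreter returns
// exactly -1, 0, or 1, and exhausting the strncmp/wcsncmp limit counts as
// equality:
//   static_assert(__builtin_strcmp("ab", "ac") == -1);
//   static_assert(__builtin_strncmp("ab", "ac", 1) == 0); // limit hit first
//   static_assert(__builtin_strncmp("ab", "ac", 0) == 0); // Limit == 0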

static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  const Pointer &StrPtr = S.Stk.pop<Pointer>();

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}
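
// Illustrative semantics for the strlen/wcslen handler above (editor's
// sketch): the length is counted in elements, not bytes, so wide and narrow
// strings of equal length agree regardless of sizeof(wchar_t):
//   static_assert(__builtin_strlen("hello") == 5);
//   static_assert(__builtin_wcslen(L"hi") == 2);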

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                bool Signaling) {
  const Pointer &Arg = S.Stk.pop<Pointer>();

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(TargetSemantics);
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result.copy(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result.copy(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result.copy(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result.copy(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}
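
// Illustrative payload parsing for the handler above (editor's sketch): the
// argument goes through StringRef::getAsInteger with radix 0, so "0x..." is
// hex, a leading "0" is octal, and anything else is decimal:
//   __builtin_nan("")    // quiet NaN with a zero payload
//   __builtin_nan("0x7") // quiet NaN with payload bits 0x7
//   __builtin_nan("z")   // not an integer: evaluation fails here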

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(TargetSemantics);
  Result.copy(APFloat::getInf(TargetSemantics));
  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(Arg1.getSemantics());

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  Result.copy(Copy);
  S.Stk.push<Floating>(Result);

  return true;
}

static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(LHS.getSemantics());

  if (IsNumBuiltin)
    Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(LHS.getSemantics());

  if (IsNumBuiltin)
    Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
  S.Stk.push<Floating>(Result);
  return true;
}

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, bool CheckSign,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, IsInf, Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call, unsigned ID) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult Cmp = LHS.compare(RHS);
          return Cmp == ComparisonCategoryResult::Less ||
                 Cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = popToAPSInt(S.Stk, FPClassArgT);
  const Floating &F = S.Stk.pop<Floating>();

  int32_t Result = static_cast<int32_t>(
      (F.classify() & std::move(FPClassArg)).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

/// Five int values followed by one floating value.
/// __builtin_fpclassify(int, int, int, int, int, float)
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.pop<Floating>();

  PrimType IntT = *S.getContext().classify(Call->getArg(0));
  APSInt Values[5];
  // The last argument is first on the stack, so fill Values back to front.
  for (unsigned I = 0; I != 5; ++I)
    Values[4 - I] = popToAPSInt(S.Stk, IntT);

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  assert(Index <= 4);

  pushInteger(S, Values[Index], Call->getType());
  return true;
}
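
// Illustrative mapping for the handler above (editor's sketch, assuming the
// <math.h> FP_* macros): the argument order is (nan, infinite, normal,
// subnormal, zero), matching the indices in the switch:
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, 1.0)  // yields FP_NORMAL (Index == 2)
//   __builtin_fpclassify(0, 1, 2, 3, 4, 0.0)  // yields 4 (fcZero)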

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating-point settings.
// Reference: WG14 N2478, F.10.4.3.
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame) {
  const Floating &Val = S.Stk.pop<Floating>();
  APFloat F = Val.getAPFloat();
  if (!F.isNegative()) {
    S.Stk.push<Floating>(Val);
    return true;
  }

  Floating Result = S.allocFloat(Val.getSemantics());
  F.changeSign();
  Result.copy(F);
  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  // abs() of the minimum signed value overflows, so it is not a constant
  // expression.
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  if (NumArgs == 3)
    S.Stk.discard<Floating>();
  discard(S.Stk, ArgT);

  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = popToAPSInt(S.Stk, AmountT);
  APSInt Value = popToAPSInt(S.Stk, ValueT);

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}
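
// Illustrative semantics for the rotate handler above (editor's sketch): the
// rotate amount is reduced modulo the bit width (the urem above), so:
//   static_assert(__builtin_rotateleft8(0x81, 1) == 0x03);
//   static_assert(__builtin_rotateright32(0x12345678u, 32) == 0x12345678u);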

static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = popToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}
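
// Illustrative semantics for the ffs handler above (editor's sketch): ffs
// returns the one-based index of the least significant set bit, or 0 when no
// bit is set:
//   static_assert(__builtin_ffs(0) == 0);
//   static_assert(__builtin_ffs(0x18) == 4); // lowest set bit is bit 3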

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
#ifndef NDEBUG
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
  assert(PtrT == PT_Ptr &&
         "Unsupported pointer type passed to __builtin_addressof()");
#endif
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const CallExpr *Call) {
  return Call->getDirectCallee()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = popToAPSInt(S.Stk, ArgT);

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut).
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call,
                                       unsigned BuiltinOp) {
  const Pointer &ResultPtr = S.Stk.pop<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    // Find the largest bit width involved.
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Perform the operation, tracking overflow.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = std::move(Temp);
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(S, ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}
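
// Illustrative semantics for the overflow handler above (editor's sketch,
// assuming the <climits> macros): the stored result is the wrapped value and
// the return value reports whether wrapping occurred:
//   constexpr bool f() {
//     int R = 0;
//     bool Ov = __builtin_add_overflow(INT_MAX, 1, &R);
//     return Ov && R == INT_MIN; // overflowed; result wrapped around
//   }
//   static_assert(f());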

/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned BuiltinOp) {
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt CarryIn = popToAPSInt(S.Stk, LHST);
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);

  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen, but CGBuiltin uses an OR, so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}
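
// Illustrative semantics for the carry-op handler above (editor's sketch,
// assuming 32-bit unsigned): a wrapping add with carry-in and carry-out,
// usable for multi-word arithmetic:
//   constexpr bool f() {
//     unsigned CarryOut = 0;
//     unsigned Sum = __builtin_addc(0xFFFFFFFFu, 0u, 1u, &CarryOut);
//     return Sum == 0u && CarryOut == 1u;
//   }
//   static_assert(f());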

static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                unsigned BuiltinOp) {
  std::optional<APSInt> Fallback;
  if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) {
    PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
    Fallback = popToAPSInt(S.Stk, FallbackT);
  }
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = popToAPSInt(S.Stk, ValT);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}
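
// Illustrative semantics for the clz handler above (editor's sketch,
// assuming 32-bit unsigned):
//   static_assert(__builtin_clz(1u) == 31);
//   static_assert(__builtin_clzg(0u, 32) == 32); // fallback used for zero
//   // __builtin_clz(0) is not a constant expression (undefined), while the
//   // Microsoft-style __lzcnt(0) yields the bit width, 32.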

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                unsigned BuiltinID) {
  std::optional<APSInt> Fallback;
  if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) {
    PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
    Fallback = popToAPSInt(S.Stk, FallbackT);
  }
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = popToAPSInt(S.Stk, ValT);

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}
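
// Illustrative semantics for the ctz handler above (editor's sketch):
//   static_assert(__builtin_ctz(8u) == 3);
//   static_assert(__builtin_ctzg(0u, 32) == 32); // fallback used for zero
//   // __builtin_ctz(0) without a fallback is not a constant expression.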

static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = popToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}
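
// Illustrative semantics for the bswap handler above (editor's sketch):
//   static_assert(__builtin_bswap16(0x1234) == 0x3412);
//   static_assert(__builtin_bswap32(0x12345678u) == 0x78563412u);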

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call,
                                             unsigned BuiltinOp) {
  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  // Otherwise, the answer is runtime-dependent, so we cannot constant-fold
  // __atomic_is_lock_free.
  return false;
}

/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call) {
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &SizeVal = popToAPSInt(S.Stk, ValT);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits))
      return returnBool(true);
  }

  // The answer is runtime-dependent; fail the constant evaluation.
  return false;
}

/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();
  Pointer &Result = S.Stk.peek<Pointer>();

  // Write the real part to element 0 and the imaginary part to element 1.
  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}
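
// Illustrative usage for the handler above (editor's sketch):
// __builtin_complex glues two floating values into a _Complex of matching
// element type:
//   constexpr _Complex double Z = __builtin_complex(1.0, 2.0);
//   static_assert(__real__(Z) == 1.0 && __imag__(Z) == 2.0);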

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const CallExpr *Call,
                                               unsigned BuiltinOp) {
  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
    APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less
    // than or equal to the base alignment and the offset is not aligned, we
    // know that the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater than or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // for `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment, so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
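
// Illustrative semantics for the integral case above (editor's sketch):
//   static_assert(__builtin_align_up(13, 8) == 16);
//   static_assert(__builtin_align_down(13, 8) == 8);
//   static_assert(__builtin_is_aligned(16, 8));
//   // __builtin_align_up(1, 3) is rejected: the alignment must be a
//   // power of 2.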

/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  std::optional<APSInt> ExtraOffset;
  if (Call->getNumArgs() == 3)
    ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));

  APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}

static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
  APSInt Index = popToAPSInt(S.Stk, IndexT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
  uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
  Length = Length > BitWidth ? BitWidth : Length;

  // Handle out-of-bounds cases.
  if (Length == 0 || Shift >= BitWidth) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  uint64_t Result = Val.getZExtValue() >> Shift;
  Result &= llvm::maskTrailingOnes<uint64_t>(Length);
  pushInteger(S, Result, Call->getType());
  return true;
}
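
// Illustrative semantics for the bextr handler above (editor's sketch,
// assuming the x86 BMI1 BEXTR encoding): the control word packs the start
// bit in bits [7:0] and the field length in bits [15:8]. Extracting 4 bits
// starting at bit 4:
//   __builtin_ia32_bextr_u32(0x12345678u, 0x0404) == 0x7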

static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !CallType->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));

  APSInt Idx = popToAPSInt(S.Stk, IndexT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);

  if (Index < BitWidth)
    Val.clearHighBits(BitWidth - Index);

  pushInteger(S, Val, CallType);
  return true;
}
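
// Illustrative semantics for the bzhi handler above (editor's sketch,
// assuming the x86 BMI2 BZHI encoding): all bits at positions >= the index
// taken from the low 8 bits of the second operand are zeroed:
//   __builtin_ia32_bzhi_si(0xFFFFFFFFu, 8) == 0xFFu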

static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countLeadingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countTrailingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Mask = popToAPSInt(S.Stk, MaskT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  // Deposit the low bits of Val into the positions of the set mask bits.
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(I, Val[P++]);
  }
  pushInteger(S, std::move(Result), Call->getType());
  return true;
}

static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Mask = popToAPSInt(S.Stk, MaskT);
  APSInt Val = popToAPSInt(S.Stk, ValT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  // Extract the bits of Val at the set mask positions into the low bits.
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(P++, Val[I]);
  }
  pushInteger(S, std::move(Result), Call->getType());
  return true;
}
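
// Illustrative worked example for the pdep/pext handlers above (editor's
// sketch), with mask 0b11100 selecting bit positions 2..4:
//   __builtin_ia32_pdep_si(0b101, 0b11100) == 0b10100  // deposit low bits
//   __builtin_ia32_pext_si(0b10100, 0b11100) == 0b101  // extract them back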

/// (CarryIn, LHS, RHS, Result)
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call,
                                                    unsigned BuiltinOp) {
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();

  PrimType CarryInT = *S.getContext().classify(Call->getArg(0));
  PrimType LHST = *S.getContext().classify(Call->getArg(1));
  PrimType RHST = *S.getContext().classify(Call->getArg(2));
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);
  APSInt CarryIn = popToAPSInt(S.Stk, CarryInT);

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  // The sum/difference is stored through the out-pointer (arg 3); the
  // carry/borrow flag is the return value.
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}
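
// Illustrative semantics for the handler above (editor's sketch, assuming
// 32-bit unsigned): an add with an incoming carry that overflows returns
// carry 1 and stores the wrapped sum through the out-pointer:
//   unsigned Out = 0;
//   __builtin_ia32_addcarryx_u32(1, 0xFFFFFFFFu, 0u, &Out); // returns 1
//   // Out == 0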
1434
1435static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
1436 CodePtr OpPC,
1437 const InterpFrame *Frame,
1438 const CallExpr *Call) {
1439 analyze_os_log::OSLogBufferLayout Layout;
1440 analyze_os_log::computeOSLogBufferLayout(Ctx&: S.getASTContext(), E: Call, layout&: Layout);
1441 pushInteger(S, Val: Layout.size().getQuantity(), QT: Call->getType());
1442 return true;
1443}
1444
1445static bool
1446interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
1447 const InterpFrame *Frame,
1448 const CallExpr *Call) {
1449 const auto &Ptr = S.Stk.pop<Pointer>();
1450 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1451
1452 // This should be created for a StringLiteral, so should alway shold at least
1453 // one array element.
1454 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
1455 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1456 uint64_t Result = getPointerAuthStableSipHash(S: R);
1457 pushInteger(S, Val: Result, QT: Call->getType());
1458 return true;
1459}
1460
1461static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1462 const InterpFrame *Frame,
1463 const CallExpr *Call) {
  // A call to __operator_new is only valid within std::allocator<T>::allocate.
  // Walk up the call stack to find the appropriate caller and get the
  // element type from it.
  auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }
  assert(NewCall);

  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  // We only care about the first parameter (the size), so discard all the
  // others.
  {
    unsigned NumArgs = Call->getNumArgs();
    assert(NumArgs >= 1);

    // The std::nothrow_t arg never gets put on the stack.
    if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
      --NumArgs;
    auto Args = ArrayRef(Call->getArgs(), NumArgs);
    // The first arg (the size) is still needed; skip it here.
    Args = Args.drop_front();

    // Discard the rest.
    for (const Expr *Arg : Args)
      discard(S.Stk, *S.getContext().classify(Arg));
  }

  APSInt Bytes = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: The same check we're using in CheckArraySize()
  if (NumElems.getActiveBits() >
          ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
    return false;

  bool IsArray = NumElems.ugt(1);
  std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  if (ElemT) {
    Block *B =
        Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
                           S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
    return true;
  }

  assert(!ElemT);

  // Composite arrays
  if (IsArray) {
    const Descriptor *Desc =
        S.P.createDescriptor(NewCall, ElemType.getTypePtr(),
                             IsArray ? std::nullopt : Descriptor::InlineDescMD);
    Block *B =
        Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
                           DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
    return true;
  }

  // Records. Still allocate them as single-element arrays.
  QualType AllocType = S.getASTContext().getConstantArrayType(
      ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);

  const Descriptor *Desc =
      S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
                           IsArray ? std::nullopt : Descriptor::InlineDescMD);
  Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                DynamicAllocator::Form::Operator);
  assert(B);
  S.Stk.push<Pointer>(Pointer(B).atIndex(0));
  return true;
}
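
// For illustration only (hypothetical user code, not interpreter code):
// with a C++20 constexpr-capable std::allocator (assumes <memory>), the
// path above is what makes the following well-formed:
//
//   constexpr int alloc_demo() {
//     std::allocator<int> A;
//     int *P = A.allocate(3); // routed through __builtin_operator_new
//     P[0] = 42;
//     int R = P[0];
//     A.deallocate(P, 3);     // must be freed before evaluation ends
//     return R;
//   }
//   static_assert(alloc_demo() == 42);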

static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

  if (S.checkingPotentialConstantExpression()) {
    S.Stk.discard<Pointer>();
    return false;
  }

  // This is permitted only within a call to std::allocator<T>::deallocate.
  if (!S.getStdAllocatorCaller("deallocate")) {
    S.FFDiag(Call);
    S.Stk.discard<Pointer>();
    return true;
  }

  {
    const Pointer &Ptr = S.Stk.pop<Pointer>();

    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();

    if (!BlockToDelete->isDynamic()) {
      S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
          << Ptr.toDiagnosticString(S.getASTContext());
      if (const auto *D = Ptr.getFieldDesc()->asDecl())
        S.Note(D->getLocation(), diag::note_declared_at);
    }
  }
  assert(BlockToDelete);

  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated, this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  return CheckNewDeleteForms(S, OpPC, *AllocForm,
                             DynamicAllocator::Form::Operator, BlockDesc,
                             Source);
}
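
// Hedged sketch of the double-delete diagnostic above (hypothetical user
// code, not part of the interpreter):
//
//   constexpr bool bad() {
//     std::allocator<int> A;
//     int *P = A.allocate(1);
//     A.deallocate(P, 1);
//     A.deallocate(P, 1); // diagnosed via note_constexpr_double_delete
//     return true;
//   }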

static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}
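
// During constant evaluation the fence is a value-preserving no-op, so,
// roughly (assuming the builtin is otherwise permitted in this context):
//
//   static_assert(__arithmetic_fence(2.0) == 2.0);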

static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call, unsigned ID) {
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
    T Result = Arg.atIndex(0).deref<T>();
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.atIndex(I).deref<T>();
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
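
// Illustrative user-level behavior (hypothetical example; assumes Clang
// vector extensions):
//
//   typedef int v4si __attribute__((ext_vector_type(4)));
//   constexpr v4si V = {1, 2, 3, 4};
//   static_assert(__builtin_reduce_add(V) == 10);
//   static_assert(__builtin_reduce_xor(V) == (1 ^ 2 ^ 3 ^ 4));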

/// Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = popToAPSInt(S.Stk, ArgT);
    pushInteger(S, Val.popcount(), Call->getType());
    return true;
  }
  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      Dst.atIndex(I).deref<T>() =
          T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}
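
// E.g., for scalar operands (hypothetical usage, not interpreter code):
//
//   static_assert(__builtin_elementwise_popcount(7u) == 3u);
//   static_assert(__builtin_elementwise_popcount(0u) == 0u);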

static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  const ASTContext &ASTCtx = S.getASTContext();
  PrimType SizeT = *S.getContext().classify(Call->getArg(2));
  APSInt Size = popToAPSInt(S.Stk, SizeT);
  const Pointer SrcPtr = S.Stk.pop<Pointer>();
  const Pointer DestPtr = S.Stk.pop<Pointer>();

  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");

  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  bool Move =
      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
               ID == Builtin::BI__builtin_wmemcpy ||
               ID == Builtin::BI__builtin_wmemmove;

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size.isZero()) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(ASTCtx);
    return false;
  }

  // Diagnose integral src/dest pointers specially.
  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
    std::string DiagVal = "(void *)";
    DiagVal += SrcPtr.isIntegralPointer()
                   ? std::to_string(SrcPtr.getIntegerRepresentation())
                   : std::to_string(DestPtr.getIntegerRepresentation());
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
    return false;
  }

  // Can't read from dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  QualType DestElemType = getElemType(DestPtr);
  size_t RemainingDestElems;
  if (DestPtr.getFieldDesc()->isArray()) {
    RemainingDestElems = DestPtr.isUnknownSizeArray()
                             ? 0
                             : (DestPtr.getNumElems() - DestPtr.getIndex());
  } else {
    RemainingDestElems = 1;
  }
  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();

  if (WChar) {
    uint64_t WCharSize =
        ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
    Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
                   /*IsUnsigned=*/true);
  }

  if (Size.urem(DestElemSize) != 0) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
    return false;
  }

  QualType SrcElemType = getElemType(SrcPtr);
  size_t RemainingSrcElems;
  if (SrcPtr.getFieldDesc()->isArray()) {
    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
                            ? 0
                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
  } else {
    RemainingSrcElems = 1;
  }
  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();

  if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
        << Move << SrcElemType << DestElemType;
    return false;
  }

  if (DestElemType->isIncompleteType() ||
      DestPtr.getType()->isIncompleteType()) {
    QualType DiagType =
        DestElemType->isIncompleteType() ? DestElemType : DestPtr.getType();
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DiagType;
    return false;
  }

  if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_nontrivial)
        << Move << DestElemType;
    return false;
  }

  // Check if we have enough elements to read from and write to.
  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
  if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
    APInt N = Size.udiv(DestElemSize);
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
        << DestElemType << toString(N, 10, /*Signed=*/false);
    return false;
  }

  // Check for overlapping memory regions.
  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
    // Remove base casts.
    Pointer SrcP = SrcPtr;
    while (SrcP.isBaseClass())
      SrcP = SrcP.getBase();

    Pointer DestP = DestPtr;
    while (DestP.isBaseClass())
      DestP = DestP.getBase();

    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
    unsigned N = Size.getZExtValue();

    if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  assert(Size.getZExtValue() % DestElemSize == 0);
  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
    return false;

  S.Stk.push<Pointer>(DestPtr);
  return true;
}
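
// Hedged user-level sketch of the happy path above (hypothetical example):
//
//   constexpr int copy_demo() {
//     int A[3] = {1, 2, 3};
//     int B[3] = {};
//     __builtin_memcpy(B, A, sizeof(A));
//     return B[2];
//   }
//   static_assert(copy_demo() == 3);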

/// Determine if T is a character type for which we guarantee that
/// sizeof(T) == 1.
static bool isOneByteCharacterType(QualType T) {
  return T->isCharType() || T->isChar8Type();
}

static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  PrimType SizeT = *S.getContext().classify(Call->getArg(2));
  const APSInt &Size = popToAPSInt(S.Stk, SizeT);
  const Pointer &PtrB = S.Stk.pop<Pointer>();
  const Pointer &PtrA = S.Stk.pop<Pointer>();

  if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
      ID == Builtin::BIwmemcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Size.isZero()) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  bool IsWide =
      (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);

  const ASTContext &ASTCtx = S.getASTContext();
  QualType ElemTypeA = getElemType(PtrA);
  QualType ElemTypeB = getElemType(PtrB);
  // FIXME: This is an arbitrary limitation carried over from the current
  // constant evaluator. We could remove it.
  if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
                  !isOneByteCharacterType(ElemTypeB))) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcmp_unsupported)
        << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
        << PtrB.getType();
    return false;
  }

  if (PtrA.isDummy() || PtrB.isDummy())
    return false;

  // Now, read both pointers to a buffer and compare those.
  BitcastBuffer BufferA(
      Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());

  BitcastBuffer BufferB(
      Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());

  size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
                                  BufferB.byteSize().getQuantity());

  unsigned ElemSize = 1;
  if (IsWide)
    ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
  // The Size given for the wide variants is in wide-char units. Convert it
  // to bytes.
  size_t ByteSize = Size.getZExtValue() * ElemSize;
  size_t CmpSize = std::min(MinBufferSize, ByteSize);

  for (size_t I = 0; I != CmpSize; I += ElemSize) {
    if (IsWide) {
      INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
        T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
        T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
        if (A < B) {
          pushInteger(S, -1, Call->getType());
          return true;
        } else if (A > B) {
          pushInteger(S, 1, Call->getType());
          return true;
        }
      });
    } else {
      std::byte A = BufferA.Data[I];
      std::byte B = BufferB.Data[I];

      if (A < B) {
        pushInteger(S, -1, Call->getType());
        return true;
      } else if (A > B) {
        pushInteger(S, 1, Call->getType());
        return true;
      }
    }
  }

  // We compared CmpSize bytes above. If the limiting factor was the Size
  // passed, we're done and the result is equality (0).
  if (ByteSize <= CmpSize) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  // However, if we read all the available bytes but were instructed to read
  // even more, diagnose this as a "read of dereferenced one-past-the-end
  // pointer". This is what would happen if we called CheckLoad() on every array
  // element.
  S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
      << AK_Read << S.Current->getRange(OpPC);
  return false;
}
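
// Observable behavior, roughly (hypothetical user code):
//
//   static_assert(__builtin_memcmp("abc", "abd", 3) < 0);
//   static_assert(__builtin_memcmp("abc", "abd", 2) == 0);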

// __builtin_memchr(ptr, int, size_t)
// __builtin_strchr(ptr, int)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
                                   const CallExpr *Call, unsigned ID) {
  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  std::optional<APSInt> MaxLength;
  PrimType DesiredT = *S.getContext().classify(Call->getArg(1));
  if (Call->getNumArgs() == 3) {
    PrimType MaxT = *S.getContext().classify(Call->getArg(2));
    MaxLength = popToAPSInt(S.Stk, MaxT);
  }
  APSInt Desired = popToAPSInt(S.Stk, DesiredT);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  if (MaxLength && MaxLength->isZero()) {
    S.Stk.push<Pointer>();
    return true;
  }

  if (Ptr.isDummy())
    return false;

  // Null is only okay if the given size is 0.
  if (Ptr.isZero()) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
        << AK_Read;
    return false;
  }

  QualType ElemTy = Ptr.getFieldDesc()->isArray()
                        ? Ptr.getFieldDesc()->getElemQualType()
                        : Ptr.getFieldDesc()->getType();
  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;

  // Give up on byte-oriented matching against multibyte elements.
  if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memchr_unsupported)
        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
    return false;
  }

  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
    // strchr compares directly to the passed integer, and therefore
    // always fails if given an int that is not a char.
    if (Desired !=
        Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
      S.Stk.push<Pointer>();
      return true;
    }
  }

  uint64_t DesiredVal;
  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
    DesiredVal = Desired.getZExtValue();
  } else {
    DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
  }

  bool StopAtZero =
      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);

  PrimType ElemT =
      IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));

  size_t Index = Ptr.getIndex();
  size_t Step = 0;
  for (;;) {
    const Pointer &ElemPtr =
        (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;

    if (!CheckLoad(S, OpPC, ElemPtr))
      return false;

    uint64_t V;
    INT_TYPE_SWITCH_NO_BOOL(
        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });

    if (V == DesiredVal) {
      S.Stk.push<Pointer>(ElemPtr);
      return true;
    }

    if (StopAtZero && V == 0)
      break;

    ++Step;
    if (MaxLength && Step == MaxLength->getZExtValue())
      break;
  }

  S.Stk.push<Pointer>();
  return true;
}
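
// E.g. (hypothetical user code):
//
//   constexpr const char *Str = "hello";
//   static_assert(__builtin_strchr(Str, 'l') == Str + 2);
//   static_assert(__builtin_strchr(Str, 'q') == nullptr);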

static unsigned computeFullDescSize(const ASTContext &ASTCtx,
                                    const Descriptor *Desc) {
  if (Desc->isPrimitive())
    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();

  if (Desc->isArray())
    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
           Desc->getNumElems();

  if (Desc->isRecord())
    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();

  llvm_unreachable("Unhandled descriptor type");
  return 0;
}

static unsigned computePointerOffset(const ASTContext &ASTCtx,
                                     const Pointer &Ptr) {
  unsigned Result = 0;

  Pointer P = Ptr;
  while (P.isArrayElement() || P.isField()) {
    P = P.expand();
    const Descriptor *D = P.getFieldDesc();

    if (P.isArrayElement()) {
      unsigned ElemSize =
          ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();
      if (P.isOnePastEnd())
        Result += ElemSize * P.getNumElems();
      else
        Result += ElemSize * P.getIndex();
      P = P.expand().getArray();
    } else if (P.isBaseClass()) {
      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
      bool IsVirtual = Ptr.isVirtualBaseClass();
      P = P.getBase();
      const Record *BaseRecord = P.getRecord();

      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
      if (IsVirtual)
        Result += Layout.getVBaseClassOffset(RD).getQuantity();
      else
        Result += Layout.getBaseClassOffset(RD).getQuantity();
    } else if (P.isField()) {
      const FieldDecl *FD = P.getField();
      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(FD->getParent());
      unsigned FieldIndex = FD->getFieldIndex();
      uint64_t FieldOffset =
          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
              .getQuantity();
      Result += FieldOffset;
      P = P.getBase();
    } else
      llvm_unreachable("Unhandled descriptor type");
  }

  return Result;
}
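
// For example (illustrative, alignment-dependent): given
//   struct S { char C; int I; } Obj;
// a Pointer to Obj.I yields 4 on common targets, and a pointer to the
// third element of an int[4] yields 8.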

static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  PrimType KindT = *S.getContext().classify(Call->getArg(1));
  [[maybe_unused]] unsigned Kind = popToAPSInt(S.Stk, KindT).getZExtValue();

  assert(Kind <= 3 && "unexpected kind");

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  if (Ptr.isZero())
    return false;

  const Descriptor *DeclDesc = Ptr.getDeclDesc();
  if (!DeclDesc)
    return false;

  const ASTContext &ASTCtx = S.getASTContext();

  unsigned ByteOffset = computePointerOffset(ASTCtx, Ptr);
  unsigned FullSize = computeFullDescSize(ASTCtx, DeclDesc);

  pushInteger(S, FullSize - ByteOffset, Call->getType());

  return true;
}
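
// Roughly, the remaining-bytes result above corresponds to (hypothetical
// user code; whether this folds in a given context is not guaranteed):
//
//   constexpr char Buf[10] = {};
//   static_assert(__builtin_object_size(&Buf[2], 0) == 8);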

static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
                                               const CallExpr *Call) {
  if (!S.inConstantContext())
    return false;

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = S.Current->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
    }
    S.CCEDiag(CalledFromStd
                  ? S.Current->Caller->getSource(S.Current->getRetPC())
                  : S.Current->getSource(OpPC),
              diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return false;
  };

  if (Ptr.isZero())
    return Error(0);
  if (Ptr.isOnePastEnd())
    return Error(1);

  bool Result = true;
  if (!Ptr.isActive()) {
    Result = false;
  } else {
    if (!CheckLive(S, OpPC, Ptr, AK_Read))
      return false;
    if (!CheckMutable(S, OpPC, Ptr))
      return false;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}
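
// Sketch of the user-visible semantics (hypothetical C++26-style code):
//
//   constexpr int I = 0;
//   static_assert(__builtin_is_within_lifetime(&I));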

bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
                      uint32_t BuiltinID) {
  if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
    return Invalid(S, OpPC);

  const InterpFrame *Frame = S.Current;
  switch (BuiltinID) {
  case Builtin::BI__builtin_is_constant_evaluated:
    return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_assume:
  case Builtin::BI__assume:
    return interp__builtin_assume(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_strcmp:
  case Builtin::BIstrcmp:
  case Builtin::BI__builtin_strncmp:
  case Builtin::BIstrncmp:
  case Builtin::BI__builtin_wcsncmp:
  case Builtin::BIwcsncmp:
  case Builtin::BI__builtin_wcscmp:
  case Builtin::BIwcscmp:
    return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_strlen:
  case Builtin::BIstrlen:
  case Builtin::BI__builtin_wcslen:
  case Builtin::BIwcslen:
    return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);

  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128:
    return interp__builtin_inf(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128:
    return interp__builtin_copysign(S, OpPC, Frame);

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128:
    return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);

  case Builtin::BI__builtin_fminimum_num:
  case Builtin::BI__builtin_fminimum_numf:
  case Builtin::BI__builtin_fminimum_numl:
  case Builtin::BI__builtin_fminimum_numf16:
  case Builtin::BI__builtin_fminimum_numf128:
    return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128:
    return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);

  case Builtin::BI__builtin_fmaximum_num:
  case Builtin::BI__builtin_fmaximum_numf:
  case Builtin::BI__builtin_fmaximum_numl:
  case Builtin::BI__builtin_fmaximum_numf16:
  case Builtin::BI__builtin_fmaximum_numf128:
    return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);

  case Builtin::BI__builtin_isnan:
    return interp__builtin_isnan(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_issignaling:
    return interp__builtin_issignaling(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_isinf:
    return interp__builtin_isinf(S, OpPC, Frame, /*CheckSign=*/false, Call);

  case Builtin::BI__builtin_isinf_sign:
    return interp__builtin_isinf(S, OpPC, Frame, /*CheckSign=*/true, Call);

  case Builtin::BI__builtin_isfinite:
    return interp__builtin_isfinite(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_isnormal:
    return interp__builtin_isnormal(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_issubnormal:
    return interp__builtin_issubnormal(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_iszero:
    return interp__builtin_iszero(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    return interp__builtin_signbit(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    return interp_floating_comparison(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_isfpclass:
    return interp__builtin_isfpclass(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_fpclassify:
    return interp__builtin_fpclassify(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    return interp__builtin_fabs(S, OpPC, Frame);

  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
    return interp__builtin_abs(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
    return interp__builtin_popcount(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll:
    return interp__builtin_parity(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll:
    return interp__builtin_clrsb(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64:
    return interp__builtin_bitreverse(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_classify_type:
    return interp__builtin_classify_type(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_expect:
  case Builtin::BI__builtin_expect_with_probability:
    return interp__builtin_expect(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false);

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true);

  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll:
    return interp__builtin_ffs(S, OpPC, Frame, Call);

  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    assert(isNoopBuiltin(BuiltinID));
    return interp__builtin_addressof(S, OpPC, Frame, Call);

  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    assert(isNoopBuiltin(BuiltinID));
    return interp__builtin_move(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_eh_return_data_regno:
    return interp__builtin_eh_return_data_regno(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_launder:
    assert(isNoopBuiltin(BuiltinID));
    return true;

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll:
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clzg:
  case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64:
    return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll:
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctzg:
    return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
    return interp__builtin_bswap(S, OpPC, Frame, Call);

  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
    return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__c11_atomic_is_lock_free:
    return interp__builtin_c11_atomic_is_lock_free(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_complex:
    return interp__builtin_complex(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_assume_aligned:
    return interp__builtin_assume_aligned(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_bextr_u32:
  case clang::X86::BI__builtin_ia32_bextr_u64:
  case clang::X86::BI__builtin_ia32_bextri_u32:
  case clang::X86::BI__builtin_ia32_bextri_u64:
    return interp__builtin_ia32_bextr(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_bzhi_si:
  case clang::X86::BI__builtin_ia32_bzhi_di:
    return interp__builtin_ia32_bzhi(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_lzcnt_u16:
  case clang::X86::BI__builtin_ia32_lzcnt_u32:
  case clang::X86::BI__builtin_ia32_lzcnt_u64:
    return interp__builtin_ia32_lzcnt(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_tzcnt_u16:
  case clang::X86::BI__builtin_ia32_tzcnt_u32:
  case clang::X86::BI__builtin_ia32_tzcnt_u64:
    return interp__builtin_ia32_tzcnt(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_pdep_si:
  case clang::X86::BI__builtin_ia32_pdep_di:
    return interp__builtin_ia32_pdep(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_pext_si:
  case clang::X86::BI__builtin_ia32_pext_di:
    return interp__builtin_ia32_pext(S, OpPC, Frame, Call);

  case clang::X86::BI__builtin_ia32_addcarryx_u32:
  case clang::X86::BI__builtin_ia32_addcarryx_u64:
  case clang::X86::BI__builtin_ia32_subborrow_u32:
  case clang::X86::BI__builtin_ia32_subborrow_u64:
    return interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, Call,
                                                   BuiltinID);

  case Builtin::BI__builtin_os_log_format_buffer_size:
    return interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_ptrauth_string_discriminator:
    return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call);

  case Builtin::BI__noop:
    pushInteger(S, 0, Call->getType());
    return true;

  case Builtin::BI__builtin_operator_new:
    return interp__builtin_operator_new(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_operator_delete:
    return interp__builtin_operator_delete(S, OpPC, Frame, Call);

  case Builtin::BI__arithmetic_fence:
    return interp__builtin_arithmetic_fence(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_and:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_xor:
    return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_elementwise_popcount:
    return interp__builtin_elementwise_popcount(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_wmemcpy:
  case Builtin::BIwmemcpy:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_wmemmove:
  case Builtin::BIwmemmove:
    return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BI__builtin_memcmp:
  case Builtin::BImemcmp:
  case Builtin::BI__builtin_bcmp:
  case Builtin::BIbcmp:
  case Builtin::BI__builtin_wmemcmp:
  case Builtin::BIwmemcmp:
    return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);

  case Builtin::BImemchr:
  case Builtin::BI__builtin_memchr:
  case Builtin::BIstrchr:
  case Builtin::BI__builtin_strchr:
  case Builtin::BIwmemchr:
  case Builtin::BI__builtin_wmemchr:
  case Builtin::BIwcschr:
  case Builtin::BI__builtin_wcschr:
  case Builtin::BI__builtin_char_memchr:
    return interp__builtin_memchr(S, OpPC, Call, BuiltinID);

  case Builtin::BI__builtin_object_size:
  case Builtin::BI__builtin_dynamic_object_size:
    return interp__builtin_object_size(S, OpPC, Frame, Call);

  case Builtin::BI__builtin_is_within_lifetime:
    return interp__builtin_is_within_lifetime(S, OpPC, Call);

  default:
    S.FFDiag(S.Current->getLocation(OpPC),
             diag::note_invalid_subexpr_in_const_expr)
        << S.Current->getRange(OpPC);

    return false;
  }

  llvm_unreachable("Unhandled builtin ID");
}

bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result +=
          S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}
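
// E.g. (hypothetical; assumes 4-byte int and no padding between members):
//
//   struct S { int A; int B[3]; };
//   static_assert(__builtin_offsetof(S, B[1]) == sizeof(int) * 2);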
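
/// Assigns the integer value of a three-way comparison result to the sole
/// field of the record \p Ptr points to. (Descriptive comment added
/// editorially; this assumes the comparison category types, e.g.
/// std::strong_ordering, wrap exactly one integral field, as the asserts
/// below check.)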
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
                                const Pointer &Ptr, const APSInt &IntValue) {
  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}

static void zeroAll(Pointer &Dest) {
  const Descriptor *Desc = Dest.getFieldDesc();

  if (Desc->isPrimitive()) {
    TYPE_SWITCH(Desc->getPrimType(), {
      Dest.deref<T>().~T();
      new (&Dest.deref<T>()) T();
    });
    return;
  }

  if (Desc->isRecord()) {
    const Record *R = Desc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer FieldPtr = Dest.atField(F.Offset);
      zeroAll(FieldPtr);
    }
    return;
  }

  if (Desc->isPrimitiveArray()) {
    // Zero each element in place, not just the first one.
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I);
      TYPE_SWITCH(Desc->getPrimType(), {
        ElemPtr.deref<T>().~T();
        new (&ElemPtr.deref<T>()) T();
      });
    }
    return;
  }

  if (Desc->isCompositeArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I).narrow();
      zeroAll(ElemPtr);
    }
    return;
  }
}

static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate);
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field. Zero all others.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        if (!copyField(F, /*Activate=*/true))
          return false;
      } else {
        Pointer DestField = Dest.atField(F.Offset);
        zeroAll(DestField);
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}
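
// The union handling above gives the usual constant-evaluation semantics
// for copying unions (hypothetical user code, C++20 designated init):
//
//   union U { int A; float F; };
//   constexpr U U1{.A = 1};
//   constexpr U U2 = U1; // Only 'A' is copied and active in U2.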

static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate = false) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.atIndex(I).deref<T>();
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isCompositeArray()) {
    assert(SrcDesc->isCompositeArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      const Pointer &SrcElem = Src.atIndex(I).narrow();
      Pointer DestElem = Dest.atIndex(I).narrow();
      if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
        return false;
    }
    return true;
  }

  if (DestDesc->isRecord())
    return copyRecord(S, OpPC, Src, Dest, Activate);
  return Invalid(S, OpPC);
}

bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}

} // namespace interp
} // namespace clang