//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Interp.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
  unsigned O = 0;

  for (const Expr *E : C->arguments()) {
    O += align(primSize(*S.getContext().classify(E)));
  }

  return O;
}

template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
  assert(Frame->getFunction()->getNumParams() > Index);
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  return Frame->getParam<T>(Offset);
}

PrimType getIntPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getCtx().getTargetInfo();
  unsigned IntWidth = TI.getIntWidth();

  if (IntWidth == 32)
    return PT_Sint32;
  else if (IntWidth == 16)
    return PT_Sint16;
  llvm_unreachable("Int isn't 16 or 32 bit?");
}

PrimType getLongPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getCtx().getTargetInfo();
  unsigned LongWidth = TI.getLongWidth();

  if (LongWidth == 64)
    return PT_Sint64;
  else if (LongWidth == 32)
    return PT_Sint32;
  else if (LongWidth == 16)
    return PT_Sint16;
  llvm_unreachable("long isn't 16, 32 or 64 bit?");
}

/// Peek an integer value from the stack into an APSInt.
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
  if (Offset == 0)
    Offset = align(primSize(T));

  APSInt R;
  INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());

  return R;
}

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}

static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
                         std::optional<PrimType> &T) {
  if (!T)
    return RetVoid(S, OpPC, Result);

#define RET_CASE(X)                                                            \
  case X:                                                                      \
    return Ret<X>(S, OpPC, Result);
  switch (*T) {
    RET_CASE(PT_Ptr);
    RET_CASE(PT_FnPtr);
    RET_CASE(PT_Float);
    RET_CASE(PT_Bool);
    RET_CASE(PT_Sint8);
    RET_CASE(PT_Uint8);
    RET_CASE(PT_Sint16);
    RET_CASE(PT_Uint16);
    RET_CASE(PT_Sint32);
    RET_CASE(PT_Uint32);
    RET_CASE(PT_Sint64);
    RET_CASE(PT_Uint64);
  default:
    llvm_unreachable("Unsupported return type for builtin function");
  }
#undef RET_CASE
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
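  // A minimal sketch of the pattern diagnosed below (illustrative):
  //   constexpr bool B = std::is_constant_evaluated(); // always true here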
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      Frame->Caller && S.getEvalStatus().Diag) {
    auto isStdCall = [](const FunctionDecl *F) -> bool {
      return F && F->isInStdNamespace() && F->getIdentifier() &&
             F->getIdentifier()->isStr("is_constant_evaluated");
    };
    const InterpFrame *Caller = Frame->Caller;

    if (Caller->Caller && isStdCall(Caller->getCallee())) {
      const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << E->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}

static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Pointer &A = getParam<Pointer>(Frame, 0);
  const Pointer &B = getParam<Pointer>(Frame, 1);

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  int32_t Result = 0;
  for (;; ++IndexA, ++IndexB) {
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB) {
      Result = 1;
      break;
    } else if (CA < CB) {
      Result = -1;
      break;
    }
    if (CA == 0 || CB == 0)
      break;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Pointer &StrPtr = getParam<Pointer>(Frame, 0);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint8_t Val = ElemPtr.deref<uint8_t>();
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F,
                                bool Signaling) {
  const Pointer &Arg = getParam<Pointer>(Frame, 0);

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType());

  Floating Result;
  if (S.getCtx().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F) {
  const llvm::fltSemantics &TargetSemantics =
      S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F) {
  const Floating &Arg1 = getParam<Floating>(Frame, 0);
  const Floating &Arg2 = getParam<Floating>(Frame, 1);

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}

static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *F) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  // When comparing zeroes, return -0.0 if one of the zeroes is negative.
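  // E.g. (illustrative) fmin(+0.0, -0.0) yields -0.0 under this rule.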
  if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
    Result = RHS;
  else if (LHS.isNan() || RHS < LHS)
    Result = RHS;
  else
    Result = LHS;

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  // When comparing zeroes, return +0.0 if one of the zeroes is positive.
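  // E.g. (illustrative) fmax(-0.0, +0.0) yields +0.0 under this rule.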
  if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
    Result = RHS;
  else if (LHS.isNan() || RHS > LHS)
    Result = RHS;
  else
    Result = LHS;

  S.Stk.push<Floating>(Result);
  return true;
}

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  bool CheckSign, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, Arg.isInf(), Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame, const Function *F,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
  const Floating &F =
      S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

/// Five int values followed by one floating value.
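/// E.g. (illustrative) __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
/// FP_SUBNORMAL, FP_ZERO, x) -- one int result per floating-point category.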
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.peek<Floating>();

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);
  unsigned IntSize = primSize(getIntPrimType(S));
  unsigned Offset =
      align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));
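  // E.g. (illustrative) Index == 4 (zero) peeks just past the float to the
  // int pushed last; Index == 0 (NaN) reaches past the float and all five.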

  APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
  pushInteger(S, I, Call->getType());
  return true;
}

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &Val = getParam<Floating>(Frame, 0);

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *Func,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
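  // E.g. __builtin_expect(N, 1) constant-folds to the value of N.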
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
  if (NumArgs == 3)
    Offset += align(primSize(PT_Float));

  APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
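/// E.g. (illustrative) __builtin_rotateleft8(0x01, 1) == 0x02.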
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call,
                                   bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  APSInt Value = peekToAPSInt(
      S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = peekToAPSInt(S.Stk, ArgT);

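  // ffs() is one plus the index of the least significant set bit, or zero
  // when the value is zero; countr_zero() gives exactly that index.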
  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  if (PtrT == PT_FnPtr) {
    const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
    S.Stk.push<FunctionPointer>(Arg);
  } else if (PtrT == PT_Ptr) {
    const Pointer &Arg = S.Stk.peek<Pointer>();
    S.Stk.push<Pointer>(Arg);
  } else {
    assert(false && "Unsupported pointer type passed to __builtin_addressof()");
  }
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 const CallExpr *Call) {

  PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););

  return Func->getDecl()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = peekToAPSInt(S.Stk, ArgT);

  int Result =
      S.getCtx().getTargetInfo().getEHDataRegisterNumber(Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

/// Just takes the first Argument to the call and puts it on the stack.
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
                        const Function *Func, const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  S.Stk.push<Pointer>(Arg);
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  Pointer &ResultPtr = S.Stk.peek<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
  APSInt LHS = peekToAPSInt(S.Stk, LHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                                align(primSize(LHST)));
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getCtx().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;
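    // E.g. (illustrative) a signed and an unsigned 32-bit operand are widened
    // to 33 bits so that every value of either operand stays representable.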

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() will work exactly like TruncOrSelf.
    APSInt Temp = Result.extOrTrunc(S.getCtx().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Func->getDecl()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}

765 | |
766 | /// Three integral values followed by a pointer (lhs, rhs, carry, carryOut). |
767 | static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, |
768 | const InterpFrame *Frame, |
769 | const Function *Func, |
770 | const CallExpr *Call) { |
771 | unsigned BuiltinOp = Func->getBuiltinID(); |
772 | PrimType LHST = *S.getContext().classify(T: Call->getArg(Arg: 0)->getType()); |
773 | PrimType RHST = *S.getContext().classify(T: Call->getArg(Arg: 1)->getType()); |
774 | PrimType CarryT = *S.getContext().classify(T: Call->getArg(Arg: 2)->getType()); |
775 | APSInt RHS = peekToAPSInt(Stk&: S.Stk, T: RHST, |
776 | Offset: align(Size: primSize(Type: PT_Ptr)) + align(Size: primSize(Type: CarryT)) + |
777 | align(Size: primSize(Type: RHST))); |
778 | APSInt LHS = |
779 | peekToAPSInt(Stk&: S.Stk, T: LHST, |
780 | Offset: align(Size: primSize(Type: PT_Ptr)) + align(Size: primSize(Type: RHST)) + |
781 | align(Size: primSize(Type: CarryT)) + align(Size: primSize(Type: LHST))); |
782 | APSInt CarryIn = peekToAPSInt( |
783 | Stk&: S.Stk, T: LHST, Offset: align(Size: primSize(Type: PT_Ptr)) + align(Size: primSize(Type: CarryT))); |
784 | APSInt CarryOut; |
785 | |
786 | APSInt Result; |
787 | // Copy the number of bits and sign. |
788 | Result = LHS; |
789 | CarryOut = LHS; |
790 | |
791 | bool FirstOverflowed = false; |
792 | bool SecondOverflowed = false; |
793 | switch (BuiltinOp) { |
794 | default: |
795 | llvm_unreachable("Invalid value for BuiltinOp" ); |
796 | case Builtin::BI__builtin_addcb: |
797 | case Builtin::BI__builtin_addcs: |
798 | case Builtin::BI__builtin_addc: |
799 | case Builtin::BI__builtin_addcl: |
800 | case Builtin::BI__builtin_addcll: |
801 | Result = |
802 | LHS.uadd_ov(RHS, Overflow&: FirstOverflowed).uadd_ov(RHS: CarryIn, Overflow&: SecondOverflowed); |
803 | break; |
804 | case Builtin::BI__builtin_subcb: |
805 | case Builtin::BI__builtin_subcs: |
806 | case Builtin::BI__builtin_subc: |
807 | case Builtin::BI__builtin_subcl: |
808 | case Builtin::BI__builtin_subcll: |
809 | Result = |
810 | LHS.usub_ov(RHS, Overflow&: FirstOverflowed).usub_ov(RHS: CarryIn, Overflow&: SecondOverflowed); |
811 | break; |
812 | } |
813 | // It is possible for both overflows to happen but CGBuiltin uses an OR so |
814 | // this is consistent. |
815 | CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed); |
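  // E.g. (illustrative) __builtin_addc(~0u, 0u, 1u, &C) yields 0 with C == 1.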

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;
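  // E.g. __lzcnt16(0) folds to 16 here, while __builtin_clz(0) is rejected.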

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  unsigned SizeValOffset = 0;
  if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
    SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
  const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.
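  // E.g. (illustrative) __atomic_is_lock_free(4, nullptr) folds to true on a
  // target whose maximum inline atomic width is at least 32 bits.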

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getCtx().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getCtx().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
          Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      const Pointer &Ptr = S.Stk.peek<Pointer>();
      if (Ptr.isZero())
        return returnBool(true);

      QualType PointeeType = Call->getArg(1)
                                 ->IgnoreImpCasts()
                                 ->getType()
                                 ->castAs<PointerType>()
                                 ->getPointeeType();
      // OK, we will inline operations on this object.
      if (!PointeeType->isIncompleteType() &&
          S.getCtx().getTypeAlignInChars(PointeeType) >= Size)
        return returnBool(true);
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.peek<Floating>();
  const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
  Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
                                        align(primSize(PT_Ptr)));

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const Function *Func,
                                               const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  unsigned CallSize = callArgSize(S, Call);

  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getCtx().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

  unsigned PtrOffset = Ptr.getByteOffset();
  PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getCtx().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}

static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const Function *Func,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(S.getCtx(), Call, Layout);
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}

static bool interp__builtin_ptrauth_string_discriminator(
    InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
    const Function *Func, const CallExpr *Call) {
  const auto &Ptr = S.Stk.peek<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(R);
  pushInteger(S, Result, Call->getType());
  return true;
}

bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
                      const CallExpr *Call) {
  const InterpFrame *Frame = S.Current;
  APValue Dummy;

  std::optional<PrimType> ReturnT = S.getContext().classify(Call);

  switch (F->getBuiltinID()) {
  case Builtin::BI__builtin_is_constant_evaluated:
    if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_assume:
  case Builtin::BI__assume:
    break;
  case Builtin::BI__builtin_strcmp:
    if (!interp__builtin_strcmp(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_strlen:
    if (!interp__builtin_strlen(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
      return false;
    break;
  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
      return false;
    break;

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128:
    if (!interp__builtin_inf(S, OpPC, Frame, F))
      return false;
    break;
  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128:
    if (!interp__builtin_copysign(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128:
    if (!interp__builtin_fmin(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128:
    if (!interp__builtin_fmax(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_isnan:
    if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issignaling:
    if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf_sign:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
      return false;
    break;

  case Builtin::BI__builtin_isfinite:
    if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isnormal:
    if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issubnormal:
    if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_iszero:
    if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isfpclass:
    if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_fpclassify:
    if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    if (!interp__builtin_fabs(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
    if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll:
    if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll:
    if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64:
    if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_classify_type:
    if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_expect:
  case Builtin::BI__builtin_expect_with_probability:
    if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
      return false;
    break;

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
      return false;
    break;

  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll:
    if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    if (!interp__builtin_move(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_eh_return_data_regno:
    if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_launder:
    if (!noopPointer(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll:
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clzg:
  case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64:
    if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll:
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctzg:
    if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
    if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
  case Builtin::BI__c11_atomic_is_lock_free:
    if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_complex:
    if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ptrauth_string_discriminator:
    if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
      return false;
    break;

  default:
    S.FFDiag(S.Current->getLocation(OpPC),
             diag::note_invalid_subexpr_in_const_expr)
        << S.Current->getRange(OpPC);

    return false;
  }

  return retPrimValue(S, OpPC, Dummy, ReturnT);
}

bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       llvm::ArrayRef<int64_t> ArrayIndices,
                       int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result += S.getCtx().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getCtx().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getCtx().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}

bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
                                const Pointer &Ptr, const APSInt &IntValue) {

  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}

bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.atIndex(I).deref<T>();
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isRecord()) {
    assert(SrcDesc->isRecord());
    assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
    const Record *R = DestDesc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer DestField = Dest.atField(F.Offset);
      if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
        TYPE_SWITCH(*FT, {
          DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
          DestField.initialize();
        });
      } else {
        return Invalid(S, OpPC);
      }
    }
    return true;
  }

  // FIXME: Composite types.

  return Invalid(S, OpPC);
}

} // namespace interp
} // namespace clang