//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/NVVMIntrinsicUtils.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstdint>

using namespace llvm;

static cl::opt<bool> DisableFPCallFolding(
    "disable-fp-call-folding",
    cl::desc("Disable constant-folding of FP intrinsics and libcalls."),
    cl::init(false), cl::Hidden);

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

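    // An undef element folds to zero bits here: make room for it and leave
    // the newly shifted-in low bits clear.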
    if (isa_and_nonnull<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
    return Res;

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating point, convert it to a vector
      // of int to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (!isa<VectorType>(C->getType()) &&
      (isa<ConstantFP>(C) || isa<ConstantInt>(C))) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // Some of what follows may extend to cover scalable vectors but the current
  // implementation is fixed-length specific.
  if (!isa<FixedVectorType>(C->getType()))
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C) &&
      !isa<ConstantInt>(C) && !isa<ConstantFP>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if we
  // have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    assert((isa<ConstantVector>(C) || // FIXME: Remove ConstantVector.
            isa<ConstantDataVector>(C) || isa<ConstantInt>(C)) &&
           "Constant folding cannot fail for plain fp->int bitcast!");
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (isa_and_nonnull<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
                                      DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
        assert(Elt && "Constant folding cannot fail on plain integers");
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->addr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::PtrToAddr)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
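  // Accumulate into a temporary so Offset is only written on success.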
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
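  // Walk down through the initial elements of C, one aggregate layer at a
  // time, until we find a constant that can be reinterpreted as DestTy.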
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
      return Res;

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantFoldCastOperand(Cast, C, DestTy, DL);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // Reading type padding, return zero.
  if (ByteOffset >= DL.getTypeStoreSize(C->getType()))
    return true;

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  auto *CI = dyn_cast<ConstantInt>(C);
  if (CI && CI->getType()->isIntegerTy()) {
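    // Only integers whose width is a whole number of bytes can be copied,
    // since anything else would require sub-byte addressing.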
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBits(8, n * 8).getZExtValue();
      ++ByteOffset;
    }
    return true;
  }

  auto *CFP = dyn_cast<ConstantFP>(C);
  if (CFP && CFP->getType()->isFloatingPointTy()) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C) || isa<ConstantInt>(C) ||
      isa<ConstantFP>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // TODO: For non-byte-sized vectors, current implementation assumes
      // there is padding to the next byte boundary between elements.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
        !LoadTy->isVectorTy())
      return nullptr;

    Type *MapTy = Type::getIntNTy(C->getContext(),
                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do the vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
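  // Loads wider than the 32-byte RawBytes scratch buffer below aren't handled.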
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  // TODO: We should be able to support scalable types.
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= (int64_t)InitializerSize.getFixedValue())
    return PoisonValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
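  // Assemble the bytes into a single integer, respecting the target's
  // endianness.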
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

} // anonymous namespace

// If GV is a constant with an initializer, read its representation starting
// at Offset and return it as a constant array of unsigned char. Otherwise
// return null.
Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
                                        uint64_t Offset) {
  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  const DataLayout &DL = GV->getDataLayout();
  Constant *Init = const_cast<Constant *>(GV->getInitializer());
  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
  if (InitSize < Offset)
    return nullptr;

  uint64_t NBytes = InitSize - Offset;
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    // Offset is assumed to be less than or equal to InitSize (this
    // is enforced in ReadDataFromGlobal).
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;

  return ConstantDataArray::get(GV->getContext(), RawBytes);
}

/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
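  // getGEPIndicesForOffset() updates Offset to whatever remainder it could
  // not consume. A non-zero remainder or a non-zero first index means the
  // offset does not point exactly at an element start.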
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const APInt &Offset,
                                          const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Explicitly check for out-of-bounds access, so we return poison even if the
  // constant is a uniform value.
  TypeSize Size = DL.getTypeAllocSize(C->getType());
  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
    return PoisonValue::get(Ty);

  // Try an offset-independent fold of a uniform value.
  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty, DL))
    return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const DataLayout &DL) {
  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             APInt Offset,
                                             const DataLayout &DL) {
  // We can only fold loads from constant globals with a definitive
  // initializer. Check this upfront, to skip expensive offset calculations.
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (C == GV)
    if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
                                                     Offset, DL))
      return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty, DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return ConstantFoldLoadFromConstPtr(C, Ty, std::move(Offset), DL);
}

Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
                                                 const DataLayout &DL) {
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // If padding is needed when storing C to memory, then it isn't considered
  // uniform.
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
    return nullptr;
  if (C->isNullValue() && !Ty->isX86_AMXTy())
    return Constant::getNullValue(Ty);
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}

namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using DataLayout information from DL.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

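    // Otherwise combine the known bits of both operands into the known bits
    // of the 'and'; if every bit is known, the result folds to a constant.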
    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, GEPNoWrapFlags NW,
                         std::optional<ConstantRange> InRange,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
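  // Ops[0] is the pointer operand. The first index (i == 1) can always be
  // canonicalized; later indices only when they do not index into a struct,
  // since struct indices must stay i32.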
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      Constant *NewIdx = ConstantFoldCastOperand(
          CastInst::getCastOpcode(Ops[i], true, NewType, true), Ops[i], NewType,
          DL);
      if (!NewIdx)
        return nullptr;
      NewIdxs.push_back(NewIdx);
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C =
      ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs, NW, InRange);
  return ConstantFoldConstant(C, DL, TLI);
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]) || !Ops[i]->getType()->isIntegerTy())
      return nullptr;

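  // All indices are constant integers, so accumulate the total offset they
  // imply, in the width of the pointer's index type.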
  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
      /*isSigned=*/true, /*implicitTrunc=*/true);

  std::optional<ConstantRange> InRange = GEP->getInRange();
  if (InRange)
    InRange = InRange->sextOrTrunc(BitWidth);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  GEPNoWrapFlags NW = GEP->getNoWrapFlags();
  bool Overflow = false;
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    NW &= GEP->getNoWrapFlags();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    // Adjust inrange offset and intersect inrange attributes.
    if (auto GEPRange = GEP->getInRange()) {
      auto AdjustedGEPRange = GEPRange->sextOrTrunc(BitWidth).subtract(Offset);
      InRange =
          InRange ? InRange->intersectWith(AdjustedGEPRange) : AdjustedGEPRange;
    }

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset = Offset.sadd_ov(
        APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps),
              /*isSigned=*/true, /*implicitTrunc=*/true),
        Overflow);
  }

  // Preserving nusw (without inbounds) also requires that the offset
  // additions did not overflow.
  if (NW.hasNoUnsignedSignedWrap() && !NW.isInBounds() && Overflow)
    NW = NW.withoutNoUnsignedSignedWrap();

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BaseIntVal(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BaseIntVal = Base->getValue().zextOrTrunc(BaseIntVal.getBitWidth());
    }
  }

  if ((Ptr->isNullValue() || BaseIntVal != 0) &&
      !DL.mustNotIntroduceIntToPtr(Ptr->getType())) {

    // If the index size is smaller than the pointer size, add to the low
    // bits only.
    BaseIntVal.insertBits(BaseIntVal.trunc(BitWidth) + Offset, 0);
    Constant *C = ConstantInt::get(Ptr->getContext(), BaseIntVal);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Try to infer inbounds for GEPs of globals.
  if (!NW.isInBounds() && Offset.isNonNegative()) {
    bool CanBeNull, CanBeFreed;
    uint64_t DerefBytes =
        Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
    if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
      NW |= GEPNoWrapFlags::inBounds();
  }

  // nusw + nneg -> nuw
  if (NW.hasNoUnsignedSignedWrap() && Offset.isNonNegative())
    NW |= GEPNoWrapFlags::noUnsignedWrap();

  // Otherwise canonicalize this to a single ptradd.
  LLVMContext &Ctx = Ptr->getContext();
  return ConstantExpr::getPtrAdd(Ptr, ConstantInt::get(Ctx, Offset), NW,
                                 InRange);
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI,
                                       bool AllowNonDeterministic) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I,
                                          AllowNonDeterministic);
      }
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->getNoWrapFlags(),
                                          GEP->getInRange());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI,
                                AllowNonDeterministic);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
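  // Only constant vectors and constant expressions have operands that might
  // fold further; anything else is already as folded as it can be.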
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI, /*AllowNonDeterministic=*/true))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(const Instruction *I,
                                        const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](const Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(const Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI,
                                         bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
}

Constant *llvm::ConstantFoldCompareInstOperands(
    unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const Instruction *I) {
  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        if (Constant *C = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                  /*IsSigned*/ false, DL)) {
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }

      // icmp only compares the address part of the pointer, so only do this
      // transform if the integer size matches the address size.
      if (CE0->getOpcode() == Instruction::PtrToInt ||
          CE0->getOpcode() == Instruction::PtrToAddr) {
        Type *AddrTy = DL.getAddressType(CE0->getOperand(0)->getType());
        if (CE0->getType() == AddrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          Constant *C1 = ConstantFoldIntegerCast(CE1->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          if (C0 && C1)
            return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // icmp only compares the address part of the pointer, so only do this
        // transform if the integer size matches the address size.
        if (CE0->getOpcode() == Instruction::PtrToInt ||
            CE0->getOpcode() == Instruction::PtrToAddr) {
          Type *AddrTy = DL.getAddressType(CE0->getOperand(0)->getType());
          if (CE0->getType() == AddrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds. This
    // only works for equality and unsigned comparison, as inbounds permits
    // crossing the sign boundary. However, the offset comparison itself is
    // signed.
    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
      bool IsEqPred = ICmpInst::isEquality(Predicate);
      Value *Stripped0 = Ops0->stripAndAccumulateConstantOffsets(
          DL, Offset0, /*AllowNonInbounds=*/IsEqPred,
          /*AllowInvariantGroup=*/false, /*ExternalAnalysis=*/nullptr,
          /*LookThroughIntToPtr=*/IsEqPred);
      APInt Offset1(IndexWidth, 0);
      Value *Stripped1 = Ops1->stripAndAccumulateConstantOffsets(
          DL, Offset1, /*AllowNonInbounds=*/IsEqPred,
          /*AllowInvariantGroup=*/false, /*ExternalAnalysis=*/nullptr,
          /*LookThroughIntToPtr=*/IsEqPred);
      if (Stripped0 == Stripped1)
        return ConstantInt::getBool(
            Ops0->getContext(),
            ICmpInst::compare(Offset0, Offset1,
                              ICmpInst::getSignedPredicate(Predicate)));
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  if (CmpInst::isFPPredicate(Predicate)) {
    // Flush any denormal constant float input according to denormal handling
    // mode.
    Ops0 = FlushFPConstant(Ops0, I, /*IsOutput=*/false);
    if (!Ops0)
      return nullptr;
    Ops1 = FlushFPConstant(Ops1, I, /*IsOutput=*/false);
    if (!Ops1)
      return nullptr;
  }

  return ConstantFoldCompareInstruction(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantFoldUnaryInstruction(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  if (ConstantExpr::isDesirableBinOp(Opcode))
    return ConstantExpr::get(Opcode, LHS, RHS);
  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}

static ConstantFP *flushDenormalConstant(Type *Ty, const APFloat &APF,
                                         DenormalMode::DenormalModeKind Mode) {
  switch (Mode) {
  case DenormalMode::Dynamic:
    return nullptr;
  case DenormalMode::IEEE:
    return ConstantFP::get(Ty->getContext(), APF);
  case DenormalMode::PreserveSign:
    return ConstantFP::get(
        Ty->getContext(),
        APFloat::getZero(APF.getSemantics(), APF.isNegative()));
  case DenormalMode::PositiveZero:
    return ConstantFP::get(Ty->getContext(),
                           APFloat::getZero(APF.getSemantics(), false));
  default:
    break;
  }

  llvm_unreachable("unknown denormal mode");
}

/// Return the denormal mode that can be assumed when executing a floating
/// point operation at \p CtxI.
static DenormalMode getInstrDenormalMode(const Instruction *CtxI, Type *Ty) {
  if (!CtxI || !CtxI->getParent() || !CtxI->getFunction())
    return DenormalMode::getDynamic();
  return CtxI->getFunction()->getDenormalMode(Ty->getFltSemantics());
}

static ConstantFP *flushDenormalConstantFP(ConstantFP *CFP,
                                           const Instruction *Inst,
                                           bool IsOutput) {
  const APFloat &APF = CFP->getValueAPF();
  if (!APF.isDenormal())
    return CFP;

  DenormalMode Mode = getInstrDenormalMode(Inst, CFP->getType());
  return flushDenormalConstant(CFP->getType(), APF,
                               IsOutput ? Mode.Output : Mode.Input);
}

Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *Inst,
                                bool IsOutput) {
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(Operand))
    return flushDenormalConstantFP(CFP, Inst, IsOutput);

  if (isa<ConstantAggregateZero, UndefValue>(Operand))
    return Operand;

  Type *Ty = Operand->getType();
  VectorType *VecTy = dyn_cast<VectorType>(Ty);
  if (VecTy) {
    if (auto *Splat = dyn_cast_or_null<ConstantFP>(Operand->getSplatValue())) {
      ConstantFP *Folded = flushDenormalConstantFP(Splat, Inst, IsOutput);
      if (!Folded)
        return nullptr;
      return ConstantVector::getSplat(VecTy->getElementCount(), Folded);
    }

    Ty = VecTy->getElementType();
  }

  if (isa<ConstantExpr>(Operand))
    return Operand;

  if (const auto *CV = dyn_cast<ConstantVector>(Operand)) {
    SmallVector<Constant *, 16> NewElts;
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      if (isa<UndefValue>(Element)) {
        NewElts.push_back(Element);
        continue;
      }

      ConstantFP *CFP = dyn_cast<ConstantFP>(Element);
      if (!CFP)
        return nullptr;

      ConstantFP *Folded = flushDenormalConstantFP(CFP, Inst, IsOutput);
      if (!Folded)
        return nullptr;
      NewElts.push_back(Folded);
    }

    return ConstantVector::get(NewElts);
  }

  if (const auto *CDV = dyn_cast<ConstantDataVector>(Operand)) {
    SmallVector<Constant *, 16> NewElts;
    for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
      const APFloat &Elt = CDV->getElementAsAPFloat(I);
      if (!Elt.isDenormal()) {
        NewElts.push_back(ConstantFP::get(Ty, Elt));
      } else {
        DenormalMode Mode = getInstrDenormalMode(Inst, Ty);
        ConstantFP *Folded =
            flushDenormalConstant(Ty, Elt, IsOutput ? Mode.Output : Mode.Input);
        if (!Folded)
          return nullptr;
        NewElts.push_back(Folded);
      }
    }

    return ConstantVector::get(NewElts);
  }

  return nullptr;
}
1442
1443Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
1444 Constant *RHS, const DataLayout &DL,
1445 const Instruction *I,
1446 bool AllowNonDeterministic) {
1447 if (Instruction::isBinaryOp(Opcode)) {
1448 // Flush denormal inputs if needed.
1449 Constant *Op0 = FlushFPConstant(Operand: LHS, Inst: I, /* IsOutput */ false);
1450 if (!Op0)
1451 return nullptr;
1452 Constant *Op1 = FlushFPConstant(Operand: RHS, Inst: I, /* IsOutput */ false);
1453 if (!Op1)
1454 return nullptr;
1455
1456 // If nsz or an algebraic FMF flag is set, the result of the FP operation
1457 // may change due to future optimization. Don't constant fold them if
1458 // non-deterministic results are not allowed.
1459 if (!AllowNonDeterministic)
1460 if (auto *FP = dyn_cast_or_null<FPMathOperator>(Val: I))
1461 if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
1462 FP->hasAllowContract() || FP->hasAllowReciprocal())
1463 return nullptr;
1464
1465 // Calculate constant result.
1466 Constant *C = ConstantFoldBinaryOpOperands(Opcode, LHS: Op0, RHS: Op1, DL);
1467 if (!C)
1468 return nullptr;
1469
1470 // Flush denormal output if needed.
1471 C = FlushFPConstant(Operand: C, Inst: I, /* IsOutput */ true);
1472 if (!C)
1473 return nullptr;
1474
1475 // The precise NaN value is non-deterministic.
1476 if (!AllowNonDeterministic && C->isNaN())
1477 return nullptr;
1478
1479 return C;
1480 }
  // If the instruction lacks a parent/function and the denormal mode cannot
  // be determined, use the default (IEEE).
1483 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
1484}
1485
1486Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1487 Type *DestTy, const DataLayout &DL) {
1488 assert(Instruction::isCast(Opcode));
1489
1490 if (auto *CE = dyn_cast<ConstantExpr>(Val: C))
1491 if (CE->isCast())
1492 if (unsigned NewOp = CastInst::isEliminableCastPair(
1493 firstOpcode: Instruction::CastOps(CE->getOpcode()),
1494 secondOpcode: Instruction::CastOps(Opcode), SrcTy: CE->getOperand(i_nocapture: 0)->getType(),
1495 MidTy: C->getType(), DstTy: DestTy, DL: &DL))
1496 return ConstantFoldCastOperand(Opcode: NewOp, C: CE->getOperand(i_nocapture: 0), DestTy, DL);
1497
1498 switch (Opcode) {
1499 default:
1500 llvm_unreachable("Missing case");
1501 case Instruction::PtrToAddr:
1502 case Instruction::PtrToInt:
1503 if (auto *CE = dyn_cast<ConstantExpr>(Val: C)) {
1504 Constant *FoldedValue = nullptr;
1505 // If the input is an inttoptr, eliminate the pair. This requires knowing
1506 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
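      // E.g. with 32-bit pointers:
      //   (ptrtoint (inttoptr i64 C to ptr) to i64)
      //     -> (zext (trunc i64 C to i32) to i64)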
1507 if (CE->getOpcode() == Instruction::IntToPtr) {
1508 // zext/trunc the inttoptr to pointer/address size.
        Type *MidTy = Opcode == Instruction::PtrToInt
                          ? DL.getIntPtrType(CE->getType())
                          : DL.getAddressType(PtrTy: CE->getType());
1512 FoldedValue = ConstantFoldIntegerCast(C: CE->getOperand(i_nocapture: 0), DestTy: MidTy,
1513 /*IsSigned=*/false, DL);
1514 } else if (auto *GEP = dyn_cast<GEPOperator>(Val: CE)) {
1515 // If we have GEP, we can perform the following folds:
1516 // (ptrtoint/ptrtoaddr (gep null, x)) -> x
        // (ptrtoint/ptrtoaddr (gep (gep null, x), y)) -> x + y, etc.
1518 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ty: GEP->getType());
1519 APInt BaseOffset(BitWidth, 0);
1520 auto *Base = cast<Constant>(Val: GEP->stripAndAccumulateConstantOffsets(
1521 DL, Offset&: BaseOffset, /*AllowNonInbounds=*/true));
1522 if (Base->isNullValue()) {
1523 FoldedValue = ConstantInt::get(Context&: CE->getContext(), V: BaseOffset);
1524 } else {
1525 // ptrtoint/ptrtoaddr (gep i8, Ptr, (sub 0, V))
1526 // -> sub (ptrtoint/ptrtoaddr Ptr), V
1527 if (GEP->getNumIndices() == 1 &&
1528 GEP->getSourceElementType()->isIntegerTy(Bitwidth: 8)) {
1529 auto *Ptr = cast<Constant>(Val: GEP->getPointerOperand());
1530 auto *Sub = dyn_cast<ConstantExpr>(Val: GEP->getOperand(i_nocapture: 1));
1531 Type *IntIdxTy = DL.getIndexType(PtrTy: Ptr->getType());
1532 if (Sub && Sub->getType() == IntIdxTy &&
1533 Sub->getOpcode() == Instruction::Sub &&
1534 Sub->getOperand(i_nocapture: 0)->isNullValue())
1535 FoldedValue = ConstantExpr::getSub(
1536 C1: ConstantExpr::getCast(ops: Opcode, C: Ptr, Ty: IntIdxTy),
1537 C2: Sub->getOperand(i_nocapture: 1));
1538 }
1539 }
1540 }
1541 if (FoldedValue) {
1542 // Do a zext or trunc to get to the ptrtoint/ptrtoaddr dest size.
1543 return ConstantFoldIntegerCast(C: FoldedValue, DestTy, /*IsSigned=*/false,
1544 DL);
1545 }
1546 }
1547 break;
1548 case Instruction::IntToPtr:
1549 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1550 // the int size is >= the ptr size and the address spaces are the same.
1551 // This requires knowing the width of a pointer, so it can't be done in
1552 // ConstantExpr::getCast.
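    // E.g. (inttoptr (ptrtoint ptr @g to i64) to ptr) -> ptr @g when the
    // address spaces match and pointers are at most 64 bits wide.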
1553 if (auto *CE = dyn_cast<ConstantExpr>(Val: C)) {
1554 if (CE->getOpcode() == Instruction::PtrToInt) {
1555 Constant *SrcPtr = CE->getOperand(i_nocapture: 0);
1556 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1557 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1558
1559 if (MidIntSize >= SrcPtrSize) {
1560 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1561 if (SrcAS == DestTy->getPointerAddressSpace())
1562 return FoldBitCast(C: CE->getOperand(i_nocapture: 0), DestTy, DL);
1563 }
1564 }
1565 }
1566 break;
1567 case Instruction::Trunc:
1568 case Instruction::ZExt:
1569 case Instruction::SExt:
1570 case Instruction::FPTrunc:
1571 case Instruction::FPExt:
1572 case Instruction::UIToFP:
1573 case Instruction::SIToFP:
1574 case Instruction::FPToUI:
1575 case Instruction::FPToSI:
1576 case Instruction::AddrSpaceCast:
1577 break;
1578 case Instruction::BitCast:
1579 return FoldBitCast(C, DestTy, DL);
1580 }
1581
1582 if (ConstantExpr::isDesirableCastOp(Opcode))
1583 return ConstantExpr::getCast(ops: Opcode, C, Ty: DestTy);
1584 return ConstantFoldCastInstruction(opcode: Opcode, V: C, DestTy);
1585}
1586
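/// Fold an integer cast of constant \p C to \p DestTy, picking trunc, sext,
/// or zext based on the relative scalar bit widths.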
1587Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
1588 bool IsSigned, const DataLayout &DL) {
1589 Type *SrcTy = C->getType();
1590 if (SrcTy == DestTy)
1591 return C;
1592 if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1593 return ConstantFoldCastOperand(Opcode: Instruction::Trunc, C, DestTy, DL);
1594 if (IsSigned)
1595 return ConstantFoldCastOperand(Opcode: Instruction::SExt, C, DestTy, DL);
1596 return ConstantFoldCastOperand(Opcode: Instruction::ZExt, C, DestTy, DL);
1597}
1598
1599//===----------------------------------------------------------------------===//
1600// Constant Folding for Calls
1601//
1602
1603bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1604 if (Call->isNoBuiltin())
1605 return false;
1606 if (Call->getFunctionType() != F->getFunctionType())
1607 return false;
1608
  // When requested, do not fold FP calls (both libcalls and intrinsics).
  // This can be useful for GPU targets or in cross-compilation scenarios
  // where the exact target FP behaviour is required, and the host compiler's
  // behaviour may differ slightly from the device's run-time behaviour.
1613 if (DisableFPCallFolding && (F->getReturnType()->isFloatingPointTy() ||
1614 any_of(Range: F->args(), P: [](const Argument &Arg) {
1615 return Arg.getType()->isFloatingPointTy();
1616 })))
1617 return false;
1618
1619 switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
1622 case Intrinsic::bswap:
1623 case Intrinsic::ctpop:
1624 case Intrinsic::ctlz:
1625 case Intrinsic::cttz:
1626 case Intrinsic::fshl:
1627 case Intrinsic::fshr:
1628 case Intrinsic::launder_invariant_group:
1629 case Intrinsic::strip_invariant_group:
1630 case Intrinsic::masked_load:
1631 case Intrinsic::get_active_lane_mask:
1632 case Intrinsic::abs:
1633 case Intrinsic::smax:
1634 case Intrinsic::smin:
1635 case Intrinsic::umax:
1636 case Intrinsic::umin:
1637 case Intrinsic::scmp:
1638 case Intrinsic::ucmp:
1639 case Intrinsic::sadd_with_overflow:
1640 case Intrinsic::uadd_with_overflow:
1641 case Intrinsic::ssub_with_overflow:
1642 case Intrinsic::usub_with_overflow:
1643 case Intrinsic::smul_with_overflow:
1644 case Intrinsic::umul_with_overflow:
1645 case Intrinsic::sadd_sat:
1646 case Intrinsic::uadd_sat:
1647 case Intrinsic::ssub_sat:
1648 case Intrinsic::usub_sat:
1649 case Intrinsic::smul_fix:
1650 case Intrinsic::smul_fix_sat:
1651 case Intrinsic::bitreverse:
1652 case Intrinsic::is_constant:
1653 case Intrinsic::vector_reduce_add:
1654 case Intrinsic::vector_reduce_mul:
1655 case Intrinsic::vector_reduce_and:
1656 case Intrinsic::vector_reduce_or:
1657 case Intrinsic::vector_reduce_xor:
1658 case Intrinsic::vector_reduce_smin:
1659 case Intrinsic::vector_reduce_smax:
1660 case Intrinsic::vector_reduce_umin:
1661 case Intrinsic::vector_reduce_umax:
1662 case Intrinsic::vector_extract:
1663 case Intrinsic::vector_insert:
1664 case Intrinsic::vector_interleave2:
1665 case Intrinsic::vector_interleave3:
1666 case Intrinsic::vector_interleave4:
1667 case Intrinsic::vector_interleave5:
1668 case Intrinsic::vector_interleave6:
1669 case Intrinsic::vector_interleave7:
1670 case Intrinsic::vector_interleave8:
1671 case Intrinsic::vector_deinterleave2:
1672 case Intrinsic::vector_deinterleave3:
1673 case Intrinsic::vector_deinterleave4:
1674 case Intrinsic::vector_deinterleave5:
1675 case Intrinsic::vector_deinterleave6:
1676 case Intrinsic::vector_deinterleave7:
1677 case Intrinsic::vector_deinterleave8:
1678 // Target intrinsics
1679 case Intrinsic::amdgcn_perm:
1680 case Intrinsic::amdgcn_wave_reduce_umin:
1681 case Intrinsic::amdgcn_wave_reduce_umax:
1682 case Intrinsic::amdgcn_wave_reduce_max:
1683 case Intrinsic::amdgcn_wave_reduce_min:
1684 case Intrinsic::amdgcn_wave_reduce_add:
1685 case Intrinsic::amdgcn_wave_reduce_sub:
1686 case Intrinsic::amdgcn_wave_reduce_and:
1687 case Intrinsic::amdgcn_wave_reduce_or:
1688 case Intrinsic::amdgcn_wave_reduce_xor:
1689 case Intrinsic::amdgcn_s_wqm:
1690 case Intrinsic::amdgcn_s_quadmask:
1691 case Intrinsic::amdgcn_s_bitreplicate:
1692 case Intrinsic::arm_mve_vctp8:
1693 case Intrinsic::arm_mve_vctp16:
1694 case Intrinsic::arm_mve_vctp32:
1695 case Intrinsic::arm_mve_vctp64:
1696 case Intrinsic::aarch64_sve_convert_from_svbool:
1697 case Intrinsic::wasm_alltrue:
1698 case Intrinsic::wasm_anytrue:
1699 case Intrinsic::wasm_dot:
1700 // WebAssembly float semantics are always known
1701 case Intrinsic::wasm_trunc_signed:
1702 case Intrinsic::wasm_trunc_unsigned:
1703 return true;
1704
  // Floating-point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known to the
  // compiler.
1707 case Intrinsic::minnum:
1708 case Intrinsic::maxnum:
1709 case Intrinsic::minimum:
1710 case Intrinsic::maximum:
1711 case Intrinsic::minimumnum:
1712 case Intrinsic::maximumnum:
1713 case Intrinsic::log:
1714 case Intrinsic::log2:
1715 case Intrinsic::log10:
1716 case Intrinsic::exp:
1717 case Intrinsic::exp2:
1718 case Intrinsic::exp10:
1719 case Intrinsic::sqrt:
1720 case Intrinsic::sin:
1721 case Intrinsic::cos:
1722 case Intrinsic::sincos:
1723 case Intrinsic::sinh:
1724 case Intrinsic::cosh:
1725 case Intrinsic::atan:
1726 case Intrinsic::pow:
1727 case Intrinsic::powi:
1728 case Intrinsic::ldexp:
1729 case Intrinsic::fma:
1730 case Intrinsic::fmuladd:
1731 case Intrinsic::frexp:
1732 case Intrinsic::fptoui_sat:
1733 case Intrinsic::fptosi_sat:
1734 case Intrinsic::amdgcn_cos:
1735 case Intrinsic::amdgcn_cubeid:
1736 case Intrinsic::amdgcn_cubema:
1737 case Intrinsic::amdgcn_cubesc:
1738 case Intrinsic::amdgcn_cubetc:
1739 case Intrinsic::amdgcn_fmul_legacy:
1740 case Intrinsic::amdgcn_fma_legacy:
1741 case Intrinsic::amdgcn_fract:
1742 case Intrinsic::amdgcn_sin:
1743 // The intrinsics below depend on rounding mode in MXCSR.
1744 case Intrinsic::x86_sse_cvtss2si:
1745 case Intrinsic::x86_sse_cvtss2si64:
1746 case Intrinsic::x86_sse_cvttss2si:
1747 case Intrinsic::x86_sse_cvttss2si64:
1748 case Intrinsic::x86_sse2_cvtsd2si:
1749 case Intrinsic::x86_sse2_cvtsd2si64:
1750 case Intrinsic::x86_sse2_cvttsd2si:
1751 case Intrinsic::x86_sse2_cvttsd2si64:
1752 case Intrinsic::x86_avx512_vcvtss2si32:
1753 case Intrinsic::x86_avx512_vcvtss2si64:
1754 case Intrinsic::x86_avx512_cvttss2si:
1755 case Intrinsic::x86_avx512_cvttss2si64:
1756 case Intrinsic::x86_avx512_vcvtsd2si32:
1757 case Intrinsic::x86_avx512_vcvtsd2si64:
1758 case Intrinsic::x86_avx512_cvttsd2si:
1759 case Intrinsic::x86_avx512_cvttsd2si64:
1760 case Intrinsic::x86_avx512_vcvtss2usi32:
1761 case Intrinsic::x86_avx512_vcvtss2usi64:
1762 case Intrinsic::x86_avx512_cvttss2usi:
1763 case Intrinsic::x86_avx512_cvttss2usi64:
1764 case Intrinsic::x86_avx512_vcvtsd2usi32:
1765 case Intrinsic::x86_avx512_vcvtsd2usi64:
1766 case Intrinsic::x86_avx512_cvttsd2usi:
1767 case Intrinsic::x86_avx512_cvttsd2usi64:
1768
1769 // NVVM FMax intrinsics
1770 case Intrinsic::nvvm_fmax_d:
1771 case Intrinsic::nvvm_fmax_f:
1772 case Intrinsic::nvvm_fmax_ftz_f:
1773 case Intrinsic::nvvm_fmax_ftz_nan_f:
1774 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
1775 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
1776 case Intrinsic::nvvm_fmax_nan_f:
1777 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
1778 case Intrinsic::nvvm_fmax_xorsign_abs_f:
1779
1780 // NVVM FMin intrinsics
1781 case Intrinsic::nvvm_fmin_d:
1782 case Intrinsic::nvvm_fmin_f:
1783 case Intrinsic::nvvm_fmin_ftz_f:
1784 case Intrinsic::nvvm_fmin_ftz_nan_f:
1785 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
1786 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
1787 case Intrinsic::nvvm_fmin_nan_f:
1788 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
1789 case Intrinsic::nvvm_fmin_xorsign_abs_f:
1790
1791 // NVVM float/double to int32/uint32 conversion intrinsics
1792 case Intrinsic::nvvm_f2i_rm:
1793 case Intrinsic::nvvm_f2i_rn:
1794 case Intrinsic::nvvm_f2i_rp:
1795 case Intrinsic::nvvm_f2i_rz:
1796 case Intrinsic::nvvm_f2i_rm_ftz:
1797 case Intrinsic::nvvm_f2i_rn_ftz:
1798 case Intrinsic::nvvm_f2i_rp_ftz:
1799 case Intrinsic::nvvm_f2i_rz_ftz:
1800 case Intrinsic::nvvm_f2ui_rm:
1801 case Intrinsic::nvvm_f2ui_rn:
1802 case Intrinsic::nvvm_f2ui_rp:
1803 case Intrinsic::nvvm_f2ui_rz:
1804 case Intrinsic::nvvm_f2ui_rm_ftz:
1805 case Intrinsic::nvvm_f2ui_rn_ftz:
1806 case Intrinsic::nvvm_f2ui_rp_ftz:
1807 case Intrinsic::nvvm_f2ui_rz_ftz:
1808 case Intrinsic::nvvm_d2i_rm:
1809 case Intrinsic::nvvm_d2i_rn:
1810 case Intrinsic::nvvm_d2i_rp:
1811 case Intrinsic::nvvm_d2i_rz:
1812 case Intrinsic::nvvm_d2ui_rm:
1813 case Intrinsic::nvvm_d2ui_rn:
1814 case Intrinsic::nvvm_d2ui_rp:
1815 case Intrinsic::nvvm_d2ui_rz:
1816
1817 // NVVM float/double to int64/uint64 conversion intrinsics
1818 case Intrinsic::nvvm_f2ll_rm:
1819 case Intrinsic::nvvm_f2ll_rn:
1820 case Intrinsic::nvvm_f2ll_rp:
1821 case Intrinsic::nvvm_f2ll_rz:
1822 case Intrinsic::nvvm_f2ll_rm_ftz:
1823 case Intrinsic::nvvm_f2ll_rn_ftz:
1824 case Intrinsic::nvvm_f2ll_rp_ftz:
1825 case Intrinsic::nvvm_f2ll_rz_ftz:
1826 case Intrinsic::nvvm_f2ull_rm:
1827 case Intrinsic::nvvm_f2ull_rn:
1828 case Intrinsic::nvvm_f2ull_rp:
1829 case Intrinsic::nvvm_f2ull_rz:
1830 case Intrinsic::nvvm_f2ull_rm_ftz:
1831 case Intrinsic::nvvm_f2ull_rn_ftz:
1832 case Intrinsic::nvvm_f2ull_rp_ftz:
1833 case Intrinsic::nvvm_f2ull_rz_ftz:
1834 case Intrinsic::nvvm_d2ll_rm:
1835 case Intrinsic::nvvm_d2ll_rn:
1836 case Intrinsic::nvvm_d2ll_rp:
1837 case Intrinsic::nvvm_d2ll_rz:
1838 case Intrinsic::nvvm_d2ull_rm:
1839 case Intrinsic::nvvm_d2ull_rn:
1840 case Intrinsic::nvvm_d2ull_rp:
1841 case Intrinsic::nvvm_d2ull_rz:
1842
1843 // NVVM math intrinsics:
1844 case Intrinsic::nvvm_ceil_d:
1845 case Intrinsic::nvvm_ceil_f:
1846 case Intrinsic::nvvm_ceil_ftz_f:
1847
1848 case Intrinsic::nvvm_fabs:
1849 case Intrinsic::nvvm_fabs_ftz:
1850
1851 case Intrinsic::nvvm_floor_d:
1852 case Intrinsic::nvvm_floor_f:
1853 case Intrinsic::nvvm_floor_ftz_f:
1854
1855 case Intrinsic::nvvm_rcp_rm_d:
1856 case Intrinsic::nvvm_rcp_rm_f:
1857 case Intrinsic::nvvm_rcp_rm_ftz_f:
1858 case Intrinsic::nvvm_rcp_rn_d:
1859 case Intrinsic::nvvm_rcp_rn_f:
1860 case Intrinsic::nvvm_rcp_rn_ftz_f:
1861 case Intrinsic::nvvm_rcp_rp_d:
1862 case Intrinsic::nvvm_rcp_rp_f:
1863 case Intrinsic::nvvm_rcp_rp_ftz_f:
1864 case Intrinsic::nvvm_rcp_rz_d:
1865 case Intrinsic::nvvm_rcp_rz_f:
1866 case Intrinsic::nvvm_rcp_rz_ftz_f:
1867
1868 case Intrinsic::nvvm_round_d:
1869 case Intrinsic::nvvm_round_f:
1870 case Intrinsic::nvvm_round_ftz_f:
1871
1872 case Intrinsic::nvvm_saturate_d:
1873 case Intrinsic::nvvm_saturate_f:
1874 case Intrinsic::nvvm_saturate_ftz_f:
1875
1876 case Intrinsic::nvvm_sqrt_f:
1877 case Intrinsic::nvvm_sqrt_rn_d:
1878 case Intrinsic::nvvm_sqrt_rn_f:
1879 case Intrinsic::nvvm_sqrt_rn_ftz_f:
1880 return !Call->isStrictFP();
1881
1882 // NVVM add intrinsics with explicit rounding modes
1883 case Intrinsic::nvvm_add_rm_d:
1884 case Intrinsic::nvvm_add_rn_d:
1885 case Intrinsic::nvvm_add_rp_d:
1886 case Intrinsic::nvvm_add_rz_d:
1887 case Intrinsic::nvvm_add_rm_f:
1888 case Intrinsic::nvvm_add_rn_f:
1889 case Intrinsic::nvvm_add_rp_f:
1890 case Intrinsic::nvvm_add_rz_f:
1891 case Intrinsic::nvvm_add_rm_ftz_f:
1892 case Intrinsic::nvvm_add_rn_ftz_f:
1893 case Intrinsic::nvvm_add_rp_ftz_f:
1894 case Intrinsic::nvvm_add_rz_ftz_f:
1895
1896 // NVVM div intrinsics with explicit rounding modes
1897 case Intrinsic::nvvm_div_rm_d:
1898 case Intrinsic::nvvm_div_rn_d:
1899 case Intrinsic::nvvm_div_rp_d:
1900 case Intrinsic::nvvm_div_rz_d:
1901 case Intrinsic::nvvm_div_rm_f:
1902 case Intrinsic::nvvm_div_rn_f:
1903 case Intrinsic::nvvm_div_rp_f:
1904 case Intrinsic::nvvm_div_rz_f:
1905 case Intrinsic::nvvm_div_rm_ftz_f:
1906 case Intrinsic::nvvm_div_rn_ftz_f:
1907 case Intrinsic::nvvm_div_rp_ftz_f:
1908 case Intrinsic::nvvm_div_rz_ftz_f:
1909
1910 // NVVM mul intrinsics with explicit rounding modes
1911 case Intrinsic::nvvm_mul_rm_d:
1912 case Intrinsic::nvvm_mul_rn_d:
1913 case Intrinsic::nvvm_mul_rp_d:
1914 case Intrinsic::nvvm_mul_rz_d:
1915 case Intrinsic::nvvm_mul_rm_f:
1916 case Intrinsic::nvvm_mul_rn_f:
1917 case Intrinsic::nvvm_mul_rp_f:
1918 case Intrinsic::nvvm_mul_rz_f:
1919 case Intrinsic::nvvm_mul_rm_ftz_f:
1920 case Intrinsic::nvvm_mul_rn_ftz_f:
1921 case Intrinsic::nvvm_mul_rp_ftz_f:
1922 case Intrinsic::nvvm_mul_rz_ftz_f:
1923
1924 // NVVM fma intrinsics with explicit rounding modes
1925 case Intrinsic::nvvm_fma_rm_d:
1926 case Intrinsic::nvvm_fma_rn_d:
1927 case Intrinsic::nvvm_fma_rp_d:
1928 case Intrinsic::nvvm_fma_rz_d:
1929 case Intrinsic::nvvm_fma_rm_f:
1930 case Intrinsic::nvvm_fma_rn_f:
1931 case Intrinsic::nvvm_fma_rp_f:
1932 case Intrinsic::nvvm_fma_rz_f:
1933 case Intrinsic::nvvm_fma_rm_ftz_f:
1934 case Intrinsic::nvvm_fma_rn_ftz_f:
1935 case Intrinsic::nvvm_fma_rp_ftz_f:
1936 case Intrinsic::nvvm_fma_rz_ftz_f:
1937
  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for SNaNs.
1940 case Intrinsic::fabs:
1941 case Intrinsic::copysign:
1942 case Intrinsic::is_fpclass:
  // Non-constrained variants of rounding operations imply the default FP
  // environment, so they can be folded in any case.
1945 case Intrinsic::ceil:
1946 case Intrinsic::floor:
1947 case Intrinsic::round:
1948 case Intrinsic::roundeven:
1949 case Intrinsic::trunc:
1950 case Intrinsic::nearbyint:
1951 case Intrinsic::rint:
1952 case Intrinsic::canonicalize:
1953
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
1956 case Intrinsic::experimental_constrained_fma:
1957 case Intrinsic::experimental_constrained_fmuladd:
1958 case Intrinsic::experimental_constrained_fadd:
1959 case Intrinsic::experimental_constrained_fsub:
1960 case Intrinsic::experimental_constrained_fmul:
1961 case Intrinsic::experimental_constrained_fdiv:
1962 case Intrinsic::experimental_constrained_frem:
1963 case Intrinsic::experimental_constrained_ceil:
1964 case Intrinsic::experimental_constrained_floor:
1965 case Intrinsic::experimental_constrained_round:
1966 case Intrinsic::experimental_constrained_roundeven:
1967 case Intrinsic::experimental_constrained_trunc:
1968 case Intrinsic::experimental_constrained_nearbyint:
1969 case Intrinsic::experimental_constrained_rint:
1970 case Intrinsic::experimental_constrained_fcmp:
1971 case Intrinsic::experimental_constrained_fcmps:
1972 return true;
1973 default:
1974 return false;
  case Intrinsic::not_intrinsic:
    break;
1976 }
1977
1978 if (!F->hasName() || Call->isStrictFP())
1979 return false;
1980
  // In these cases, checking the length is required: we don't want to return
  // true for a name like "cos\0blah", which strcmp would compare equal to
  // "cos" but which has length 8.
1984 StringRef Name = F->getName();
1985 switch (Name[0]) {
1986 default:
1987 return false;
1988 // clang-format off
1989 case 'a':
1990 return Name == "acos" || Name == "acosf" ||
1991 Name == "asin" || Name == "asinf" ||
1992 Name == "atan" || Name == "atanf" ||
1993 Name == "atan2" || Name == "atan2f";
1994 case 'c':
1995 return Name == "ceil" || Name == "ceilf" ||
1996 Name == "cos" || Name == "cosf" ||
1997 Name == "cosh" || Name == "coshf";
1998 case 'e':
1999 return Name == "exp" || Name == "expf" || Name == "exp2" ||
2000 Name == "exp2f" || Name == "erf" || Name == "erff";
2001 case 'f':
2002 return Name == "fabs" || Name == "fabsf" ||
2003 Name == "floor" || Name == "floorf" ||
2004 Name == "fmod" || Name == "fmodf";
2005 case 'i':
2006 return Name == "ilogb" || Name == "ilogbf";
2007 case 'l':
2008 return Name == "log" || Name == "logf" || Name == "logl" ||
2009 Name == "log2" || Name == "log2f" || Name == "log10" ||
2010 Name == "log10f" || Name == "logb" || Name == "logbf" ||
2011 Name == "log1p" || Name == "log1pf";
2012 case 'n':
2013 return Name == "nearbyint" || Name == "nearbyintf";
2014 case 'p':
2015 return Name == "pow" || Name == "powf";
2016 case 'r':
2017 return Name == "remainder" || Name == "remainderf" ||
2018 Name == "rint" || Name == "rintf" ||
2019 Name == "round" || Name == "roundf" ||
2020 Name == "roundeven" || Name == "roundevenf";
2021 case 's':
2022 return Name == "sin" || Name == "sinf" ||
2023 Name == "sinh" || Name == "sinhf" ||
2024 Name == "sqrt" || Name == "sqrtf";
2025 case 't':
2026 return Name == "tan" || Name == "tanf" ||
2027 Name == "tanh" || Name == "tanhf" ||
2028 Name == "trunc" || Name == "truncf";
2029 case '_':
2030 // Check for various function names that get used for the math functions
2031 // when the header files are preprocessed with the macro
2032 // __FINITE_MATH_ONLY__ enabled.
2033 // The '12' here is the length of the shortest name that can match.
2034 // We need to check the size before looking at Name[1] and Name[2]
2035 // so we may as well check a limit that will eliminate mismatches.
2036 if (Name.size() < 12 || Name[1] != '_')
2037 return false;
2038 switch (Name[2]) {
2039 default:
2040 return false;
2041 case 'a':
2042 return Name == "__acos_finite" || Name == "__acosf_finite" ||
2043 Name == "__asin_finite" || Name == "__asinf_finite" ||
2044 Name == "__atan2_finite" || Name == "__atan2f_finite";
2045 case 'c':
2046 return Name == "__cosh_finite" || Name == "__coshf_finite";
2047 case 'e':
2048 return Name == "__exp_finite" || Name == "__expf_finite" ||
2049 Name == "__exp2_finite" || Name == "__exp2f_finite";
2050 case 'l':
2051 return Name == "__log_finite" || Name == "__logf_finite" ||
2052 Name == "__log10_finite" || Name == "__log10f_finite";
2053 case 'p':
2054 return Name == "__pow_finite" || Name == "__powf_finite";
2055 case 's':
2056 return Name == "__sinh_finite" || Name == "__sinhf_finite";
2057 }
2058 // clang-format on
2059 }
2060}
2061
2062namespace {
2063
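/// Convert the host-computed double \p V into a ConstantFP of type \p Ty,
/// rounding to the target's half/float semantics when \p Ty is narrower
/// than double.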
2064Constant *GetConstantFoldFPValue(double V, Type *Ty) {
2065 if (Ty->isHalfTy() || Ty->isFloatTy()) {
2066 APFloat APF(V);
2067 bool unused;
2068 APF.convert(ToSemantics: Ty->getFltSemantics(), RM: APFloat::rmNearestTiesToEven, losesInfo: &unused);
2069 return ConstantFP::get(Context&: Ty->getContext(), V: APF);
2070 }
2071 if (Ty->isDoubleTy())
2072 return ConstantFP::get(Context&: Ty->getContext(), V: APFloat(V));
2073 llvm_unreachable("Can only constant fold half/float/double");
2074}
2075
2076#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2077Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
2078 if (Ty->isFP128Ty())
2079 return ConstantFP::get(Ty, V);
2080 llvm_unreachable("Can only constant fold fp128");
2081}
2082#endif
2083
2084/// Clear the floating-point exception state.
2085inline void llvm_fenv_clearexcept() {
2086#if HAVE_DECL_FE_ALL_EXCEPT
2087 feclearexcept(FE_ALL_EXCEPT);
2088#endif
2089 errno = 0;
2090}
2091
2092/// Test if a floating-point exception was raised.
2093inline bool llvm_fenv_testexcept() {
2094 int errno_val = errno;
2095 if (errno_val == ERANGE || errno_val == EDOM)
2096 return true;
2097#if HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
2098 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
2099 return true;
2100#endif
2101 return false;
2102}
2103
2104static APFloat FTZPreserveSign(const APFloat &V) {
2105 if (V.isDenormal())
2106 return APFloat::getZero(Sem: V.getSemantics(), Negative: V.isNegative());
2107 return V;
2108}
2109
2110static APFloat FlushToPositiveZero(const APFloat &V) {
2111 if (V.isDenormal())
2112 return APFloat::getZero(Sem: V.getSemantics(), Negative: false);
2113 return V;
2114}
2115
2116static APFloat FlushWithDenormKind(const APFloat &V,
2117 DenormalMode::DenormalModeKind DenormKind) {
2118 assert(DenormKind != DenormalMode::DenormalModeKind::Invalid &&
2119 DenormKind != DenormalMode::DenormalModeKind::Dynamic);
2120 switch (DenormKind) {
2121 case DenormalMode::DenormalModeKind::IEEE:
2122 return V;
2123 case DenormalMode::DenormalModeKind::PreserveSign:
2124 return FTZPreserveSign(V);
2125 case DenormalMode::DenormalModeKind::PositiveZero:
2126 return FlushToPositiveZero(V);
2127 default:
2128 llvm_unreachable("Invalid denormal mode!");
2129 }
2130}
2131
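/// Evaluate \p NativeFP on the host and fold the result to a constant of
/// type \p Ty. The input is flushed according to \p DenormMode.Input before
/// the call and the result according to \p DenormMode.Output after it.
/// Returns nullptr if the denormal mode is unknown (dynamic or invalid) or
/// if the host evaluation raised an FP exception.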
2132Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V, Type *Ty,
2133 DenormalMode DenormMode = DenormalMode::getIEEE()) {
2134 if (!DenormMode.isValid() ||
2135 DenormMode.Input == DenormalMode::DenormalModeKind::Dynamic ||
2136 DenormMode.Output == DenormalMode::DenormalModeKind::Dynamic)
2137 return nullptr;
2138
2139 llvm_fenv_clearexcept();
2140 auto Input = FlushWithDenormKind(V, DenormKind: DenormMode.Input);
2141 double Result = NativeFP(Input.convertToDouble());
2142 if (llvm_fenv_testexcept()) {
2143 llvm_fenv_clearexcept();
2144 return nullptr;
2145 }
2146
2147 Constant *Output = GetConstantFoldFPValue(V: Result, Ty);
2148 if (DenormMode.Output == DenormalMode::DenormalModeKind::IEEE)
2149 return Output;
2150 const auto *CFP = static_cast<ConstantFP *>(Output);
2151 const auto Res = FlushWithDenormKind(V: CFP->getValueAPF(), DenormKind: DenormMode.Output);
2152 return ConstantFP::get(Context&: Ty->getContext(), V: Res);
2153}
2154
2155#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2156Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
2157 Type *Ty) {
2158 llvm_fenv_clearexcept();
2159 float128 Result = NativeFP(V.convertToQuad());
2160 if (llvm_fenv_testexcept()) {
2161 llvm_fenv_clearexcept();
2162 return nullptr;
2163 }
2164
2165 return GetConstantFoldFPValue128(V: Result, Ty);
2166}
2167#endif
2168
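/// Like ConstantFoldFP, but for a two-argument host function; no denormal
/// flushing is applied.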
2169Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
2170 const APFloat &V, const APFloat &W, Type *Ty) {
2171 llvm_fenv_clearexcept();
2172 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
2173 if (llvm_fenv_testexcept()) {
2174 llvm_fenv_clearexcept();
2175 return nullptr;
2176 }
2177
2178 return GetConstantFoldFPValue(V: Result, Ty);
2179}
2180
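/// Fold an integer vector_reduce_* intrinsic of the constant operand \p Op
/// to a scalar constant, or return nullptr if it cannot be evaluated.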
2181Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
2182 auto *OpVT = cast<VectorType>(Val: Op->getType());
2183
2184 // This is the same as the underlying binops - poison propagates.
2185 if (Op->containsPoisonElement())
2186 return PoisonValue::get(T: OpVT->getElementType());
2187
2188 // Shortcut non-accumulating reductions.
2189 if (Constant *SplatVal = Op->getSplatValue()) {
2190 switch (IID) {
2191 case Intrinsic::vector_reduce_and:
2192 case Intrinsic::vector_reduce_or:
2193 case Intrinsic::vector_reduce_smin:
2194 case Intrinsic::vector_reduce_smax:
2195 case Intrinsic::vector_reduce_umin:
2196 case Intrinsic::vector_reduce_umax:
2197 return SplatVal;
2198 case Intrinsic::vector_reduce_add:
2199 if (SplatVal->isNullValue())
2200 return SplatVal;
2201 break;
2202 case Intrinsic::vector_reduce_mul:
2203 if (SplatVal->isNullValue() || SplatVal->isOneValue())
2204 return SplatVal;
2205 break;
2206 case Intrinsic::vector_reduce_xor:
2207 if (SplatVal->isNullValue())
2208 return SplatVal;
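      // An even number of xor'd copies of the same value cancels to zero.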
2209 if (OpVT->getElementCount().isKnownMultipleOf(RHS: 2))
2210 return Constant::getNullValue(Ty: OpVT->getElementType());
2211 break;
2212 }
2213 }
2214
2215 FixedVectorType *VT = dyn_cast<FixedVectorType>(Val: OpVT);
2216 if (!VT)
2217 return nullptr;
2218
2219 // TODO: Handle undef.
2220 auto *EltC = dyn_cast_or_null<ConstantInt>(Val: Op->getAggregateElement(Elt: 0U));
2221 if (!EltC)
2222 return nullptr;
2223
2224 APInt Acc = EltC->getValue();
2225 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
2226 if (!(EltC = dyn_cast_or_null<ConstantInt>(Val: Op->getAggregateElement(Elt: I))))
2227 return nullptr;
2228 const APInt &X = EltC->getValue();
2229 switch (IID) {
2230 case Intrinsic::vector_reduce_add:
2231 Acc = Acc + X;
2232 break;
2233 case Intrinsic::vector_reduce_mul:
2234 Acc = Acc * X;
2235 break;
2236 case Intrinsic::vector_reduce_and:
2237 Acc = Acc & X;
2238 break;
2239 case Intrinsic::vector_reduce_or:
2240 Acc = Acc | X;
2241 break;
2242 case Intrinsic::vector_reduce_xor:
2243 Acc = Acc ^ X;
2244 break;
2245 case Intrinsic::vector_reduce_smin:
2246 Acc = APIntOps::smin(A: Acc, B: X);
2247 break;
2248 case Intrinsic::vector_reduce_smax:
2249 Acc = APIntOps::smax(A: Acc, B: X);
2250 break;
2251 case Intrinsic::vector_reduce_umin:
2252 Acc = APIntOps::umin(A: Acc, B: X);
2253 break;
2254 case Intrinsic::vector_reduce_umax:
2255 Acc = APIntOps::umax(A: Acc, B: X);
2256 break;
2257 }
2258 }
2259
2260 return ConstantInt::get(Context&: Op->getContext(), V: Acc);
2261}
2262
2263/// Attempt to fold an SSE floating point to integer conversion of a constant
2264/// floating point. If roundTowardZero is false, the default IEEE rounding is
2265/// used (toward nearest, ties to even). This matches the behavior of the
2266/// non-truncating SSE instructions in the default rounding mode. The desired
2267/// integer type Ty is used to select how many bits are available for the
2268/// result. Returns null if the conversion cannot be performed, otherwise
2269/// returns the Constant value resulting from the conversion.
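/// For example, cvtsd2si(2.5) rounds to 2 (ties to even), while
/// cvttsd2si(-2.7) truncates to -2.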
2270Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
2271 Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to integers of at most 64 bits");
2276
2277 uint64_t UIntVal;
2278 bool isExact = false;
  APFloat::roundingMode mode =
      roundTowardZero ? APFloat::rmTowardZero : APFloat::rmNearestTiesToEven;
2281 APFloat::opStatus status =
2282 Val.convertToInteger(Input: MutableArrayRef(UIntVal), Width: ResultWidth,
2283 IsSigned, RM: mode, IsExact: &isExact);
2284 if (status != APFloat::opOK &&
2285 (!roundTowardZero || status != APFloat::opInexact))
2286 return nullptr;
2287 return ConstantInt::get(Ty, V: UIntVal, IsSigned);
2288}
2289
2290double getValueAsDouble(ConstantFP *Op) {
2291 Type *Ty = Op->getType();
2292
2293 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
2294 return Op->getValueAPF().convertToDouble();
2295
2296 bool unused;
2297 APFloat APF = Op->getValueAPF();
2298 APF.convert(ToSemantics: APFloat::IEEEdouble(), RM: APFloat::rmNearestTiesToEven, losesInfo: &unused);
2299 return APF.convertToDouble();
2300}
2301
2302static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
2303 if (auto *CI = dyn_cast<ConstantInt>(Val: Op)) {
2304 C = &CI->getValue();
2305 return true;
2306 }
2307 if (isa<UndefValue>(Val: Op)) {
2308 C = nullptr;
2309 return true;
2310 }
2311 return false;
2312}
2313
/// Checks if the given intrinsic call, which evaluates to a constant, is
/// allowed to be folded.
2316///
2317/// \param CI Constrained intrinsic call.
2318/// \param St Exception flags raised during constant evaluation.
2319static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
2320 APFloat::opStatus St) {
2321 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2322 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2323
2324 // If the operation does not change exception status flags, it is safe
2325 // to fold.
2326 if (St == APFloat::opStatus::opOK)
2327 return true;
2328
  // If evaluation raised an FP exception, the result can depend on the
  // rounding mode. If the latter is unknown, folding is not possible.
2331 if (ORM == RoundingMode::Dynamic)
2332 return false;
2333
  // If FP exceptions are ignored, fold the call, even if such an exception
  // is raised.
2336 if (EB && *EB != fp::ExceptionBehavior::ebStrict)
2337 return true;
2338
  // Leave the calculation for runtime so that exception flags are set
  // correctly in hardware.
2341 return false;
2342}
2343
2344/// Returns the rounding mode that should be used for constant evaluation.
2345static RoundingMode
2346getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
2347 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2348 if (!ORM || *ORM == RoundingMode::Dynamic)
2349 // Even if the rounding mode is unknown, try evaluating the operation.
    // If it does not raise an inexact exception, rounding was not applied, so
    // the result is exact and does not depend on the rounding mode. Whether
    // other FP exceptions are raised does not depend on the rounding mode
    // either.
2353 return RoundingMode::NearestTiesToEven;
2354 return *ORM;
2355}
2356
2357/// Try to constant fold llvm.canonicalize for the given caller and value.
2358static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
2359 const APFloat &Src) {
2360 // Zero, positive and negative, is always OK to fold.
2361 if (Src.isZero()) {
2362 // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
2363 return ConstantFP::get(
2364 Context&: CI->getContext(),
2365 V: APFloat::getZero(Sem: Src.getSemantics(), Negative: Src.isNegative()));
2366 }
2367
2368 if (!Ty->isIEEELikeFPTy())
2369 return nullptr;
2370
2371 // Zero is always canonical and the sign must be preserved.
2372 //
2373 // Denorms and nans may have special encodings, but it should be OK to fold a
2374 // totally average number.
2375 if (Src.isNormal() || Src.isInfinity())
2376 return ConstantFP::get(Context&: CI->getContext(), V: Src);
2377
2378 if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
2379 DenormalMode DenormMode =
2380 CI->getFunction()->getDenormalMode(FPType: Src.getSemantics());
2381
2382 if (DenormMode == DenormalMode::getIEEE())
2383 return ConstantFP::get(Context&: CI->getContext(), V: Src);
2384
2385 if (DenormMode.Input == DenormalMode::Dynamic)
2386 return nullptr;
2387
    // We can fold only if we know for both the input and the output whether
    // denormals are flushed; give up if either side is dynamic.
2389 if ((DenormMode.Input == DenormalMode::Dynamic &&
2390 DenormMode.Output == DenormalMode::IEEE) ||
2391 (DenormMode.Input == DenormalMode::IEEE &&
2392 DenormMode.Output == DenormalMode::Dynamic))
2393 return nullptr;
2394
2395 bool IsPositive =
2396 (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
2397 (DenormMode.Output == DenormalMode::PositiveZero &&
2398 DenormMode.Input == DenormalMode::IEEE));
2399
2400 return ConstantFP::get(Context&: CI->getContext(),
2401 V: APFloat::getZero(Sem: Src.getSemantics(), Negative: !IsPositive));
2402 }
2403
2404 return nullptr;
2405}
2406
2407static Constant *ConstantFoldScalarCall1(StringRef Name,
2408 Intrinsic::ID IntrinsicID,
2409 Type *Ty,
2410 ArrayRef<Constant *> Operands,
2411 const TargetLibraryInfo *TLI,
2412 const CallBase *Call) {
2413 assert(Operands.size() == 1 && "Wrong number of operands.");
2414
2415 if (IntrinsicID == Intrinsic::is_constant) {
2416 // We know we have a "Constant" argument. But we want to only
2417 // return true for manifest constants, not those that depend on
2418 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
2419 if (Operands[0]->isManifestConstant())
2420 return ConstantInt::getTrue(Context&: Ty->getContext());
2421 return nullptr;
2422 }
2423
2424 if (isa<UndefValue>(Val: Operands[0])) {
2425 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
2426 // ctpop() is between 0 and bitwidth, pick 0 for undef.
2427 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
2428 if (IntrinsicID == Intrinsic::cos ||
2429 IntrinsicID == Intrinsic::ctpop ||
2430 IntrinsicID == Intrinsic::fptoui_sat ||
2431 IntrinsicID == Intrinsic::fptosi_sat ||
2432 IntrinsicID == Intrinsic::canonicalize)
2433 return Constant::getNullValue(Ty);
2434 if (IntrinsicID == Intrinsic::bswap ||
2435 IntrinsicID == Intrinsic::bitreverse ||
2436 IntrinsicID == Intrinsic::launder_invariant_group ||
2437 IntrinsicID == Intrinsic::strip_invariant_group)
2438 return Operands[0];
2439 }
2440
2441 if (isa<ConstantPointerNull>(Val: Operands[0])) {
2442 // launder(null) == null == strip(null) iff in addrspace 0
2443 if (IntrinsicID == Intrinsic::launder_invariant_group ||
2444 IntrinsicID == Intrinsic::strip_invariant_group) {
2445 // If instruction is not yet put in a basic block (e.g. when cloning
2446 // a function during inlining), Call's caller may not be available.
2447 // So check Call's BB first before querying Call->getCaller.
2448 const Function *Caller =
2449 Call->getParent() ? Call->getCaller() : nullptr;
2450 if (Caller &&
2451 !NullPointerIsDefined(
2452 F: Caller, AS: Operands[0]->getType()->getPointerAddressSpace())) {
2453 return Operands[0];
2454 }
2455 return nullptr;
2456 }
2457 }
2458
2459 if (auto *Op = dyn_cast<ConstantFP>(Val: Operands[0])) {
2460 APFloat U = Op->getValueAPF();
2461
2462 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2463 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2464 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2465
2466 if (U.isNaN())
2467 return nullptr;
2468
2469 unsigned Width = Ty->getIntegerBitWidth();
2470 APSInt Int(Width, !Signed);
2471 bool IsExact = false;
2472 APFloat::opStatus Status =
2473 U.convertToInteger(Result&: Int, RM: APFloat::rmTowardZero, IsExact: &IsExact);
2474
2475 if (Status == APFloat::opOK || Status == APFloat::opInexact)
2476 return ConstantInt::get(Ty, V: Int);
2477
2478 return nullptr;
2479 }
2480
2481 if (IntrinsicID == Intrinsic::fptoui_sat ||
2482 IntrinsicID == Intrinsic::fptosi_sat) {
2483 // convertToInteger() already has the desired saturation semantics.
2484 APSInt Int(Ty->getIntegerBitWidth(),
2485 IntrinsicID == Intrinsic::fptoui_sat);
2486 bool IsExact;
2487 U.convertToInteger(Result&: Int, RM: APFloat::rmTowardZero, IsExact: &IsExact);
2488 return ConstantInt::get(Ty, V: Int);
2489 }
2490
2491 if (IntrinsicID == Intrinsic::canonicalize)
2492 return constantFoldCanonicalize(Ty, CI: Call, Src: U);
2493
2494#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2495 if (Ty->isFP128Ty()) {
2496 if (IntrinsicID == Intrinsic::log) {
2497 float128 Result = logf128(Op->getValueAPF().convertToQuad());
2498 return GetConstantFoldFPValue128(V: Result, Ty);
2499 }
2500
2501 LibFunc Fp128Func = NotLibFunc;
2502 if (TLI && TLI->getLibFunc(funcName: Name, F&: Fp128Func) && TLI->has(F: Fp128Func) &&
2503 Fp128Func == LibFunc_logl)
2504 return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
2505 }
2506#endif
2507
2508 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() &&
2509 !Ty->isIntegerTy())
2510 return nullptr;
2511
2512 // Use internal versions of these intrinsics.
2513
    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2516 U.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
2517 return ConstantFP::get(Ty, V: U);
2518 }
2519
2520 if (IntrinsicID == Intrinsic::round) {
2521 U.roundToIntegral(RM: APFloat::rmNearestTiesToAway);
2522 return ConstantFP::get(Ty, V: U);
2523 }
2524
2525 if (IntrinsicID == Intrinsic::roundeven) {
2526 U.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
2527 return ConstantFP::get(Ty, V: U);
2528 }
2529
2530 if (IntrinsicID == Intrinsic::ceil) {
2531 U.roundToIntegral(RM: APFloat::rmTowardPositive);
2532 return ConstantFP::get(Ty, V: U);
2533 }
2534
2535 if (IntrinsicID == Intrinsic::floor) {
2536 U.roundToIntegral(RM: APFloat::rmTowardNegative);
2537 return ConstantFP::get(Ty, V: U);
2538 }
2539
2540 if (IntrinsicID == Intrinsic::trunc) {
2541 U.roundToIntegral(RM: APFloat::rmTowardZero);
2542 return ConstantFP::get(Ty, V: U);
2543 }
2544
2545 if (IntrinsicID == Intrinsic::fabs) {
2546 U.clearSign();
2547 return ConstantFP::get(Ty, V: U);
2548 }
2549
2550 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2551 // The v_fract instruction behaves like the OpenCL spec, which defines
2552 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2553 // there to prevent fract(-small) from returning 1.0. It returns the
2554 // largest positive floating-point number less than 1.0."
2555 APFloat FloorU(U);
2556 FloorU.roundToIntegral(RM: APFloat::rmTowardNegative);
2557 APFloat FractU(U - FloorU);
2558 APFloat AlmostOne(U.getSemantics(), 1);
2559 AlmostOne.next(/*nextDown*/ true);
2560 return ConstantFP::get(Ty, V: minimum(A: FractU, B: AlmostOne));
2561 }
2562
2563 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2564 // raise FP exceptions, unless the argument is signaling NaN.
2565
2566 std::optional<APFloat::roundingMode> RM;
2567 switch (IntrinsicID) {
2568 default:
2569 break;
2570 case Intrinsic::experimental_constrained_nearbyint:
2571 case Intrinsic::experimental_constrained_rint: {
2572 auto CI = cast<ConstrainedFPIntrinsic>(Val: Call);
2573 RM = CI->getRoundingMode();
2574 if (!RM || *RM == RoundingMode::Dynamic)
2575 return nullptr;
2576 break;
2577 }
2578 case Intrinsic::experimental_constrained_round:
2579 RM = APFloat::rmNearestTiesToAway;
2580 break;
2581 case Intrinsic::experimental_constrained_ceil:
2582 RM = APFloat::rmTowardPositive;
2583 break;
2584 case Intrinsic::experimental_constrained_floor:
2585 RM = APFloat::rmTowardNegative;
2586 break;
2587 case Intrinsic::experimental_constrained_trunc:
2588 RM = APFloat::rmTowardZero;
2589 break;
2590 }
2591 if (RM) {
2592 auto CI = cast<ConstrainedFPIntrinsic>(Val: Call);
2593 if (U.isFinite()) {
2594 APFloat::opStatus St = U.roundToIntegral(RM: *RM);
2595 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2596 St == APFloat::opInexact) {
2597 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2598 if (EB == fp::ebStrict)
2599 return nullptr;
2600 }
2601 } else if (U.isSignaling()) {
2602 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2603 if (EB && *EB != fp::ebIgnore)
2604 return nullptr;
2605 U = APFloat::getQNaN(Sem: U.getSemantics());
2606 }
2607 return ConstantFP::get(Ty, V: U);
2608 }
2609
2610 // NVVM float/double to signed/unsigned int32/int64 conversions:
2611 switch (IntrinsicID) {
2612 // f2i
2613 case Intrinsic::nvvm_f2i_rm:
2614 case Intrinsic::nvvm_f2i_rn:
2615 case Intrinsic::nvvm_f2i_rp:
2616 case Intrinsic::nvvm_f2i_rz:
2617 case Intrinsic::nvvm_f2i_rm_ftz:
2618 case Intrinsic::nvvm_f2i_rn_ftz:
2619 case Intrinsic::nvvm_f2i_rp_ftz:
2620 case Intrinsic::nvvm_f2i_rz_ftz:
2621 // f2ui
2622 case Intrinsic::nvvm_f2ui_rm:
2623 case Intrinsic::nvvm_f2ui_rn:
2624 case Intrinsic::nvvm_f2ui_rp:
2625 case Intrinsic::nvvm_f2ui_rz:
2626 case Intrinsic::nvvm_f2ui_rm_ftz:
2627 case Intrinsic::nvvm_f2ui_rn_ftz:
2628 case Intrinsic::nvvm_f2ui_rp_ftz:
2629 case Intrinsic::nvvm_f2ui_rz_ftz:
2630 // d2i
2631 case Intrinsic::nvvm_d2i_rm:
2632 case Intrinsic::nvvm_d2i_rn:
2633 case Intrinsic::nvvm_d2i_rp:
2634 case Intrinsic::nvvm_d2i_rz:
2635 // d2ui
2636 case Intrinsic::nvvm_d2ui_rm:
2637 case Intrinsic::nvvm_d2ui_rn:
2638 case Intrinsic::nvvm_d2ui_rp:
2639 case Intrinsic::nvvm_d2ui_rz:
2640 // f2ll
2641 case Intrinsic::nvvm_f2ll_rm:
2642 case Intrinsic::nvvm_f2ll_rn:
2643 case Intrinsic::nvvm_f2ll_rp:
2644 case Intrinsic::nvvm_f2ll_rz:
2645 case Intrinsic::nvvm_f2ll_rm_ftz:
2646 case Intrinsic::nvvm_f2ll_rn_ftz:
2647 case Intrinsic::nvvm_f2ll_rp_ftz:
2648 case Intrinsic::nvvm_f2ll_rz_ftz:
2649 // f2ull
2650 case Intrinsic::nvvm_f2ull_rm:
2651 case Intrinsic::nvvm_f2ull_rn:
2652 case Intrinsic::nvvm_f2ull_rp:
2653 case Intrinsic::nvvm_f2ull_rz:
2654 case Intrinsic::nvvm_f2ull_rm_ftz:
2655 case Intrinsic::nvvm_f2ull_rn_ftz:
2656 case Intrinsic::nvvm_f2ull_rp_ftz:
2657 case Intrinsic::nvvm_f2ull_rz_ftz:
2658 // d2ll
2659 case Intrinsic::nvvm_d2ll_rm:
2660 case Intrinsic::nvvm_d2ll_rn:
2661 case Intrinsic::nvvm_d2ll_rp:
2662 case Intrinsic::nvvm_d2ll_rz:
2663 // d2ull
2664 case Intrinsic::nvvm_d2ull_rm:
2665 case Intrinsic::nvvm_d2ull_rn:
2666 case Intrinsic::nvvm_d2ull_rp:
2667 case Intrinsic::nvvm_d2ull_rz: {
      if (U.isNaN()) {
2670 // In float-to-integer conversion, NaN inputs are converted to 0
2671 // when the source and destination bitwidths are both less than 64.
2672 if (nvvm::FPToIntegerIntrinsicNaNZero(IntrinsicID))
2673 return ConstantInt::get(Ty, V: 0);
2674
2675 // Otherwise, the most significant bit is set.
2676 unsigned BitWidth = Ty->getIntegerBitWidth();
2677 uint64_t Val = 1ULL << (BitWidth - 1);
2678 return ConstantInt::get(Ty, V: APInt(BitWidth, Val, /*IsSigned=*/false));
2679 }
2680
2681 APFloat::roundingMode RMode =
2682 nvvm::GetFPToIntegerRoundingMode(IntrinsicID);
2683 bool IsFTZ = nvvm::FPToIntegerIntrinsicShouldFTZ(IntrinsicID);
2684 bool IsSigned = nvvm::FPToIntegerIntrinsicResultIsSigned(IntrinsicID);
2685
2686 APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned);
2687 auto FloatToRound = IsFTZ ? FTZPreserveSign(V: U) : U;
2688
2689 // Return max/min value for integers if the result is +/-inf or
2690 // is too large to fit in the result's integer bitwidth.
2691 bool IsExact = false;
2692 FloatToRound.convertToInteger(Result&: ResInt, RM: RMode, IsExact: &IsExact);
2693 return ConstantInt::get(Ty, V: ResInt);
2694 }
2695 }
2696
    // We only fold functions with finite arguments. Folding NaN and inf is
    // likely to be aborted with an exception anyway, and some host libms
    // have known errors raising exceptions.
2700 if (!U.isFinite())
2701 return nullptr;
2702
    // APFloat versions of these functions do not exist yet, so we use the
    // host's native double versions. The float versions are not called
    // directly, but for all of these functions it holds that
    // (float)(f((double)arg)) == f(arg). Long double is not supported yet.
2707 const APFloat &APF = Op->getValueAPF();
2708
2709 switch (IntrinsicID) {
2710 default: break;
2711 case Intrinsic::log:
2712 if (U.isZero())
2713 return ConstantFP::getInfinity(Ty, Negative: true);
2714 if (U.isNegative())
2715 return ConstantFP::getNaN(Ty);
2716 if (U.isExactlyValue(V: 1.0))
2717 return ConstantFP::getZero(Ty);
2718 return ConstantFoldFP(NativeFP: log, V: APF, Ty);
2719 case Intrinsic::log2:
2720 if (U.isZero())
2721 return ConstantFP::getInfinity(Ty, Negative: true);
2722 if (U.isNegative())
2723 return ConstantFP::getNaN(Ty);
2724 if (U.isExactlyValue(V: 1.0))
2725 return ConstantFP::getZero(Ty);
2726 // TODO: What about hosts that lack a C99 library?
2727 return ConstantFoldFP(NativeFP: log2, V: APF, Ty);
2728 case Intrinsic::log10:
2729 if (U.isZero())
2730 return ConstantFP::getInfinity(Ty, Negative: true);
2731 if (U.isNegative())
2732 return ConstantFP::getNaN(Ty);
2733 if (U.isExactlyValue(V: 1.0))
2734 return ConstantFP::getZero(Ty);
2735 // TODO: What about hosts that lack a C99 library?
2736 return ConstantFoldFP(NativeFP: log10, V: APF, Ty);
2737 case Intrinsic::exp:
2738 return ConstantFoldFP(NativeFP: exp, V: APF, Ty);
2739 case Intrinsic::exp2:
2740 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2741 return ConstantFoldBinaryFP(NativeFP: pow, V: APFloat(2.0), W: APF, Ty);
2742 case Intrinsic::exp10:
2743 // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
2744 return ConstantFoldBinaryFP(NativeFP: pow, V: APFloat(10.0), W: APF, Ty);
2745 case Intrinsic::sin:
2746 return ConstantFoldFP(NativeFP: sin, V: APF, Ty);
2747 case Intrinsic::cos:
2748 return ConstantFoldFP(NativeFP: cos, V: APF, Ty);
2749 case Intrinsic::sinh:
2750 return ConstantFoldFP(NativeFP: sinh, V: APF, Ty);
2751 case Intrinsic::cosh:
2752 return ConstantFoldFP(NativeFP: cosh, V: APF, Ty);
2753 case Intrinsic::atan:
2754 // Implement optional behavior from C's Annex F for +/-0.0.
2755 if (U.isZero())
2756 return ConstantFP::get(Ty, V: U);
2757 return ConstantFoldFP(NativeFP: atan, V: APF, Ty);
2758 case Intrinsic::sqrt:
2759 return ConstantFoldFP(NativeFP: sqrt, V: APF, Ty);
2760
2761 // NVVM Intrinsics:
2762 case Intrinsic::nvvm_ceil_ftz_f:
2763 case Intrinsic::nvvm_ceil_f:
2764 case Intrinsic::nvvm_ceil_d:
2765 return ConstantFoldFP(
2766 NativeFP: ceil, V: APF, Ty,
2767 DenormMode: nvvm::GetNVVMDenormMode(
2768 ShouldFTZ: nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
2769
2770 case Intrinsic::nvvm_fabs_ftz:
2771 case Intrinsic::nvvm_fabs:
2772 return ConstantFoldFP(
2773 NativeFP: fabs, V: APF, Ty,
2774 DenormMode: nvvm::GetNVVMDenormMode(
2775 ShouldFTZ: nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
2776
2777 case Intrinsic::nvvm_floor_ftz_f:
2778 case Intrinsic::nvvm_floor_f:
2779 case Intrinsic::nvvm_floor_d:
2780 return ConstantFoldFP(
2781 NativeFP: floor, V: APF, Ty,
2782 DenormMode: nvvm::GetNVVMDenormMode(
2783 ShouldFTZ: nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
2784
2785 case Intrinsic::nvvm_rcp_rm_ftz_f:
2786 case Intrinsic::nvvm_rcp_rn_ftz_f:
2787 case Intrinsic::nvvm_rcp_rp_ftz_f:
2788 case Intrinsic::nvvm_rcp_rz_ftz_f:
2789 case Intrinsic::nvvm_rcp_rm_d:
2790 case Intrinsic::nvvm_rcp_rm_f:
2791 case Intrinsic::nvvm_rcp_rn_d:
2792 case Intrinsic::nvvm_rcp_rn_f:
2793 case Intrinsic::nvvm_rcp_rp_d:
2794 case Intrinsic::nvvm_rcp_rp_f:
2795 case Intrinsic::nvvm_rcp_rz_d:
2796 case Intrinsic::nvvm_rcp_rz_f: {
2797 APFloat::roundingMode RoundMode = nvvm::GetRCPRoundingMode(IntrinsicID);
2798 bool IsFTZ = nvvm::RCPShouldFTZ(IntrinsicID);
2799
2800 auto Denominator = IsFTZ ? FTZPreserveSign(V: APF) : APF;
2801 APFloat Res = APFloat::getOne(Sem: APF.getSemantics());
2802 APFloat::opStatus Status = Res.divide(RHS: Denominator, RM: RoundMode);
2803
2804 if (Status == APFloat::opOK || Status == APFloat::opInexact) {
2805 if (IsFTZ)
2806 Res = FTZPreserveSign(V: Res);
2807 return ConstantFP::get(Ty, V: Res);
2808 }
2809 return nullptr;
2810 }
2811
2812 case Intrinsic::nvvm_round_ftz_f:
2813 case Intrinsic::nvvm_round_f:
2814 case Intrinsic::nvvm_round_d: {
      // nvvm_round is lowered to PTX cvt.rni, which rounds to the nearest
      // integer, choosing the even integer if the source is equidistant
      // between two integers, so the semantics are closer to "rint" than to
      // "round".
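      // E.g. 2.5 folds to 2.0 here (ties to even), not to 3.0.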
2818 bool IsFTZ = nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID);
2819 auto V = IsFTZ ? FTZPreserveSign(V: APF) : APF;
2820 V.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
2821 return ConstantFP::get(Ty, V);
2822 }
2823
2824 case Intrinsic::nvvm_saturate_ftz_f:
2825 case Intrinsic::nvvm_saturate_d:
2826 case Intrinsic::nvvm_saturate_f: {
2827 bool IsFTZ = nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID);
2828 auto V = IsFTZ ? FTZPreserveSign(V: APF) : APF;
2829 if (V.isNegative() || V.isZero() || V.isNaN())
2830 return ConstantFP::getZero(Ty);
2831 APFloat One = APFloat::getOne(Sem: APF.getSemantics());
2832 if (V > One)
2833 return ConstantFP::get(Ty, V: One);
      return ConstantFP::get(Ty, V);
2835 }
2836
2837 case Intrinsic::nvvm_sqrt_rn_ftz_f:
2838 case Intrinsic::nvvm_sqrt_f:
2839 case Intrinsic::nvvm_sqrt_rn_d:
2840 case Intrinsic::nvvm_sqrt_rn_f:
2841 if (APF.isNegative())
2842 return nullptr;
2843 return ConstantFoldFP(
2844 NativeFP: sqrt, V: APF, Ty,
2845 DenormMode: nvvm::GetNVVMDenormMode(
2846 ShouldFTZ: nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID)));
2847
2848 // AMDGCN Intrinsics:
2849 case Intrinsic::amdgcn_cos:
2850 case Intrinsic::amdgcn_sin: {
2851 double V = getValueAsDouble(Op);
2852 if (V < -256.0 || V > 256.0)
2853 // The gfx8 and gfx9 architectures handle arguments outside the range
2854 // [-256, 256] differently. This should be a rare case so bail out
2855 // rather than trying to handle the difference.
2856 return nullptr;
2857 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2858 double V4 = V * 4.0;
2859 if (V4 == floor(x: V4)) {
2860 // Force exact results for quarter-integer inputs.
2861 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
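        // SinVals[K] = sin(K * pi / 2); cos looks one quadrant ahead since
        // cos(x) = sin(x + pi / 2).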
2862 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2863 } else {
2864 if (IsCos)
2865 V = cos(x: V * 2.0 * numbers::pi);
2866 else
2867 V = sin(x: V * 2.0 * numbers::pi);
2868 }
2869 return GetConstantFoldFPValue(V, Ty);
2870 }
2871 }
2872
2873 if (!TLI)
2874 return nullptr;
2875
2876 LibFunc Func = NotLibFunc;
2877 if (!TLI->getLibFunc(funcName: Name, F&: Func))
2878 return nullptr;
2879
2880 switch (Func) {
2881 default:
2882 break;
2883 case LibFunc_acos:
2884 case LibFunc_acosf:
2885 case LibFunc_acos_finite:
2886 case LibFunc_acosf_finite:
2887 if (TLI->has(F: Func))
2888 return ConstantFoldFP(NativeFP: acos, V: APF, Ty);
2889 break;
2890 case LibFunc_asin:
2891 case LibFunc_asinf:
2892 case LibFunc_asin_finite:
2893 case LibFunc_asinf_finite:
2894 if (TLI->has(F: Func))
2895 return ConstantFoldFP(NativeFP: asin, V: APF, Ty);
2896 break;
2897 case LibFunc_atan:
2898 case LibFunc_atanf:
2899 // Implement optional behavior from C's Annex F for +/-0.0.
2900 if (U.isZero())
2901 return ConstantFP::get(Ty, V: U);
2902 if (TLI->has(F: Func))
2903 return ConstantFoldFP(NativeFP: atan, V: APF, Ty);
2904 break;
2905 case LibFunc_ceil:
2906 case LibFunc_ceilf:
2907 if (TLI->has(F: Func)) {
2908 U.roundToIntegral(RM: APFloat::rmTowardPositive);
2909 return ConstantFP::get(Ty, V: U);
2910 }
2911 break;
2912 case LibFunc_cos:
2913 case LibFunc_cosf:
2914 if (TLI->has(F: Func))
2915 return ConstantFoldFP(NativeFP: cos, V: APF, Ty);
2916 break;
2917 case LibFunc_cosh:
2918 case LibFunc_coshf:
2919 case LibFunc_cosh_finite:
2920 case LibFunc_coshf_finite:
2921 if (TLI->has(F: Func))
2922 return ConstantFoldFP(NativeFP: cosh, V: APF, Ty);
2923 break;
2924 case LibFunc_exp:
2925 case LibFunc_expf:
2926 case LibFunc_exp_finite:
2927 case LibFunc_expf_finite:
2928 if (TLI->has(F: Func))
2929 return ConstantFoldFP(NativeFP: exp, V: APF, Ty);
2930 break;
2931 case LibFunc_exp2:
2932 case LibFunc_exp2f:
2933 case LibFunc_exp2_finite:
2934 case LibFunc_exp2f_finite:
2935 if (TLI->has(F: Func))
2936 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2937 return ConstantFoldBinaryFP(NativeFP: pow, V: APFloat(2.0), W: APF, Ty);
2938 break;
2939 case LibFunc_fabs:
2940 case LibFunc_fabsf:
2941 if (TLI->has(F: Func)) {
2942 U.clearSign();
2943 return ConstantFP::get(Ty, V: U);
2944 }
2945 break;
2946 case LibFunc_floor:
2947 case LibFunc_floorf:
2948 if (TLI->has(F: Func)) {
2949 U.roundToIntegral(RM: APFloat::rmTowardNegative);
2950 return ConstantFP::get(Ty, V: U);
2951 }
2952 break;
2953 case LibFunc_log:
2954 case LibFunc_logf:
2955 case LibFunc_log_finite:
2956 case LibFunc_logf_finite:
2957 if (!APF.isNegative() && !APF.isZero() && TLI->has(F: Func))
2958 return ConstantFoldFP(NativeFP: log, V: APF, Ty);
2959 break;
2960 case LibFunc_log2:
2961 case LibFunc_log2f:
2962 case LibFunc_log2_finite:
2963 case LibFunc_log2f_finite:
2964 if (!APF.isNegative() && !APF.isZero() && TLI->has(F: Func))
2965 // TODO: What about hosts that lack a C99 library?
2966 return ConstantFoldFP(NativeFP: log2, V: APF, Ty);
2967 break;
2968 case LibFunc_log10:
2969 case LibFunc_log10f:
2970 case LibFunc_log10_finite:
2971 case LibFunc_log10f_finite:
2972 if (!APF.isNegative() && !APF.isZero() && TLI->has(F: Func))
2973 // TODO: What about hosts that lack a C99 library?
2974 return ConstantFoldFP(NativeFP: log10, V: APF, Ty);
2975 break;
2976 case LibFunc_ilogb:
2977 case LibFunc_ilogbf:
2978 if (!APF.isZero() && TLI->has(F: Func))
2979 return ConstantInt::get(Ty, V: ilogb(Arg: APF), IsSigned: true);
2980 break;
2981 case LibFunc_logb:
2982 case LibFunc_logbf:
2983 if (!APF.isZero() && TLI->has(F: Func))
2984 return ConstantFoldFP(NativeFP: logb, V: APF, Ty);
2985 break;
2986 case LibFunc_log1p:
2987 case LibFunc_log1pf:
2988 // Implement optional behavior from C's Annex F for +/-0.0.
2989 if (U.isZero())
2990 return ConstantFP::get(Ty, V: U);
2991 if (APF > APFloat::getOne(Sem: APF.getSemantics(), Negative: true) && TLI->has(F: Func))
2992 return ConstantFoldFP(NativeFP: log1p, V: APF, Ty);
2993 break;
2994 case LibFunc_logl:
2995 return nullptr;
2996 case LibFunc_erf:
2997 case LibFunc_erff:
2998 if (TLI->has(F: Func))
2999 return ConstantFoldFP(NativeFP: erf, V: APF, Ty);
3000 break;
3001 case LibFunc_nearbyint:
3002 case LibFunc_nearbyintf:
3003 case LibFunc_rint:
3004 case LibFunc_rintf:
3005 case LibFunc_roundeven:
3006 case LibFunc_roundevenf:
3007 if (TLI->has(F: Func)) {
3008 U.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
3009 return ConstantFP::get(Ty, V: U);
3010 }
3011 break;
3012 case LibFunc_round:
3013 case LibFunc_roundf:
3014 if (TLI->has(F: Func)) {
3015 U.roundToIntegral(RM: APFloat::rmNearestTiesToAway);
3016 return ConstantFP::get(Ty, V: U);
3017 }
3018 break;
3019 case LibFunc_sin:
3020 case LibFunc_sinf:
3021 if (TLI->has(F: Func))
3022 return ConstantFoldFP(NativeFP: sin, V: APF, Ty);
3023 break;
3024 case LibFunc_sinh:
3025 case LibFunc_sinhf:
3026 case LibFunc_sinh_finite:
3027 case LibFunc_sinhf_finite:
3028 if (TLI->has(F: Func))
3029 return ConstantFoldFP(NativeFP: sinh, V: APF, Ty);
3030 break;
3031 case LibFunc_sqrt:
3032 case LibFunc_sqrtf:
3033 if (!APF.isNegative() && TLI->has(F: Func))
3034 return ConstantFoldFP(NativeFP: sqrt, V: APF, Ty);
3035 break;
3036 case LibFunc_tan:
3037 case LibFunc_tanf:
3038 if (TLI->has(F: Func))
3039 return ConstantFoldFP(NativeFP: tan, V: APF, Ty);
3040 break;
3041 case LibFunc_tanh:
3042 case LibFunc_tanhf:
3043 if (TLI->has(F: Func))
3044 return ConstantFoldFP(NativeFP: tanh, V: APF, Ty);
3045 break;
3046 case LibFunc_trunc:
3047 case LibFunc_truncf:
3048 if (TLI->has(F: Func)) {
3049 U.roundToIntegral(RM: APFloat::rmTowardZero);
3050 return ConstantFP::get(Ty, V: U);
3051 }
3052 break;
3053 }
3054 return nullptr;
3055 }
3056
3057 if (auto *Op = dyn_cast<ConstantInt>(Val: Operands[0])) {
3058 switch (IntrinsicID) {
3059 case Intrinsic::bswap:
3060 return ConstantInt::get(Context&: Ty->getContext(), V: Op->getValue().byteSwap());
3061 case Intrinsic::ctpop:
3062 return ConstantInt::get(Ty, V: Op->getValue().popcount());
3063 case Intrinsic::bitreverse:
3064 return ConstantInt::get(Context&: Ty->getContext(), V: Op->getValue().reverseBits());
3065 case Intrinsic::amdgcn_s_wqm: {
3066 uint64_t Val = Op->getZExtValue();
3067      Val |= ((Val & 0x5555555555555555ULL) << 1) |
3068 ((Val >> 1) & 0x5555555555555555ULL);
3069      Val |= ((Val & 0x3333333333333333ULL) << 2) |
3070 ((Val >> 2) & 0x3333333333333333ULL);
3071 return ConstantInt::get(Ty, V: Val);
3072 }
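      // Worked example of the whole-quad-mode expansion above: s_wqm(0x1)
      // folds to 0xF, since one set bit activates all four bits of its quad.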
3073
3074 case Intrinsic::amdgcn_s_quadmask: {
3075 uint64_t Val = Op->getZExtValue();
3076 uint64_t QuadMask = 0;
3077 for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
3078 if (!(Val & 0xF))
3079 continue;
3080
3081 QuadMask |= (1ULL << I);
3082 }
3083 return ConstantInt::get(Ty, V: QuadMask);
3084 }
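      // Worked example of the loop above: s_quadmask(0x000000F1) folds to
      // 0x3, because quads 0 (0x1) and 1 (0xF) are the only non-zero quads.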
3085
3086 case Intrinsic::amdgcn_s_bitreplicate: {
3087 uint64_t Val = Op->getZExtValue();
3088 Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
3089 Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
3090 Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
3091 Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
3092 Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
3093 Val = Val | Val << 1;
3094 return ConstantInt::get(Ty, V: Val);
3095 }
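      // Worked example of the bit spreading above: s_bitreplicate(0b101)
      // doubles each input bit in place and folds to 0b110011.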
3096 }
3097 }
3098
3099 if (Operands[0]->getType()->isVectorTy()) {
3100 auto *Op = cast<Constant>(Val: Operands[0]);
3101 switch (IntrinsicID) {
3102 default: break;
3103 case Intrinsic::vector_reduce_add:
3104 case Intrinsic::vector_reduce_mul:
3105 case Intrinsic::vector_reduce_and:
3106 case Intrinsic::vector_reduce_or:
3107 case Intrinsic::vector_reduce_xor:
3108 case Intrinsic::vector_reduce_smin:
3109 case Intrinsic::vector_reduce_smax:
3110 case Intrinsic::vector_reduce_umin:
3111 case Intrinsic::vector_reduce_umax:
3112 if (Constant *C = constantFoldVectorReduce(IID: IntrinsicID, Op: Operands[0]))
3113 return C;
3114 break;
3115 case Intrinsic::x86_sse_cvtss2si:
3116 case Intrinsic::x86_sse_cvtss2si64:
3117 case Intrinsic::x86_sse2_cvtsd2si:
3118 case Intrinsic::x86_sse2_cvtsd2si64:
3119 if (ConstantFP *FPOp =
3120 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3121 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3122 /*roundTowardZero=*/false, Ty,
3123 /*IsSigned*/true);
3124 break;
3125 case Intrinsic::x86_sse_cvttss2si:
3126 case Intrinsic::x86_sse_cvttss2si64:
3127 case Intrinsic::x86_sse2_cvttsd2si:
3128 case Intrinsic::x86_sse2_cvttsd2si64:
3129 if (ConstantFP *FPOp =
3130 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3131 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3132 /*roundTowardZero=*/true, Ty,
3133 /*IsSigned*/true);
3134 break;
3135
3136 case Intrinsic::wasm_anytrue:
3137 return Op->isNullValue() ? ConstantInt::get(Ty, V: 0)
3138 : ConstantInt::get(Ty, V: 1);
3139
3140    case Intrinsic::wasm_alltrue: {
3141      // Check each element individually.
3142 unsigned E = cast<FixedVectorType>(Val: Op->getType())->getNumElements();
3143 for (unsigned I = 0; I != E; ++I) {
3144 Constant *Elt = Op->getAggregateElement(Elt: I);
3145 // Return false as soon as we find a non-true element.
3146 if (Elt && Elt->isNullValue())
3147 return ConstantInt::get(Ty, V: 0);
3148 // Bail as soon as we find an element we cannot prove to be true.
3149 if (!Elt || !isa<ConstantInt>(Val: Elt))
3150 return nullptr;
3151 }
3152
3153      return ConstantInt::get(Ty, V: 1);
    }
3154 }
3155 }
3156
3157 return nullptr;
3158}
3159
3160static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
3161 const ConstrainedFPIntrinsic *Call) {
3162 APFloat::opStatus St = APFloat::opOK;
3163 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Val: Call);
3164 FCmpInst::Predicate Cond = FCmp->getPredicate();
3165 if (FCmp->isSignaling()) {
3166 if (Op1.isNaN() || Op2.isNaN())
3167 St = APFloat::opInvalidOp;
3168 } else {
3169 if (Op1.isSignaling() || Op2.isSignaling())
3170 St = APFloat::opInvalidOp;
3171 }
3172 bool Result = FCmpInst::compare(LHS: Op1, RHS: Op2, Pred: Cond);
3173 if (mayFoldConstrained(CI: const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
3174 return ConstantInt::get(Ty: Call->getType()->getScalarType(), V: Result);
3175 return nullptr;
3176}
3177
3178static Constant *ConstantFoldLibCall2(StringRef Name, Type *Ty,
3179 ArrayRef<Constant *> Operands,
3180 const TargetLibraryInfo *TLI) {
3181 if (!TLI)
3182 return nullptr;
3183
3184 LibFunc Func = NotLibFunc;
3185 if (!TLI->getLibFunc(funcName: Name, F&: Func))
3186 return nullptr;
3187
3188 const auto *Op1 = dyn_cast<ConstantFP>(Val: Operands[0]);
3189 if (!Op1)
3190 return nullptr;
3191
3192 const auto *Op2 = dyn_cast<ConstantFP>(Val: Operands[1]);
3193 if (!Op2)
3194 return nullptr;
3195
3196 const APFloat &Op1V = Op1->getValueAPF();
3197 const APFloat &Op2V = Op2->getValueAPF();
3198
3199 switch (Func) {
3200 default:
3201 break;
3202 case LibFunc_pow:
3203 case LibFunc_powf:
3204 case LibFunc_pow_finite:
3205 case LibFunc_powf_finite:
3206 if (TLI->has(F: Func))
3207 return ConstantFoldBinaryFP(NativeFP: pow, V: Op1V, W: Op2V, Ty);
3208 break;
3209 case LibFunc_fmod:
3210 case LibFunc_fmodf:
3211 if (TLI->has(F: Func)) {
3212 APFloat V = Op1->getValueAPF();
3213 if (APFloat::opStatus::opOK == V.mod(RHS: Op2->getValueAPF()))
3214 return ConstantFP::get(Ty, V);
3215 }
3216 break;
3217 case LibFunc_remainder:
3218 case LibFunc_remainderf:
3219 if (TLI->has(F: Func)) {
3220 APFloat V = Op1->getValueAPF();
3221 if (APFloat::opStatus::opOK == V.remainder(RHS: Op2->getValueAPF()))
3222 return ConstantFP::get(Ty, V);
3223 }
3224 break;
3225 case LibFunc_atan2:
3226 case LibFunc_atan2f:
3227    // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
3228    // implementations (e.g. Solaris), so we do not assume a known result.
3229 if (Op1V.isZero() && Op2V.isZero())
3230 return nullptr;
3231 [[fallthrough]];
3232 case LibFunc_atan2_finite:
3233 case LibFunc_atan2f_finite:
3234 if (TLI->has(F: Func))
3235 return ConstantFoldBinaryFP(NativeFP: atan2, V: Op1V, W: Op2V, Ty);
3236 break;
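    // E.g., atan2(1.0, 1.0) can fold to an approximation of pi/4 via the
    // host atan2() above, while atan2(+0.0, +0.0) is deliberately left alone.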
3237 }
3238
3239 return nullptr;
3240}
3241
3242static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty,
3243 ArrayRef<Constant *> Operands,
3244 const CallBase *Call) {
3245 assert(Operands.size() == 2 && "Wrong number of operands.");
3246
3247 if (Ty->isFloatingPointTy()) {
3248 // TODO: We should have undef handling for all of the FP intrinsics that
3249 // are attempted to be folded in this function.
3250 bool IsOp0Undef = isa<UndefValue>(Val: Operands[0]);
3251 bool IsOp1Undef = isa<UndefValue>(Val: Operands[1]);
3252 switch (IntrinsicID) {
3253 case Intrinsic::maxnum:
3254 case Intrinsic::minnum:
3255 case Intrinsic::maximum:
3256 case Intrinsic::minimum:
3257 case Intrinsic::maximumnum:
3258 case Intrinsic::minimumnum:
3259 case Intrinsic::nvvm_fmax_d:
3260 case Intrinsic::nvvm_fmin_d:
3261 // If one argument is undef, return the other argument.
3262 if (IsOp0Undef)
3263 return Operands[1];
3264 if (IsOp1Undef)
3265 return Operands[0];
3266 break;
3267
3268 case Intrinsic::nvvm_fmax_f:
3269 case Intrinsic::nvvm_fmax_ftz_f:
3270 case Intrinsic::nvvm_fmax_ftz_nan_f:
3271 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3272 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3273 case Intrinsic::nvvm_fmax_nan_f:
3274 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3275 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3276
3277 case Intrinsic::nvvm_fmin_f:
3278 case Intrinsic::nvvm_fmin_ftz_f:
3279 case Intrinsic::nvvm_fmin_ftz_nan_f:
3280 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
3281 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
3282 case Intrinsic::nvvm_fmin_nan_f:
3283 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
3284 case Intrinsic::nvvm_fmin_xorsign_abs_f:
3285 // If one arg is undef, the other arg can be returned only if it is
3286 // constant, as we may need to flush it to sign-preserving zero or
3287 // canonicalize the NaN.
3288 if (!IsOp0Undef && !IsOp1Undef)
3289 break;
3290 if (auto *Op = dyn_cast<ConstantFP>(Val: Operands[IsOp0Undef ? 1 : 0])) {
3291 if (Op->isNaN()) {
3292 APInt NVCanonicalNaN(32, 0x7fffffff);
3293 return ConstantFP::get(
3294 Ty, V: APFloat(Ty->getFltSemantics(), NVCanonicalNaN));
3295 }
3296 if (nvvm::FMinFMaxShouldFTZ(IntrinsicID))
3297 return ConstantFP::get(Ty, V: FTZPreserveSign(V: Op->getValueAPF()));
3298 else
3299 return Op;
3300 }
3301 break;
3302 }
3303 }
3304
3305 if (const auto *Op1 = dyn_cast<ConstantFP>(Val: Operands[0])) {
3306 const APFloat &Op1V = Op1->getValueAPF();
3307
3308 if (const auto *Op2 = dyn_cast<ConstantFP>(Val: Operands[1])) {
3309 if (Op2->getType() != Op1->getType())
3310 return nullptr;
3311 const APFloat &Op2V = Op2->getValueAPF();
3312
3313 if (const auto *ConstrIntr =
3314 dyn_cast_if_present<ConstrainedFPIntrinsic>(Val: Call)) {
3315 RoundingMode RM = getEvaluationRoundingMode(CI: ConstrIntr);
3316 APFloat Res = Op1V;
3317 APFloat::opStatus St;
3318 switch (IntrinsicID) {
3319 default:
3320 return nullptr;
3321 case Intrinsic::experimental_constrained_fadd:
3322 St = Res.add(RHS: Op2V, RM);
3323 break;
3324 case Intrinsic::experimental_constrained_fsub:
3325 St = Res.subtract(RHS: Op2V, RM);
3326 break;
3327 case Intrinsic::experimental_constrained_fmul:
3328 St = Res.multiply(RHS: Op2V, RM);
3329 break;
3330 case Intrinsic::experimental_constrained_fdiv:
3331 St = Res.divide(RHS: Op2V, RM);
3332 break;
3333 case Intrinsic::experimental_constrained_frem:
3334 St = Res.mod(RHS: Op2V);
3335 break;
3336 case Intrinsic::experimental_constrained_fcmp:
3337 case Intrinsic::experimental_constrained_fcmps:
3338 return evaluateCompare(Op1: Op1V, Op2: Op2V, Call: ConstrIntr);
3339 }
3340 if (mayFoldConstrained(CI: const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
3341 St))
3342 return ConstantFP::get(Ty, V: Res);
3343 return nullptr;
3344 }
3345
3346 switch (IntrinsicID) {
3347 default:
3348 break;
3349 case Intrinsic::copysign:
3350 return ConstantFP::get(Ty, V: APFloat::copySign(Value: Op1V, Sign: Op2V));
3351 case Intrinsic::minnum:
3352 if (Op1V.isSignaling() || Op2V.isSignaling())
3353 return nullptr;
3354 return ConstantFP::get(Ty, V: minnum(A: Op1V, B: Op2V));
3355 case Intrinsic::maxnum:
3356 if (Op1V.isSignaling() || Op2V.isSignaling())
3357 return nullptr;
3358 return ConstantFP::get(Ty, V: maxnum(A: Op1V, B: Op2V));
3359 case Intrinsic::minimum:
3360 return ConstantFP::get(Ty, V: minimum(A: Op1V, B: Op2V));
3361 case Intrinsic::maximum:
3362 return ConstantFP::get(Ty, V: maximum(A: Op1V, B: Op2V));
3363 case Intrinsic::minimumnum:
3364 return ConstantFP::get(Ty, V: minimumnum(A: Op1V, B: Op2V));
3365 case Intrinsic::maximumnum:
3366 return ConstantFP::get(Ty, V: maximumnum(A: Op1V, B: Op2V));
3367
3368 case Intrinsic::nvvm_fmax_d:
3369 case Intrinsic::nvvm_fmax_f:
3370 case Intrinsic::nvvm_fmax_ftz_f:
3371 case Intrinsic::nvvm_fmax_ftz_nan_f:
3372 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3373 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3374 case Intrinsic::nvvm_fmax_nan_f:
3375 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3376 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3377
3378 case Intrinsic::nvvm_fmin_d:
3379 case Intrinsic::nvvm_fmin_f:
3380 case Intrinsic::nvvm_fmin_ftz_f:
3381 case Intrinsic::nvvm_fmin_ftz_nan_f:
3382 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
3383 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
3384 case Intrinsic::nvvm_fmin_nan_f:
3385 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
3386 case Intrinsic::nvvm_fmin_xorsign_abs_f: {
3387
3388 bool ShouldCanonicalizeNaNs = !(IntrinsicID == Intrinsic::nvvm_fmax_d ||
3389 IntrinsicID == Intrinsic::nvvm_fmin_d);
3390 bool IsFTZ = nvvm::FMinFMaxShouldFTZ(IntrinsicID);
3391 bool IsNaNPropagating = nvvm::FMinFMaxPropagatesNaNs(IntrinsicID);
3392 bool IsXorSignAbs = nvvm::FMinFMaxIsXorSignAbs(IntrinsicID);
3393
3394 APFloat A = IsFTZ ? FTZPreserveSign(V: Op1V) : Op1V;
3395 APFloat B = IsFTZ ? FTZPreserveSign(V: Op2V) : Op2V;
3396
3397 bool XorSign = false;
3398 if (IsXorSignAbs) {
3399 XorSign = A.isNegative() ^ B.isNegative();
3400 A = abs(X: A);
3401 B = abs(X: B);
3402 }
3403
3404 bool IsFMax = false;
3405 switch (IntrinsicID) {
3406 case Intrinsic::nvvm_fmax_d:
3407 case Intrinsic::nvvm_fmax_f:
3408 case Intrinsic::nvvm_fmax_ftz_f:
3409 case Intrinsic::nvvm_fmax_ftz_nan_f:
3410 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3411 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3412 case Intrinsic::nvvm_fmax_nan_f:
3413 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3414 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3415 IsFMax = true;
3416 break;
3417 }
3418 APFloat Res = IsFMax ? maximum(A, B) : minimum(A, B);
3419
3420 if (ShouldCanonicalizeNaNs) {
3421 APFloat NVCanonicalNaN(Res.getSemantics(), APInt(32, 0x7fffffff));
3422 if (A.isNaN() && B.isNaN())
3423 return ConstantFP::get(Ty, V: NVCanonicalNaN);
3424 else if (IsNaNPropagating && (A.isNaN() || B.isNaN()))
3425 return ConstantFP::get(Ty, V: NVCanonicalNaN);
3426 }
3427
3428 if (A.isNaN() && B.isNaN())
3429 return Operands[1];
3430 else if (A.isNaN())
3431 Res = B;
3432 else if (B.isNaN())
3433 Res = A;
3434
3435 if (IsXorSignAbs && XorSign != Res.isNegative())
3436 Res.changeSign();
3437
3438 return ConstantFP::get(Ty, V: Res);
3439 }
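      // Worked example of the xorsign-abs handling above:
      // nvvm_fmax_xorsign_abs_f(-3.0f, 2.0f) takes max(3.0, 2.0) and applies
      // the XOR of the argument signs, folding to -3.0.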
3440
3441 case Intrinsic::nvvm_add_rm_f:
3442 case Intrinsic::nvvm_add_rn_f:
3443 case Intrinsic::nvvm_add_rp_f:
3444 case Intrinsic::nvvm_add_rz_f:
3445 case Intrinsic::nvvm_add_rm_d:
3446 case Intrinsic::nvvm_add_rn_d:
3447 case Intrinsic::nvvm_add_rp_d:
3448 case Intrinsic::nvvm_add_rz_d:
3449 case Intrinsic::nvvm_add_rm_ftz_f:
3450 case Intrinsic::nvvm_add_rn_ftz_f:
3451 case Intrinsic::nvvm_add_rp_ftz_f:
3452 case Intrinsic::nvvm_add_rz_ftz_f: {
3453
3454 bool IsFTZ = nvvm::FAddShouldFTZ(IntrinsicID);
3455 APFloat A = IsFTZ ? FTZPreserveSign(V: Op1V) : Op1V;
3456 APFloat B = IsFTZ ? FTZPreserveSign(V: Op2V) : Op2V;
3457
3458 APFloat::roundingMode RoundMode =
3459 nvvm::GetFAddRoundingMode(IntrinsicID);
3460
3461 APFloat Res = A;
3462 APFloat::opStatus Status = Res.add(RHS: B, RM: RoundMode);
3463
3464 if (!Res.isNaN() &&
3465 (Status == APFloat::opOK || Status == APFloat::opInexact)) {
3466 Res = IsFTZ ? FTZPreserveSign(V: Res) : Res;
3467 return ConstantFP::get(Ty, V: Res);
3468 }
3469 return nullptr;
3470 }
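      // Worked example of the directed rounding above: nvvm_add_rp_f(1.0f,
      // 0x1p-24f) is inexact in float and rounds up to 0x1.000002p0f.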
3471
3472 case Intrinsic::nvvm_mul_rm_f:
3473 case Intrinsic::nvvm_mul_rn_f:
3474 case Intrinsic::nvvm_mul_rp_f:
3475 case Intrinsic::nvvm_mul_rz_f:
3476 case Intrinsic::nvvm_mul_rm_d:
3477 case Intrinsic::nvvm_mul_rn_d:
3478 case Intrinsic::nvvm_mul_rp_d:
3479 case Intrinsic::nvvm_mul_rz_d:
3480 case Intrinsic::nvvm_mul_rm_ftz_f:
3481 case Intrinsic::nvvm_mul_rn_ftz_f:
3482 case Intrinsic::nvvm_mul_rp_ftz_f:
3483 case Intrinsic::nvvm_mul_rz_ftz_f: {
3484
3485 bool IsFTZ = nvvm::FMulShouldFTZ(IntrinsicID);
3486 APFloat A = IsFTZ ? FTZPreserveSign(V: Op1V) : Op1V;
3487 APFloat B = IsFTZ ? FTZPreserveSign(V: Op2V) : Op2V;
3488
3489 APFloat::roundingMode RoundMode =
3490 nvvm::GetFMulRoundingMode(IntrinsicID);
3491
3492 APFloat Res = A;
3493 APFloat::opStatus Status = Res.multiply(RHS: B, RM: RoundMode);
3494
3495 if (!Res.isNaN() &&
3496 (Status == APFloat::opOK || Status == APFloat::opInexact)) {
3497 Res = IsFTZ ? FTZPreserveSign(V: Res) : Res;
3498 return ConstantFP::get(Ty, V: Res);
3499 }
3500 return nullptr;
3501 }
3502
3503 case Intrinsic::nvvm_div_rm_f:
3504 case Intrinsic::nvvm_div_rn_f:
3505 case Intrinsic::nvvm_div_rp_f:
3506 case Intrinsic::nvvm_div_rz_f:
3507 case Intrinsic::nvvm_div_rm_d:
3508 case Intrinsic::nvvm_div_rn_d:
3509 case Intrinsic::nvvm_div_rp_d:
3510 case Intrinsic::nvvm_div_rz_d:
3511 case Intrinsic::nvvm_div_rm_ftz_f:
3512 case Intrinsic::nvvm_div_rn_ftz_f:
3513 case Intrinsic::nvvm_div_rp_ftz_f:
3514 case Intrinsic::nvvm_div_rz_ftz_f: {
3515 bool IsFTZ = nvvm::FDivShouldFTZ(IntrinsicID);
3516 APFloat A = IsFTZ ? FTZPreserveSign(V: Op1V) : Op1V;
3517 APFloat B = IsFTZ ? FTZPreserveSign(V: Op2V) : Op2V;
3518 APFloat::roundingMode RoundMode =
3519 nvvm::GetFDivRoundingMode(IntrinsicID);
3520
3521 APFloat Res = A;
3522 APFloat::opStatus Status = Res.divide(RHS: B, RM: RoundMode);
3523 if (!Res.isNaN() &&
3524 (Status == APFloat::opOK || Status == APFloat::opInexact)) {
3525 Res = IsFTZ ? FTZPreserveSign(V: Res) : Res;
3526 return ConstantFP::get(Ty, V: Res);
3527 }
3528 return nullptr;
3529 }
3530 }
3531
3532 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
3533 return nullptr;
3534
3535 switch (IntrinsicID) {
3536 default:
3537 break;
3538 case Intrinsic::pow:
3539 return ConstantFoldBinaryFP(NativeFP: pow, V: Op1V, W: Op2V, Ty);
3540 case Intrinsic::amdgcn_fmul_legacy:
3541 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3542 // NaN or infinity, gives +0.0.
3543 if (Op1V.isZero() || Op2V.isZero())
3544 return ConstantFP::getZero(Ty);
3545 return ConstantFP::get(Ty, V: Op1V * Op2V);
3546 }
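      // E.g., amdgcn_fmul_legacy(-0.0f, NaN) folds to +0.0 under the legacy
      // rule above, whereas an ordinary fmul would produce NaN.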
3547
3548 } else if (auto *Op2C = dyn_cast<ConstantInt>(Val: Operands[1])) {
3549 switch (IntrinsicID) {
3550 case Intrinsic::ldexp: {
3551 return ConstantFP::get(
3552 Context&: Ty->getContext(),
3553 V: scalbn(X: Op1V, Exp: Op2C->getSExtValue(), RM: APFloat::rmNearestTiesToEven));
3554 }
3555 case Intrinsic::is_fpclass: {
3556 FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
3557 bool Result =
3558 ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
3559 ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
3560 ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
3561 ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
3562 ((Mask & fcNegSubnormal) && Op1V.isDenormal() && Op1V.isNegative()) ||
3563 ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
3564 ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
3565 ((Mask & fcPosSubnormal) && Op1V.isDenormal() && !Op1V.isNegative()) ||
3566 ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
3567 ((Mask & fcPosInf) && Op1V.isPosInfinity());
3568 return ConstantInt::get(Ty, V: Result);
3569 }
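      // E.g., assuming the usual FPClassTest encoding (fcZero == 0x60),
      // is_fpclass(-0.0f, 0x60) folds to true via the fcNegZero clause above.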
3570 case Intrinsic::powi: {
3571 int Exp = static_cast<int>(Op2C->getSExtValue());
3572 switch (Ty->getTypeID()) {
3573 case Type::HalfTyID:
3574 case Type::FloatTyID: {
3575 APFloat Res(static_cast<float>(std::pow(x: Op1V.convertToFloat(), y: Exp)));
3576 if (Ty->isHalfTy()) {
3577 bool Unused;
3578 Res.convert(ToSemantics: APFloat::IEEEhalf(), RM: APFloat::rmNearestTiesToEven,
3579 losesInfo: &Unused);
3580 }
3581 return ConstantFP::get(Ty, V: Res);
3582 }
3583 case Type::DoubleTyID:
3584 return ConstantFP::get(Ty, V: std::pow(x: Op1V.convertToDouble(), y: Exp));
3585 default:
3586 return nullptr;
3587 }
3588 }
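      // E.g., powi(2.0, 10) folds to 1024.0 via the host pow() above.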
3589 default:
3590 break;
3591 }
3592 }
3593 return nullptr;
3594 }
3595
3596 if (Operands[0]->getType()->isIntegerTy() &&
3597 Operands[1]->getType()->isIntegerTy()) {
3598 const APInt *C0, *C1;
3599 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3600 !getConstIntOrUndef(Op: Operands[1], C&: C1))
3601 return nullptr;
3602
3603 switch (IntrinsicID) {
3604 default: break;
3605 case Intrinsic::smax:
3606 case Intrinsic::smin:
3607 case Intrinsic::umax:
3608 case Intrinsic::umin:
3609 if (!C0 && !C1)
3610 return UndefValue::get(T: Ty);
3611 if (!C0 || !C1)
3612 return MinMaxIntrinsic::getSaturationPoint(ID: IntrinsicID, Ty);
3613 return ConstantInt::get(
3614 Ty, V: ICmpInst::compare(LHS: *C0, RHS: *C1,
3615 Pred: MinMaxIntrinsic::getPredicate(ID: IntrinsicID))
3616 ? *C0
3617 : *C1);
3618
3619 case Intrinsic::scmp:
3620 case Intrinsic::ucmp:
3621 if (!C0 || !C1)
3622 return ConstantInt::get(Ty, V: 0);
3623
3624 int Res;
3625 if (IntrinsicID == Intrinsic::scmp)
3626 Res = C0->sgt(RHS: *C1) ? 1 : C0->slt(RHS: *C1) ? -1 : 0;
3627 else
3628 Res = C0->ugt(RHS: *C1) ? 1 : C0->ult(RHS: *C1) ? -1 : 0;
3629 return ConstantInt::get(Ty, V: Res, /*IsSigned=*/true);
3630
3631 case Intrinsic::usub_with_overflow:
3632 case Intrinsic::ssub_with_overflow:
3633 // X - undef -> { 0, false }
3634 // undef - X -> { 0, false }
3635 if (!C0 || !C1)
3636 return Constant::getNullValue(Ty);
3637 [[fallthrough]];
3638 case Intrinsic::uadd_with_overflow:
3639 case Intrinsic::sadd_with_overflow:
3640 // X + undef -> { -1, false }
3641      // undef + X -> { -1, false }
3642 if (!C0 || !C1) {
3643 return ConstantStruct::get(
3644 T: cast<StructType>(Val: Ty),
3645 V: {Constant::getAllOnesValue(Ty: Ty->getStructElementType(N: 0)),
3646 Constant::getNullValue(Ty: Ty->getStructElementType(N: 1))});
3647 }
3648 [[fallthrough]];
3649 case Intrinsic::smul_with_overflow:
3650 case Intrinsic::umul_with_overflow: {
3651 // undef * X -> { 0, false }
3652 // X * undef -> { 0, false }
3653 if (!C0 || !C1)
3654 return Constant::getNullValue(Ty);
3655
3656 APInt Res;
3657 bool Overflow;
3658 switch (IntrinsicID) {
3659 default: llvm_unreachable("Invalid case");
3660 case Intrinsic::sadd_with_overflow:
3661 Res = C0->sadd_ov(RHS: *C1, Overflow);
3662 break;
3663 case Intrinsic::uadd_with_overflow:
3664 Res = C0->uadd_ov(RHS: *C1, Overflow);
3665 break;
3666 case Intrinsic::ssub_with_overflow:
3667 Res = C0->ssub_ov(RHS: *C1, Overflow);
3668 break;
3669 case Intrinsic::usub_with_overflow:
3670 Res = C0->usub_ov(RHS: *C1, Overflow);
3671 break;
3672 case Intrinsic::smul_with_overflow:
3673 Res = C0->smul_ov(RHS: *C1, Overflow);
3674 break;
3675 case Intrinsic::umul_with_overflow:
3676 Res = C0->umul_ov(RHS: *C1, Overflow);
3677 break;
3678 }
3679 Constant *Ops[] = {
3680 ConstantInt::get(Context&: Ty->getContext(), V: Res),
3681 ConstantInt::get(Ty: Type::getInt1Ty(C&: Ty->getContext()), V: Overflow)
3682 };
3683 return ConstantStruct::get(T: cast<StructType>(Val: Ty), V: Ops);
3684 }
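    // E.g., uadd_with_overflow(i8 255, i8 1) wraps to zero and folds to the
    // struct { i8 0, i1 true }.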
3685 case Intrinsic::uadd_sat:
3686 case Intrinsic::sadd_sat:
3687 if (!C0 && !C1)
3688 return UndefValue::get(T: Ty);
3689 if (!C0 || !C1)
3690 return Constant::getAllOnesValue(Ty);
3691 if (IntrinsicID == Intrinsic::uadd_sat)
3692 return ConstantInt::get(Ty, V: C0->uadd_sat(RHS: *C1));
3693 else
3694 return ConstantInt::get(Ty, V: C0->sadd_sat(RHS: *C1));
3695 case Intrinsic::usub_sat:
3696 case Intrinsic::ssub_sat:
3697 if (!C0 && !C1)
3698 return UndefValue::get(T: Ty);
3699 if (!C0 || !C1)
3700 return Constant::getNullValue(Ty);
3701 if (IntrinsicID == Intrinsic::usub_sat)
3702 return ConstantInt::get(Ty, V: C0->usub_sat(RHS: *C1));
3703 else
3704 return ConstantInt::get(Ty, V: C0->ssub_sat(RHS: *C1));
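    // E.g., sadd_sat(i8 100, i8 100) clamps to 127, and usub_sat(i8 1, i8 2)
    // clamps to 0.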
3705 case Intrinsic::cttz:
3706 case Intrinsic::ctlz:
3707 assert(C1 && "Must be constant int");
3708
3709 // cttz(0, 1) and ctlz(0, 1) are poison.
3710 if (C1->isOne() && (!C0 || C0->isZero()))
3711 return PoisonValue::get(T: Ty);
3712 if (!C0)
3713 return Constant::getNullValue(Ty);
3714 if (IntrinsicID == Intrinsic::cttz)
3715 return ConstantInt::get(Ty, V: C0->countr_zero());
3716 else
3717 return ConstantInt::get(Ty, V: C0->countl_zero());
3718
3719 case Intrinsic::abs:
3720 assert(C1 && "Must be constant int");
3721 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
3722
3723 // Undef or minimum val operand with poison min --> poison
3724 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
3725 return PoisonValue::get(T: Ty);
3726
3727 // Undef operand with no poison min --> 0 (sign bit must be clear)
3728 if (!C0)
3729 return Constant::getNullValue(Ty);
3730
3731 return ConstantInt::get(Ty, V: C0->abs());
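    // E.g., abs(i8 -128, i1 1) folds to poison, while abs(i8 -128, i1 0)
    // wraps and folds back to -128.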
3732 case Intrinsic::amdgcn_wave_reduce_umin:
3733 case Intrinsic::amdgcn_wave_reduce_umax:
3734 case Intrinsic::amdgcn_wave_reduce_max:
3735 case Intrinsic::amdgcn_wave_reduce_min:
3736 case Intrinsic::amdgcn_wave_reduce_add:
3737 case Intrinsic::amdgcn_wave_reduce_sub:
3738 case Intrinsic::amdgcn_wave_reduce_and:
3739 case Intrinsic::amdgcn_wave_reduce_or:
3740 case Intrinsic::amdgcn_wave_reduce_xor:
3741 return dyn_cast<Constant>(Val: Operands[0]);
3742 }
3743
3744 return nullptr;
3745 }
3746
3747  // Support ConstantVector in case we have an undef element at the top level.
3748 if ((isa<ConstantVector>(Val: Operands[0]) ||
3749 isa<ConstantDataVector>(Val: Operands[0])) &&
3750 // Check for default rounding mode.
3751 // FIXME: Support other rounding modes?
3752 isa<ConstantInt>(Val: Operands[1]) &&
3753 cast<ConstantInt>(Val: Operands[1])->getValue() == 4) {
3754 auto *Op = cast<Constant>(Val: Operands[0]);
3755 switch (IntrinsicID) {
3756 default: break;
3757 case Intrinsic::x86_avx512_vcvtss2si32:
3758 case Intrinsic::x86_avx512_vcvtss2si64:
3759 case Intrinsic::x86_avx512_vcvtsd2si32:
3760 case Intrinsic::x86_avx512_vcvtsd2si64:
3761 if (ConstantFP *FPOp =
3762 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3763 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3764 /*roundTowardZero=*/false, Ty,
3765 /*IsSigned*/true);
3766 break;
3767 case Intrinsic::x86_avx512_vcvtss2usi32:
3768 case Intrinsic::x86_avx512_vcvtss2usi64:
3769 case Intrinsic::x86_avx512_vcvtsd2usi32:
3770 case Intrinsic::x86_avx512_vcvtsd2usi64:
3771 if (ConstantFP *FPOp =
3772 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3773 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3774 /*roundTowardZero=*/false, Ty,
3775 /*IsSigned*/false);
3776 break;
3777 case Intrinsic::x86_avx512_cvttss2si:
3778 case Intrinsic::x86_avx512_cvttss2si64:
3779 case Intrinsic::x86_avx512_cvttsd2si:
3780 case Intrinsic::x86_avx512_cvttsd2si64:
3781 if (ConstantFP *FPOp =
3782 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3783 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3784 /*roundTowardZero=*/true, Ty,
3785 /*IsSigned*/true);
3786 break;
3787 case Intrinsic::x86_avx512_cvttss2usi:
3788 case Intrinsic::x86_avx512_cvttss2usi64:
3789 case Intrinsic::x86_avx512_cvttsd2usi:
3790 case Intrinsic::x86_avx512_cvttsd2usi64:
3791 if (ConstantFP *FPOp =
3792 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3793 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3794 /*roundTowardZero=*/true, Ty,
3795 /*IsSigned*/false);
3796 break;
3797 }
3798 }
3799 return nullptr;
3800}
3801
3802static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
3803 const APFloat &S0,
3804 const APFloat &S1,
3805 const APFloat &S2) {
3806 unsigned ID;
3807 const fltSemantics &Sem = S0.getSemantics();
3808 APFloat MA(Sem), SC(Sem), TC(Sem);
3809 if (abs(X: S2) >= abs(X: S0) && abs(X: S2) >= abs(X: S1)) {
3810 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
3811 // S2 < 0
3812 ID = 5;
3813 SC = -S0;
3814 } else {
3815 ID = 4;
3816 SC = S0;
3817 }
3818 MA = S2;
3819 TC = -S1;
3820 } else if (abs(X: S1) >= abs(X: S0)) {
3821 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
3822 // S1 < 0
3823 ID = 3;
3824 TC = -S2;
3825 } else {
3826 ID = 2;
3827 TC = S2;
3828 }
3829 MA = S1;
3830 SC = S0;
3831 } else {
3832 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
3833 // S0 < 0
3834 ID = 1;
3835 SC = S2;
3836 } else {
3837 ID = 0;
3838 SC = -S2;
3839 }
3840 MA = S0;
3841 TC = -S1;
3842 }
3843 switch (IntrinsicID) {
3844 default:
3845 llvm_unreachable("unhandled amdgcn cube intrinsic");
3846 case Intrinsic::amdgcn_cubeid:
3847 return APFloat(Sem, ID);
3848 case Intrinsic::amdgcn_cubema:
3849 return MA + MA;
3850 case Intrinsic::amdgcn_cubesc:
3851 return SC;
3852 case Intrinsic::amdgcn_cubetc:
3853 return TC;
3854 }
3855}
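// Worked example for the helper above: with (S0, S1, S2) == (1.0, 2.0, 3.0),
// |S2| is the (positive) major axis, so amdgcn_cubeid folds to 4.0.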
3856
3857static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
3858 Type *Ty) {
3859 const APInt *C0, *C1, *C2;
3860 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3861 !getConstIntOrUndef(Op: Operands[1], C&: C1) ||
3862 !getConstIntOrUndef(Op: Operands[2], C&: C2))
3863 return nullptr;
3864
3865 if (!C2)
3866 return UndefValue::get(T: Ty);
3867
3868 APInt Val(32, 0);
3869 unsigned NumUndefBytes = 0;
3870 for (unsigned I = 0; I < 32; I += 8) {
3871 unsigned Sel = C2->extractBitsAsZExtValue(numBits: 8, bitPosition: I);
3872 unsigned B = 0;
3873
3874 if (Sel >= 13)
3875 B = 0xff;
3876 else if (Sel == 12)
3877 B = 0x00;
3878 else {
3879 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
3880 if (!Src)
3881 ++NumUndefBytes;
3882 else if (Sel < 8)
3883 B = Src->extractBitsAsZExtValue(numBits: 8, bitPosition: (Sel & 3) * 8);
3884 else
3885 B = Src->extractBitsAsZExtValue(numBits: 1, bitPosition: (Sel & 1) ? 31 : 15) * 0xff;
3886 }
3887
3888 Val.insertBits(SubBits: B, bitPosition: I, numBits: 8);
3889 }
3890
3891 if (NumUndefBytes == 4)
3892 return UndefValue::get(T: Ty);
3893
3894 return ConstantInt::get(Ty, V: Val);
3895}
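// Worked example for the fold above: a selector of 0x03020100 returns the
// second source unchanged, and 0x07060504 returns the first.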
3896
3897static Constant *ConstantFoldScalarCall3(StringRef Name,
3898 Intrinsic::ID IntrinsicID,
3899 Type *Ty,
3900 ArrayRef<Constant *> Operands,
3901 const TargetLibraryInfo *TLI,
3902 const CallBase *Call) {
3903 assert(Operands.size() == 3 && "Wrong number of operands.");
3904
3905 if (const auto *Op1 = dyn_cast<ConstantFP>(Val: Operands[0])) {
3906 if (const auto *Op2 = dyn_cast<ConstantFP>(Val: Operands[1])) {
3907 if (const auto *Op3 = dyn_cast<ConstantFP>(Val: Operands[2])) {
3908 const APFloat &C1 = Op1->getValueAPF();
3909 const APFloat &C2 = Op2->getValueAPF();
3910 const APFloat &C3 = Op3->getValueAPF();
3911
3912 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Val: Call)) {
3913 RoundingMode RM = getEvaluationRoundingMode(CI: ConstrIntr);
3914 APFloat Res = C1;
3915 APFloat::opStatus St;
3916 switch (IntrinsicID) {
3917 default:
3918 return nullptr;
3919 case Intrinsic::experimental_constrained_fma:
3920 case Intrinsic::experimental_constrained_fmuladd:
3921 St = Res.fusedMultiplyAdd(Multiplicand: C2, Addend: C3, RM);
3922 break;
3923 }
3924 if (mayFoldConstrained(
3925 CI: const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
3926 return ConstantFP::get(Ty, V: Res);
3927 return nullptr;
3928 }
3929
3930 switch (IntrinsicID) {
3931 default: break;
3932 case Intrinsic::amdgcn_fma_legacy: {
3933 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3934 // NaN or infinity, gives +0.0.
3935 if (C1.isZero() || C2.isZero()) {
3936 // It's tempting to just return C3 here, but that would give the
3937 // wrong result if C3 was -0.0.
3938 return ConstantFP::get(Ty, V: APFloat(0.0f) + C3);
3939 }
3940 [[fallthrough]];
3941 }
3942 case Intrinsic::fma:
3943 case Intrinsic::fmuladd: {
3944 APFloat V = C1;
3945 V.fusedMultiplyAdd(Multiplicand: C2, Addend: C3, RM: APFloat::rmNearestTiesToEven);
3946 return ConstantFP::get(Ty, V);
3947 }
3948
3949 case Intrinsic::nvvm_fma_rm_f:
3950 case Intrinsic::nvvm_fma_rn_f:
3951 case Intrinsic::nvvm_fma_rp_f:
3952 case Intrinsic::nvvm_fma_rz_f:
3953 case Intrinsic::nvvm_fma_rm_d:
3954 case Intrinsic::nvvm_fma_rn_d:
3955 case Intrinsic::nvvm_fma_rp_d:
3956 case Intrinsic::nvvm_fma_rz_d:
3957 case Intrinsic::nvvm_fma_rm_ftz_f:
3958 case Intrinsic::nvvm_fma_rn_ftz_f:
3959 case Intrinsic::nvvm_fma_rp_ftz_f:
3960 case Intrinsic::nvvm_fma_rz_ftz_f: {
3961 bool IsFTZ = nvvm::FMAShouldFTZ(IntrinsicID);
3962 APFloat A = IsFTZ ? FTZPreserveSign(V: C1) : C1;
3963 APFloat B = IsFTZ ? FTZPreserveSign(V: C2) : C2;
3964 APFloat C = IsFTZ ? FTZPreserveSign(V: C3) : C3;
3965
3966 APFloat::roundingMode RoundMode =
3967 nvvm::GetFMARoundingMode(IntrinsicID);
3968
3969 APFloat Res = A;
3970 APFloat::opStatus Status = Res.fusedMultiplyAdd(Multiplicand: B, Addend: C, RM: RoundMode);
3971
3972 if (!Res.isNaN() &&
3973 (Status == APFloat::opOK || Status == APFloat::opInexact)) {
3974 Res = IsFTZ ? FTZPreserveSign(V: Res) : Res;
3975 return ConstantFP::get(Ty, V: Res);
3976 }
3977 return nullptr;
3978 }
3979
3980 case Intrinsic::amdgcn_cubeid:
3981 case Intrinsic::amdgcn_cubema:
3982 case Intrinsic::amdgcn_cubesc:
3983 case Intrinsic::amdgcn_cubetc: {
3984 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, S0: C1, S1: C2, S2: C3);
3985 return ConstantFP::get(Ty, V);
3986 }
3987 }
3988 }
3989 }
3990 }
3991
3992 if (IntrinsicID == Intrinsic::smul_fix ||
3993 IntrinsicID == Intrinsic::smul_fix_sat) {
3994 const APInt *C0, *C1;
3995 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3996 !getConstIntOrUndef(Op: Operands[1], C&: C1))
3997 return nullptr;
3998
3999 // undef * C -> 0
4000 // C * undef -> 0
4001 if (!C0 || !C1)
4002 return Constant::getNullValue(Ty);
4003
4004 // This code performs rounding towards negative infinity in case the result
4005 // cannot be represented exactly for the given scale. Targets that do care
4006 // about rounding should use a target hook for specifying how rounding
4007 // should be done, and provide their own folding to be consistent with
4008 // rounding. This is the same approach as used by
4009 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
4010 unsigned Scale = cast<ConstantInt>(Val: Operands[2])->getZExtValue();
4011 unsigned Width = C0->getBitWidth();
4012 assert(Scale < Width && "Illegal scale.");
4013 unsigned ExtendedWidth = Width * 2;
4014 APInt Product =
4015 (C0->sext(width: ExtendedWidth) * C1->sext(width: ExtendedWidth)).ashr(ShiftAmt: Scale);
4016 if (IntrinsicID == Intrinsic::smul_fix_sat) {
4017 APInt Max = APInt::getSignedMaxValue(numBits: Width).sext(width: ExtendedWidth);
4018 APInt Min = APInt::getSignedMinValue(numBits: Width).sext(width: ExtendedWidth);
4019 Product = APIntOps::smin(A: Product, B: Max);
4020 Product = APIntOps::smax(A: Product, B: Min);
4021 }
4022 return ConstantInt::get(Context&: Ty->getContext(), V: Product.sextOrTrunc(width: Width));
4023 }
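  // E.g., smul_fix(i8 3, i8 3, i32 1), i.e. 1.5 * 1.5 at scale 1, computes
  // (3 * 3) >> 1 == 4, which is 2.25 rounded down to 2.0.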
4024
4025 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
4026 const APInt *C0, *C1, *C2;
4027 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
4028 !getConstIntOrUndef(Op: Operands[1], C&: C1) ||
4029 !getConstIntOrUndef(Op: Operands[2], C&: C2))
4030 return nullptr;
4031
4032 bool IsRight = IntrinsicID == Intrinsic::fshr;
4033 if (!C2)
4034 return Operands[IsRight ? 1 : 0];
4035 if (!C0 && !C1)
4036 return UndefValue::get(T: Ty);
4037
4038 // The shift amount is interpreted as modulo the bitwidth. If the shift
4039 // amount is effectively 0, avoid UB due to oversized inverse shift below.
4040 unsigned BitWidth = C2->getBitWidth();
4041 unsigned ShAmt = C2->urem(RHS: BitWidth);
4042 if (!ShAmt)
4043 return Operands[IsRight ? 1 : 0];
4044
4045 // (C0 << ShlAmt) | (C1 >> LshrAmt)
4046 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
4047 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
4048 if (!C0)
4049 return ConstantInt::get(Ty, V: C1->lshr(shiftAmt: LshrAmt));
4050 if (!C1)
4051 return ConstantInt::get(Ty, V: C0->shl(shiftAmt: ShlAmt));
4052 return ConstantInt::get(Ty, V: C0->shl(shiftAmt: ShlAmt) | C1->lshr(shiftAmt: LshrAmt));
4053 }
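  // E.g., fshl(i8 0xFF, i8 0x00, i8 4) folds to 0xF0, and
  // fshr(i8 0x00, i8 0xFF, i8 4) folds to 0x0F.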
4054
4055 if (IntrinsicID == Intrinsic::amdgcn_perm)
4056 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
4057
4058 return nullptr;
4059}
4060
4061static Constant *ConstantFoldScalarCall(StringRef Name,
4062 Intrinsic::ID IntrinsicID,
4063 Type *Ty,
4064 ArrayRef<Constant *> Operands,
4065 const TargetLibraryInfo *TLI,
4066 const CallBase *Call) {
4067 if (IntrinsicID != Intrinsic::not_intrinsic &&
4068 any_of(Range&: Operands, P: IsaPred<PoisonValue>) &&
4069 intrinsicPropagatesPoison(IID: IntrinsicID))
4070 return PoisonValue::get(T: Ty);
4071
4072 if (Operands.size() == 1)
4073 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
4074
4075 if (Operands.size() == 2) {
4076 if (Constant *FoldedLibCall =
4077 ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
4078 return FoldedLibCall;
4079 }
4080 return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
4081 }
4082
4083 if (Operands.size() == 3)
4084 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
4085
4086 return nullptr;
4087}
4088
4089static Constant *ConstantFoldFixedVectorCall(
4090 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
4091 ArrayRef<Constant *> Operands, const DataLayout &DL,
4092 const TargetLibraryInfo *TLI, const CallBase *Call) {
4093 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
4094 SmallVector<Constant *, 4> Lane(Operands.size());
4095 Type *Ty = FVTy->getElementType();
4096
4097 switch (IntrinsicID) {
4098 case Intrinsic::masked_load: {
4099 auto *SrcPtr = Operands[0];
4100 auto *Mask = Operands[1];
4101 auto *Passthru = Operands[2];
4102
4103 Constant *VecData = ConstantFoldLoadFromConstPtr(C: SrcPtr, Ty: FVTy, DL);
4104
4105 SmallVector<Constant *, 32> NewElements;
4106 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
4107 auto *MaskElt = Mask->getAggregateElement(Elt: I);
4108 if (!MaskElt)
4109 break;
4110 auto *PassthruElt = Passthru->getAggregateElement(Elt: I);
4111 auto *VecElt = VecData ? VecData->getAggregateElement(Elt: I) : nullptr;
4112 if (isa<UndefValue>(Val: MaskElt)) {
4113 if (PassthruElt)
4114 NewElements.push_back(Elt: PassthruElt);
4115 else if (VecElt)
4116 NewElements.push_back(Elt: VecElt);
4117 else
4118 return nullptr;
        // An undef mask lane may take either value, so keep the element
        // chosen above and move on to the next lane.
        continue;
4119      }
4120 if (MaskElt->isNullValue()) {
4121 if (!PassthruElt)
4122 return nullptr;
4123 NewElements.push_back(Elt: PassthruElt);
4124 } else if (MaskElt->isOneValue()) {
4125 if (!VecElt)
4126 return nullptr;
4127 NewElements.push_back(Elt: VecElt);
4128 } else {
4129 return nullptr;
4130 }
4131 }
4132 if (NewElements.size() != FVTy->getNumElements())
4133 return nullptr;
4134 return ConstantVector::get(V: NewElements);
4135 }
4136 case Intrinsic::arm_mve_vctp8:
4137 case Intrinsic::arm_mve_vctp16:
4138 case Intrinsic::arm_mve_vctp32:
4139 case Intrinsic::arm_mve_vctp64: {
4140 if (auto *Op = dyn_cast<ConstantInt>(Val: Operands[0])) {
4141 unsigned Lanes = FVTy->getNumElements();
4142 uint64_t Limit = Op->getZExtValue();
4143
4144 SmallVector<Constant *, 16> NCs;
4145 for (unsigned i = 0; i < Lanes; i++) {
4146 if (i < Limit)
4147 NCs.push_back(Elt: ConstantInt::getTrue(Ty));
4148 else
4149 NCs.push_back(Elt: ConstantInt::getFalse(Ty));
4150 }
4151 return ConstantVector::get(V: NCs);
4152 }
4153 return nullptr;
4154 }
4155 case Intrinsic::get_active_lane_mask: {
4156 auto *Op0 = dyn_cast<ConstantInt>(Val: Operands[0]);
4157 auto *Op1 = dyn_cast<ConstantInt>(Val: Operands[1]);
4158 if (Op0 && Op1) {
4159 unsigned Lanes = FVTy->getNumElements();
4160 uint64_t Base = Op0->getZExtValue();
4161 uint64_t Limit = Op1->getZExtValue();
4162
4163 SmallVector<Constant *, 16> NCs;
4164 for (unsigned i = 0; i < Lanes; i++) {
4165 if (Base + i < Limit)
4166 NCs.push_back(Elt: ConstantInt::getTrue(Ty));
4167 else
4168 NCs.push_back(Elt: ConstantInt::getFalse(Ty));
4169 }
4170 return ConstantVector::get(V: NCs);
4171 }
4172 return nullptr;
4173 }
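  // E.g., for a <4 x i1> result, get_active_lane_mask(i32 1, i32 3) folds to
  // <1, 1, 0, 0>: only lanes 0 and 1 satisfy Base + i < Limit.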
4174 case Intrinsic::vector_extract: {
4175 auto *Idx = dyn_cast<ConstantInt>(Val: Operands[1]);
4176 Constant *Vec = Operands[0];
4177 if (!Idx || !isa<FixedVectorType>(Val: Vec->getType()))
4178 return nullptr;
4179
4180 unsigned NumElements = FVTy->getNumElements();
4181 unsigned VecNumElements =
4182 cast<FixedVectorType>(Val: Vec->getType())->getNumElements();
4183 unsigned StartingIndex = Idx->getZExtValue();
4184
4185      // Extracting the entire vector is a no-op.
4186 if (NumElements == VecNumElements && StartingIndex == 0)
4187 return Vec;
4188
4189 for (unsigned I = StartingIndex, E = StartingIndex + NumElements; I < E;
4190 ++I) {
4191 Constant *Elt = Vec->getAggregateElement(Elt: I);
4192 if (!Elt)
4193 return nullptr;
4194 Result[I - StartingIndex] = Elt;
4195 }
4196
4197 return ConstantVector::get(V: Result);
4198 }
4199 case Intrinsic::vector_insert: {
4200 Constant *Vec = Operands[0];
4201 Constant *SubVec = Operands[1];
4202 auto *Idx = dyn_cast<ConstantInt>(Val: Operands[2]);
4203 if (!Idx || !isa<FixedVectorType>(Val: Vec->getType()))
4204 return nullptr;
4205
4206 unsigned SubVecNumElements =
4207 cast<FixedVectorType>(Val: SubVec->getType())->getNumElements();
4208 unsigned VecNumElements =
4209 cast<FixedVectorType>(Val: Vec->getType())->getNumElements();
4210 unsigned IdxN = Idx->getZExtValue();
4211      // Replacing the entire vector with a subvector is a no-op.
4212 if (SubVecNumElements == VecNumElements && IdxN == 0)
4213 return SubVec;
4214
4215 for (unsigned I = 0; I < VecNumElements; ++I) {
4216 Constant *Elt;
4217        if (I >= IdxN && I < IdxN + SubVecNumElements)
4218 Elt = SubVec->getAggregateElement(Elt: I - IdxN);
4219 else
4220 Elt = Vec->getAggregateElement(Elt: I);
4221 if (!Elt)
4222 return nullptr;
4223 Result[I] = Elt;
4224 }
4225 return ConstantVector::get(V: Result);
4226 }
4227 case Intrinsic::vector_interleave2:
4228 case Intrinsic::vector_interleave3:
4229 case Intrinsic::vector_interleave4:
4230 case Intrinsic::vector_interleave5:
4231 case Intrinsic::vector_interleave6:
4232 case Intrinsic::vector_interleave7:
4233 case Intrinsic::vector_interleave8: {
4234 unsigned NumElements =
4235 cast<FixedVectorType>(Val: Operands[0]->getType())->getNumElements();
4236 unsigned NumOperands = Operands.size();
4237 for (unsigned I = 0; I < NumElements; ++I) {
4238 for (unsigned J = 0; J < NumOperands; ++J) {
4239 Constant *Elt = Operands[J]->getAggregateElement(Elt: I);
4240 if (!Elt)
4241 return nullptr;
4242 Result[NumOperands * I + J] = Elt;
4243 }
4244 }
4245 return ConstantVector::get(V: Result);
4246 }
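  // E.g., vector_interleave2(<2 x i8> <0, 1>, <2 x i8> <2, 3>) folds to
  // <4 x i8> <0, 2, 1, 3>.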
4247 case Intrinsic::wasm_dot: {
4248 unsigned NumElements =
4249 cast<FixedVectorType>(Val: Operands[0]->getType())->getNumElements();
4250
4251 assert(NumElements == 8 && Result.size() == 4 &&
4252 "wasm dot takes i16x8 and produces i32x4");
4253 assert(Ty->isIntegerTy());
4254 int32_t MulVector[8];
4255
4256 for (unsigned I = 0; I < NumElements; ++I) {
4257 ConstantInt *Elt0 =
4258 cast<ConstantInt>(Val: Operands[0]->getAggregateElement(Elt: I));
4259 ConstantInt *Elt1 =
4260 cast<ConstantInt>(Val: Operands[1]->getAggregateElement(Elt: I));
4261
4262 MulVector[I] = Elt0->getSExtValue() * Elt1->getSExtValue();
4263 }
4264 for (unsigned I = 0; I < Result.size(); I++) {
4265 int64_t IAdd = (int64_t)MulVector[I * 2] + (int64_t)MulVector[I * 2 + 1];
4266 Result[I] = ConstantInt::getSigned(Ty, V: IAdd, /*ImplicitTrunc=*/true);
4267 }
4268
4269 return ConstantVector::get(V: Result);
4270 }
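  // E.g., wasm_dot(<8 x i16> <1, 2, 3, 4, 5, 6, 7, 8>, splat of 1) sums
  // adjacent products and folds to <4 x i32> <3, 7, 11, 15>.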
4271 default:
4272 break;
4273 }
4274
4275 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
4276 // Gather a column of constants.
4277 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
4278 // Some intrinsics use a scalar type for certain arguments.
4279 if (isVectorIntrinsicWithScalarOpAtArg(ID: IntrinsicID, ScalarOpdIdx: J, /*TTI=*/nullptr)) {
4280 Lane[J] = Operands[J];
4281 continue;
4282 }
4283
4284 Constant *Agg = Operands[J]->getAggregateElement(Elt: I);
4285 if (!Agg)
4286 return nullptr;
4287
4288 Lane[J] = Agg;
4289 }
4290
4291 // Use the regular scalar folding to simplify this column.
4292 Constant *Folded =
4293 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Operands: Lane, TLI, Call);
4294 if (!Folded)
4295 return nullptr;
4296 Result[I] = Folded;
4297 }
4298
4299 return ConstantVector::get(V: Result);
4300}
4301
4302static Constant *ConstantFoldScalableVectorCall(
4303 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
4304 ArrayRef<Constant *> Operands, const DataLayout &DL,
4305 const TargetLibraryInfo *TLI, const CallBase *Call) {
4306 switch (IntrinsicID) {
4307 case Intrinsic::aarch64_sve_convert_from_svbool: {
4308 auto *Src = dyn_cast<Constant>(Val: Operands[0]);
4309 if (!Src || !Src->isNullValue())
4310 break;
4311
4312 return ConstantInt::getFalse(Ty: SVTy);
4313 }
4314 case Intrinsic::get_active_lane_mask: {
4315 auto *Op0 = dyn_cast<ConstantInt>(Val: Operands[0]);
4316 auto *Op1 = dyn_cast<ConstantInt>(Val: Operands[1]);
4317 if (Op0 && Op1 && Op0->getValue().uge(RHS: Op1->getValue()))
4318 return ConstantVector::getNullValue(Ty: SVTy);
4319 break;
4320 }
4321 case Intrinsic::vector_interleave2:
4322 case Intrinsic::vector_interleave3:
4323 case Intrinsic::vector_interleave4:
4324 case Intrinsic::vector_interleave5:
4325 case Intrinsic::vector_interleave6:
4326 case Intrinsic::vector_interleave7:
4327 case Intrinsic::vector_interleave8: {
4328 Constant *SplatVal = Operands[0]->getSplatValue();
4329 if (!SplatVal)
4330 return nullptr;
4331
4332 if (!llvm::all_equal(Range&: Operands))
4333 return nullptr;
4334
4335 return ConstantVector::getSplat(EC: SVTy->getElementCount(), Elt: SplatVal);
4336 }
4337 default:
4338 break;
4339 }
4340
4341 // If trivially vectorizable, try folding it via the scalar call if all
4342 // operands are splats.
4343
4344 // TODO: ConstantFoldFixedVectorCall should probably check this too?
4345 if (!isTriviallyVectorizable(ID: IntrinsicID))
4346 return nullptr;
4347
4348 SmallVector<Constant *, 4> SplatOps;
4349 for (auto [I, Op] : enumerate(First&: Operands)) {
4350 if (isVectorIntrinsicWithScalarOpAtArg(ID: IntrinsicID, ScalarOpdIdx: I, /*TTI=*/nullptr)) {
4351 SplatOps.push_back(Elt: Op);
4352 continue;
4353 }
4354 Constant *Splat = Op->getSplatValue();
4355 if (!Splat)
4356 return nullptr;
4357 SplatOps.push_back(Elt: Splat);
4358 }
4359 Constant *Folded = ConstantFoldScalarCall(
4360 Name, IntrinsicID, Ty: SVTy->getElementType(), Operands: SplatOps, TLI, Call);
4361 if (!Folded)
4362 return nullptr;
4363 return ConstantVector::getSplat(EC: SVTy->getElementCount(), Elt: Folded);
4364}
4365
4366static std::pair<Constant *, Constant *>
4367ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
4368 if (isa<PoisonValue>(Val: Op))
4369 return {Op, PoisonValue::get(T: IntTy)};
4370
4371 auto *ConstFP = dyn_cast<ConstantFP>(Val: Op);
4372 if (!ConstFP)
4373 return {};
4374
4375 const APFloat &U = ConstFP->getValueAPF();
4376 int FrexpExp;
4377 APFloat FrexpMant = frexp(X: U, Exp&: FrexpExp, RM: APFloat::rmNearestTiesToEven);
4378 Constant *Result0 = ConstantFP::get(Ty: ConstFP->getType(), V: FrexpMant);
4379
4380 // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
4381 // using undef.
4382 Constant *Result1 = FrexpMant.isFinite()
4383 ? ConstantInt::getSigned(Ty: IntTy, V: FrexpExp)
4384 : ConstantInt::getNullValue(Ty: IntTy);
4385 return {Result0, Result1};
4386}
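// Worked example for the helper above: frexp(8.0) folds to the pair
// {0.5, 4}, since 8.0 == 0.5 * 2^4; for inf/nan the exponent slot is 0.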
4387
4388/// Handle intrinsics that return tuples, which may be tuples of vectors.
4389static Constant *
4390ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
4391 StructType *StTy, ArrayRef<Constant *> Operands,
4392 const DataLayout &DL, const TargetLibraryInfo *TLI,
4393 const CallBase *Call) {
4394
4395 switch (IntrinsicID) {
4396 case Intrinsic::frexp: {
4397 Type *Ty0 = StTy->getContainedType(i: 0);
4398 Type *Ty1 = StTy->getContainedType(i: 1)->getScalarType();
4399
4400 if (auto *FVTy0 = dyn_cast<FixedVectorType>(Val: Ty0)) {
4401 SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
4402 SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());
4403
4404 for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
4405 Constant *Lane = Operands[0]->getAggregateElement(Elt: I);
4406 std::tie(args&: Results0[I], args&: Results1[I]) =
4407 ConstantFoldScalarFrexpCall(Op: Lane, IntTy: Ty1);
4408 if (!Results0[I])
4409 return nullptr;
4410 }
4411
4412 return ConstantStruct::get(T: StTy, Vs: ConstantVector::get(V: Results0),
4413 Vs: ConstantVector::get(V: Results1));
4414 }
4415
4416 auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Op: Operands[0], IntTy: Ty1);
4417 if (!Result0)
4418 return nullptr;
4419 return ConstantStruct::get(T: StTy, Vs: Result0, Vs: Result1);
4420 }
4421 case Intrinsic::sincos: {
4422 Type *Ty = StTy->getContainedType(i: 0);
4423 Type *TyScalar = Ty->getScalarType();
4424
4425 auto ConstantFoldScalarSincosCall =
4426 [&](Constant *Op) -> std::pair<Constant *, Constant *> {
4427 Constant *SinResult =
4428 ConstantFoldScalarCall(Name, IntrinsicID: Intrinsic::sin, Ty: TyScalar, Operands: Op, TLI, Call);
4429 Constant *CosResult =
4430 ConstantFoldScalarCall(Name, IntrinsicID: Intrinsic::cos, Ty: TyScalar, Operands: Op, TLI, Call);
4431 return std::make_pair(x&: SinResult, y&: CosResult);
4432 };
4433
4434 if (auto *FVTy = dyn_cast<FixedVectorType>(Val: Ty)) {
4435 SmallVector<Constant *> SinResults(FVTy->getNumElements());
4436 SmallVector<Constant *> CosResults(FVTy->getNumElements());
4437
4438 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
4439 Constant *Lane = Operands[0]->getAggregateElement(Elt: I);
4440 std::tie(args&: SinResults[I], args&: CosResults[I]) =
4441 ConstantFoldScalarSincosCall(Lane);
4442 if (!SinResults[I] || !CosResults[I])
4443 return nullptr;
4444 }
4445
4446 return ConstantStruct::get(T: StTy, Vs: ConstantVector::get(V: SinResults),
4447 Vs: ConstantVector::get(V: CosResults));
4448 }
4449
4450 auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
4451 if (!SinResult || !CosResult)
4452 return nullptr;
4453 return ConstantStruct::get(T: StTy, Vs: SinResult, Vs: CosResult);
4454 }
4455 case Intrinsic::vector_deinterleave2:
4456 case Intrinsic::vector_deinterleave3:
4457 case Intrinsic::vector_deinterleave4:
4458 case Intrinsic::vector_deinterleave5:
4459 case Intrinsic::vector_deinterleave6:
4460 case Intrinsic::vector_deinterleave7:
4461 case Intrinsic::vector_deinterleave8: {
4462 unsigned NumResults = StTy->getNumElements();
4463 auto *Vec = Operands[0];
4464 auto *VecTy = cast<VectorType>(Val: Vec->getType());
4465
4466 ElementCount ResultEC =
4467 VecTy->getElementCount().divideCoefficientBy(RHS: NumResults);
4468
4469 if (auto *EltC = Vec->getSplatValue()) {
4470 auto *ResultVec = ConstantVector::getSplat(EC: ResultEC, Elt: EltC);
4471 SmallVector<Constant *, 8> Results(NumResults, ResultVec);
4472 return ConstantStruct::get(T: StTy, V: Results);
4473 }
4474
4475 if (!ResultEC.isFixed())
4476 return nullptr;
4477
4478 unsigned NumElements = ResultEC.getFixedValue();
4479 SmallVector<Constant *, 8> Results(NumResults);
4480 SmallVector<Constant *> Elements(NumElements);
4481 for (unsigned I = 0; I != NumResults; ++I) {
4482 for (unsigned J = 0; J != NumElements; ++J) {
4483 Constant *Elt = Vec->getAggregateElement(Elt: J * NumResults + I);
4484 if (!Elt)
4485 return nullptr;
4486 Elements[J] = Elt;
4487 }
4488 Results[I] = ConstantVector::get(V: Elements);
4489 }
4490 return ConstantStruct::get(T: StTy, V: Results);
4491 }
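  // E.g., vector_deinterleave2(<4 x i8> <0, 1, 2, 3>) folds to the struct
  // { <2 x i8> <0, 2>, <2 x i8> <1, 3> }.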
4492 default:
4493 // TODO: Constant folding of vector intrinsics that fall through here does
4494 // not work (e.g. overflow intrinsics)
4495 return ConstantFoldScalarCall(Name, IntrinsicID, Ty: StTy, Operands, TLI, Call);
4496 }
4497
4498 return nullptr;
4499}
4500
4501} // end anonymous namespace
4502
4503Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
4504 Constant *RHS, Type *Ty,
4505 Instruction *FMFSource) {
4506 auto *Call = dyn_cast_if_present<CallBase>(Val: FMFSource);
4507 // Ensure we check flags like StrictFP that might prevent this from getting
4508 // folded before generating a result.
4509 if (Call && !canConstantFoldCallTo(Call, F: Call->getCalledFunction()))
4510 return nullptr;
4511 return ConstantFoldIntrinsicCall2(IntrinsicID: ID, Ty, Operands: {LHS, RHS}, Call);
4512}
4513
4514Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
4515 ArrayRef<Constant *> Operands,
4516 const TargetLibraryInfo *TLI,
4517 bool AllowNonDeterministic) {
4518 if (Call->isNoBuiltin())
4519 return nullptr;
4520 if (!F->hasName())
4521 return nullptr;
4522
4523 // If this is not an intrinsic and not recognized as a library call, bail out.
4524 Intrinsic::ID IID = F->getIntrinsicID();
4525 if (IID == Intrinsic::not_intrinsic) {
4526 if (!TLI)
4527 return nullptr;
4528 LibFunc LibF;
4529 if (!TLI->getLibFunc(FDecl: *F, F&: LibF))
4530 return nullptr;
4531 }
4532
4533 // Conservatively assume that floating-point libcalls may be
4534 // non-deterministic.
4535 Type *Ty = F->getReturnType();
4536 if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
4537 return nullptr;
4538
4539 StringRef Name = F->getName();
4540 if (auto *FVTy = dyn_cast<FixedVectorType>(Val: Ty))
4541 return ConstantFoldFixedVectorCall(
4542 Name, IntrinsicID: IID, FVTy, Operands, DL: F->getDataLayout(), TLI, Call);
4543
4544 if (auto *SVTy = dyn_cast<ScalableVectorType>(Val: Ty))
4545 return ConstantFoldScalableVectorCall(
4546 Name, IntrinsicID: IID, SVTy, Operands, DL: F->getDataLayout(), TLI, Call);
4547
4548 if (auto *StTy = dyn_cast<StructType>(Val: Ty))
4549 return ConstantFoldStructCall(Name, IntrinsicID: IID, StTy, Operands,
4550 DL: F->getDataLayout(), TLI, Call);
4551
4552 // TODO: If this is a library function, we already discovered that above,
4553 // so we should pass the LibFunc, not the name (and it might be better
4554 // still to separate intrinsic handling from libcalls).
4555 return ConstantFoldScalarCall(Name, IntrinsicID: IID, Ty, Operands, TLI, Call);
4556}
4557
4558bool llvm::isMathLibCallNoop(const CallBase *Call,
4559 const TargetLibraryInfo *TLI) {
4560 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
4561 // (and to some extent ConstantFoldScalarCall).
4562 if (Call->isNoBuiltin() || Call->isStrictFP())
4563 return false;
4564 Function *F = Call->getCalledFunction();
4565 if (!F)
4566 return false;
4567
4568 LibFunc Func;
4569 if (!TLI || !TLI->getLibFunc(FDecl: *F, F&: Func))
4570 return false;
4571
4572 if (Call->arg_size() == 1) {
4573 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Val: Call->getArgOperand(i: 0))) {
4574 const APFloat &Op = OpC->getValueAPF();
4575 switch (Func) {
4576 case LibFunc_logl:
4577 case LibFunc_log:
4578 case LibFunc_logf:
4579 case LibFunc_log2l:
4580 case LibFunc_log2:
4581 case LibFunc_log2f:
4582 case LibFunc_log10l:
4583 case LibFunc_log10:
4584 case LibFunc_log10f:
4585 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
4586
4587 case LibFunc_ilogb:
4588 return !Op.isNaN() && !Op.isZero() && !Op.isInfinity();
4589
4590 case LibFunc_expl:
4591 case LibFunc_exp:
4592 case LibFunc_expf:
4593 // FIXME: These boundaries are slightly conservative.
4594 if (OpC->getType()->isDoubleTy())
4595 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
4596 if (OpC->getType()->isFloatTy())
4597 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
4598 break;
4599
4600 case LibFunc_exp2l:
4601 case LibFunc_exp2:
4602 case LibFunc_exp2f:
4603 // FIXME: These boundaries are slightly conservative.
4604 if (OpC->getType()->isDoubleTy())
4605 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
4606 if (OpC->getType()->isFloatTy())
4607 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
4608 break;
4609
4610 case LibFunc_sinl:
4611 case LibFunc_sin:
4612 case LibFunc_sinf:
4613 case LibFunc_cosl:
4614 case LibFunc_cos:
4615 case LibFunc_cosf:
4616 return !Op.isInfinity();
4617
4618 case LibFunc_tanl:
4619 case LibFunc_tan:
4620 case LibFunc_tanf: {
4621 // FIXME: Stop using the host math library.
4622 // FIXME: The computation isn't done in the right precision.
4623 Type *Ty = OpC->getType();
4624 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
4625 return ConstantFoldFP(NativeFP: tan, V: OpC->getValueAPF(), Ty) != nullptr;
4626 break;
4627 }
4628
4629 case LibFunc_atan:
4630 case LibFunc_atanf:
4631 case LibFunc_atanl:
4632      // Per POSIX, this MAY fail if Op is denormal. We choose not to fail.
4633 return true;
4634
4635 case LibFunc_asinl:
4636 case LibFunc_asin:
4637 case LibFunc_asinf:
4638 case LibFunc_acosl:
4639 case LibFunc_acos:
4640 case LibFunc_acosf:
        return !(Op < APFloat::getOne(Op.getSemantics(), /*Negative=*/true) ||
                 Op > APFloat::getOne(Op.getSemantics()));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
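        // sinh/cosh grow like e^|x|/2, so they overflow slightly later than
        // exp: for double the cutoff is about |x| > 710.47 (= ln(2*DBL_MAX)),
        // and +/-710 stays just inside it.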
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
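        // Per IEEE-754, sqrt(-0.0) is -0.0 and sqrt(NaN) is NaN, neither of
        // which raises an error; only negative non-zero inputs do.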
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
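        // fmod/remainder raise a domain error when x is infinite or y is
        // zero; a NaN in either operand propagates quietly instead.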
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 defines atan2(+/-0.0, +/-0.0), and GLIBC and
        // MSVC do not appear to raise an error for those inputs, we cannot
        // rely on that behavior: POSIX and C11 say a domain error may
        // occur, so allow for that possibility.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
}
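
// Example use (a sketch, not code from this file): a transform that wants to
// delete an otherwise-dead libcall must also prove the call cannot set errno
// or raise an FP exception:
//
//   // CI is an unused `call double @sin(double 1.0)`.
//   if (CI->use_empty() && isMathLibCallNoop(CI, &TLI))
//     CI->eraseFromParent(); // Safe: sin(1.0) has no observable effects.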
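// Given a constant C that was produced by CastOp, try to recover a constant
// InvC of type InvCastTo such that applying CastOp to InvC reproduces C
// exactly; return nullptr if no such constant exists. For example
// (illustrative values): with CastOp == ZExt, C == i32 200, and
// InvCastTo == i8, this returns i8 200 because zext(i8 200) == i32 200,
// whereas C == i32 300 fails since no i8 value zero-extends to 300.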
Constant *llvm::getLosslessInvCast(Constant *C, Type *InvCastTo,
                                   unsigned CastOp, const DataLayout &DL,
                                   PreservedCastFlags *Flags) {
  switch (CastOp) {
  case Instruction::BitCast:
    // Bitcasts are always lossless.
    return ConstantFoldCastOperand(Instruction::BitCast, C, InvCastTo, DL);
  case Instruction::Trunc: {
    auto *ZExtC = ConstantFoldCastOperand(Instruction::ZExt, C, InvCastTo, DL);
    if (Flags) {
      // Truncating a zero-extended value back to its original width never
      // drops set bits, so the trunc is always NUW.
      Flags->NUW = true;
      // The trunc is NSW iff C is non-negative, i.e. iff sign- and
      // zero-extension of C agree.
      auto *SExtC =
          ConstantFoldCastOperand(Instruction::SExt, C, InvCastTo, DL);
      Flags->NSW = ZExtC == SExtC;
    }
    return ZExtC;
  }
  case Instruction::SExt:
  case Instruction::ZExt: {
    auto *InvC = ConstantExpr::getTrunc(C, InvCastTo);
    auto *CastInvC = ConstantFoldCastOperand(CastOp, InvC, C->getType(), DL);
    // The truncation is lossless only if re-extending reproduces C exactly,
    // i.e. CastOp(InvC) == C.
    if (!CastInvC || CastInvC != C)
      return nullptr;
    if (Flags && CastOp == Instruction::ZExt) {
      auto *SExtInvC =
          ConstantFoldCastOperand(Instruction::SExt, InvC, C->getType(), DL);
      // The zext is NNeg iff InvC is non-negative, i.e. iff sign- and
      // zero-extension of InvC agree.
      Flags->NNeg = CastInvC == SExtInvC;
    }
    return InvC;
  }
  case Instruction::FPExt: {
    Constant *InvC =
        ConstantFoldCastOperand(Instruction::FPTrunc, C, InvCastTo, DL);
    if (InvC) {
      Constant *CastInvC =
          ConstantFoldCastOperand(CastOp, InvC, C->getType(), DL);
      if (CastInvC == C)
        return InvC;
    }
    return nullptr;
  }
  default:
    return nullptr;
  }
}

Constant *llvm::getLosslessUnsignedTrunc(Constant *C, Type *DestTy,
                                         const DataLayout &DL,
                                         PreservedCastFlags *Flags) {
  return getLosslessInvCast(C, DestTy, Instruction::ZExt, DL, Flags);
}

Constant *llvm::getLosslessSignedTrunc(Constant *C, Type *DestTy,
                                       const DataLayout &DL,
                                       PreservedCastFlags *Flags) {
  return getLosslessInvCast(C, DestTy, Instruction::SExt, DL, Flags);
}
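
// For instance (a sketch with illustrative values): with C == i32 -1 and
// DestTy == i8, getLosslessSignedTrunc returns i8 -1 because
// sext(i8 -1) == i32 -1, while getLosslessUnsignedTrunc returns nullptr
// because no i8 value zero-extends to i32 -1.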

void TargetFolder::anchor() {}
