1//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines routines for folding instructions into constants.
10//
11// Also, to supplement the basic IR ConstantExpr simplifications,
12// this file defines some additional folding routines that can make use of
13// DataLayout information. These functions cannot go in IR due to library
14// dependency issues.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/ADT/APFloat.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/APSInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/TargetFolder.h"
28#include "llvm/Analysis/TargetLibraryInfo.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/Analysis/VectorUtils.h"
31#include "llvm/Config/config.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/ConstantFold.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GlobalValue.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/IntrinsicInst.h"
44#include "llvm/IR/Intrinsics.h"
45#include "llvm/IR/IntrinsicsAArch64.h"
46#include "llvm/IR/IntrinsicsAMDGPU.h"
47#include "llvm/IR/IntrinsicsARM.h"
48#include "llvm/IR/IntrinsicsNVPTX.h"
49#include "llvm/IR/IntrinsicsWebAssembly.h"
50#include "llvm/IR/IntrinsicsX86.h"
51#include "llvm/IR/NVVMIntrinsicUtils.h"
52#include "llvm/IR/Operator.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Value.h"
55#include "llvm/Support/Casting.h"
56#include "llvm/Support/ErrorHandling.h"
57#include "llvm/Support/KnownBits.h"
58#include "llvm/Support/MathExtras.h"
59#include <cassert>
60#include <cerrno>
61#include <cfenv>
62#include <cmath>
63#include <cstdint>
64
65using namespace llvm;
66
67static cl::opt<bool> DisableFPCallFolding(
68 "disable-fp-call-folding",
69 cl::desc("Disable constant-folding of FP intrinsics and libcalls."),
70 cl::init(Val: false), cl::Hidden);
71
72namespace {
73
74//===----------------------------------------------------------------------===//
75// Constant Folding internal helper functions
76//===----------------------------------------------------------------------===//
77
78static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
79 Constant *C, Type *SrcEltTy,
80 unsigned NumSrcElts,
81 const DataLayout &DL) {
82 // Now that we know that the input value is a vector of integers, just shift
83 // and insert them into our result.
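  // For example, on a little-endian target, <2 x i8> <i8 0x12, i8 0x34>
  // accumulates to the APInt value 0x3412 (element 0 lands in the low bits).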
84 unsigned BitShift = DL.getTypeSizeInBits(Ty: SrcEltTy);
85 for (unsigned i = 0; i != NumSrcElts; ++i) {
86 Constant *Element;
87 if (DL.isLittleEndian())
88 Element = C->getAggregateElement(Elt: NumSrcElts - i - 1);
89 else
90 Element = C->getAggregateElement(Elt: i);
91
92 if (isa_and_nonnull<UndefValue>(Val: Element)) {
93 Result <<= BitShift;
94 continue;
95 }
96
97 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Val: Element);
98 if (!ElementCI)
99 return ConstantExpr::getBitCast(C, Ty: DestTy);
100
101 Result <<= BitShift;
102 Result |= ElementCI->getValue().zext(width: Result.getBitWidth());
103 }
104
105 return nullptr;
106}
107
108/// Constant fold bitcast, symbolically evaluating it with DataLayout.
109/// This always returns a non-null constant, but it may be a
110/// ConstantExpr if unfoldable.
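/// For example, on a little-endian target,
///   bitcast (<2 x i32> <i32 1, i32 2> to i64)
/// folds to i64 0x0000000200000001, whereas a big-endian target yields
/// i64 0x0000000100000002.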
111Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
112 assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
113 "Invalid constantexpr bitcast!");
114
115 // Catch the obvious splat cases.
116 if (Constant *Res = ConstantFoldLoadFromUniformValue(C, Ty: DestTy, DL))
117 return Res;
118
119 if (auto *VTy = dyn_cast<VectorType>(Val: C->getType())) {
120 // Handle a vector->scalar integer/fp cast.
121 if (isa<IntegerType>(Val: DestTy) || DestTy->isFloatingPointTy()) {
122 unsigned NumSrcElts = cast<FixedVectorType>(Val: VTy)->getNumElements();
123 Type *SrcEltTy = VTy->getElementType();
124
      // If the vector is a vector of floating-point values, convert it to a
      // vector of integers to simplify things.
127 if (SrcEltTy->isFloatingPointTy()) {
128 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
129 auto *SrcIVTy = FixedVectorType::get(
130 ElementType: IntegerType::get(C&: C->getContext(), NumBits: FPWidth), NumElts: NumSrcElts);
131 // Ask IR to do the conversion now that #elts line up.
132 C = ConstantExpr::getBitCast(C, Ty: SrcIVTy);
133 }
134
135 APInt Result(DL.getTypeSizeInBits(Ty: DestTy), 0);
136 if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
137 SrcEltTy, NumSrcElts, DL))
138 return CE;
139
140 if (isa<IntegerType>(Val: DestTy))
141 return ConstantInt::get(Ty: DestTy, V: Result);
142
143 APFloat FP(DestTy->getFltSemantics(), Result);
144 return ConstantFP::get(Context&: DestTy->getContext(), V: FP);
145 }
146 }
147
148 // The code below only handles casts to vectors currently.
149 auto *DestVTy = dyn_cast<VectorType>(Val: DestTy);
150 if (!DestVTy)
151 return ConstantExpr::getBitCast(C, Ty: DestTy);
152
153 // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
154 // vector so the code below can handle it uniformly.
155 if (!isa<VectorType>(Val: C->getType()) &&
156 (isa<ConstantFP>(Val: C) || isa<ConstantInt>(Val: C))) {
157 Constant *Ops = C; // don't take the address of C!
158 return FoldBitCast(C: ConstantVector::get(V: Ops), DestTy, DL);
159 }
160
  // Some of what follows may extend to cover scalable vectors, but the current
  // implementation is fixed-length specific.
163 if (!isa<FixedVectorType>(Val: C->getType()))
164 return ConstantExpr::getBitCast(C, Ty: DestTy);
165
166 // If this is a bitcast from constant vector -> vector, fold it.
167 if (!isa<ConstantDataVector>(Val: C) && !isa<ConstantVector>(Val: C) &&
168 !isa<ConstantInt>(Val: C) && !isa<ConstantFP>(Val: C))
169 return ConstantExpr::getBitCast(C, Ty: DestTy);
170
171 // If the element types match, IR can fold it.
172 unsigned NumDstElt = cast<FixedVectorType>(Val: DestVTy)->getNumElements();
173 unsigned NumSrcElt = cast<FixedVectorType>(Val: C->getType())->getNumElements();
174 if (NumDstElt == NumSrcElt)
175 return ConstantExpr::getBitCast(C, Ty: DestTy);
176
177 Type *SrcEltTy = cast<VectorType>(Val: C->getType())->getElementType();
178 Type *DstEltTy = DestVTy->getElementType();
179
180 // Otherwise, we're changing the number of elements in a vector, which
181 // requires endianness information to do the right thing. For example,
182 // bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
183 // folds to (little endian):
184 // <4 x i32> <i32 0, i32 0, i32 1, i32 0>
185 // and to (big endian):
186 // <4 x i32> <i32 0, i32 0, i32 0, i32 1>
187
  // First things first: we only want to deal with integers here, so if we have
  // something in FP form, recast it as an integer.
190 if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
192 unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
193 auto *DestIVTy = FixedVectorType::get(
194 ElementType: IntegerType::get(C&: C->getContext(), NumBits: FPWidth), NumElts: NumDstElt);
195 // Recursively handle this integer conversion, if possible.
196 C = FoldBitCast(C, DestTy: DestIVTy, DL);
197
198 // Finally, IR can handle this now that #elts line up.
199 return ConstantExpr::getBitCast(C, Ty: DestTy);
200 }
201
202 // Okay, we know the destination is integer, if the input is FP, convert
203 // it to integer first.
204 if (SrcEltTy->isFloatingPointTy()) {
205 unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
206 auto *SrcIVTy = FixedVectorType::get(
207 ElementType: IntegerType::get(C&: C->getContext(), NumBits: FPWidth), NumElts: NumSrcElt);
208 // Ask IR to do the conversion now that #elts line up.
209 C = ConstantExpr::getBitCast(C, Ty: SrcIVTy);
210 assert((isa<ConstantVector>(C) || // FIXME: Remove ConstantVector.
211 isa<ConstantDataVector>(C) || isa<ConstantInt>(C)) &&
212 "Constant folding cannot fail for plain fp->int bitcast!");
213 }
214
215 // Now we know that the input and output vectors are both integer vectors
216 // of the same size, and that their #elements is not the same. Do the
217 // conversion here, which depends on whether the input or output has
218 // more elements.
219 bool isLittleEndian = DL.isLittleEndian();
220
221 SmallVector<Constant*, 32> Result;
222 if (NumDstElt < NumSrcElt) {
223 // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
224 Constant *Zero = Constant::getNullValue(Ty: DstEltTy);
225 unsigned Ratio = NumSrcElt/NumDstElt;
226 unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
227 unsigned SrcElt = 0;
228 for (unsigned i = 0; i != NumDstElt; ++i) {
229 // Build each element of the result.
230 Constant *Elt = Zero;
231 unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
232 for (unsigned j = 0; j != Ratio; ++j) {
233 Constant *Src = C->getAggregateElement(Elt: SrcElt++);
234 if (isa_and_nonnull<UndefValue>(Val: Src))
235 Src = Constant::getNullValue(
236 Ty: cast<VectorType>(Val: C->getType())->getElementType());
237 else
238 Src = dyn_cast_or_null<ConstantInt>(Val: Src);
239 if (!Src) // Reject constantexpr elements.
240 return ConstantExpr::getBitCast(C, Ty: DestTy);
241
242 // Zero extend the element to the right size.
243 Src = ConstantFoldCastOperand(Opcode: Instruction::ZExt, C: Src, DestTy: Elt->getType(),
244 DL);
245 assert(Src && "Constant folding cannot fail on plain integers");
246
247 // Shift it to the right place, depending on endianness.
248 Src = ConstantFoldBinaryOpOperands(
249 Opcode: Instruction::Shl, LHS: Src, RHS: ConstantInt::get(Ty: Src->getType(), V: ShiftAmt),
250 DL);
251 assert(Src && "Constant folding cannot fail on plain integers");
252
253 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
254
255 // Mix it in.
256 Elt = ConstantFoldBinaryOpOperands(Opcode: Instruction::Or, LHS: Elt, RHS: Src, DL);
257 assert(Elt && "Constant folding cannot fail on plain integers");
258 }
259 Result.push_back(Elt);
260 }
261 return ConstantVector::get(V: Result);
262 }
263
264 // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
265 unsigned Ratio = NumDstElt/NumSrcElt;
266 unsigned DstBitSize = DL.getTypeSizeInBits(Ty: DstEltTy);
267
268 // Loop over each source value, expanding into multiple results.
269 for (unsigned i = 0; i != NumSrcElt; ++i) {
270 auto *Element = C->getAggregateElement(Elt: i);
271
272 if (!Element) // Reject constantexpr elements.
273 return ConstantExpr::getBitCast(C, Ty: DestTy);
274
275 if (isa<UndefValue>(Val: Element)) {
      // Correctly propagate undef values.
277 Result.append(NumInputs: Ratio, Elt: UndefValue::get(T: DstEltTy));
278 continue;
279 }
280
281 auto *Src = dyn_cast<ConstantInt>(Val: Element);
282 if (!Src)
283 return ConstantExpr::getBitCast(C, Ty: DestTy);
284
285 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
286 for (unsigned j = 0; j != Ratio; ++j) {
287 // Shift the piece of the value into the right place, depending on
288 // endianness.
289 APInt Elt = Src->getValue().lshr(shiftAmt: ShiftAmt);
290 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
291
292 // Truncate and remember this piece.
293 Result.push_back(Elt: ConstantInt::get(Ty: DstEltTy, V: Elt.trunc(width: DstBitSize)));
294 }
295 }
296
297 return ConstantVector::get(V: Result);
298}
299
300} // end anonymous namespace
301
302/// If this constant is a constant offset from a global, return the global and
303/// the constant. Because of constantexprs, this function is recursive.
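/// For example, given the constant expression
///   getelementptr inbounds ([4 x i32], ptr @g, i64 0, i64 2)
/// this returns GV = @g and Offset = 8 (assuming the usual 4-byte i32).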
304bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
305 APInt &Offset, const DataLayout &DL,
306 DSOLocalEquivalent **DSOEquiv) {
307 if (DSOEquiv)
308 *DSOEquiv = nullptr;
309
310 // Trivial case, constant is the global.
311 if ((GV = dyn_cast<GlobalValue>(Val: C))) {
312 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ty: GV->getType());
313 Offset = APInt(BitWidth, 0);
314 return true;
315 }
316
317 if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(Val: C)) {
318 if (DSOEquiv)
319 *DSOEquiv = FoundDSOEquiv;
320 GV = FoundDSOEquiv->getGlobalValue();
321 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ty: GV->getType());
322 Offset = APInt(BitWidth, 0);
323 return true;
324 }
325
326 // Otherwise, if this isn't a constant expr, bail out.
327 auto *CE = dyn_cast<ConstantExpr>(Val: C);
328 if (!CE) return false;
329
330 // Look through ptr->int and ptr->ptr casts.
331 if (CE->getOpcode() == Instruction::PtrToInt ||
332 CE->getOpcode() == Instruction::BitCast)
333 return IsConstantOffsetFromGlobal(C: CE->getOperand(i_nocapture: 0), GV, Offset, DL,
334 DSOEquiv);
335
336 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
337 auto *GEP = dyn_cast<GEPOperator>(Val: CE);
338 if (!GEP)
339 return false;
340
341 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ty: GEP->getType());
342 APInt TmpOffset(BitWidth, 0);
343
344 // If the base isn't a global+constant, we aren't either.
345 if (!IsConstantOffsetFromGlobal(C: CE->getOperand(i_nocapture: 0), GV, Offset&: TmpOffset, DL,
346 DSOEquiv))
347 return false;
348
349 // Otherwise, add any offset that our operands provide.
350 if (!GEP->accumulateConstantOffset(DL, Offset&: TmpOffset))
351 return false;
352
353 Offset = TmpOffset;
354 return true;
355}
356
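// Illustrative behavior: loading an i32 through a bitcast pointer from the
// constant { i32 7, i32 9 } drills into the aggregate and yields i32 7, while
// a request wider than the source constant returns nullptr.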
357Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
358 const DataLayout &DL) {
359 do {
360 Type *SrcTy = C->getType();
361 if (SrcTy == DestTy)
362 return C;
363
364 TypeSize DestSize = DL.getTypeSizeInBits(Ty: DestTy);
365 TypeSize SrcSize = DL.getTypeSizeInBits(Ty: SrcTy);
366 if (!TypeSize::isKnownGE(LHS: SrcSize, RHS: DestSize))
367 return nullptr;
368
369 // Catch the obvious splat cases (since all-zeros can coerce non-integral
370 // pointers legally).
371 if (Constant *Res = ConstantFoldLoadFromUniformValue(C, Ty: DestTy, DL))
372 return Res;
373
374 // If the type sizes are the same and a cast is legal, just directly
375 // cast the constant.
376 // But be careful not to coerce non-integral pointers illegally.
377 if (SrcSize == DestSize &&
378 DL.isNonIntegralPointerType(Ty: SrcTy->getScalarType()) ==
379 DL.isNonIntegralPointerType(Ty: DestTy->getScalarType())) {
380 Instruction::CastOps Cast = Instruction::BitCast;
381 // If we are going from a pointer to int or vice versa, we spell the cast
382 // differently.
383 if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
384 Cast = Instruction::IntToPtr;
385 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
386 Cast = Instruction::PtrToInt;
387
388 if (CastInst::castIsValid(op: Cast, S: C, DstTy: DestTy))
389 return ConstantFoldCastOperand(Opcode: Cast, C, DestTy, DL);
390 }
391
392 // If this isn't an aggregate type, there is nothing we can do to drill down
393 // and find a bitcastable constant.
394 if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
395 return nullptr;
396
397 // We're simulating a load through a pointer that was bitcast to point to
398 // a different type, so we can try to walk down through the initial
399 // elements of an aggregate to see if some part of the aggregate is
400 // castable to implement the "load" semantic model.
401 if (SrcTy->isStructTy()) {
402 // Struct types might have leading zero-length elements like [0 x i32],
403 // which are certainly not what we are looking for, so skip them.
404 unsigned Elem = 0;
405 Constant *ElemC;
406 do {
407 ElemC = C->getAggregateElement(Elt: Elem++);
408 } while (ElemC && DL.getTypeSizeInBits(Ty: ElemC->getType()).isZero());
409 C = ElemC;
410 } else {
411 // For non-byte-sized vector elements, the first element is not
412 // necessarily located at the vector base address.
413 if (auto *VT = dyn_cast<VectorType>(Val: SrcTy))
414 if (!DL.typeSizeEqualsStoreSize(Ty: VT->getElementType()))
415 return nullptr;
416
417 C = C->getAggregateElement(Elt: 0u);
418 }
419 } while (C);
420
421 return nullptr;
422}
423
424namespace {
425
/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of, ByteOffset is an offset into C, CurPtr is the buffer to copy
/// the results into, and BytesLeft is the number of bytes left in the CurPtr
/// buffer. DL is the DataLayout.
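/// For example, with C = i32 0x11223344 on a little-endian target, a call with
/// ByteOffset = 1 and BytesLeft = 2 fills CurPtr with {0x33, 0x22}.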
430bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
431 unsigned BytesLeft, const DataLayout &DL) {
432 assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
433 "Out of range access");
434
435 // Reading type padding, return zero.
436 if (ByteOffset >= DL.getTypeStoreSize(Ty: C->getType()))
437 return true;
438
439 // If this element is zero or undefined, we can just return since *CurPtr is
440 // zero initialized.
441 if (isa<ConstantAggregateZero>(Val: C) || isa<UndefValue>(Val: C))
442 return true;
443
444 if (auto *CI = dyn_cast<ConstantInt>(Val: C)) {
445 if ((CI->getBitWidth() & 7) != 0)
446 return false;
447 const APInt &Val = CI->getValue();
448 unsigned IntBytes = unsigned(CI->getBitWidth()/8);
449
450 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
451 unsigned n = ByteOffset;
452 if (!DL.isLittleEndian())
453 n = IntBytes - n - 1;
454 CurPtr[i] = Val.extractBits(numBits: 8, bitPosition: n * 8).getZExtValue();
455 ++ByteOffset;
456 }
457 return true;
458 }
459
460 if (auto *CFP = dyn_cast<ConstantFP>(Val: C)) {
461 if (CFP->getType()->isDoubleTy()) {
462 C = FoldBitCast(C, DestTy: Type::getInt64Ty(C&: C->getContext()), DL);
463 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
464 }
465 if (CFP->getType()->isFloatTy()){
466 C = FoldBitCast(C, DestTy: Type::getInt32Ty(C&: C->getContext()), DL);
467 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
468 }
469 if (CFP->getType()->isHalfTy()){
470 C = FoldBitCast(C, DestTy: Type::getInt16Ty(C&: C->getContext()), DL);
471 return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
472 }
473 return false;
474 }
475
476 if (auto *CS = dyn_cast<ConstantStruct>(Val: C)) {
477 const StructLayout *SL = DL.getStructLayout(Ty: CS->getType());
478 unsigned Index = SL->getElementContainingOffset(FixedOffset: ByteOffset);
479 uint64_t CurEltOffset = SL->getElementOffset(Idx: Index);
480 ByteOffset -= CurEltOffset;
481
482 while (true) {
483 // If the element access is to the element itself and not to tail padding,
484 // read the bytes from the element.
485 uint64_t EltSize = DL.getTypeAllocSize(Ty: CS->getOperand(i_nocapture: Index)->getType());
486
487 if (ByteOffset < EltSize &&
488 !ReadDataFromGlobal(C: CS->getOperand(i_nocapture: Index), ByteOffset, CurPtr,
489 BytesLeft, DL))
490 return false;
491
492 ++Index;
493
      // Check to see if we read from the last struct element; if so, we're
      // done.
495 if (Index == CS->getType()->getNumElements())
496 return true;
497
498 // If we read all of the bytes we needed from this element we're done.
499 uint64_t NextEltOffset = SL->getElementOffset(Idx: Index);
500
501 if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
502 return true;
503
504 // Move to the next element of the struct.
505 CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
506 BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
507 ByteOffset = 0;
508 CurEltOffset = NextEltOffset;
509 }
510 // not reached.
511 }
512
513 if (isa<ConstantArray>(Val: C) || isa<ConstantVector>(Val: C) ||
514 isa<ConstantDataSequential>(Val: C)) {
515 uint64_t NumElts, EltSize;
516 Type *EltTy;
517 if (auto *AT = dyn_cast<ArrayType>(Val: C->getType())) {
518 NumElts = AT->getNumElements();
519 EltTy = AT->getElementType();
520 EltSize = DL.getTypeAllocSize(Ty: EltTy);
521 } else {
522 NumElts = cast<FixedVectorType>(Val: C->getType())->getNumElements();
523 EltTy = cast<FixedVectorType>(Val: C->getType())->getElementType();
      // TODO: For non-byte-sized vectors, the current implementation assumes
      // there is padding to the next byte boundary between elements.
526 if (!DL.typeSizeEqualsStoreSize(Ty: EltTy))
527 return false;
528
529 EltSize = DL.getTypeStoreSize(Ty: EltTy);
530 }
531 uint64_t Index = ByteOffset / EltSize;
532 uint64_t Offset = ByteOffset - Index * EltSize;
533
534 for (; Index != NumElts; ++Index) {
535 if (!ReadDataFromGlobal(C: C->getAggregateElement(Elt: Index), ByteOffset: Offset, CurPtr,
536 BytesLeft, DL))
537 return false;
538
539 uint64_t BytesWritten = EltSize - Offset;
540 assert(BytesWritten <= EltSize && "Not indexing into this element?");
541 if (BytesWritten >= BytesLeft)
542 return true;
543
544 Offset = 0;
545 BytesLeft -= BytesWritten;
546 CurPtr += BytesWritten;
547 }
548 return true;
549 }
550
551 if (auto *CE = dyn_cast<ConstantExpr>(Val: C)) {
552 if (CE->getOpcode() == Instruction::IntToPtr &&
553 CE->getOperand(i_nocapture: 0)->getType() == DL.getIntPtrType(CE->getType())) {
554 return ReadDataFromGlobal(C: CE->getOperand(i_nocapture: 0), ByteOffset, CurPtr,
555 BytesLeft, DL);
556 }
557 }
558
559 // Otherwise, unknown initializer type.
560 return false;
561}
562
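/// Try to fold a load of \p LoadTy at byte offset \p Offset from the constant
/// \p C by reinterpreting the raw bytes of C's in-memory representation. For
/// example, loading an i16 at offset 2 from an i32 0x11223344 initializer on a
/// little-endian target yields i16 0x1122.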
563Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
564 int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
566 if (isa<ScalableVectorType>(Val: LoadTy))
567 return nullptr;
568
569 auto *IntType = dyn_cast<IntegerType>(Val: LoadTy);
570
571 // If this isn't an integer load we can't fold it directly.
572 if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result. This can be useful for union cases. Note that
    // address spaces don't matter here, since we're not actually creating a
    // new load.
577 if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
578 !LoadTy->isVectorTy())
579 return nullptr;
580
581 Type *MapTy = Type::getIntNTy(C&: C->getContext(),
582 N: DL.getTypeSizeInBits(Ty: LoadTy).getFixedValue());
583 if (Constant *Res = FoldReinterpretLoadFromConst(C, LoadTy: MapTy, Offset, DL)) {
584 if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
585 // Materializing a zero can be done trivially without a bitcast
586 return Constant::getNullValue(Ty: LoadTy);
587 Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
588 Res = FoldBitCast(C: Res, DestTy: CastTy, DL);
589 if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
591 if (Res->isNullValue() && !LoadTy->isX86_AMXTy())
592 return Constant::getNullValue(Ty: LoadTy);
593 if (DL.isNonIntegralPointerType(Ty: LoadTy->getScalarType()))
594 // Be careful not to replace a load of an addrspace value with an inttoptr here
595 return nullptr;
596 Res = ConstantExpr::getIntToPtr(C: Res, Ty: LoadTy);
597 }
598 return Res;
599 }
600 return nullptr;
601 }
602
603 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
604 if (BytesLoaded > 32 || BytesLoaded == 0)
605 return nullptr;
606
  // If we're not accessing anything in this constant, the result is poison.
608 if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
609 return PoisonValue::get(T: IntType);
610
611 // TODO: We should be able to support scalable types.
612 TypeSize InitializerSize = DL.getTypeAllocSize(Ty: C->getType());
613 if (InitializerSize.isScalable())
614 return nullptr;
615
  // If we're not accessing anything in this constant, the result is poison.
617 if (Offset >= (int64_t)InitializerSize.getFixedValue())
618 return PoisonValue::get(T: IntType);
619
620 unsigned char RawBytes[32] = {0};
621 unsigned char *CurPtr = RawBytes;
622 unsigned BytesLeft = BytesLoaded;
623
624 // If we're loading off the beginning of the global, some bytes may be valid.
625 if (Offset < 0) {
626 CurPtr += -Offset;
627 BytesLeft += Offset;
628 Offset = 0;
629 }
630
631 if (!ReadDataFromGlobal(C, ByteOffset: Offset, CurPtr, BytesLeft, DL))
632 return nullptr;
633
634 APInt ResultVal = APInt(IntType->getBitWidth(), 0);
635 if (DL.isLittleEndian()) {
636 ResultVal = RawBytes[BytesLoaded - 1];
637 for (unsigned i = 1; i != BytesLoaded; ++i) {
638 ResultVal <<= 8;
639 ResultVal |= RawBytes[BytesLoaded - 1 - i];
640 }
641 } else {
642 ResultVal = RawBytes[0];
643 for (unsigned i = 1; i != BytesLoaded; ++i) {
644 ResultVal <<= 8;
645 ResultVal |= RawBytes[i];
646 }
647 }
648
649 return ConstantInt::get(Context&: IntType->getContext(), V: ResultVal);
650}
651
652} // anonymous namespace
653
// If GV is a constant with an initializer, read its representation starting
// at Offset and return it as a constant array of unsigned char. Otherwise
// return null.
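// For example, for @g = constant [4 x i8] c"abcd" and Offset = 1, this returns
// the byte array corresponding to c"bcd".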
657Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
658 uint64_t Offset) {
659 if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
660 return nullptr;
661
662 const DataLayout &DL = GV->getDataLayout();
663 Constant *Init = const_cast<Constant *>(GV->getInitializer());
664 TypeSize InitSize = DL.getTypeAllocSize(Ty: Init->getType());
665 if (InitSize < Offset)
666 return nullptr;
667
668 uint64_t NBytes = InitSize - Offset;
669 if (NBytes > UINT16_MAX)
670 // Bail for large initializers in excess of 64K to avoid allocating
671 // too much memory.
    // Offset is assumed to be less than or equal to InitSize (this is
    // enforced in ReadDataFromGlobal).
674 return nullptr;
675
676 SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
677 unsigned char *CurPtr = RawBytes.data();
678
679 if (!ReadDataFromGlobal(C: Init, ByteOffset: Offset, CurPtr, BytesLeft: NBytes, DL))
680 return nullptr;
681
682 return ConstantDataArray::get(Context&: GV->getContext(), Elts&: RawBytes);
683}
684
/// If this Offset points exactly to the start of an aggregate element, return
/// that element; otherwise return nullptr.
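/// For example, with Base = { i32, [2 x i64] } { i32 1, [2 x i64] [i64 2, i64 3] }
/// and Offset = 16 (under a typical DataLayout that places the array at offset
/// 8), this returns the element i64 3.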
687Constant *getConstantAtOffset(Constant *Base, APInt Offset,
688 const DataLayout &DL) {
689 if (Offset.isZero())
690 return Base;
691
692 if (!isa<ConstantAggregate>(Val: Base) && !isa<ConstantDataSequential>(Val: Base))
693 return nullptr;
694
695 Type *ElemTy = Base->getType();
696 SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
697 if (!Offset.isZero() || !Indices[0].isZero())
698 return nullptr;
699
700 Constant *C = Base;
701 for (const APInt &Index : drop_begin(RangeOrContainer&: Indices)) {
702 if (Index.isNegative() || Index.getActiveBits() >= 32)
703 return nullptr;
704
705 C = C->getAggregateElement(Elt: Index.getZExtValue());
706 if (!C)
707 return nullptr;
708 }
709
710 return C;
711}
712
713Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
714 const APInt &Offset,
715 const DataLayout &DL) {
716 if (Constant *AtOffset = getConstantAtOffset(Base: C, Offset, DL))
717 if (Constant *Result = ConstantFoldLoadThroughBitcast(C: AtOffset, DestTy: Ty, DL))
718 return Result;
719
720 // Explicitly check for out-of-bounds access, so we return poison even if the
721 // constant is a uniform value.
722 TypeSize Size = DL.getTypeAllocSize(Ty: C->getType());
723 if (!Size.isScalable() && Offset.sge(RHS: Size.getFixedValue()))
724 return PoisonValue::get(T: Ty);
725
726 // Try an offset-independent fold of a uniform value.
727 if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty, DL))
728 return Result;
729
730 // Try hard to fold loads from bitcasted strange and non-type-safe things.
731 if (Offset.getSignificantBits() <= 64)
732 if (Constant *Result =
733 FoldReinterpretLoadFromConst(C, LoadTy: Ty, Offset: Offset.getSExtValue(), DL))
734 return Result;
735
736 return nullptr;
737}
738
739Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
740 const DataLayout &DL) {
741 return ConstantFoldLoadFromConst(C, Ty, Offset: APInt(64, 0), DL);
742}
743
744Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
745 APInt Offset,
746 const DataLayout &DL) {
747 // We can only fold loads from constant globals with a definitive initializer.
748 // Check this upfront, to skip expensive offset calculations.
749 auto *GV = dyn_cast<GlobalVariable>(Val: getUnderlyingObject(V: C));
750 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
751 return nullptr;
752
753 C = cast<Constant>(Val: C->stripAndAccumulateConstantOffsets(
754 DL, Offset, /* AllowNonInbounds */ true));
755
756 if (C == GV)
757 if (Constant *Result = ConstantFoldLoadFromConst(C: GV->getInitializer(), Ty,
758 Offset, DL))
759 return Result;
760
761 // If this load comes from anywhere in a uniform constant global, the value
762 // is always the same, regardless of the loaded offset.
763 return ConstantFoldLoadFromUniformValue(C: GV->getInitializer(), Ty, DL);
764}
765
766Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
767 const DataLayout &DL) {
768 APInt Offset(DL.getIndexTypeSizeInBits(Ty: C->getType()), 0);
769 return ConstantFoldLoadFromConstPtr(C, Ty, Offset: std::move(Offset), DL);
770}
771
772Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
773 const DataLayout &DL) {
774 if (isa<PoisonValue>(Val: C))
775 return PoisonValue::get(T: Ty);
776 if (isa<UndefValue>(Val: C))
777 return UndefValue::get(T: Ty);
  // If padding is needed when storing C to memory, then it isn't considered
  // uniform.
780 if (!DL.typeSizeEqualsStoreSize(Ty: C->getType()))
781 return nullptr;
782 if (C->isNullValue() && !Ty->isX86_AMXTy())
783 return Constant::getNullValue(Ty);
784 if (C->isAllOnesValue() &&
785 (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
786 return Constant::getAllOnesValue(Ty);
787 return nullptr;
788}
789
790namespace {
791
/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the provided DataLayout.
796Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
797 const DataLayout &DL) {
798 // SROA
799
800 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
801 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
802 // bits.
803
804 if (Opc == Instruction::And) {
805 KnownBits Known0 = computeKnownBits(V: Op0, DL);
806 KnownBits Known1 = computeKnownBits(V: Op1, DL);
807 if ((Known1.One | Known0.Zero).isAllOnes()) {
808 // All the bits of Op0 that the 'and' could be masking are already zero.
809 return Op0;
810 }
811 if ((Known0.One | Known1.Zero).isAllOnes()) {
812 // All the bits of Op1 that the 'and' could be masking are already zero.
813 return Op1;
814 }
815
816 Known0 &= Known1;
817 if (Known0.isConstant())
818 return ConstantInt::get(Ty: Op0->getType(), V: Known0.getConstant());
819 }
820
821 // If the constant expr is something like &A[123] - &A[4].f, fold this into a
822 // constant. This happens frequently when iterating over a global array.
823 if (Opc == Instruction::Sub) {
824 GlobalValue *GV1, *GV2;
825 APInt Offs1, Offs2;
826
827 if (IsConstantOffsetFromGlobal(C: Op0, GV&: GV1, Offset&: Offs1, DL))
828 if (IsConstantOffsetFromGlobal(C: Op1, GV&: GV2, Offset&: Offs2, DL) && GV1 == GV2) {
829 unsigned OpSize = DL.getTypeSizeInBits(Ty: Op0->getType());
830
831 // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
834 return ConstantInt::get(Ty: Op0->getType(), V: Offs1.zextOrTrunc(width: OpSize) -
835 Offs2.zextOrTrunc(width: OpSize));
836 }
837 }
838
839 return nullptr;
840}
841
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
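/// For example, an i16 array index is explicitly sign-extended to the
/// pointer-index type (typically i64) before the GEP constant is rebuilt.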
844Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
845 Type *ResultTy, GEPNoWrapFlags NW,
846 std::optional<ConstantRange> InRange,
847 const DataLayout &DL, const TargetLibraryInfo *TLI) {
848 Type *IntIdxTy = DL.getIndexType(PtrTy: ResultTy);
849 Type *IntIdxScalarTy = IntIdxTy->getScalarType();
850
851 bool Any = false;
852 SmallVector<Constant*, 32> NewIdxs;
853 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
854 if ((i == 1 ||
855 !isa<StructType>(Val: GetElementPtrInst::getIndexedType(
856 Ty: SrcElemTy, IdxList: Ops.slice(N: 1, M: i - 1)))) &&
857 Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
858 Any = true;
859 Type *NewType =
860 Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
861 Constant *NewIdx = ConstantFoldCastOperand(
862 Opcode: CastInst::getCastOpcode(Val: Ops[i], SrcIsSigned: true, Ty: NewType, DstIsSigned: true), C: Ops[i], DestTy: NewType,
863 DL);
864 if (!NewIdx)
865 return nullptr;
866 NewIdxs.push_back(Elt: NewIdx);
867 } else
868 NewIdxs.push_back(Elt: Ops[i]);
869 }
870
871 if (!Any)
872 return nullptr;
873
874 Constant *C =
875 ConstantExpr::getGetElementPtr(Ty: SrcElemTy, C: Ops[0], IdxList: NewIdxs, NW, InRange);
876 return ConstantFoldConstant(C, DL, TLI);
877}
878
879/// If we can symbolically evaluate the GEP constant expression, do so.
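/// For example (with 4-byte i32 and no non-integral address spaces),
///   getelementptr (i32, ptr getelementptr (i8, ptr @g, i64 4), i64 2)
/// is folded to the canonical single-offset form
///   getelementptr (i8, ptr @g, i64 12)
/// with wrap flags inferred where possible.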
880Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
881 ArrayRef<Constant *> Ops,
882 const DataLayout &DL,
883 const TargetLibraryInfo *TLI) {
884 Type *SrcElemTy = GEP->getSourceElementType();
885 Type *ResTy = GEP->getType();
886 if (!SrcElemTy->isSized() || isa<ScalableVectorType>(Val: SrcElemTy))
887 return nullptr;
888
889 if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResultTy: ResTy, NW: GEP->getNoWrapFlags(),
890 InRange: GEP->getInRange(), DL, TLI))
891 return C;
892
893 Constant *Ptr = Ops[0];
894 if (!Ptr->getType()->isPointerTy())
895 return nullptr;
896
897 Type *IntIdxTy = DL.getIndexType(PtrTy: Ptr->getType());
898
899 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
900 if (!isa<ConstantInt>(Val: Ops[i]) || !Ops[i]->getType()->isIntegerTy())
901 return nullptr;
902
903 unsigned BitWidth = DL.getTypeSizeInBits(Ty: IntIdxTy);
904 APInt Offset = APInt(
905 BitWidth,
906 DL.getIndexedOffsetInType(
907 ElemTy: SrcElemTy, Indices: ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
908 /*isSigned=*/true, /*implicitTrunc=*/true);
909
910 std::optional<ConstantRange> InRange = GEP->getInRange();
911 if (InRange)
912 InRange = InRange->sextOrTrunc(BitWidth);
913
914 // If this is a GEP of a GEP, fold it all into a single GEP.
915 GEPNoWrapFlags NW = GEP->getNoWrapFlags();
916 bool Overflow = false;
917 while (auto *GEP = dyn_cast<GEPOperator>(Val: Ptr)) {
918 NW &= GEP->getNoWrapFlags();
919
920 SmallVector<Value *, 4> NestedOps(llvm::drop_begin(RangeOrContainer: GEP->operands()));
921
    // Do not try to incorporate the sub-GEP if some index is not a constant
    // integer.
923 bool AllConstantInt = true;
924 for (Value *NestedOp : NestedOps)
925 if (!isa<ConstantInt>(Val: NestedOp)) {
926 AllConstantInt = false;
927 break;
928 }
929 if (!AllConstantInt)
930 break;
931
932 // TODO: Try to intersect two inrange attributes?
933 if (!InRange) {
934 InRange = GEP->getInRange();
935 if (InRange)
936 // Adjust inrange by offset until now.
937 InRange = InRange->sextOrTrunc(BitWidth).subtract(CI: Offset);
938 }
939
940 Ptr = cast<Constant>(Val: GEP->getOperand(i_nocapture: 0));
941 SrcElemTy = GEP->getSourceElementType();
942 Offset = Offset.sadd_ov(
943 RHS: APInt(BitWidth, DL.getIndexedOffsetInType(ElemTy: SrcElemTy, Indices: NestedOps),
944 /*isSigned=*/true, /*implicitTrunc=*/true),
945 Overflow);
946 }
947
948 // Preserving nusw (without inbounds) also requires that the offset
949 // additions did not overflow.
950 if (NW.hasNoUnsignedSignedWrap() && !NW.isInBounds() && Overflow)
951 NW = NW.withoutNoUnsignedSignedWrap();
952
  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
955 APInt BasePtr(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
956 if (auto *CE = dyn_cast<ConstantExpr>(Val: Ptr)) {
957 if (CE->getOpcode() == Instruction::IntToPtr) {
958 if (auto *Base = dyn_cast<ConstantInt>(Val: CE->getOperand(i_nocapture: 0)))
959 BasePtr = Base->getValue().zextOrTrunc(width: BasePtr.getBitWidth());
960 }
961 }
962
963 auto *PTy = cast<PointerType>(Val: Ptr->getType());
964 if ((Ptr->isNullValue() || BasePtr != 0) &&
965 !DL.isNonIntegralPointerType(PT: PTy)) {
966 // If the index size is smaller than the pointer size, add to the low
967 // bits only.
968 BasePtr.insertBits(SubBits: BasePtr.trunc(width: BitWidth) + Offset, bitPosition: 0);
969 Constant *C = ConstantInt::get(Context&: Ptr->getContext(), V: BasePtr);
970 return ConstantExpr::getIntToPtr(C, Ty: ResTy);
971 }
972
973 // Try to infer inbounds for GEPs of globals.
974 if (!NW.isInBounds() && Offset.isNonNegative()) {
975 bool CanBeNull, CanBeFreed;
976 uint64_t DerefBytes =
977 Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
978 if (DerefBytes != 0 && !CanBeNull && Offset.sle(RHS: DerefBytes))
979 NW |= GEPNoWrapFlags::inBounds();
980 }
981
982 // nusw + nneg -> nuw
983 if (NW.hasNoUnsignedSignedWrap() && Offset.isNonNegative())
984 NW |= GEPNoWrapFlags::noUnsignedWrap();
985
986 // Otherwise canonicalize this to a single ptradd.
987 LLVMContext &Ctx = Ptr->getContext();
988 return ConstantExpr::getGetElementPtr(Ty: Type::getInt8Ty(C&: Ctx), C: Ptr,
989 Idx: ConstantInt::get(Context&: Ctx, V: Offset), NW,
990 InRange);
991}
992
/// Attempt to constant fold an instruction with the specified opcode and
/// operands. If successful, the constant result is returned; if not, null is
/// returned. Note that this function can fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
998Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
999 ArrayRef<Constant *> Ops,
1000 const DataLayout &DL,
1001 const TargetLibraryInfo *TLI,
1002 bool AllowNonDeterministic) {
1003 Type *DestTy = InstOrCE->getType();
1004
1005 if (Instruction::isUnaryOp(Opcode))
1006 return ConstantFoldUnaryOpOperand(Opcode, Op: Ops[0], DL);
1007
1008 if (Instruction::isBinaryOp(Opcode)) {
1009 switch (Opcode) {
1010 default:
1011 break;
1012 case Instruction::FAdd:
1013 case Instruction::FSub:
1014 case Instruction::FMul:
1015 case Instruction::FDiv:
1016 case Instruction::FRem:
      // Handle floating-point instructions separately to account for
      // denormals.
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero.
1020 if (const auto *I = dyn_cast<Instruction>(Val: InstOrCE)) {
1021 return ConstantFoldFPInstOperands(Opcode, LHS: Ops[0], RHS: Ops[1], DL, I,
1022 AllowNonDeterministic);
1023 }
1024 }
1025 return ConstantFoldBinaryOpOperands(Opcode, LHS: Ops[0], RHS: Ops[1], DL);
1026 }
1027
1028 if (Instruction::isCast(Opcode))
1029 return ConstantFoldCastOperand(Opcode, C: Ops[0], DestTy, DL);
1030
1031 if (auto *GEP = dyn_cast<GEPOperator>(Val: InstOrCE)) {
1032 Type *SrcElemTy = GEP->getSourceElementType();
1033 if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
1034 return nullptr;
1035
1036 if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
1037 return C;
1038
1039 return ConstantExpr::getGetElementPtr(Ty: SrcElemTy, C: Ops[0], IdxList: Ops.slice(N: 1),
1040 NW: GEP->getNoWrapFlags(),
1041 InRange: GEP->getInRange());
1042 }
1043
1044 if (auto *CE = dyn_cast<ConstantExpr>(Val: InstOrCE))
1045 return CE->getWithOperands(Ops);
1046
1047 switch (Opcode) {
1048 default: return nullptr;
1049 case Instruction::ICmp:
1050 case Instruction::FCmp: {
1051 auto *C = cast<CmpInst>(Val: InstOrCE);
1052 return ConstantFoldCompareInstOperands(Predicate: C->getPredicate(), LHS: Ops[0], RHS: Ops[1],
1053 DL, TLI, I: C);
1054 }
1055 case Instruction::Freeze:
1056 return isGuaranteedNotToBeUndefOrPoison(V: Ops[0]) ? Ops[0] : nullptr;
1057 case Instruction::Call:
1058 if (auto *F = dyn_cast<Function>(Val: Ops.back())) {
1059 const auto *Call = cast<CallBase>(Val: InstOrCE);
1060 if (canConstantFoldCallTo(Call, F))
1061 return ConstantFoldCall(Call, F, Operands: Ops.slice(N: 0, M: Ops.size() - 1), TLI,
1062 AllowNonDeterministic);
1063 }
1064 return nullptr;
1065 case Instruction::Select:
1066 return ConstantFoldSelectInstruction(Cond: Ops[0], V1: Ops[1], V2: Ops[2]);
1067 case Instruction::ExtractElement:
1068 return ConstantExpr::getExtractElement(Vec: Ops[0], Idx: Ops[1]);
1069 case Instruction::ExtractValue:
1070 return ConstantFoldExtractValueInstruction(
1071 Agg: Ops[0], Idxs: cast<ExtractValueInst>(Val: InstOrCE)->getIndices());
1072 case Instruction::InsertElement:
1073 return ConstantExpr::getInsertElement(Vec: Ops[0], Elt: Ops[1], Idx: Ops[2]);
1074 case Instruction::InsertValue:
1075 return ConstantFoldInsertValueInstruction(
1076 Agg: Ops[0], Val: Ops[1], Idxs: cast<InsertValueInst>(Val: InstOrCE)->getIndices());
1077 case Instruction::ShuffleVector:
1078 return ConstantExpr::getShuffleVector(
1079 V1: Ops[0], V2: Ops[1], Mask: cast<ShuffleVectorInst>(Val: InstOrCE)->getShuffleMask());
1080 case Instruction::Load: {
1081 const auto *LI = dyn_cast<LoadInst>(Val: InstOrCE);
1082 if (LI->isVolatile())
1083 return nullptr;
1084 return ConstantFoldLoadFromConstPtr(C: Ops[0], Ty: LI->getType(), DL);
1085 }
1086 }
1087}
1088
1089} // end anonymous namespace
1090
1091//===----------------------------------------------------------------------===//
1092// Constant Folding public APIs
1093//===----------------------------------------------------------------------===//
1094
1095namespace {
1096
1097Constant *
1098ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1099 const TargetLibraryInfo *TLI,
1100 SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1101 if (!isa<ConstantVector>(Val: C) && !isa<ConstantExpr>(Val: C))
1102 return const_cast<Constant *>(C);
1103
1104 SmallVector<Constant *, 8> Ops;
1105 for (const Use &OldU : C->operands()) {
1106 Constant *OldC = cast<Constant>(Val: &OldU);
1107 Constant *NewC = OldC;
1108 // Recursively fold the ConstantExpr's operands. If we have already folded
1109 // a ConstantExpr, we don't have to process it again.
1110 if (isa<ConstantVector>(Val: OldC) || isa<ConstantExpr>(Val: OldC)) {
1111 auto It = FoldedOps.find(Val: OldC);
1112 if (It == FoldedOps.end()) {
1113 NewC = ConstantFoldConstantImpl(C: OldC, DL, TLI, FoldedOps);
1114 FoldedOps.insert(KV: {OldC, NewC});
1115 } else {
1116 NewC = It->second;
1117 }
1118 }
1119 Ops.push_back(Elt: NewC);
1120 }
1121
1122 if (auto *CE = dyn_cast<ConstantExpr>(Val: C)) {
1123 if (Constant *Res = ConstantFoldInstOperandsImpl(
1124 InstOrCE: CE, Opcode: CE->getOpcode(), Ops, DL, TLI, /*AllowNonDeterministic=*/true))
1125 return Res;
1126 return const_cast<Constant *>(C);
1127 }
1128
1129 assert(isa<ConstantVector>(C));
1130 return ConstantVector::get(V: Ops);
1131}
1132
1133} // end anonymous namespace
1134
1135Constant *llvm::ConstantFoldInstruction(const Instruction *I,
1136 const DataLayout &DL,
1137 const TargetLibraryInfo *TLI) {
1138 // Handle PHI nodes quickly here...
1139 if (auto *PN = dyn_cast<PHINode>(Val: I)) {
1140 Constant *CommonValue = nullptr;
1141
1142 SmallDenseMap<Constant *, Constant *> FoldedOps;
1143 for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef, then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
1148 if (isa<UndefValue>(Val: Incoming))
1149 continue;
1150 // If the incoming value is not a constant, then give up.
1151 auto *C = dyn_cast<Constant>(Val: Incoming);
1152 if (!C)
1153 return nullptr;
1154 // Fold the PHI's operands.
1155 C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1156 // If the incoming value is a different constant to
1157 // the one we saw previously, then give up.
1158 if (CommonValue && C != CommonValue)
1159 return nullptr;
1160 CommonValue = C;
1161 }
1162
1163 // If we reach here, all incoming values are the same constant or undef.
1164 return CommonValue ? CommonValue : UndefValue::get(T: PN->getType());
1165 }
1166
1167 // Scan the operand list, checking to see if they are all constants, if so,
1168 // hand off to ConstantFoldInstOperandsImpl.
1169 if (!all_of(Range: I->operands(), P: [](const Use &U) { return isa<Constant>(Val: U); }))
1170 return nullptr;
1171
1172 SmallDenseMap<Constant *, Constant *> FoldedOps;
1173 SmallVector<Constant *, 8> Ops;
1174 for (const Use &OpU : I->operands()) {
1175 auto *Op = cast<Constant>(Val: &OpU);
1176 // Fold the Instruction's operands.
1177 Op = ConstantFoldConstantImpl(C: Op, DL, TLI, FoldedOps);
1178 Ops.push_back(Elt: Op);
1179 }
1180
1181 return ConstantFoldInstOperands(I, Ops, DL, TLI);
1182}
1183
1184Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1185 const TargetLibraryInfo *TLI) {
1186 SmallDenseMap<Constant *, Constant *> FoldedOps;
1187 return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1188}
1189
1190Constant *llvm::ConstantFoldInstOperands(const Instruction *I,
1191 ArrayRef<Constant *> Ops,
1192 const DataLayout &DL,
1193 const TargetLibraryInfo *TLI,
1194 bool AllowNonDeterministic) {
1195 return ConstantFoldInstOperandsImpl(InstOrCE: I, Opcode: I->getOpcode(), Ops, DL, TLI,
1196 AllowNonDeterministic);
1197}
1198
1199Constant *llvm::ConstantFoldCompareInstOperands(
1200 unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
1201 const TargetLibraryInfo *TLI, const Instruction *I) {
1202 CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
1203 // fold: icmp (inttoptr x), null -> icmp x, 0
1204 // fold: icmp null, (inttoptr x) -> icmp 0, x
1205 // fold: icmp (ptrtoint x), 0 -> icmp x, null
1206 // fold: icmp 0, (ptrtoint x) -> icmp null, x
1207 // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1208 // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1209 //
  // FIXME: The following comment is out of date; the DataLayout is available
  // here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
1213 if (auto *CE0 = dyn_cast<ConstantExpr>(Val: Ops0)) {
1214 if (Ops1->isNullValue()) {
1215 if (CE0->getOpcode() == Instruction::IntToPtr) {
1216 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1217 // Convert the integer value to the right size to ensure we get the
1218 // proper extension or truncation.
1219 if (Constant *C = ConstantFoldIntegerCast(C: CE0->getOperand(i_nocapture: 0), DestTy: IntPtrTy,
1220 /*IsSigned*/ false, DL)) {
1221 Constant *Null = Constant::getNullValue(Ty: C->getType());
1222 return ConstantFoldCompareInstOperands(IntPredicate: Predicate, Ops0: C, Ops1: Null, DL, TLI);
1223 }
1224 }
1225
1226 // Only do this transformation if the int is intptrty in size, otherwise
1227 // there is a truncation or extension that we aren't modeling.
1228 if (CE0->getOpcode() == Instruction::PtrToInt) {
1229 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(i_nocapture: 0)->getType());
1230 if (CE0->getType() == IntPtrTy) {
1231 Constant *C = CE0->getOperand(i_nocapture: 0);
1232 Constant *Null = Constant::getNullValue(Ty: C->getType());
1233 return ConstantFoldCompareInstOperands(IntPredicate: Predicate, Ops0: C, Ops1: Null, DL, TLI);
1234 }
1235 }
1236 }
1237
1238 if (auto *CE1 = dyn_cast<ConstantExpr>(Val: Ops1)) {
1239 if (CE0->getOpcode() == CE1->getOpcode()) {
1240 if (CE0->getOpcode() == Instruction::IntToPtr) {
1241 Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1242
1243 // Convert the integer value to the right size to ensure we get the
1244 // proper extension or truncation.
1245 Constant *C0 = ConstantFoldIntegerCast(C: CE0->getOperand(i_nocapture: 0), DestTy: IntPtrTy,
1246 /*IsSigned*/ false, DL);
1247 Constant *C1 = ConstantFoldIntegerCast(C: CE1->getOperand(i_nocapture: 0), DestTy: IntPtrTy,
1248 /*IsSigned*/ false, DL);
1249 if (C0 && C1)
1250 return ConstantFoldCompareInstOperands(IntPredicate: Predicate, Ops0: C0, Ops1: C1, DL, TLI);
1251 }
1252
1253 // Only do this transformation if the int is intptrty in size, otherwise
1254 // there is a truncation or extension that we aren't modeling.
1255 if (CE0->getOpcode() == Instruction::PtrToInt) {
1256 Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(i_nocapture: 0)->getType());
1257 if (CE0->getType() == IntPtrTy &&
1258 CE0->getOperand(i_nocapture: 0)->getType() == CE1->getOperand(i_nocapture: 0)->getType()) {
1259 return ConstantFoldCompareInstOperands(
1260 IntPredicate: Predicate, Ops0: CE0->getOperand(i_nocapture: 0), Ops1: CE1->getOperand(i_nocapture: 0), DL, TLI);
1261 }
1262 }
1263 }
1264 }
1265
1266 // Convert pointer comparison (base+offset1) pred (base+offset2) into
1267 // offset1 pred offset2, for the case where the offset is inbounds. This
1268 // only works for equality and unsigned comparison, as inbounds permits
1269 // crossing the sign boundary. However, the offset comparison itself is
1270 // signed.
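    // For example, icmp ult (gep inbounds (i8, ptr @g, i64 4)),
    //                        (gep inbounds (i8, ptr @g, i64 8))
    // folds to true because both operands strip to @g with offsets 4 and 8.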
1271 if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(predicate: Predicate)) {
1272 unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ty: Ops0->getType());
1273 APInt Offset0(IndexWidth, 0);
1274 bool IsEqPred = ICmpInst::isEquality(P: Predicate);
1275 Value *Stripped0 = Ops0->stripAndAccumulateConstantOffsets(
1276 DL, Offset&: Offset0, /*AllowNonInbounds=*/IsEqPred,
1277 /*AllowInvariantGroup=*/false, /*ExternalAnalysis=*/nullptr,
1278 /*LookThroughIntToPtr=*/IsEqPred);
1279 APInt Offset1(IndexWidth, 0);
1280 Value *Stripped1 = Ops1->stripAndAccumulateConstantOffsets(
1281 DL, Offset&: Offset1, /*AllowNonInbounds=*/IsEqPred,
1282 /*AllowInvariantGroup=*/false, /*ExternalAnalysis=*/nullptr,
1283 /*LookThroughIntToPtr=*/IsEqPred);
1284 if (Stripped0 == Stripped1)
1285 return ConstantInt::getBool(
1286 Context&: Ops0->getContext(),
1287 V: ICmpInst::compare(LHS: Offset0, RHS: Offset1,
1288 Pred: ICmpInst::getSignedPredicate(Pred: Predicate)));
1289 }
1290 } else if (isa<ConstantExpr>(Val: Ops1)) {
1291 // If RHS is a constant expression, but the left side isn't, swap the
1292 // operands and try again.
1293 Predicate = ICmpInst::getSwappedPredicate(pred: Predicate);
1294 return ConstantFoldCompareInstOperands(IntPredicate: Predicate, Ops0: Ops1, Ops1: Ops0, DL, TLI);
1295 }
1296
1297 if (CmpInst::isFPPredicate(P: Predicate)) {
1298 // Flush any denormal constant float input according to denormal handling
1299 // mode.
1300 Ops0 = FlushFPConstant(Operand: Ops0, I, /*IsOutput=*/false);
1301 if (!Ops0)
1302 return nullptr;
1303 Ops1 = FlushFPConstant(Operand: Ops1, I, /*IsOutput=*/false);
1304 if (!Ops1)
1305 return nullptr;
1306 }
1307
1308 return ConstantFoldCompareInstruction(Predicate, C1: Ops0, C2: Ops1);
1309}
1310
1311Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
1312 const DataLayout &DL) {
1313 assert(Instruction::isUnaryOp(Opcode));
1314
1315 return ConstantFoldUnaryInstruction(Opcode, V: Op);
1316}
1317
1318Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1319 Constant *RHS,
1320 const DataLayout &DL) {
1321 assert(Instruction::isBinaryOp(Opcode));
1322 if (isa<ConstantExpr>(Val: LHS) || isa<ConstantExpr>(Val: RHS))
1323 if (Constant *C = SymbolicallyEvaluateBinop(Opc: Opcode, Op0: LHS, Op1: RHS, DL))
1324 return C;
1325
1326 if (ConstantExpr::isDesirableBinOp(Opcode))
1327 return ConstantExpr::get(Opcode, C1: LHS, C2: RHS);
1328 return ConstantFoldBinaryInstruction(Opcode, V1: LHS, V2: RHS);
1329}
1330
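/// Flush a denormal constant \p APF according to \p Mode: IEEE keeps the value
/// as-is, PreserveSign maps it to a zero with the same sign, PositiveZero maps
/// it to +0.0, and Dynamic returns nullptr because the behavior is unknown.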
1331static ConstantFP *flushDenormalConstant(Type *Ty, const APFloat &APF,
1332 DenormalMode::DenormalModeKind Mode) {
1333 switch (Mode) {
1334 case DenormalMode::Dynamic:
1335 return nullptr;
1336 case DenormalMode::IEEE:
1337 return ConstantFP::get(Context&: Ty->getContext(), V: APF);
1338 case DenormalMode::PreserveSign:
1339 return ConstantFP::get(
1340 Context&: Ty->getContext(),
1341 V: APFloat::getZero(Sem: APF.getSemantics(), Negative: APF.isNegative()));
1342 case DenormalMode::PositiveZero:
1343 return ConstantFP::get(Context&: Ty->getContext(),
1344 V: APFloat::getZero(Sem: APF.getSemantics(), Negative: false));
1345 default:
1346 break;
1347 }
1348
1349 llvm_unreachable("unknown denormal mode");
1350}
1351
1352/// Return the denormal mode that can be assumed when executing a floating point
1353/// operation at \p CtxI.
1354static DenormalMode getInstrDenormalMode(const Instruction *CtxI, Type *Ty) {
1355 if (!CtxI || !CtxI->getParent() || !CtxI->getFunction())
1356 return DenormalMode::getDynamic();
1357 return CtxI->getFunction()->getDenormalMode(FPType: Ty->getFltSemantics());
1358}
1359
1360static ConstantFP *flushDenormalConstantFP(ConstantFP *CFP,
1361 const Instruction *Inst,
1362 bool IsOutput) {
1363 const APFloat &APF = CFP->getValueAPF();
1364 if (!APF.isDenormal())
1365 return CFP;
1366
1367 DenormalMode Mode = getInstrDenormalMode(CtxI: Inst, Ty: CFP->getType());
1368 return flushDenormalConstant(Ty: CFP->getType(), APF,
1369 Mode: IsOutput ? Mode.Output : Mode.Input);
1370}
1371
1372Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *Inst,
1373 bool IsOutput) {
1374 if (ConstantFP *CFP = dyn_cast<ConstantFP>(Val: Operand))
1375 return flushDenormalConstantFP(CFP, Inst, IsOutput);
1376
1377 if (isa<ConstantAggregateZero, UndefValue, ConstantExpr>(Val: Operand))
1378 return Operand;
1379
1380 Type *Ty = Operand->getType();
1381 VectorType *VecTy = dyn_cast<VectorType>(Val: Ty);
1382 if (VecTy) {
1383 if (auto *Splat = dyn_cast_or_null<ConstantFP>(Val: Operand->getSplatValue())) {
1384 ConstantFP *Folded = flushDenormalConstantFP(CFP: Splat, Inst, IsOutput);
1385 if (!Folded)
1386 return nullptr;
1387 return ConstantVector::getSplat(EC: VecTy->getElementCount(), Elt: Folded);
1388 }
1389
1390 Ty = VecTy->getElementType();
1391 }
1392
1393 if (const auto *CV = dyn_cast<ConstantVector>(Val: Operand)) {
1394 SmallVector<Constant *, 16> NewElts;
1395 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1396 Constant *Element = CV->getAggregateElement(Elt: i);
1397 if (isa<UndefValue>(Val: Element)) {
1398 NewElts.push_back(Elt: Element);
1399 continue;
1400 }
1401
1402 ConstantFP *CFP = dyn_cast<ConstantFP>(Val: Element);
1403 if (!CFP)
1404 return nullptr;
1405
1406 ConstantFP *Folded = flushDenormalConstantFP(CFP, Inst, IsOutput);
1407 if (!Folded)
1408 return nullptr;
1409 NewElts.push_back(Elt: Folded);
1410 }
1411
1412 return ConstantVector::get(V: NewElts);
1413 }
1414
1415 if (const auto *CDV = dyn_cast<ConstantDataVector>(Val: Operand)) {
1416 SmallVector<Constant *, 16> NewElts;
1417 for (unsigned I = 0, E = CDV->getNumElements(); I < E; ++I) {
1418 const APFloat &Elt = CDV->getElementAsAPFloat(i: I);
1419 if (!Elt.isDenormal()) {
1420 NewElts.push_back(Elt: ConstantFP::get(Ty, V: Elt));
1421 } else {
1422 DenormalMode Mode = getInstrDenormalMode(CtxI: Inst, Ty);
1423 ConstantFP *Folded =
1424 flushDenormalConstant(Ty, APF: Elt, Mode: IsOutput ? Mode.Output : Mode.Input);
1425 if (!Folded)
1426 return nullptr;
1427 NewElts.push_back(Elt: Folded);
1428 }
1429 }
1430
1431 return ConstantVector::get(V: NewElts);
1432 }
1433
1434 return nullptr;
1435}
1436
1437Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
1438 Constant *RHS, const DataLayout &DL,
1439 const Instruction *I,
1440 bool AllowNonDeterministic) {
1441 if (Instruction::isBinaryOp(Opcode)) {
1442 // Flush denormal inputs if needed.
1443 Constant *Op0 = FlushFPConstant(Operand: LHS, Inst: I, /* IsOutput */ false);
1444 if (!Op0)
1445 return nullptr;
1446 Constant *Op1 = FlushFPConstant(Operand: RHS, Inst: I, /* IsOutput */ false);
1447 if (!Op1)
1448 return nullptr;
1449
1450 // If nsz or an algebraic FMF flag is set, the result of the FP operation
1451 // may change due to future optimization. Don't constant fold them if
1452 // non-deterministic results are not allowed.
1453 if (!AllowNonDeterministic)
1454 if (auto *FP = dyn_cast_or_null<FPMathOperator>(Val: I))
1455 if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
1456 FP->hasAllowContract() || FP->hasAllowReciprocal())
1457 return nullptr;
1458
1459 // Calculate constant result.
1460 Constant *C = ConstantFoldBinaryOpOperands(Opcode, LHS: Op0, RHS: Op1, DL);
1461 if (!C)
1462 return nullptr;
1463
1464 // Flush denormal output if needed.
1465 C = FlushFPConstant(Operand: C, Inst: I, /* IsOutput */ true);
1466 if (!C)
1467 return nullptr;
1468
1469 // The precise NaN value is non-deterministic.
1470 if (!AllowNonDeterministic && C->isNaN())
1471 return nullptr;
1472
1473 return C;
1474 }
1475 // If the instruction lacks a parent/function and the denormal mode cannot
1476 // be determined, use the default (IEEE).
1477 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
1478}
1479
1480Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1481 Type *DestTy, const DataLayout &DL) {
1482 assert(Instruction::isCast(Opcode));
1483 switch (Opcode) {
1484 default:
1485 llvm_unreachable("Missing case");
1486 case Instruction::PtrToInt:
1487 if (auto *CE = dyn_cast<ConstantExpr>(Val: C)) {
1488 Constant *FoldedValue = nullptr;
1489 // If the input is an inttoptr, eliminate the pair. This requires knowing
1490 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1491 if (CE->getOpcode() == Instruction::IntToPtr) {
1492 // zext/trunc the inttoptr to pointer size.
1493 FoldedValue = ConstantFoldIntegerCast(C: CE->getOperand(i_nocapture: 0),
1494 DestTy: DL.getIntPtrType(CE->getType()),
1495 /*IsSigned=*/false, DL);
1496 } else if (auto *GEP = dyn_cast<GEPOperator>(Val: CE)) {
1497 // If we have GEP, we can perform the following folds:
1498 // (ptrtoint (gep null, x)) -> x
1499 // (ptrtoint (gep (gep null, x), y)) -> x + y, etc.
1500 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ty: GEP->getType());
1501 APInt BaseOffset(BitWidth, 0);
1502 auto *Base = cast<Constant>(Val: GEP->stripAndAccumulateConstantOffsets(
1503 DL, Offset&: BaseOffset, /*AllowNonInbounds=*/true));
1504 if (Base->isNullValue()) {
1505 FoldedValue = ConstantInt::get(Context&: CE->getContext(), V: BaseOffset);
1506 } else {
1507 // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
1508 if (GEP->getNumIndices() == 1 &&
1509 GEP->getSourceElementType()->isIntegerTy(Bitwidth: 8)) {
1510 auto *Ptr = cast<Constant>(Val: GEP->getPointerOperand());
1511 auto *Sub = dyn_cast<ConstantExpr>(Val: GEP->getOperand(i_nocapture: 1));
1512 Type *IntIdxTy = DL.getIndexType(PtrTy: Ptr->getType());
1513 if (Sub && Sub->getType() == IntIdxTy &&
1514 Sub->getOpcode() == Instruction::Sub &&
1515 Sub->getOperand(i_nocapture: 0)->isNullValue())
1516 FoldedValue = ConstantExpr::getSub(
1517 C1: ConstantExpr::getPtrToInt(C: Ptr, Ty: IntIdxTy), C2: Sub->getOperand(i_nocapture: 1));
1518 }
1519 }
1520 }
1521 if (FoldedValue) {
1522 // Do a zext or trunc to get to the ptrtoint dest size.
1523 return ConstantFoldIntegerCast(C: FoldedValue, DestTy, /*IsSigned=*/false,
1524 DL);
1525 }
1526 }
1527 break;
1528 case Instruction::IntToPtr:
1529 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1530 // the int size is >= the ptr size and the address spaces are the same.
1531 // This requires knowing the width of a pointer, so it can't be done in
1532 // ConstantExpr::getCast.
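// For example, with 64-bit pointers in address space 0:
//   inttoptr (i64 ptrtoint (ptr @g to i64) to ptr)  -->  @g
// since no pointer bits are lost in the round trip.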
1533 if (auto *CE = dyn_cast<ConstantExpr>(Val: C)) {
1534 if (CE->getOpcode() == Instruction::PtrToInt) {
1535 Constant *SrcPtr = CE->getOperand(i_nocapture: 0);
1536 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1537 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1538
1539 if (MidIntSize >= SrcPtrSize) {
1540 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1541 if (SrcAS == DestTy->getPointerAddressSpace())
1542 return FoldBitCast(C: CE->getOperand(i_nocapture: 0), DestTy, DL);
1543 }
1544 }
1545 }
1546 break;
1547 case Instruction::Trunc:
1548 case Instruction::ZExt:
1549 case Instruction::SExt:
1550 case Instruction::FPTrunc:
1551 case Instruction::FPExt:
1552 case Instruction::UIToFP:
1553 case Instruction::SIToFP:
1554 case Instruction::FPToUI:
1555 case Instruction::FPToSI:
1556 case Instruction::AddrSpaceCast:
1557 break;
1558 case Instruction::BitCast:
1559 return FoldBitCast(C, DestTy, DL);
1560 }
1561
1562 if (ConstantExpr::isDesirableCastOp(Opcode))
1563 return ConstantExpr::getCast(ops: Opcode, C, Ty: DestTy);
1564 return ConstantFoldCastInstruction(opcode: Opcode, V: C, DestTy);
1565}
1566
1567Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
1568 bool IsSigned, const DataLayout &DL) {
1569 Type *SrcTy = C->getType();
1570 if (SrcTy == DestTy)
1571 return C;
1572 if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1573 return ConstantFoldCastOperand(Opcode: Instruction::Trunc, C, DestTy, DL);
1574 if (IsSigned)
1575 return ConstantFoldCastOperand(Opcode: Instruction::SExt, C, DestTy, DL);
1576 return ConstantFoldCastOperand(Opcode: Instruction::ZExt, C, DestTy, DL);
1577}
1578
1579//===----------------------------------------------------------------------===//
1580// Constant Folding for Calls
1581//
1582
1583bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1584 if (Call->isNoBuiltin())
1585 return false;
1586 if (Call->getFunctionType() != F->getFunctionType())
1587 return false;
1588
1589 // Allow FP calls (both libcalls and intrinsics) to be left unfolded. This
1590 // can be useful for GPU targets or in cross-compilation scenarios where
1591 // the exact target FP behaviour is required and the host compiler's
1592 // behaviour may differ slightly from the device's run-time behaviour.
1593 if (DisableFPCallFolding && (F->getReturnType()->isFloatingPointTy() ||
1594 any_of(Range: F->args(), P: [](const Argument &Arg) {
1595 return Arg.getType()->isFloatingPointTy();
1596 })))
1597 return false;
1598
1599 switch (F->getIntrinsicID()) {
1600 // Operations that do not operate on floating-point numbers and do not
1601 // depend on the FP environment can be folded even in strictfp functions.
1602 case Intrinsic::bswap:
1603 case Intrinsic::ctpop:
1604 case Intrinsic::ctlz:
1605 case Intrinsic::cttz:
1606 case Intrinsic::fshl:
1607 case Intrinsic::fshr:
1608 case Intrinsic::launder_invariant_group:
1609 case Intrinsic::strip_invariant_group:
1610 case Intrinsic::masked_load:
1611 case Intrinsic::get_active_lane_mask:
1612 case Intrinsic::abs:
1613 case Intrinsic::smax:
1614 case Intrinsic::smin:
1615 case Intrinsic::umax:
1616 case Intrinsic::umin:
1617 case Intrinsic::scmp:
1618 case Intrinsic::ucmp:
1619 case Intrinsic::sadd_with_overflow:
1620 case Intrinsic::uadd_with_overflow:
1621 case Intrinsic::ssub_with_overflow:
1622 case Intrinsic::usub_with_overflow:
1623 case Intrinsic::smul_with_overflow:
1624 case Intrinsic::umul_with_overflow:
1625 case Intrinsic::sadd_sat:
1626 case Intrinsic::uadd_sat:
1627 case Intrinsic::ssub_sat:
1628 case Intrinsic::usub_sat:
1629 case Intrinsic::smul_fix:
1630 case Intrinsic::smul_fix_sat:
1631 case Intrinsic::bitreverse:
1632 case Intrinsic::is_constant:
1633 case Intrinsic::vector_reduce_add:
1634 case Intrinsic::vector_reduce_mul:
1635 case Intrinsic::vector_reduce_and:
1636 case Intrinsic::vector_reduce_or:
1637 case Intrinsic::vector_reduce_xor:
1638 case Intrinsic::vector_reduce_smin:
1639 case Intrinsic::vector_reduce_smax:
1640 case Intrinsic::vector_reduce_umin:
1641 case Intrinsic::vector_reduce_umax:
1642 case Intrinsic::vector_extract:
1643 case Intrinsic::vector_insert:
1644 case Intrinsic::vector_interleave2:
1645 case Intrinsic::vector_deinterleave2:
1646 // Target intrinsics
1647 case Intrinsic::amdgcn_perm:
1648 case Intrinsic::amdgcn_wave_reduce_umin:
1649 case Intrinsic::amdgcn_wave_reduce_umax:
1650 case Intrinsic::amdgcn_s_wqm:
1651 case Intrinsic::amdgcn_s_quadmask:
1652 case Intrinsic::amdgcn_s_bitreplicate:
1653 case Intrinsic::arm_mve_vctp8:
1654 case Intrinsic::arm_mve_vctp16:
1655 case Intrinsic::arm_mve_vctp32:
1656 case Intrinsic::arm_mve_vctp64:
1657 case Intrinsic::aarch64_sve_convert_from_svbool:
1658 // WebAssembly float semantics are always known
1659 case Intrinsic::wasm_trunc_signed:
1660 case Intrinsic::wasm_trunc_unsigned:
1661 return true;
1662
1663 // In the general case, floating-point operations cannot be folded in
1664 // strictfp functions. They can be folded if the FP environment is known to the compiler.
1665 case Intrinsic::minnum:
1666 case Intrinsic::maxnum:
1667 case Intrinsic::minimum:
1668 case Intrinsic::maximum:
1669 case Intrinsic::minimumnum:
1670 case Intrinsic::maximumnum:
1671 case Intrinsic::log:
1672 case Intrinsic::log2:
1673 case Intrinsic::log10:
1674 case Intrinsic::exp:
1675 case Intrinsic::exp2:
1676 case Intrinsic::exp10:
1677 case Intrinsic::sqrt:
1678 case Intrinsic::sin:
1679 case Intrinsic::cos:
1680 case Intrinsic::sincos:
1681 case Intrinsic::sinh:
1682 case Intrinsic::cosh:
1683 case Intrinsic::atan:
1684 case Intrinsic::pow:
1685 case Intrinsic::powi:
1686 case Intrinsic::ldexp:
1687 case Intrinsic::fma:
1688 case Intrinsic::fmuladd:
1689 case Intrinsic::frexp:
1690 case Intrinsic::fptoui_sat:
1691 case Intrinsic::fptosi_sat:
1692 case Intrinsic::convert_from_fp16:
1693 case Intrinsic::convert_to_fp16:
1694 case Intrinsic::amdgcn_cos:
1695 case Intrinsic::amdgcn_cubeid:
1696 case Intrinsic::amdgcn_cubema:
1697 case Intrinsic::amdgcn_cubesc:
1698 case Intrinsic::amdgcn_cubetc:
1699 case Intrinsic::amdgcn_fmul_legacy:
1700 case Intrinsic::amdgcn_fma_legacy:
1701 case Intrinsic::amdgcn_fract:
1702 case Intrinsic::amdgcn_sin:
1703 // The intrinsics below depend on the rounding mode in MXCSR.
1704 case Intrinsic::x86_sse_cvtss2si:
1705 case Intrinsic::x86_sse_cvtss2si64:
1706 case Intrinsic::x86_sse_cvttss2si:
1707 case Intrinsic::x86_sse_cvttss2si64:
1708 case Intrinsic::x86_sse2_cvtsd2si:
1709 case Intrinsic::x86_sse2_cvtsd2si64:
1710 case Intrinsic::x86_sse2_cvttsd2si:
1711 case Intrinsic::x86_sse2_cvttsd2si64:
1712 case Intrinsic::x86_avx512_vcvtss2si32:
1713 case Intrinsic::x86_avx512_vcvtss2si64:
1714 case Intrinsic::x86_avx512_cvttss2si:
1715 case Intrinsic::x86_avx512_cvttss2si64:
1716 case Intrinsic::x86_avx512_vcvtsd2si32:
1717 case Intrinsic::x86_avx512_vcvtsd2si64:
1718 case Intrinsic::x86_avx512_cvttsd2si:
1719 case Intrinsic::x86_avx512_cvttsd2si64:
1720 case Intrinsic::x86_avx512_vcvtss2usi32:
1721 case Intrinsic::x86_avx512_vcvtss2usi64:
1722 case Intrinsic::x86_avx512_cvttss2usi:
1723 case Intrinsic::x86_avx512_cvttss2usi64:
1724 case Intrinsic::x86_avx512_vcvtsd2usi32:
1725 case Intrinsic::x86_avx512_vcvtsd2usi64:
1726 case Intrinsic::x86_avx512_cvttsd2usi:
1727 case Intrinsic::x86_avx512_cvttsd2usi64:
1728
1729 // NVVM FMax intrinsics
1730 case Intrinsic::nvvm_fmax_d:
1731 case Intrinsic::nvvm_fmax_f:
1732 case Intrinsic::nvvm_fmax_ftz_f:
1733 case Intrinsic::nvvm_fmax_ftz_nan_f:
1734 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
1735 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
1736 case Intrinsic::nvvm_fmax_nan_f:
1737 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
1738 case Intrinsic::nvvm_fmax_xorsign_abs_f:
1739
1740 // NVVM FMin intrinsics
1741 case Intrinsic::nvvm_fmin_d:
1742 case Intrinsic::nvvm_fmin_f:
1743 case Intrinsic::nvvm_fmin_ftz_f:
1744 case Intrinsic::nvvm_fmin_ftz_nan_f:
1745 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
1746 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
1747 case Intrinsic::nvvm_fmin_nan_f:
1748 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
1749 case Intrinsic::nvvm_fmin_xorsign_abs_f:
1750
1751 // NVVM float/double to int32/uint32 conversion intrinsics
1752 case Intrinsic::nvvm_f2i_rm:
1753 case Intrinsic::nvvm_f2i_rn:
1754 case Intrinsic::nvvm_f2i_rp:
1755 case Intrinsic::nvvm_f2i_rz:
1756 case Intrinsic::nvvm_f2i_rm_ftz:
1757 case Intrinsic::nvvm_f2i_rn_ftz:
1758 case Intrinsic::nvvm_f2i_rp_ftz:
1759 case Intrinsic::nvvm_f2i_rz_ftz:
1760 case Intrinsic::nvvm_f2ui_rm:
1761 case Intrinsic::nvvm_f2ui_rn:
1762 case Intrinsic::nvvm_f2ui_rp:
1763 case Intrinsic::nvvm_f2ui_rz:
1764 case Intrinsic::nvvm_f2ui_rm_ftz:
1765 case Intrinsic::nvvm_f2ui_rn_ftz:
1766 case Intrinsic::nvvm_f2ui_rp_ftz:
1767 case Intrinsic::nvvm_f2ui_rz_ftz:
1768 case Intrinsic::nvvm_d2i_rm:
1769 case Intrinsic::nvvm_d2i_rn:
1770 case Intrinsic::nvvm_d2i_rp:
1771 case Intrinsic::nvvm_d2i_rz:
1772 case Intrinsic::nvvm_d2ui_rm:
1773 case Intrinsic::nvvm_d2ui_rn:
1774 case Intrinsic::nvvm_d2ui_rp:
1775 case Intrinsic::nvvm_d2ui_rz:
1776
1777 // NVVM float/double to int64/uint64 conversion intrinsics
1778 case Intrinsic::nvvm_f2ll_rm:
1779 case Intrinsic::nvvm_f2ll_rn:
1780 case Intrinsic::nvvm_f2ll_rp:
1781 case Intrinsic::nvvm_f2ll_rz:
1782 case Intrinsic::nvvm_f2ll_rm_ftz:
1783 case Intrinsic::nvvm_f2ll_rn_ftz:
1784 case Intrinsic::nvvm_f2ll_rp_ftz:
1785 case Intrinsic::nvvm_f2ll_rz_ftz:
1786 case Intrinsic::nvvm_f2ull_rm:
1787 case Intrinsic::nvvm_f2ull_rn:
1788 case Intrinsic::nvvm_f2ull_rp:
1789 case Intrinsic::nvvm_f2ull_rz:
1790 case Intrinsic::nvvm_f2ull_rm_ftz:
1791 case Intrinsic::nvvm_f2ull_rn_ftz:
1792 case Intrinsic::nvvm_f2ull_rp_ftz:
1793 case Intrinsic::nvvm_f2ull_rz_ftz:
1794 case Intrinsic::nvvm_d2ll_rm:
1795 case Intrinsic::nvvm_d2ll_rn:
1796 case Intrinsic::nvvm_d2ll_rp:
1797 case Intrinsic::nvvm_d2ll_rz:
1798 case Intrinsic::nvvm_d2ull_rm:
1799 case Intrinsic::nvvm_d2ull_rn:
1800 case Intrinsic::nvvm_d2ull_rp:
1801 case Intrinsic::nvvm_d2ull_rz:
1802 return !Call->isStrictFP();
1803
1804 // Sign operations are actually bitwise operations; they do not raise
1805 // exceptions even for SNaNs.
1806 case Intrinsic::fabs:
1807 case Intrinsic::copysign:
1808 case Intrinsic::is_fpclass:
1809 // Non-constrained variants of rounding operations assume the default FP
1810 // environment, so they can be folded in any case.
1811 case Intrinsic::ceil:
1812 case Intrinsic::floor:
1813 case Intrinsic::round:
1814 case Intrinsic::roundeven:
1815 case Intrinsic::trunc:
1816 case Intrinsic::nearbyint:
1817 case Intrinsic::rint:
1818 case Intrinsic::canonicalize:
1819 // Constrained intrinsics can be folded if the FP environment is known
1820 // to the compiler.
1821 case Intrinsic::experimental_constrained_fma:
1822 case Intrinsic::experimental_constrained_fmuladd:
1823 case Intrinsic::experimental_constrained_fadd:
1824 case Intrinsic::experimental_constrained_fsub:
1825 case Intrinsic::experimental_constrained_fmul:
1826 case Intrinsic::experimental_constrained_fdiv:
1827 case Intrinsic::experimental_constrained_frem:
1828 case Intrinsic::experimental_constrained_ceil:
1829 case Intrinsic::experimental_constrained_floor:
1830 case Intrinsic::experimental_constrained_round:
1831 case Intrinsic::experimental_constrained_roundeven:
1832 case Intrinsic::experimental_constrained_trunc:
1833 case Intrinsic::experimental_constrained_nearbyint:
1834 case Intrinsic::experimental_constrained_rint:
1835 case Intrinsic::experimental_constrained_fcmp:
1836 case Intrinsic::experimental_constrained_fcmps:
1837 return true;
1838 default:
1839 return false;
1840 case Intrinsic::not_intrinsic: break;
1841 }
1842
1843 if (!F->hasName() || Call->isStrictFP())
1844 return false;
1845
1846 // In these cases, checking the length is required. We don't want to return
1847 // true for a name like "cos\0blah", which strcmp would consider equal to
1848 // "cos" but which has length 8.
1849 StringRef Name = F->getName();
1850 switch (Name[0]) {
1851 default:
1852 return false;
1853 case 'a':
1854 return Name == "acos" || Name == "acosf" ||
1855 Name == "asin" || Name == "asinf" ||
1856 Name == "atan" || Name == "atanf" ||
1857 Name == "atan2" || Name == "atan2f";
1858 case 'c':
1859 return Name == "ceil" || Name == "ceilf" ||
1860 Name == "cos" || Name == "cosf" ||
1861 Name == "cosh" || Name == "coshf";
1862 case 'e':
1863 return Name == "exp" || Name == "expf" || Name == "exp2" ||
1864 Name == "exp2f" || Name == "erf" || Name == "erff";
1865 case 'f':
1866 return Name == "fabs" || Name == "fabsf" ||
1867 Name == "floor" || Name == "floorf" ||
1868 Name == "fmod" || Name == "fmodf";
1869 case 'i':
1870 return Name == "ilogb" || Name == "ilogbf";
1871 case 'l':
1872 return Name == "log" || Name == "logf" || Name == "logl" ||
1873 Name == "log2" || Name == "log2f" || Name == "log10" ||
1874 Name == "log10f" || Name == "logb" || Name == "logbf" ||
1875 Name == "log1p" || Name == "log1pf";
1876 case 'n':
1877 return Name == "nearbyint" || Name == "nearbyintf";
1878 case 'p':
1879 return Name == "pow" || Name == "powf";
1880 case 'r':
1881 return Name == "remainder" || Name == "remainderf" ||
1882 Name == "rint" || Name == "rintf" ||
1883 Name == "round" || Name == "roundf";
1884 case 's':
1885 return Name == "sin" || Name == "sinf" ||
1886 Name == "sinh" || Name == "sinhf" ||
1887 Name == "sqrt" || Name == "sqrtf";
1888 case 't':
1889 return Name == "tan" || Name == "tanf" ||
1890 Name == "tanh" || Name == "tanhf" ||
1891 Name == "trunc" || Name == "truncf";
1892 case '_':
1893 // Check for various function names that get used for the math functions
1894 // when the header files are preprocessed with the macro
1895 // __FINITE_MATH_ONLY__ enabled.
1896 // The '12' here is the length of the shortest name that can match.
1897 // We need to check the size before looking at Name[1] and Name[2]
1898 // so we may as well check a limit that will eliminate mismatches.
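// (For example, "__exp_finite" and "__pow_finite" are exactly 12 characters.)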
1899 if (Name.size() < 12 || Name[1] != '_')
1900 return false;
1901 switch (Name[2]) {
1902 default:
1903 return false;
1904 case 'a':
1905 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1906 Name == "__asin_finite" || Name == "__asinf_finite" ||
1907 Name == "__atan2_finite" || Name == "__atan2f_finite";
1908 case 'c':
1909 return Name == "__cosh_finite" || Name == "__coshf_finite";
1910 case 'e':
1911 return Name == "__exp_finite" || Name == "__expf_finite" ||
1912 Name == "__exp2_finite" || Name == "__exp2f_finite";
1913 case 'l':
1914 return Name == "__log_finite" || Name == "__logf_finite" ||
1915 Name == "__log10_finite" || Name == "__log10f_finite";
1916 case 'p':
1917 return Name == "__pow_finite" || Name == "__powf_finite";
1918 case 's':
1919 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1920 }
1921 }
1922}
1923
1924namespace {
1925
1926Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1927 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1928 APFloat APF(V);
1929 bool unused;
1930 APF.convert(ToSemantics: Ty->getFltSemantics(), RM: APFloat::rmNearestTiesToEven, losesInfo: &unused);
1931 return ConstantFP::get(Context&: Ty->getContext(), V: APF);
1932 }
1933 if (Ty->isDoubleTy())
1934 return ConstantFP::get(Context&: Ty->getContext(), V: APFloat(V));
1935 llvm_unreachable("Can only constant fold half/float/double");
1936}
1937
1938#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1939Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
1940 if (Ty->isFP128Ty())
1941 return ConstantFP::get(Ty, V);
1942 llvm_unreachable("Can only constant fold fp128");
1943}
1944#endif
1945
1946/// Clear the floating-point exception state.
1947inline void llvm_fenv_clearexcept() {
1948#if HAVE_DECL_FE_ALL_EXCEPT
1949 feclearexcept(FE_ALL_EXCEPT);
1950#endif
1951 errno = 0;
1952}
1953
1954/// Test if a floating-point exception was raised.
1955inline bool llvm_fenv_testexcept() {
1956 int errno_val = errno;
1957 if (errno_val == ERANGE || errno_val == EDOM)
1958 return true;
1959#if HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1960 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1961 return true;
1962#endif
1963 return false;
1964}
1965
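/// Flush a denormal value to zero while keeping its sign; non-denormal values
/// are returned unchanged.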
1966static const APFloat FTZPreserveSign(const APFloat &V) {
1967 if (V.isDenormal())
1968 return APFloat::getZero(Sem: V.getSemantics(), Negative: V.isNegative());
1969 return V;
1970}
1971
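/// Evaluate a unary math function using the host's native double
/// implementation and convert the result back to Ty. Returns nullptr if the
/// evaluation set errno or raised a floating-point exception other than
/// inexact, since the result would then be host-library dependent.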
1972Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1973 Type *Ty) {
1974 llvm_fenv_clearexcept();
1975 double Result = NativeFP(V.convertToDouble());
1976 if (llvm_fenv_testexcept()) {
1977 llvm_fenv_clearexcept();
1978 return nullptr;
1979 }
1980
1981 return GetConstantFoldFPValue(V: Result, Ty);
1982}
1983
1984#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1985Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
1986 Type *Ty) {
1987 llvm_fenv_clearexcept();
1988 float128 Result = NativeFP(V.convertToQuad());
1989 if (llvm_fenv_testexcept()) {
1990 llvm_fenv_clearexcept();
1991 return nullptr;
1992 }
1993
1994 return GetConstantFoldFPValue128(V: Result, Ty);
1995}
1996#endif
1997
1998Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1999 const APFloat &V, const APFloat &W, Type *Ty) {
2000 llvm_fenv_clearexcept();
2001 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
2002 if (llvm_fenv_testexcept()) {
2003 llvm_fenv_clearexcept();
2004 return nullptr;
2005 }
2006
2007 return GetConstantFoldFPValue(V: Result, Ty);
2008}
2009
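/// Fold an integer vector reduction over a constant fixed-width vector
/// operand. For example, vector_reduce_add on <4 x i32> <i32 1, i32 2, i32 3,
/// i32 4> folds to i32 10, and vector_reduce_umax on the same vector folds to
/// i32 4.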
2010Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
2011 FixedVectorType *VT = dyn_cast<FixedVectorType>(Val: Op->getType());
2012 if (!VT)
2013 return nullptr;
2014
2015 // This isn't strictly necessary, but handle the special/common case of zero:
2016 // all integer reductions of a zero input produce zero.
2017 if (isa<ConstantAggregateZero>(Val: Op))
2018 return ConstantInt::get(Ty: VT->getElementType(), V: 0);
2019
2020 // This is the same as the underlying binops - poison propagates.
2021 if (isa<PoisonValue>(Val: Op) || Op->containsPoisonElement())
2022 return PoisonValue::get(T: VT->getElementType());
2023
2024 // TODO: Handle undef.
2025 if (!isa<ConstantVector>(Val: Op) && !isa<ConstantDataVector>(Val: Op))
2026 return nullptr;
2027
2028 auto *EltC = dyn_cast<ConstantInt>(Val: Op->getAggregateElement(Elt: 0U));
2029 if (!EltC)
2030 return nullptr;
2031
2032 APInt Acc = EltC->getValue();
2033 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
2034 if (!(EltC = dyn_cast<ConstantInt>(Val: Op->getAggregateElement(Elt: I))))
2035 return nullptr;
2036 const APInt &X = EltC->getValue();
2037 switch (IID) {
2038 case Intrinsic::vector_reduce_add:
2039 Acc = Acc + X;
2040 break;
2041 case Intrinsic::vector_reduce_mul:
2042 Acc = Acc * X;
2043 break;
2044 case Intrinsic::vector_reduce_and:
2045 Acc = Acc & X;
2046 break;
2047 case Intrinsic::vector_reduce_or:
2048 Acc = Acc | X;
2049 break;
2050 case Intrinsic::vector_reduce_xor:
2051 Acc = Acc ^ X;
2052 break;
2053 case Intrinsic::vector_reduce_smin:
2054 Acc = APIntOps::smin(A: Acc, B: X);
2055 break;
2056 case Intrinsic::vector_reduce_smax:
2057 Acc = APIntOps::smax(A: Acc, B: X);
2058 break;
2059 case Intrinsic::vector_reduce_umin:
2060 Acc = APIntOps::umin(A: Acc, B: X);
2061 break;
2062 case Intrinsic::vector_reduce_umax:
2063 Acc = APIntOps::umax(A: Acc, B: X);
2064 break;
2065 }
2066 }
2067
2068 return ConstantInt::get(Context&: Op->getContext(), V: Acc);
2069}
2070
2071/// Attempt to fold an SSE floating-point-to-integer conversion of a constant
2072/// floating-point value. If roundTowardZero is false, the default IEEE rounding is
2073/// used (toward nearest, ties to even). This matches the behavior of the
2074/// non-truncating SSE instructions in the default rounding mode. The desired
2075/// integer type Ty is used to select how many bits are available for the
2076/// result. Returns null if the conversion cannot be performed, otherwise
2077/// returns the Constant value resulting from the conversion.
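/// For example, with an input of 3.5 the non-truncating form folds to 4
/// (ties-to-even) while the truncating form folds to 3.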
2078Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
2079 Type *Ty, bool IsSigned) {
2080 // All of these conversion intrinsics form an integer of at most 64 bits.
2081 unsigned ResultWidth = Ty->getIntegerBitWidth();
2082 assert(ResultWidth <= 64 &&
2083 "Can only constant fold conversions to 64 and 32 bit ints");
2084
2085 uint64_t UIntVal;
2086 bool isExact = false;
2087 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
2088 : APFloat::rmNearestTiesToEven;
2089 APFloat::opStatus status =
2090 Val.convertToInteger(Input: MutableArrayRef(UIntVal), Width: ResultWidth,
2091 IsSigned, RM: mode, IsExact: &isExact);
2092 if (status != APFloat::opOK &&
2093 (!roundTowardZero || status != APFloat::opInexact))
2094 return nullptr;
2095 return ConstantInt::get(Ty, V: UIntVal, IsSigned);
2096}
2097
2098double getValueAsDouble(ConstantFP *Op) {
2099 Type *Ty = Op->getType();
2100
2101 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
2102 return Op->getValueAPF().convertToDouble();
2103
2104 bool unused;
2105 APFloat APF = Op->getValueAPF();
2106 APF.convert(ToSemantics: APFloat::IEEEdouble(), RM: APFloat::rmNearestTiesToEven, losesInfo: &unused);
2107 return APF.convertToDouble();
2108}
2109
2110static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
2111 if (auto *CI = dyn_cast<ConstantInt>(Val: Op)) {
2112 C = &CI->getValue();
2113 return true;
2114 }
2115 if (isa<UndefValue>(Val: Op)) {
2116 C = nullptr;
2117 return true;
2118 }
2119 return false;
2120}
2121
2122/// Checks if the given intrinsic call, which evaluates to a constant, is
2123/// allowed to be folded.
2124///
2125/// \param CI Constrained intrinsic call.
2126/// \param St Exception flags raised during constant evaluation.
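/// For example, an evaluation that only raises the inexact flag may still be
/// folded when exceptions are ignored (ebIgnore), but not when the rounding
/// mode is dynamic or the exception behavior is strict (ebStrict).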
2127static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
2128 APFloat::opStatus St) {
2129 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2130 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2131
2132 // If the operation does not change exception status flags, it is safe
2133 // to fold.
2134 if (St == APFloat::opStatus::opOK)
2135 return true;
2136
2137 // If the evaluation raised an FP exception, the result can depend on the
2138 // rounding mode. If the latter is unknown, folding is not possible.
2139 if (ORM == RoundingMode::Dynamic)
2140 return false;
2141
2142 // If FP exceptions are ignored, fold the call, even if such an exception
2143 // is raised.
2144 if (EB && *EB != fp::ExceptionBehavior::ebStrict)
2145 return true;
2146
2147 // Leave the calculation to run time so that the exception flags are set
2148 // correctly in hardware.
2149 return false;
2150}
2151
2152/// Returns the rounding mode that should be used for constant evaluation.
2153static RoundingMode
2154getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
2155 std::optional<RoundingMode> ORM = CI->getRoundingMode();
2156 if (!ORM || *ORM == RoundingMode::Dynamic)
2157 // Even if the rounding mode is unknown, try evaluating the operation.
2158 // If it does not raise the inexact exception, rounding was not applied,
2159 // so the result is exact and does not depend on the rounding mode. Whether
2160 // other FP exceptions are raised does not depend on the rounding mode either.
2161 return RoundingMode::NearestTiesToEven;
2162 return *ORM;
2163}
2164
2165/// Try to constant fold llvm.canonicalize for the given caller and value.
2166static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
2167 const APFloat &Src) {
2168 // Zero, positive and negative, is always OK to fold.
2169 if (Src.isZero()) {
2170 // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
2171 return ConstantFP::get(
2172 Context&: CI->getContext(),
2173 V: APFloat::getZero(Sem: Src.getSemantics(), Negative: Src.isNegative()));
2174 }
2175
2176 if (!Ty->isIEEELikeFPTy())
2177 return nullptr;
2178
2179 // Zero is always canonical and the sign must be preserved.
2180 //
2181 // Denormals and NaNs may have special encodings, but it should be OK to fold
2182 // a totally average number.
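// For example, canonicalize(1.0) and canonicalize(+inf) simply fold to
// their argument.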
2183 if (Src.isNormal() || Src.isInfinity())
2184 return ConstantFP::get(Context&: CI->getContext(), V: Src);
2185
2186 if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
2187 DenormalMode DenormMode =
2188 CI->getFunction()->getDenormalMode(FPType: Src.getSemantics());
2189
2190 if (DenormMode == DenormalMode::getIEEE())
2191 return ConstantFP::get(Context&: CI->getContext(), V: Src);
2192
2193 if (DenormMode.Input == DenormalMode::Dynamic)
2194 return nullptr;
2195
2196 // If we know if either input or output is flushed, we can fold.
2197 if ((DenormMode.Input == DenormalMode::Dynamic &&
2198 DenormMode.Output == DenormalMode::IEEE) ||
2199 (DenormMode.Input == DenormalMode::IEEE &&
2200 DenormMode.Output == DenormalMode::Dynamic))
2201 return nullptr;
2202
2203 bool IsPositive =
2204 (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
2205 (DenormMode.Output == DenormalMode::PositiveZero &&
2206 DenormMode.Input == DenormalMode::IEEE));
2207
2208 return ConstantFP::get(Context&: CI->getContext(),
2209 V: APFloat::getZero(Sem: Src.getSemantics(), Negative: !IsPositive));
2210 }
2211
2212 return nullptr;
2213}
2214
2215static Constant *ConstantFoldScalarCall1(StringRef Name,
2216 Intrinsic::ID IntrinsicID,
2217 Type *Ty,
2218 ArrayRef<Constant *> Operands,
2219 const TargetLibraryInfo *TLI,
2220 const CallBase *Call) {
2221 assert(Operands.size() == 1 && "Wrong number of operands.");
2222
2223 if (IntrinsicID == Intrinsic::is_constant) {
2224 // We know we have a "Constant" argument. But we only want to return true
2225 // for manifest constants, not those that depend on constants with
2226 // unknowable values, e.g. GlobalValue or BlockAddress.
2227 if (Operands[0]->isManifestConstant())
2228 return ConstantInt::getTrue(Context&: Ty->getContext());
2229 return nullptr;
2230 }
2231
2232 if (isa<UndefValue>(Val: Operands[0])) {
2233 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
2234 // ctpop() is between 0 and bitwidth, pick 0 for undef.
2235 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
2236 if (IntrinsicID == Intrinsic::cos ||
2237 IntrinsicID == Intrinsic::ctpop ||
2238 IntrinsicID == Intrinsic::fptoui_sat ||
2239 IntrinsicID == Intrinsic::fptosi_sat ||
2240 IntrinsicID == Intrinsic::canonicalize)
2241 return Constant::getNullValue(Ty);
2242 if (IntrinsicID == Intrinsic::bswap ||
2243 IntrinsicID == Intrinsic::bitreverse ||
2244 IntrinsicID == Intrinsic::launder_invariant_group ||
2245 IntrinsicID == Intrinsic::strip_invariant_group)
2246 return Operands[0];
2247 }
2248
2249 if (isa<ConstantPointerNull>(Val: Operands[0])) {
2250 // launder(null) == null == strip(null) iff in addrspace 0
2251 if (IntrinsicID == Intrinsic::launder_invariant_group ||
2252 IntrinsicID == Intrinsic::strip_invariant_group) {
2253 // If the instruction has not yet been inserted into a basic block (e.g.
2254 // when cloning a function during inlining), Call's caller may not be
2255 // available, so check Call's parent BB before querying Call->getCaller.
2256 const Function *Caller =
2257 Call->getParent() ? Call->getCaller() : nullptr;
2258 if (Caller &&
2259 !NullPointerIsDefined(
2260 F: Caller, AS: Operands[0]->getType()->getPointerAddressSpace())) {
2261 return Operands[0];
2262 }
2263 return nullptr;
2264 }
2265 }
2266
2267 if (auto *Op = dyn_cast<ConstantFP>(Val: Operands[0])) {
2268 if (IntrinsicID == Intrinsic::convert_to_fp16) {
2269 APFloat Val(Op->getValueAPF());
2270
2271 bool lost = false;
2272 Val.convert(ToSemantics: APFloat::IEEEhalf(), RM: APFloat::rmNearestTiesToEven, losesInfo: &lost);
2273
2274 return ConstantInt::get(Context&: Ty->getContext(), V: Val.bitcastToAPInt());
2275 }
2276
2277 APFloat U = Op->getValueAPF();
2278
2279 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2280 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2281 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2282
2283 if (U.isNaN())
2284 return nullptr;
2285
2286 unsigned Width = Ty->getIntegerBitWidth();
2287 APSInt Int(Width, !Signed);
2288 bool IsExact = false;
2289 APFloat::opStatus Status =
2290 U.convertToInteger(Result&: Int, RM: APFloat::rmTowardZero, IsExact: &IsExact);
2291
2292 if (Status == APFloat::opOK || Status == APFloat::opInexact)
2293 return ConstantInt::get(Ty, V: Int);
2294
2295 return nullptr;
2296 }
2297
2298 if (IntrinsicID == Intrinsic::fptoui_sat ||
2299 IntrinsicID == Intrinsic::fptosi_sat) {
2300 // convertToInteger() already has the desired saturation semantics.
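// For example, llvm.fptoui.sat.i8.f32(300.0) folds to 255 and any negative
// input folds to 0.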
2301 APSInt Int(Ty->getIntegerBitWidth(),
2302 IntrinsicID == Intrinsic::fptoui_sat);
2303 bool IsExact;
2304 U.convertToInteger(Result&: Int, RM: APFloat::rmTowardZero, IsExact: &IsExact);
2305 return ConstantInt::get(Ty, V: Int);
2306 }
2307
2308 if (IntrinsicID == Intrinsic::canonicalize)
2309 return constantFoldCanonicalize(Ty, CI: Call, Src: U);
2310
2311#if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2312 if (Ty->isFP128Ty()) {
2313 if (IntrinsicID == Intrinsic::log) {
2314 float128 Result = logf128(Op->getValueAPF().convertToQuad());
2315 return GetConstantFoldFPValue128(V: Result, Ty);
2316 }
2317
2318 LibFunc Fp128Func = NotLibFunc;
2319 if (TLI && TLI->getLibFunc(funcName: Name, F&: Fp128Func) && TLI->has(F: Fp128Func) &&
2320 Fp128Func == LibFunc_logl)
2321 return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
2322 }
2323#endif
2324
2325 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() &&
2326 !Ty->isIntegerTy())
2327 return nullptr;
2328
2329 // Use internal versions of these intrinsics.
2330
2331 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2332 U.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
2333 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2334 }
2335
2336 if (IntrinsicID == Intrinsic::round) {
2337 U.roundToIntegral(RM: APFloat::rmNearestTiesToAway);
2338 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2339 }
2340
2341 if (IntrinsicID == Intrinsic::roundeven) {
2342 U.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
2343 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2344 }
2345
2346 if (IntrinsicID == Intrinsic::ceil) {
2347 U.roundToIntegral(RM: APFloat::rmTowardPositive);
2348 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2349 }
2350
2351 if (IntrinsicID == Intrinsic::floor) {
2352 U.roundToIntegral(RM: APFloat::rmTowardNegative);
2353 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2354 }
2355
2356 if (IntrinsicID == Intrinsic::trunc) {
2357 U.roundToIntegral(RM: APFloat::rmTowardZero);
2358 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2359 }
2360
2361 if (IntrinsicID == Intrinsic::fabs) {
2362 U.clearSign();
2363 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2364 }
2365
2366 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2367 // The v_fract instruction behaves like the OpenCL spec, which defines
2368 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2369 // there to prevent fract(-small) from returning 1.0. It returns the
2370 // largest positive floating-point number less than 1.0."
2371 APFloat FloorU(U);
2372 FloorU.roundToIntegral(RM: APFloat::rmTowardNegative);
2373 APFloat FractU(U - FloorU);
2374 APFloat AlmostOne(U.getSemantics(), 1);
2375 AlmostOne.next(/*nextDown*/ true);
2376 return ConstantFP::get(Context&: Ty->getContext(), V: minimum(A: FractU, B: AlmostOne));
2377 }
2378
2379 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2380 // raise FP exceptions, unless the argument is a signaling NaN.
2381
2382 std::optional<APFloat::roundingMode> RM;
2383 switch (IntrinsicID) {
2384 default:
2385 break;
2386 case Intrinsic::experimental_constrained_nearbyint:
2387 case Intrinsic::experimental_constrained_rint: {
2388 auto CI = cast<ConstrainedFPIntrinsic>(Val: Call);
2389 RM = CI->getRoundingMode();
2390 if (!RM || *RM == RoundingMode::Dynamic)
2391 return nullptr;
2392 break;
2393 }
2394 case Intrinsic::experimental_constrained_round:
2395 RM = APFloat::rmNearestTiesToAway;
2396 break;
2397 case Intrinsic::experimental_constrained_ceil:
2398 RM = APFloat::rmTowardPositive;
2399 break;
2400 case Intrinsic::experimental_constrained_floor:
2401 RM = APFloat::rmTowardNegative;
2402 break;
2403 case Intrinsic::experimental_constrained_trunc:
2404 RM = APFloat::rmTowardZero;
2405 break;
2406 }
2407 if (RM) {
2408 auto CI = cast<ConstrainedFPIntrinsic>(Val: Call);
2409 if (U.isFinite()) {
2410 APFloat::opStatus St = U.roundToIntegral(RM: *RM);
2411 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2412 St == APFloat::opInexact) {
2413 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2414 if (EB == fp::ebStrict)
2415 return nullptr;
2416 }
2417 } else if (U.isSignaling()) {
2418 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2419 if (EB && *EB != fp::ebIgnore)
2420 return nullptr;
2421 U = APFloat::getQNaN(Sem: U.getSemantics());
2422 }
2423 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2424 }
2425
2426 // NVVM float/double to signed/unsigned int32/int64 conversions:
2427 switch (IntrinsicID) {
2428 // f2i
2429 case Intrinsic::nvvm_f2i_rm:
2430 case Intrinsic::nvvm_f2i_rn:
2431 case Intrinsic::nvvm_f2i_rp:
2432 case Intrinsic::nvvm_f2i_rz:
2433 case Intrinsic::nvvm_f2i_rm_ftz:
2434 case Intrinsic::nvvm_f2i_rn_ftz:
2435 case Intrinsic::nvvm_f2i_rp_ftz:
2436 case Intrinsic::nvvm_f2i_rz_ftz:
2437 // f2ui
2438 case Intrinsic::nvvm_f2ui_rm:
2439 case Intrinsic::nvvm_f2ui_rn:
2440 case Intrinsic::nvvm_f2ui_rp:
2441 case Intrinsic::nvvm_f2ui_rz:
2442 case Intrinsic::nvvm_f2ui_rm_ftz:
2443 case Intrinsic::nvvm_f2ui_rn_ftz:
2444 case Intrinsic::nvvm_f2ui_rp_ftz:
2445 case Intrinsic::nvvm_f2ui_rz_ftz:
2446 // d2i
2447 case Intrinsic::nvvm_d2i_rm:
2448 case Intrinsic::nvvm_d2i_rn:
2449 case Intrinsic::nvvm_d2i_rp:
2450 case Intrinsic::nvvm_d2i_rz:
2451 // d2ui
2452 case Intrinsic::nvvm_d2ui_rm:
2453 case Intrinsic::nvvm_d2ui_rn:
2454 case Intrinsic::nvvm_d2ui_rp:
2455 case Intrinsic::nvvm_d2ui_rz:
2456 // f2ll
2457 case Intrinsic::nvvm_f2ll_rm:
2458 case Intrinsic::nvvm_f2ll_rn:
2459 case Intrinsic::nvvm_f2ll_rp:
2460 case Intrinsic::nvvm_f2ll_rz:
2461 case Intrinsic::nvvm_f2ll_rm_ftz:
2462 case Intrinsic::nvvm_f2ll_rn_ftz:
2463 case Intrinsic::nvvm_f2ll_rp_ftz:
2464 case Intrinsic::nvvm_f2ll_rz_ftz:
2465 // f2ull
2466 case Intrinsic::nvvm_f2ull_rm:
2467 case Intrinsic::nvvm_f2ull_rn:
2468 case Intrinsic::nvvm_f2ull_rp:
2469 case Intrinsic::nvvm_f2ull_rz:
2470 case Intrinsic::nvvm_f2ull_rm_ftz:
2471 case Intrinsic::nvvm_f2ull_rn_ftz:
2472 case Intrinsic::nvvm_f2ull_rp_ftz:
2473 case Intrinsic::nvvm_f2ull_rz_ftz:
2474 // d2ll
2475 case Intrinsic::nvvm_d2ll_rm:
2476 case Intrinsic::nvvm_d2ll_rn:
2477 case Intrinsic::nvvm_d2ll_rp:
2478 case Intrinsic::nvvm_d2ll_rz:
2479 // d2ull
2480 case Intrinsic::nvvm_d2ull_rm:
2481 case Intrinsic::nvvm_d2ull_rn:
2482 case Intrinsic::nvvm_d2ull_rp:
2483 case Intrinsic::nvvm_d2ull_rz: {
2484 // In float-to-integer conversion, NaN inputs are converted to 0.
2485 if (U.isNaN())
2486 return ConstantInt::get(Ty, V: 0);
2487
2488 APFloat::roundingMode RMode =
2489 nvvm::GetFPToIntegerRoundingMode(IntrinsicID);
2490 bool IsFTZ = nvvm::FPToIntegerIntrinsicShouldFTZ(IntrinsicID);
2491 bool IsSigned = nvvm::FPToIntegerIntrinsicResultIsSigned(IntrinsicID);
2492
2493 APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned);
2494 auto FloatToRound = IsFTZ ? FTZPreserveSign(V: U) : U;
2495
2496 bool IsExact = false;
2497 APFloat::opStatus Status =
2498 FloatToRound.convertToInteger(Result&: ResInt, RM: RMode, IsExact: &IsExact);
2499
2500 if (Status != APFloat::opInvalidOp)
2501 return ConstantInt::get(Ty, V: ResInt);
2502 return nullptr;
2503 }
2504 }
2505
2506 /// We only fold functions with finite arguments. Folding NaN and inf is
2507 /// likely to be aborted with an exception anyway, and some host libms
2508 /// have known errors raising exceptions.
2509 if (!U.isFinite())
2510 return nullptr;
2511
2512 /// Currently APFloat versions of these functions do not exist, so we use
2513 /// the host native double versions. Float versions are not called
2514 /// directly but for all these it is true (float)(f((double)arg)) ==
2515 /// f(arg). Long double not supported yet.
2516 const APFloat &APF = Op->getValueAPF();
2517
2518 switch (IntrinsicID) {
2519 default: break;
2520 case Intrinsic::log:
2521 return ConstantFoldFP(NativeFP: log, V: APF, Ty);
2522 case Intrinsic::log2:
2523 // TODO: What about hosts that lack a C99 library?
2524 return ConstantFoldFP(NativeFP: log2, V: APF, Ty);
2525 case Intrinsic::log10:
2526 // TODO: What about hosts that lack a C99 library?
2527 return ConstantFoldFP(NativeFP: log10, V: APF, Ty);
2528 case Intrinsic::exp:
2529 return ConstantFoldFP(NativeFP: exp, V: APF, Ty);
2530 case Intrinsic::exp2:
2531 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2532 return ConstantFoldBinaryFP(NativeFP: pow, V: APFloat(2.0), W: APF, Ty);
2533 case Intrinsic::exp10:
2534 // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
2535 return ConstantFoldBinaryFP(NativeFP: pow, V: APFloat(10.0), W: APF, Ty);
2536 case Intrinsic::sin:
2537 return ConstantFoldFP(NativeFP: sin, V: APF, Ty);
2538 case Intrinsic::cos:
2539 return ConstantFoldFP(NativeFP: cos, V: APF, Ty);
2540 case Intrinsic::sinh:
2541 return ConstantFoldFP(NativeFP: sinh, V: APF, Ty);
2542 case Intrinsic::cosh:
2543 return ConstantFoldFP(NativeFP: cosh, V: APF, Ty);
2544 case Intrinsic::atan:
2545 // Implement optional behavior from C's Annex F for +/-0.0.
2546 if (U.isZero())
2547 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2548 return ConstantFoldFP(NativeFP: atan, V: APF, Ty);
2549 case Intrinsic::sqrt:
2550 return ConstantFoldFP(NativeFP: sqrt, V: APF, Ty);
2551 case Intrinsic::amdgcn_cos:
2552 case Intrinsic::amdgcn_sin: {
2553 double V = getValueAsDouble(Op);
2554 if (V < -256.0 || V > 256.0)
2555 // The gfx8 and gfx9 architectures handle arguments outside the range
2556 // [-256, 256] differently. This should be a rare case so bail out
2557 // rather than trying to handle the difference.
2558 return nullptr;
2559 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2560 double V4 = V * 4.0;
2561 if (V4 == floor(x: V4)) {
2562 // Force exact results for quarter-integer inputs.
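// The argument is in units of full rotations (it is multiplied by 2*pi
// below), so e.g. amdgcn.sin(0.25) folds to 1.0 and amdgcn.cos(0.25)
// folds to 0.0.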
2563 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2564 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2565 } else {
2566 if (IsCos)
2567 V = cos(x: V * 2.0 * numbers::pi);
2568 else
2569 V = sin(x: V * 2.0 * numbers::pi);
2570 }
2571 return GetConstantFoldFPValue(V, Ty);
2572 }
2573 }
2574
2575 if (!TLI)
2576 return nullptr;
2577
2578 LibFunc Func = NotLibFunc;
2579 if (!TLI->getLibFunc(funcName: Name, F&: Func))
2580 return nullptr;
2581
2582 switch (Func) {
2583 default:
2584 break;
2585 case LibFunc_acos:
2586 case LibFunc_acosf:
2587 case LibFunc_acos_finite:
2588 case LibFunc_acosf_finite:
2589 if (TLI->has(F: Func))
2590 return ConstantFoldFP(NativeFP: acos, V: APF, Ty);
2591 break;
2592 case LibFunc_asin:
2593 case LibFunc_asinf:
2594 case LibFunc_asin_finite:
2595 case LibFunc_asinf_finite:
2596 if (TLI->has(F: Func))
2597 return ConstantFoldFP(NativeFP: asin, V: APF, Ty);
2598 break;
2599 case LibFunc_atan:
2600 case LibFunc_atanf:
2601 // Implement optional behavior from C's Annex F for +/-0.0.
2602 if (U.isZero())
2603 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2604 if (TLI->has(F: Func))
2605 return ConstantFoldFP(NativeFP: atan, V: APF, Ty);
2606 break;
2607 case LibFunc_ceil:
2608 case LibFunc_ceilf:
2609 if (TLI->has(F: Func)) {
2610 U.roundToIntegral(RM: APFloat::rmTowardPositive);
2611 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2612 }
2613 break;
2614 case LibFunc_cos:
2615 case LibFunc_cosf:
2616 if (TLI->has(F: Func))
2617 return ConstantFoldFP(NativeFP: cos, V: APF, Ty);
2618 break;
2619 case LibFunc_cosh:
2620 case LibFunc_coshf:
2621 case LibFunc_cosh_finite:
2622 case LibFunc_coshf_finite:
2623 if (TLI->has(F: Func))
2624 return ConstantFoldFP(NativeFP: cosh, V: APF, Ty);
2625 break;
2626 case LibFunc_exp:
2627 case LibFunc_expf:
2628 case LibFunc_exp_finite:
2629 case LibFunc_expf_finite:
2630 if (TLI->has(F: Func))
2631 return ConstantFoldFP(NativeFP: exp, V: APF, Ty);
2632 break;
2633 case LibFunc_exp2:
2634 case LibFunc_exp2f:
2635 case LibFunc_exp2_finite:
2636 case LibFunc_exp2f_finite:
2637 if (TLI->has(F: Func))
2638 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2639 return ConstantFoldBinaryFP(NativeFP: pow, V: APFloat(2.0), W: APF, Ty);
2640 break;
2641 case LibFunc_fabs:
2642 case LibFunc_fabsf:
2643 if (TLI->has(F: Func)) {
2644 U.clearSign();
2645 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2646 }
2647 break;
2648 case LibFunc_floor:
2649 case LibFunc_floorf:
2650 if (TLI->has(F: Func)) {
2651 U.roundToIntegral(RM: APFloat::rmTowardNegative);
2652 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2653 }
2654 break;
2655 case LibFunc_log:
2656 case LibFunc_logf:
2657 case LibFunc_log_finite:
2658 case LibFunc_logf_finite:
2659 if (!APF.isNegative() && !APF.isZero() && TLI->has(F: Func))
2660 return ConstantFoldFP(NativeFP: log, V: APF, Ty);
2661 break;
2662 case LibFunc_log2:
2663 case LibFunc_log2f:
2664 case LibFunc_log2_finite:
2665 case LibFunc_log2f_finite:
2666 if (!APF.isNegative() && !APF.isZero() && TLI->has(F: Func))
2667 // TODO: What about hosts that lack a C99 library?
2668 return ConstantFoldFP(NativeFP: log2, V: APF, Ty);
2669 break;
2670 case LibFunc_log10:
2671 case LibFunc_log10f:
2672 case LibFunc_log10_finite:
2673 case LibFunc_log10f_finite:
2674 if (!APF.isNegative() && !APF.isZero() && TLI->has(F: Func))
2675 // TODO: What about hosts that lack a C99 library?
2676 return ConstantFoldFP(NativeFP: log10, V: APF, Ty);
2677 break;
2678 case LibFunc_ilogb:
2679 case LibFunc_ilogbf:
2680 if (!APF.isZero() && TLI->has(F: Func))
2681 return ConstantInt::get(Ty, V: ilogb(Arg: APF), IsSigned: true);
2682 break;
2683 case LibFunc_logb:
2684 case LibFunc_logbf:
2685 if (!APF.isZero() && TLI->has(F: Func))
2686 return ConstantFoldFP(NativeFP: logb, V: APF, Ty);
2687 break;
2688 case LibFunc_log1p:
2689 case LibFunc_log1pf:
2690 // Implement optional behavior from C's Annex F for +/-0.0.
2691 if (U.isZero())
2692 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2693 if (APF > APFloat::getOne(Sem: APF.getSemantics(), Negative: true) && TLI->has(F: Func))
2694 return ConstantFoldFP(NativeFP: log1p, V: APF, Ty);
2695 break;
2696 case LibFunc_logl:
2697 return nullptr;
2698 case LibFunc_erf:
2699 case LibFunc_erff:
2700 if (TLI->has(F: Func))
2701 return ConstantFoldFP(NativeFP: erf, V: APF, Ty);
2702 break;
2703 case LibFunc_nearbyint:
2704 case LibFunc_nearbyintf:
2705 case LibFunc_rint:
2706 case LibFunc_rintf:
2707 if (TLI->has(F: Func)) {
2708 U.roundToIntegral(RM: APFloat::rmNearestTiesToEven);
2709 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2710 }
2711 break;
2712 case LibFunc_round:
2713 case LibFunc_roundf:
2714 if (TLI->has(F: Func)) {
2715 U.roundToIntegral(RM: APFloat::rmNearestTiesToAway);
2716 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2717 }
2718 break;
2719 case LibFunc_sin:
2720 case LibFunc_sinf:
2721 if (TLI->has(F: Func))
2722 return ConstantFoldFP(NativeFP: sin, V: APF, Ty);
2723 break;
2724 case LibFunc_sinh:
2725 case LibFunc_sinhf:
2726 case LibFunc_sinh_finite:
2727 case LibFunc_sinhf_finite:
2728 if (TLI->has(F: Func))
2729 return ConstantFoldFP(NativeFP: sinh, V: APF, Ty);
2730 break;
2731 case LibFunc_sqrt:
2732 case LibFunc_sqrtf:
2733 if (!APF.isNegative() && TLI->has(F: Func))
2734 return ConstantFoldFP(NativeFP: sqrt, V: APF, Ty);
2735 break;
2736 case LibFunc_tan:
2737 case LibFunc_tanf:
2738 if (TLI->has(F: Func))
2739 return ConstantFoldFP(NativeFP: tan, V: APF, Ty);
2740 break;
2741 case LibFunc_tanh:
2742 case LibFunc_tanhf:
2743 if (TLI->has(F: Func))
2744 return ConstantFoldFP(NativeFP: tanh, V: APF, Ty);
2745 break;
2746 case LibFunc_trunc:
2747 case LibFunc_truncf:
2748 if (TLI->has(F: Func)) {
2749 U.roundToIntegral(RM: APFloat::rmTowardZero);
2750 return ConstantFP::get(Context&: Ty->getContext(), V: U);
2751 }
2752 break;
2753 }
2754 return nullptr;
2755 }
2756
2757 if (auto *Op = dyn_cast<ConstantInt>(Val: Operands[0])) {
2758 switch (IntrinsicID) {
2759 case Intrinsic::bswap:
2760 return ConstantInt::get(Context&: Ty->getContext(), V: Op->getValue().byteSwap());
2761 case Intrinsic::ctpop:
2762 return ConstantInt::get(Ty, V: Op->getValue().popcount());
2763 case Intrinsic::bitreverse:
2764 return ConstantInt::get(Context&: Ty->getContext(), V: Op->getValue().reverseBits());
2765 case Intrinsic::convert_from_fp16: {
2766 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2767
2768 bool lost = false;
2769 APFloat::opStatus status = Val.convert(
2770 ToSemantics: Ty->getFltSemantics(), RM: APFloat::rmNearestTiesToEven, losesInfo: &lost);
2771
2772 // Conversion is always precise.
2773 (void)status;
2774 assert(status != APFloat::opInexact && !lost &&
2775 "Precision lost during fp16 constfolding");
2776
2777 return ConstantFP::get(Context&: Ty->getContext(), V: Val);
2778 }
2779
2780 case Intrinsic::amdgcn_s_wqm: {
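// s_wqm computes a whole-quad-mode mask: each 4-bit group of the result is
// all ones if any bit of the corresponding input group is set, and zero
// otherwise (e.g. 0x12 -> 0xff).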
2781 uint64_t Val = Op->getZExtValue();
2782 Val |= (Val & 0x5555555555555555ULL) << 1 |
2783 ((Val >> 1) & 0x5555555555555555ULL);
2784 Val |= (Val & 0x3333333333333333ULL) << 2 |
2785 ((Val >> 2) & 0x3333333333333333ULL);
2786 return ConstantInt::get(Ty, V: Val);
2787 }
2788
2789 case Intrinsic::amdgcn_s_quadmask: {
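// s_quadmask produces one bit per 4-bit group of the input: result bit I is
// set iff any of input bits [4*I, 4*I+3] is set (e.g. 0x00F0 -> 0x2).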
2790 uint64_t Val = Op->getZExtValue();
2791 uint64_t QuadMask = 0;
2792 for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
2793 if (!(Val & 0xF))
2794 continue;
2795
2796 QuadMask |= (1ULL << I);
2797 }
2798 return ConstantInt::get(Ty, V: QuadMask);
2799 }
2800
2801 case Intrinsic::amdgcn_s_bitreplicate: {
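// s_bitreplicate widens a 32-bit mask to 64 bits by duplicating each bit:
// input bit I produces result bits 2*I and 2*I+1 (e.g. 0b101 -> 0b110011).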
2802 uint64_t Val = Op->getZExtValue();
2803 Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
2804 Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
2805 Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
2806 Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
2807 Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
2808 Val = Val | Val << 1;
2809 return ConstantInt::get(Ty, V: Val);
2810 }
2811
2812 default:
2813 return nullptr;
2814 }
2815 }
2816
2817 switch (IntrinsicID) {
2818 default: break;
2819 case Intrinsic::vector_reduce_add:
2820 case Intrinsic::vector_reduce_mul:
2821 case Intrinsic::vector_reduce_and:
2822 case Intrinsic::vector_reduce_or:
2823 case Intrinsic::vector_reduce_xor:
2824 case Intrinsic::vector_reduce_smin:
2825 case Intrinsic::vector_reduce_smax:
2826 case Intrinsic::vector_reduce_umin:
2827 case Intrinsic::vector_reduce_umax:
2828 if (Constant *C = constantFoldVectorReduce(IID: IntrinsicID, Op: Operands[0]))
2829 return C;
2830 break;
2831 }
2832
2833 // Support ConstantVector in case we have an undef element in the vector.
2834 if (isa<ConstantVector>(Val: Operands[0]) ||
2835 isa<ConstantDataVector>(Val: Operands[0])) {
2836 auto *Op = cast<Constant>(Val: Operands[0]);
2837 switch (IntrinsicID) {
2838 default: break;
2839 case Intrinsic::x86_sse_cvtss2si:
2840 case Intrinsic::x86_sse_cvtss2si64:
2841 case Intrinsic::x86_sse2_cvtsd2si:
2842 case Intrinsic::x86_sse2_cvtsd2si64:
2843 if (ConstantFP *FPOp =
2844 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
2845 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
2846 /*roundTowardZero=*/false, Ty,
2847 /*IsSigned*/true);
2848 break;
2849 case Intrinsic::x86_sse_cvttss2si:
2850 case Intrinsic::x86_sse_cvttss2si64:
2851 case Intrinsic::x86_sse2_cvttsd2si:
2852 case Intrinsic::x86_sse2_cvttsd2si64:
2853 if (ConstantFP *FPOp =
2854 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
2855 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
2856 /*roundTowardZero=*/true, Ty,
2857 /*IsSigned*/true);
2858 break;
2859 }
2860 }
2861
2862 return nullptr;
2863}
2864
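/// Fold a constrained floating-point compare. The signaling form
/// (llvm.experimental.constrained.fcmps) raises the invalid-operation
/// exception for any NaN operand, while the quiet form only does so for
/// signaling NaNs; mayFoldConstrained then decides whether folding with the
/// resulting status is permitted.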
2865static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2866 const ConstrainedFPIntrinsic *Call) {
2867 APFloat::opStatus St = APFloat::opOK;
2868 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Val: Call);
2869 FCmpInst::Predicate Cond = FCmp->getPredicate();
2870 if (FCmp->isSignaling()) {
2871 if (Op1.isNaN() || Op2.isNaN())
2872 St = APFloat::opInvalidOp;
2873 } else {
2874 if (Op1.isSignaling() || Op2.isSignaling())
2875 St = APFloat::opInvalidOp;
2876 }
2877 bool Result = FCmpInst::compare(LHS: Op1, RHS: Op2, Pred: Cond);
2878 if (mayFoldConstrained(CI: const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2879 return ConstantInt::get(Ty: Call->getType()->getScalarType(), V: Result);
2880 return nullptr;
2881}
2882
2883static Constant *ConstantFoldLibCall2(StringRef Name, Type *Ty,
2884 ArrayRef<Constant *> Operands,
2885 const TargetLibraryInfo *TLI) {
2886 if (!TLI)
2887 return nullptr;
2888
2889 LibFunc Func = NotLibFunc;
2890 if (!TLI->getLibFunc(funcName: Name, F&: Func))
2891 return nullptr;
2892
2893 const auto *Op1 = dyn_cast<ConstantFP>(Val: Operands[0]);
2894 if (!Op1)
2895 return nullptr;
2896
2897 const auto *Op2 = dyn_cast<ConstantFP>(Val: Operands[1]);
2898 if (!Op2)
2899 return nullptr;
2900
2901 const APFloat &Op1V = Op1->getValueAPF();
2902 const APFloat &Op2V = Op2->getValueAPF();
2903
2904 switch (Func) {
2905 default:
2906 break;
2907 case LibFunc_pow:
2908 case LibFunc_powf:
2909 case LibFunc_pow_finite:
2910 case LibFunc_powf_finite:
2911 if (TLI->has(F: Func))
2912 return ConstantFoldBinaryFP(NativeFP: pow, V: Op1V, W: Op2V, Ty);
2913 break;
2914 case LibFunc_fmod:
2915 case LibFunc_fmodf:
2916 if (TLI->has(F: Func)) {
2917 APFloat V = Op1->getValueAPF();
2918 if (APFloat::opStatus::opOK == V.mod(RHS: Op2->getValueAPF()))
2919 return ConstantFP::get(Context&: Ty->getContext(), V);
2920 }
2921 break;
2922 case LibFunc_remainder:
2923 case LibFunc_remainderf:
2924 if (TLI->has(F: Func)) {
2925 APFloat V = Op1->getValueAPF();
2926 if (APFloat::opStatus::opOK == V.remainder(RHS: Op2->getValueAPF()))
2927 return ConstantFP::get(Context&: Ty->getContext(), V);
2928 }
2929 break;
2930 case LibFunc_atan2:
2931 case LibFunc_atan2f:
2932 // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
2933 // implementations (Solaris), so we do not assume a known result for it.
2934 if (Op1V.isZero() && Op2V.isZero())
2935 return nullptr;
2936 [[fallthrough]];
2937 case LibFunc_atan2_finite:
2938 case LibFunc_atan2f_finite:
2939 if (TLI->has(F: Func))
2940 return ConstantFoldBinaryFP(NativeFP: atan2, V: Op1V, W: Op2V, Ty);
2941 break;
2942 }
2943
2944 return nullptr;
2945}
2946
2947static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty,
2948 ArrayRef<Constant *> Operands,
2949 const CallBase *Call) {
2950 assert(Operands.size() == 2 && "Wrong number of operands.");
2951
2952 if (Ty->isFloatingPointTy()) {
2953 // TODO: We should have undef handling for all of the FP intrinsics that
2954 // this function attempts to fold.
2955 bool IsOp0Undef = isa<UndefValue>(Val: Operands[0]);
2956 bool IsOp1Undef = isa<UndefValue>(Val: Operands[1]);
2957 switch (IntrinsicID) {
2958 case Intrinsic::maxnum:
2959 case Intrinsic::minnum:
2960 case Intrinsic::maximum:
2961 case Intrinsic::minimum:
2962 case Intrinsic::maximumnum:
2963 case Intrinsic::minimumnum:
2964 case Intrinsic::nvvm_fmax_d:
2965 case Intrinsic::nvvm_fmin_d:
2966 // If one argument is undef, return the other argument.
2967 if (IsOp0Undef)
2968 return Operands[1];
2969 if (IsOp1Undef)
2970 return Operands[0];
2971 break;
2972
2973 case Intrinsic::nvvm_fmax_f:
2974 case Intrinsic::nvvm_fmax_ftz_f:
2975 case Intrinsic::nvvm_fmax_ftz_nan_f:
2976 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
2977 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
2978 case Intrinsic::nvvm_fmax_nan_f:
2979 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
2980 case Intrinsic::nvvm_fmax_xorsign_abs_f:
2981
2982 case Intrinsic::nvvm_fmin_f:
2983 case Intrinsic::nvvm_fmin_ftz_f:
2984 case Intrinsic::nvvm_fmin_ftz_nan_f:
2985 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
2986 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
2987 case Intrinsic::nvvm_fmin_nan_f:
2988 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
2989 case Intrinsic::nvvm_fmin_xorsign_abs_f:
2990 // If one arg is undef, the other arg can be returned only if it is
2991 // constant, as we may need to flush it to sign-preserving zero or
2992 // canonicalize the NaN.
2993 if (!IsOp0Undef && !IsOp1Undef)
2994 break;
2995 if (auto *Op = dyn_cast<ConstantFP>(Val: Operands[IsOp0Undef ? 1 : 0])) {
2996 if (Op->isNaN()) {
2997 APInt NVCanonicalNaN(32, 0x7fffffff);
2998 return ConstantFP::get(
2999 Ty, V: APFloat(Ty->getFltSemantics(), NVCanonicalNaN));
3000 }
3001 if (nvvm::FMinFMaxShouldFTZ(IntrinsicID))
3002 return ConstantFP::get(Ty, V: FTZPreserveSign(V: Op->getValueAPF()));
3003 else
3004 return Op;
3005 }
3006 break;
3007 }
3008 }
3009
3010 if (const auto *Op1 = dyn_cast<ConstantFP>(Val: Operands[0])) {
3011 const APFloat &Op1V = Op1->getValueAPF();
3012
3013 if (const auto *Op2 = dyn_cast<ConstantFP>(Val: Operands[1])) {
3014 if (Op2->getType() != Op1->getType())
3015 return nullptr;
3016 const APFloat &Op2V = Op2->getValueAPF();
3017
3018 if (const auto *ConstrIntr =
3019 dyn_cast_if_present<ConstrainedFPIntrinsic>(Val: Call)) {
3020 RoundingMode RM = getEvaluationRoundingMode(CI: ConstrIntr);
3021 APFloat Res = Op1V;
3022 APFloat::opStatus St;
3023 switch (IntrinsicID) {
3024 default:
3025 return nullptr;
3026 case Intrinsic::experimental_constrained_fadd:
3027 St = Res.add(RHS: Op2V, RM);
3028 break;
3029 case Intrinsic::experimental_constrained_fsub:
3030 St = Res.subtract(RHS: Op2V, RM);
3031 break;
3032 case Intrinsic::experimental_constrained_fmul:
3033 St = Res.multiply(RHS: Op2V, RM);
3034 break;
3035 case Intrinsic::experimental_constrained_fdiv:
3036 St = Res.divide(RHS: Op2V, RM);
3037 break;
3038 case Intrinsic::experimental_constrained_frem:
3039 St = Res.mod(RHS: Op2V);
3040 break;
3041 case Intrinsic::experimental_constrained_fcmp:
3042 case Intrinsic::experimental_constrained_fcmps:
3043 return evaluateCompare(Op1: Op1V, Op2: Op2V, Call: ConstrIntr);
3044 }
3045 if (mayFoldConstrained(CI: const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
3046 St))
3047 return ConstantFP::get(Context&: Ty->getContext(), V: Res);
3048 return nullptr;
3049 }
3050
3051 switch (IntrinsicID) {
3052 default:
3053 break;
3054 case Intrinsic::copysign:
3055 return ConstantFP::get(Context&: Ty->getContext(), V: APFloat::copySign(Value: Op1V, Sign: Op2V));
3056 case Intrinsic::minnum:
3057 return ConstantFP::get(Context&: Ty->getContext(), V: minnum(A: Op1V, B: Op2V));
3058 case Intrinsic::maxnum:
3059 return ConstantFP::get(Context&: Ty->getContext(), V: maxnum(A: Op1V, B: Op2V));
3060 case Intrinsic::minimum:
3061 return ConstantFP::get(Context&: Ty->getContext(), V: minimum(A: Op1V, B: Op2V));
3062 case Intrinsic::maximum:
3063 return ConstantFP::get(Context&: Ty->getContext(), V: maximum(A: Op1V, B: Op2V));
3064 case Intrinsic::minimumnum:
3065 return ConstantFP::get(Context&: Ty->getContext(), V: minimumnum(A: Op1V, B: Op2V));
3066 case Intrinsic::maximumnum:
3067 return ConstantFP::get(Context&: Ty->getContext(), V: maximumnum(A: Op1V, B: Op2V));
3068
3069 case Intrinsic::nvvm_fmax_d:
3070 case Intrinsic::nvvm_fmax_f:
3071 case Intrinsic::nvvm_fmax_ftz_f:
3072 case Intrinsic::nvvm_fmax_ftz_nan_f:
3073 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3074 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3075 case Intrinsic::nvvm_fmax_nan_f:
3076 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3077 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3078
3079 case Intrinsic::nvvm_fmin_d:
3080 case Intrinsic::nvvm_fmin_f:
3081 case Intrinsic::nvvm_fmin_ftz_f:
3082 case Intrinsic::nvvm_fmin_ftz_nan_f:
3083 case Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f:
3084 case Intrinsic::nvvm_fmin_ftz_xorsign_abs_f:
3085 case Intrinsic::nvvm_fmin_nan_f:
3086 case Intrinsic::nvvm_fmin_nan_xorsign_abs_f:
3087 case Intrinsic::nvvm_fmin_xorsign_abs_f: {
3088
3089 bool ShouldCanonicalizeNaNs = !(IntrinsicID == Intrinsic::nvvm_fmax_d ||
3090 IntrinsicID == Intrinsic::nvvm_fmin_d);
3091 bool IsFTZ = nvvm::FMinFMaxShouldFTZ(IntrinsicID);
3092 bool IsNaNPropagating = nvvm::FMinFMaxPropagatesNaNs(IntrinsicID);
3093 bool IsXorSignAbs = nvvm::FMinFMaxIsXorSignAbs(IntrinsicID);
3094
3095 APFloat A = IsFTZ ? FTZPreserveSign(V: Op1V) : Op1V;
3096 APFloat B = IsFTZ ? FTZPreserveSign(V: Op2V) : Op2V;
3097
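 // For the xorsign_abs variants, the min/max is computed on |A| and |B|,
 // and the sign of the result is the XOR of the operands' signs (applied
 // below after the NaN handling).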
3098 bool XorSign = false;
3099 if (IsXorSignAbs) {
3100 XorSign = A.isNegative() ^ B.isNegative();
3101 A = abs(X: A);
3102 B = abs(X: B);
3103 }
3104
3105 bool IsFMax = false;
3106 switch (IntrinsicID) {
3107 case Intrinsic::nvvm_fmax_d:
3108 case Intrinsic::nvvm_fmax_f:
3109 case Intrinsic::nvvm_fmax_ftz_f:
3110 case Intrinsic::nvvm_fmax_ftz_nan_f:
3111 case Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f:
3112 case Intrinsic::nvvm_fmax_ftz_xorsign_abs_f:
3113 case Intrinsic::nvvm_fmax_nan_f:
3114 case Intrinsic::nvvm_fmax_nan_xorsign_abs_f:
3115 case Intrinsic::nvvm_fmax_xorsign_abs_f:
3116 IsFMax = true;
3117 break;
3118 }
3119 APFloat Res = IsFMax ? maximum(A, B) : minimum(A, B);
3120
3121 if (ShouldCanonicalizeNaNs) {
3122 APFloat NVCanonicalNaN(Res.getSemantics(), APInt(32, 0x7fffffff));
3123 if (A.isNaN() && B.isNaN())
3124 return ConstantFP::get(Ty, V: NVCanonicalNaN);
3125 else if (IsNaNPropagating && (A.isNaN() || B.isNaN()))
3126 return ConstantFP::get(Ty, V: NVCanonicalNaN);
3127 }
3128
3129 if (A.isNaN() && B.isNaN())
3130 return Operands[1];
3131 else if (A.isNaN())
3132 Res = B;
3133 else if (B.isNaN())
3134 Res = A;
3135
3136 if (IsXorSignAbs && XorSign != Res.isNegative())
3137 Res.changeSign();
3138
3139 return ConstantFP::get(Context&: Ty->getContext(), V: Res);
3140 }
3141 }
3142
3143 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
3144 return nullptr;
3145
3146 switch (IntrinsicID) {
3147 default:
3148 break;
3149 case Intrinsic::pow:
3150 return ConstantFoldBinaryFP(NativeFP: pow, V: Op1V, W: Op2V, Ty);
3151 case Intrinsic::amdgcn_fmul_legacy:
3152 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3153 // NaN or infinity, gives +0.0.
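 // For example, fmul_legacy(-0.0, NaN) folds to +0.0 here, unlike a
 // regular fmul, which would give NaN.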
3154 if (Op1V.isZero() || Op2V.isZero())
3155 return ConstantFP::getZero(Ty);
3156 return ConstantFP::get(Context&: Ty->getContext(), V: Op1V * Op2V);
3157 }
3158
3159 } else if (auto *Op2C = dyn_cast<ConstantInt>(Val: Operands[1])) {
3160 switch (IntrinsicID) {
3161 case Intrinsic::ldexp: {
3162 return ConstantFP::get(
3163 Context&: Ty->getContext(),
3164 V: scalbn(X: Op1V, Exp: Op2C->getSExtValue(), RM: APFloat::rmNearestTiesToEven));
3165 }
3166 case Intrinsic::is_fpclass: {
3167 FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
3168 bool Result =
3169 ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
3170 ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
3171 ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
3172 ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
3173 ((Mask & fcNegSubnormal) && Op1V.isDenormal() && Op1V.isNegative()) ||
3174 ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
3175 ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
3176 ((Mask & fcPosSubnormal) && Op1V.isDenormal() && !Op1V.isNegative()) ||
3177 ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
3178 ((Mask & fcPosInf) && Op1V.isPosInfinity());
3179 return ConstantInt::get(Ty, V: Result);
3180 }
3181 case Intrinsic::powi: {
3182 int Exp = static_cast<int>(Op2C->getSExtValue());
3183 switch (Ty->getTypeID()) {
3184 case Type::HalfTyID:
3185 case Type::FloatTyID: {
3186 APFloat Res(static_cast<float>(std::pow(x: Op1V.convertToFloat(), y: Exp)));
3187 if (Ty->isHalfTy()) {
3188 bool Unused;
3189 Res.convert(ToSemantics: APFloat::IEEEhalf(), RM: APFloat::rmNearestTiesToEven,
3190 losesInfo: &Unused);
3191 }
3192 return ConstantFP::get(Context&: Ty->getContext(), V: Res);
3193 }
3194 case Type::DoubleTyID:
3195 return ConstantFP::get(Ty, V: std::pow(x: Op1V.convertToDouble(), y: Exp));
3196 default:
3197 return nullptr;
3198 }
3199 }
3200 default:
3201 break;
3202 }
3203 }
3204 return nullptr;
3205 }
3206
3207 if (Operands[0]->getType()->isIntegerTy() &&
3208 Operands[1]->getType()->isIntegerTy()) {
3209 const APInt *C0, *C1;
3210 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3211 !getConstIntOrUndef(Op: Operands[1], C&: C1))
3212 return nullptr;
3213
3214 switch (IntrinsicID) {
3215 default: break;
3216 case Intrinsic::smax:
3217 case Intrinsic::smin:
3218 case Intrinsic::umax:
3219 case Intrinsic::umin:
3220 if (!C0 && !C1)
3221 return UndefValue::get(T: Ty);
3222 if (!C0 || !C1)
3223 return MinMaxIntrinsic::getSaturationPoint(ID: IntrinsicID, Ty);
3224 return ConstantInt::get(
3225 Ty, V: ICmpInst::compare(LHS: *C0, RHS: *C1,
3226 Pred: MinMaxIntrinsic::getPredicate(ID: IntrinsicID))
3227 ? *C0
3228 : *C1);
3229
3230 case Intrinsic::scmp:
3231 case Intrinsic::ucmp:
3232 if (!C0 || !C1)
3233 return ConstantInt::get(Ty, V: 0);
3234
3235 int Res;
3236 if (IntrinsicID == Intrinsic::scmp)
3237 Res = C0->sgt(RHS: *C1) ? 1 : C0->slt(RHS: *C1) ? -1 : 0;
3238 else
3239 Res = C0->ugt(RHS: *C1) ? 1 : C0->ult(RHS: *C1) ? -1 : 0;
3240 return ConstantInt::get(Ty, V: Res, /*IsSigned=*/true);
3241
3242 case Intrinsic::usub_with_overflow:
3243 case Intrinsic::ssub_with_overflow:
3244 // X - undef -> { 0, false }
3245 // undef - X -> { 0, false }
3246 if (!C0 || !C1)
3247 return Constant::getNullValue(Ty);
3248 [[fallthrough]];
3249 case Intrinsic::uadd_with_overflow:
3250 case Intrinsic::sadd_with_overflow:
3251 // X + undef -> { -1, false }
3252 // undef + X -> { -1, false }
3253 if (!C0 || !C1) {
3254 return ConstantStruct::get(
3255 T: cast<StructType>(Val: Ty),
3256 V: {Constant::getAllOnesValue(Ty: Ty->getStructElementType(N: 0)),
3257 Constant::getNullValue(Ty: Ty->getStructElementType(N: 1))});
3258 }
3259 [[fallthrough]];
3260 case Intrinsic::smul_with_overflow:
3261 case Intrinsic::umul_with_overflow: {
3262 // undef * X -> { 0, false }
3263 // X * undef -> { 0, false }
3264 if (!C0 || !C1)
3265 return Constant::getNullValue(Ty);
3266
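 // For example, sadd.with.overflow(i8 127, i8 1) folds to { i8 -128, i1 true }.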
3267 APInt Res;
3268 bool Overflow;
3269 switch (IntrinsicID) {
3270 default: llvm_unreachable("Invalid case");
3271 case Intrinsic::sadd_with_overflow:
3272 Res = C0->sadd_ov(RHS: *C1, Overflow);
3273 break;
3274 case Intrinsic::uadd_with_overflow:
3275 Res = C0->uadd_ov(RHS: *C1, Overflow);
3276 break;
3277 case Intrinsic::ssub_with_overflow:
3278 Res = C0->ssub_ov(RHS: *C1, Overflow);
3279 break;
3280 case Intrinsic::usub_with_overflow:
3281 Res = C0->usub_ov(RHS: *C1, Overflow);
3282 break;
3283 case Intrinsic::smul_with_overflow:
3284 Res = C0->smul_ov(RHS: *C1, Overflow);
3285 break;
3286 case Intrinsic::umul_with_overflow:
3287 Res = C0->umul_ov(RHS: *C1, Overflow);
3288 break;
3289 }
3290 Constant *Ops[] = {
3291 ConstantInt::get(Context&: Ty->getContext(), V: Res),
3292 ConstantInt::get(Ty: Type::getInt1Ty(C&: Ty->getContext()), V: Overflow)
3293 };
3294 return ConstantStruct::get(T: cast<StructType>(Val: Ty), V: Ops);
3295 }
3296 case Intrinsic::uadd_sat:
3297 case Intrinsic::sadd_sat:
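 // For example, on i8: uadd.sat(200, 100) saturates to 255, and
 // sadd.sat(100, 100) saturates to 127.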
3298 if (!C0 && !C1)
3299 return UndefValue::get(T: Ty);
3300 if (!C0 || !C1)
3301 return Constant::getAllOnesValue(Ty);
3302 if (IntrinsicID == Intrinsic::uadd_sat)
3303 return ConstantInt::get(Ty, V: C0->uadd_sat(RHS: *C1));
3304 else
3305 return ConstantInt::get(Ty, V: C0->sadd_sat(RHS: *C1));
3306 case Intrinsic::usub_sat:
3307 case Intrinsic::ssub_sat:
3308 if (!C0 && !C1)
3309 return UndefValue::get(T: Ty);
3310 if (!C0 || !C1)
3311 return Constant::getNullValue(Ty);
3312 if (IntrinsicID == Intrinsic::usub_sat)
3313 return ConstantInt::get(Ty, V: C0->usub_sat(RHS: *C1));
3314 else
3315 return ConstantInt::get(Ty, V: C0->ssub_sat(RHS: *C1));
3316 case Intrinsic::cttz:
3317 case Intrinsic::ctlz:
3318 assert(C1 && "Must be constant int");
3319
3320 // cttz(0, 1) and ctlz(0, 1) are poison.
3321 if (C1->isOne() && (!C0 || C0->isZero()))
3322 return PoisonValue::get(T: Ty);
3323 if (!C0)
3324 return Constant::getNullValue(Ty);
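 // For example, cttz(i32 16, i1 0) folds to 4 and ctlz(i32 16, i1 0) folds to 27.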
3325 if (IntrinsicID == Intrinsic::cttz)
3326 return ConstantInt::get(Ty, V: C0->countr_zero());
3327 else
3328 return ConstantInt::get(Ty, V: C0->countl_zero());
3329
3330 case Intrinsic::abs:
3331 assert(C1 && "Must be constant int");
3332 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
3333
3334 // Undef or minimum signed value operand with poison min --> poison
3335 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
3336 return PoisonValue::get(T: Ty);
3337
3338 // Undef operand with no poison min --> 0 (sign bit must be clear)
3339 if (!C0)
3340 return Constant::getNullValue(Ty);
3341
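 // With a non-poison min, abs of the minimum signed value wraps,
 // e.g. abs(i8 -128, i1 0) folds to -128.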
3342 return ConstantInt::get(Ty, V: C0->abs());
3343 case Intrinsic::amdgcn_wave_reduce_umin:
3344 case Intrinsic::amdgcn_wave_reduce_umax:
3345 return dyn_cast<Constant>(Val: Operands[0]);
3346 }
3347
3348 return nullptr;
3349 }
3350
3351 // Support ConstantVector as well, since a vector containing an undef element is represented as a ConstantVector rather than a ConstantDataVector.
3352 if ((isa<ConstantVector>(Val: Operands[0]) ||
3353 isa<ConstantDataVector>(Val: Operands[0])) &&
3354 // Check for default rounding mode.
3355 // FIXME: Support other rounding modes?
3356 isa<ConstantInt>(Val: Operands[1]) &&
3357 cast<ConstantInt>(Val: Operands[1])->getValue() == 4) {
3358 auto *Op = cast<Constant>(Val: Operands[0]);
3359 switch (IntrinsicID) {
3360 default: break;
3361 case Intrinsic::x86_avx512_vcvtss2si32:
3362 case Intrinsic::x86_avx512_vcvtss2si64:
3363 case Intrinsic::x86_avx512_vcvtsd2si32:
3364 case Intrinsic::x86_avx512_vcvtsd2si64:
3365 if (ConstantFP *FPOp =
3366 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3367 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3368 /*roundTowardZero=*/false, Ty,
3369 /*IsSigned*/true);
3370 break;
3371 case Intrinsic::x86_avx512_vcvtss2usi32:
3372 case Intrinsic::x86_avx512_vcvtss2usi64:
3373 case Intrinsic::x86_avx512_vcvtsd2usi32:
3374 case Intrinsic::x86_avx512_vcvtsd2usi64:
3375 if (ConstantFP *FPOp =
3376 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3377 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3378 /*roundTowardZero=*/false, Ty,
3379 /*IsSigned*/false);
3380 break;
3381 case Intrinsic::x86_avx512_cvttss2si:
3382 case Intrinsic::x86_avx512_cvttss2si64:
3383 case Intrinsic::x86_avx512_cvttsd2si:
3384 case Intrinsic::x86_avx512_cvttsd2si64:
3385 if (ConstantFP *FPOp =
3386 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3387 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3388 /*roundTowardZero=*/true, Ty,
3389 /*IsSigned*/true);
3390 break;
3391 case Intrinsic::x86_avx512_cvttss2usi:
3392 case Intrinsic::x86_avx512_cvttss2usi64:
3393 case Intrinsic::x86_avx512_cvttsd2usi:
3394 case Intrinsic::x86_avx512_cvttsd2usi64:
3395 if (ConstantFP *FPOp =
3396 dyn_cast_or_null<ConstantFP>(Val: Op->getAggregateElement(Elt: 0U)))
3397 return ConstantFoldSSEConvertToInt(Val: FPOp->getValueAPF(),
3398 /*roundTowardZero=*/true, Ty,
3399 /*IsSigned*/false);
3400 break;
3401 }
3402 }
3403 return nullptr;
3404}
3405
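/// Shared helper for the amdgcn cube intrinsics. It picks the major axis (the
/// input with the largest magnitude), derives a face id (ID), the major-axis
/// value (MA), and the S/T face coordinates (SC/TC) from the inputs, and then
/// returns whichever piece the particular intrinsic asks for (with cubema
/// returning 2 * MA).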
3406static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
3407 const APFloat &S0,
3408 const APFloat &S1,
3409 const APFloat &S2) {
3410 unsigned ID;
3411 const fltSemantics &Sem = S0.getSemantics();
3412 APFloat MA(Sem), SC(Sem), TC(Sem);
3413 if (abs(X: S2) >= abs(X: S0) && abs(X: S2) >= abs(X: S1)) {
3414 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
3415 // S2 < 0
3416 ID = 5;
3417 SC = -S0;
3418 } else {
3419 ID = 4;
3420 SC = S0;
3421 }
3422 MA = S2;
3423 TC = -S1;
3424 } else if (abs(X: S1) >= abs(X: S0)) {
3425 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
3426 // S1 < 0
3427 ID = 3;
3428 TC = -S2;
3429 } else {
3430 ID = 2;
3431 TC = S2;
3432 }
3433 MA = S1;
3434 SC = S0;
3435 } else {
3436 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
3437 // S0 < 0
3438 ID = 1;
3439 SC = S2;
3440 } else {
3441 ID = 0;
3442 SC = -S2;
3443 }
3444 MA = S0;
3445 TC = -S1;
3446 }
3447 switch (IntrinsicID) {
3448 default:
3449 llvm_unreachable("unhandled amdgcn cube intrinsic");
3450 case Intrinsic::amdgcn_cubeid:
3451 return APFloat(Sem, ID);
3452 case Intrinsic::amdgcn_cubema:
3453 return MA + MA;
3454 case Intrinsic::amdgcn_cubesc:
3455 return SC;
3456 case Intrinsic::amdgcn_cubetc:
3457 return TC;
3458 }
3459}
3460
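/// Fold the amdgcn.perm intrinsic: each selector byte of C2 produces one
/// result byte, where selector values 0-7 pick a byte from one of the two
/// source words, 8-11 replicate a source sign bit, 12 yields 0x00, and values
/// of 13 or more yield 0xff.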
3461static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
3462 Type *Ty) {
3463 const APInt *C0, *C1, *C2;
3464 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3465 !getConstIntOrUndef(Op: Operands[1], C&: C1) ||
3466 !getConstIntOrUndef(Op: Operands[2], C&: C2))
3467 return nullptr;
3468
3469 if (!C2)
3470 return UndefValue::get(T: Ty);
3471
3472 APInt Val(32, 0);
3473 unsigned NumUndefBytes = 0;
3474 for (unsigned I = 0; I < 32; I += 8) {
3475 unsigned Sel = C2->extractBitsAsZExtValue(numBits: 8, bitPosition: I);
3476 unsigned B = 0;
3477
3478 if (Sel >= 13)
3479 B = 0xff;
3480 else if (Sel == 12)
3481 B = 0x00;
3482 else {
3483 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
3484 if (!Src)
3485 ++NumUndefBytes;
3486 else if (Sel < 8)
3487 B = Src->extractBitsAsZExtValue(numBits: 8, bitPosition: (Sel & 3) * 8);
3488 else
3489 B = Src->extractBitsAsZExtValue(numBits: 1, bitPosition: (Sel & 1) ? 31 : 15) * 0xff;
3490 }
3491
3492 Val.insertBits(SubBits: B, bitPosition: I, numBits: 8);
3493 }
3494
3495 if (NumUndefBytes == 4)
3496 return UndefValue::get(T: Ty);
3497
3498 return ConstantInt::get(Ty, V: Val);
3499}
3500
3501static Constant *ConstantFoldScalarCall3(StringRef Name,
3502 Intrinsic::ID IntrinsicID,
3503 Type *Ty,
3504 ArrayRef<Constant *> Operands,
3505 const TargetLibraryInfo *TLI,
3506 const CallBase *Call) {
3507 assert(Operands.size() == 3 && "Wrong number of operands.");
3508
3509 if (const auto *Op1 = dyn_cast<ConstantFP>(Val: Operands[0])) {
3510 if (const auto *Op2 = dyn_cast<ConstantFP>(Val: Operands[1])) {
3511 if (const auto *Op3 = dyn_cast<ConstantFP>(Val: Operands[2])) {
3512 const APFloat &C1 = Op1->getValueAPF();
3513 const APFloat &C2 = Op2->getValueAPF();
3514 const APFloat &C3 = Op3->getValueAPF();
3515
3516 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Val: Call)) {
3517 RoundingMode RM = getEvaluationRoundingMode(CI: ConstrIntr);
3518 APFloat Res = C1;
3519 APFloat::opStatus St;
3520 switch (IntrinsicID) {
3521 default:
3522 return nullptr;
3523 case Intrinsic::experimental_constrained_fma:
3524 case Intrinsic::experimental_constrained_fmuladd:
3525 St = Res.fusedMultiplyAdd(Multiplicand: C2, Addend: C3, RM);
3526 break;
3527 }
3528 if (mayFoldConstrained(
3529 CI: const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
3530 return ConstantFP::get(Context&: Ty->getContext(), V: Res);
3531 return nullptr;
3532 }
3533
3534 switch (IntrinsicID) {
3535 default: break;
3536 case Intrinsic::amdgcn_fma_legacy: {
3537 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3538 // NaN or infinity, gives +0.0.
3539 if (C1.isZero() || C2.isZero()) {
3540 // It's tempting to just return C3 here, but that would give the
3541 // wrong result if C3 was -0.0.
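 // Adding +0.0 keeps the correct sign: +0.0 + -0.0 is +0.0 in the
 // default rounding mode.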
3542 return ConstantFP::get(Context&: Ty->getContext(), V: APFloat(0.0f) + C3);
3543 }
3544 [[fallthrough]];
3545 }
3546 case Intrinsic::fma:
3547 case Intrinsic::fmuladd: {
3548 APFloat V = C1;
3549 V.fusedMultiplyAdd(Multiplicand: C2, Addend: C3, RM: APFloat::rmNearestTiesToEven);
3550 return ConstantFP::get(Context&: Ty->getContext(), V);
3551 }
3552 case Intrinsic::amdgcn_cubeid:
3553 case Intrinsic::amdgcn_cubema:
3554 case Intrinsic::amdgcn_cubesc:
3555 case Intrinsic::amdgcn_cubetc: {
3556 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, S0: C1, S1: C2, S2: C3);
3557 return ConstantFP::get(Context&: Ty->getContext(), V);
3558 }
3559 }
3560 }
3561 }
3562 }
3563
3564 if (IntrinsicID == Intrinsic::smul_fix ||
3565 IntrinsicID == Intrinsic::smul_fix_sat) {
3566 const APInt *C0, *C1;
3567 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3568 !getConstIntOrUndef(Op: Operands[1], C&: C1))
3569 return nullptr;
3570
3571 // undef * C -> 0
3572 // C * undef -> 0
3573 if (!C0 || !C1)
3574 return Constant::getNullValue(Ty);
3575
3576 // This code performs rounding towards negative infinity in case the result
3577 // cannot be represented exactly for the given scale. Targets that do care
3578 // about rounding should use a target hook for specifying how rounding
3579 // should be done, and provide their own folding to be consistent with
3580 // rounding. This is the same approach as used by
3581 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
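 // Worked example: with Scale = 1, smul.fix(i32 3, i32 3, 1) computes
 // (3 * 3) >> 1 == 4, i.e. the exact value 4.5 rounded toward negative
 // infinity.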
3582 unsigned Scale = cast<ConstantInt>(Val: Operands[2])->getZExtValue();
3583 unsigned Width = C0->getBitWidth();
3584 assert(Scale < Width && "Illegal scale.");
3585 unsigned ExtendedWidth = Width * 2;
3586 APInt Product =
3587 (C0->sext(width: ExtendedWidth) * C1->sext(width: ExtendedWidth)).ashr(ShiftAmt: Scale);
3588 if (IntrinsicID == Intrinsic::smul_fix_sat) {
3589 APInt Max = APInt::getSignedMaxValue(numBits: Width).sext(width: ExtendedWidth);
3590 APInt Min = APInt::getSignedMinValue(numBits: Width).sext(width: ExtendedWidth);
3591 Product = APIntOps::smin(A: Product, B: Max);
3592 Product = APIntOps::smax(A: Product, B: Min);
3593 }
3594 return ConstantInt::get(Context&: Ty->getContext(), V: Product.sextOrTrunc(width: Width));
3595 }
3596
3597 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
3598 const APInt *C0, *C1, *C2;
3599 if (!getConstIntOrUndef(Op: Operands[0], C&: C0) ||
3600 !getConstIntOrUndef(Op: Operands[1], C&: C1) ||
3601 !getConstIntOrUndef(Op: Operands[2], C&: C2))
3602 return nullptr;
3603
3604 bool IsRight = IntrinsicID == Intrinsic::fshr;
3605 if (!C2)
3606 return Operands[IsRight ? 1 : 0];
3607 if (!C0 && !C1)
3608 return UndefValue::get(T: Ty);
3609
3610 // The shift amount is interpreted modulo the bitwidth. If the shift
3611 // amount is effectively 0, avoid UB due to the oversized inverse shift below.
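 // For example, with i8 operands and C2 == 11, ShAmt is 11 % 8 == 3, so
 // fshl folds to (C0 << 3) | (C1 >> 5).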
3612 unsigned BitWidth = C2->getBitWidth();
3613 unsigned ShAmt = C2->urem(RHS: BitWidth);
3614 if (!ShAmt)
3615 return Operands[IsRight ? 1 : 0];
3616
3617 // (C0 << ShlAmt) | (C1 >> LshrAmt)
3618 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
3619 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
3620 if (!C0)
3621 return ConstantInt::get(Ty, V: C1->lshr(shiftAmt: LshrAmt));
3622 if (!C1)
3623 return ConstantInt::get(Ty, V: C0->shl(shiftAmt: ShlAmt));
3624 return ConstantInt::get(Ty, V: C0->shl(shiftAmt: ShlAmt) | C1->lshr(shiftAmt: LshrAmt));
3625 }
3626
3627 if (IntrinsicID == Intrinsic::amdgcn_perm)
3628 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
3629
3630 return nullptr;
3631}
3632
3633static Constant *ConstantFoldScalarCall(StringRef Name,
3634 Intrinsic::ID IntrinsicID,
3635 Type *Ty,
3636 ArrayRef<Constant *> Operands,
3637 const TargetLibraryInfo *TLI,
3638 const CallBase *Call) {
3639 if (IntrinsicID != Intrinsic::not_intrinsic &&
3640 any_of(Range&: Operands, P: IsaPred<PoisonValue>) &&
3641 intrinsicPropagatesPoison(IID: IntrinsicID))
3642 return PoisonValue::get(T: Ty);
3643
3644 if (Operands.size() == 1)
3645 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
3646
3647 if (Operands.size() == 2) {
3648 if (Constant *FoldedLibCall =
3649 ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
3650 return FoldedLibCall;
3651 }
3652 return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
3653 }
3654
3655 if (Operands.size() == 3)
3656 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
3657
3658 return nullptr;
3659}
3660
3661static Constant *ConstantFoldFixedVectorCall(
3662 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
3663 ArrayRef<Constant *> Operands, const DataLayout &DL,
3664 const TargetLibraryInfo *TLI, const CallBase *Call) {
3665 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
3666 SmallVector<Constant *, 4> Lane(Operands.size());
3667 Type *Ty = FVTy->getElementType();
3668
3669 switch (IntrinsicID) {
3670 case Intrinsic::masked_load: {
3671 auto *SrcPtr = Operands[0];
3672 auto *Mask = Operands[2];
3673 auto *Passthru = Operands[3];
3674
3675 Constant *VecData = ConstantFoldLoadFromConstPtr(C: SrcPtr, Ty: FVTy, DL);
3676
3677 SmallVector<Constant *, 32> NewElements;
3678 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3679 auto *MaskElt = Mask->getAggregateElement(Elt: I);
3680 if (!MaskElt)
3681 break;
3682 auto *PassthruElt = Passthru->getAggregateElement(Elt: I);
3683 auto *VecElt = VecData ? VecData->getAggregateElement(Elt: I) : nullptr;
3684 if (isa<UndefValue>(Val: MaskElt)) {
3685 if (PassthruElt)
3686 NewElements.push_back(Elt: PassthruElt);
3687 else if (VecElt)
3688 NewElements.push_back(Elt: VecElt);
3689 else
3690 return nullptr;
        continue;
3691 }
3692 if (MaskElt->isNullValue()) {
3693 if (!PassthruElt)
3694 return nullptr;
3695 NewElements.push_back(Elt: PassthruElt);
3696 } else if (MaskElt->isOneValue()) {
3697 if (!VecElt)
3698 return nullptr;
3699 NewElements.push_back(Elt: VecElt);
3700 } else {
3701 return nullptr;
3702 }
3703 }
3704 if (NewElements.size() != FVTy->getNumElements())
3705 return nullptr;
3706 return ConstantVector::get(V: NewElements);
3707 }
3708 case Intrinsic::arm_mve_vctp8:
3709 case Intrinsic::arm_mve_vctp16:
3710 case Intrinsic::arm_mve_vctp32:
3711 case Intrinsic::arm_mve_vctp64: {
3712 if (auto *Op = dyn_cast<ConstantInt>(Val: Operands[0])) {
3713 unsigned Lanes = FVTy->getNumElements();
3714 uint64_t Limit = Op->getZExtValue();
3715
3716 SmallVector<Constant *, 16> NCs;
3717 for (unsigned i = 0; i < Lanes; i++) {
3718 if (i < Limit)
3719 NCs.push_back(Elt: ConstantInt::getTrue(Ty));
3720 else
3721 NCs.push_back(Elt: ConstantInt::getFalse(Ty));
3722 }
3723 return ConstantVector::get(V: NCs);
3724 }
3725 return nullptr;
3726 }
3727 case Intrinsic::get_active_lane_mask: {
3728 auto *Op0 = dyn_cast<ConstantInt>(Val: Operands[0]);
3729 auto *Op1 = dyn_cast<ConstantInt>(Val: Operands[1]);
3730 if (Op0 && Op1) {
3731 unsigned Lanes = FVTy->getNumElements();
3732 uint64_t Base = Op0->getZExtValue();
3733 uint64_t Limit = Op1->getZExtValue();
3734
3735 SmallVector<Constant *, 16> NCs;
3736 for (unsigned i = 0; i < Lanes; i++) {
3737 if (Base + i < Limit)
3738 NCs.push_back(Elt: ConstantInt::getTrue(Ty));
3739 else
3740 NCs.push_back(Elt: ConstantInt::getFalse(Ty));
3741 }
3742 return ConstantVector::get(V: NCs);
3743 }
3744 return nullptr;
3745 }
3746 case Intrinsic::vector_extract: {
3747 auto *Idx = dyn_cast<ConstantInt>(Val: Operands[1]);
3748 Constant *Vec = Operands[0];
3749 if (!Idx || !isa<FixedVectorType>(Val: Vec->getType()))
3750 return nullptr;
3751
3752 unsigned NumElements = FVTy->getNumElements();
3753 unsigned VecNumElements =
3754 cast<FixedVectorType>(Val: Vec->getType())->getNumElements();
3755 unsigned StartingIndex = Idx->getZExtValue();
3756
3757 // Extracting the entire vector is a no-op.
3758 if (NumElements == VecNumElements && StartingIndex == 0)
3759 return Vec;
3760
3761 for (unsigned I = StartingIndex, E = StartingIndex + NumElements; I < E;
3762 ++I) {
3763 Constant *Elt = Vec->getAggregateElement(Elt: I);
3764 if (!Elt)
3765 return nullptr;
3766 Result[I - StartingIndex] = Elt;
3767 }
3768
3769 return ConstantVector::get(V: Result);
3770 }
3771 case Intrinsic::vector_insert: {
3772 Constant *Vec = Operands[0];
3773 Constant *SubVec = Operands[1];
3774 auto *Idx = dyn_cast<ConstantInt>(Val: Operands[2]);
3775 if (!Idx || !isa<FixedVectorType>(Val: Vec->getType()))
3776 return nullptr;
3777
3778 unsigned SubVecNumElements =
3779 cast<FixedVectorType>(Val: SubVec->getType())->getNumElements();
3780 unsigned VecNumElements =
3781 cast<FixedVectorType>(Val: Vec->getType())->getNumElements();
3782 unsigned IdxN = Idx->getZExtValue();
3783 // Replacing the entire vector with the subvector is a no-op.
3784 if (SubVecNumElements == VecNumElements && IdxN == 0)
3785 return SubVec;
3786
3787 for (unsigned I = 0; I < VecNumElements; ++I) {
3788 Constant *Elt;
3789 if (I < IdxN + SubVecNumElements)
3790 Elt = SubVec->getAggregateElement(Elt: I - IdxN);
3791 else
3792 Elt = Vec->getAggregateElement(Elt: I);
3793 if (!Elt)
3794 return nullptr;
3795 Result[I] = Elt;
3796 }
3797 return ConstantVector::get(V: Result);
3798 }
3799 case Intrinsic::vector_interleave2: {
3800 unsigned NumElements =
3801 cast<FixedVectorType>(Val: Operands[0]->getType())->getNumElements();
3802 for (unsigned I = 0; I < NumElements; ++I) {
3803 Constant *Elt0 = Operands[0]->getAggregateElement(Elt: I);
3804 Constant *Elt1 = Operands[1]->getAggregateElement(Elt: I);
3805 if (!Elt0 || !Elt1)
3806 return nullptr;
3807 Result[2 * I] = Elt0;
3808 Result[2 * I + 1] = Elt1;
3809 }
3810 return ConstantVector::get(V: Result);
3811 }
3812 default:
3813 break;
3814 }
3815
3816 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3817 // Gather a column of constants.
3818 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3819 // Some intrinsics use a scalar type for certain arguments.
3820 if (isVectorIntrinsicWithScalarOpAtArg(ID: IntrinsicID, ScalarOpdIdx: J, /*TTI=*/nullptr)) {
3821 Lane[J] = Operands[J];
3822 continue;
3823 }
3824
3825 Constant *Agg = Operands[J]->getAggregateElement(Elt: I);
3826 if (!Agg)
3827 return nullptr;
3828
3829 Lane[J] = Agg;
3830 }
3831
3832 // Use the regular scalar folding to simplify this column.
3833 Constant *Folded =
3834 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Operands: Lane, TLI, Call);
3835 if (!Folded)
3836 return nullptr;
3837 Result[I] = Folded;
3838 }
3839
3840 return ConstantVector::get(V: Result);
3841}
3842
3843static Constant *ConstantFoldScalableVectorCall(
3844 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3845 ArrayRef<Constant *> Operands, const DataLayout &DL,
3846 const TargetLibraryInfo *TLI, const CallBase *Call) {
3847 switch (IntrinsicID) {
3848 case Intrinsic::aarch64_sve_convert_from_svbool: {
3849 auto *Src = dyn_cast<Constant>(Val: Operands[0]);
3850 if (!Src || !Src->isNullValue())
3851 break;
3852
3853 return ConstantInt::getFalse(Ty: SVTy);
3854 }
3855 default:
3856 break;
3857 }
3858
3859 // If trivially vectorizable, try folding it via the scalar call if all
3860 // operands are splats.
3861
3862 // TODO: ConstantFoldFixedVectorCall should probably check this too?
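 // For example, a umax of two splat operands can be folded by folding the
 // scalar umax once and splatting the result, which is what the code below
 // does.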
3863 if (!isTriviallyVectorizable(ID: IntrinsicID))
3864 return nullptr;
3865
3866 SmallVector<Constant *, 4> SplatOps;
3867 for (auto [I, Op] : enumerate(First&: Operands)) {
3868 if (isVectorIntrinsicWithScalarOpAtArg(ID: IntrinsicID, ScalarOpdIdx: I, /*TTI=*/nullptr)) {
3869 SplatOps.push_back(Elt: Op);
3870 continue;
3871 }
3872 Constant *Splat = Op->getSplatValue();
3873 if (!Splat)
3874 return nullptr;
3875 SplatOps.push_back(Elt: Splat);
3876 }
3877 Constant *Folded = ConstantFoldScalarCall(
3878 Name, IntrinsicID, Ty: SVTy->getElementType(), Operands: SplatOps, TLI, Call);
3879 if (!Folded)
3880 return nullptr;
3881 return ConstantVector::getSplat(EC: SVTy->getElementCount(), Elt: Folded);
3882}
3883
3884static std::pair<Constant *, Constant *>
3885ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
3886 if (isa<PoisonValue>(Val: Op))
3887 return {Op, PoisonValue::get(T: IntTy)};
3888
3889 auto *ConstFP = dyn_cast<ConstantFP>(Val: Op);
3890 if (!ConstFP)
3891 return {};
3892
3893 const APFloat &U = ConstFP->getValueAPF();
3894 int FrexpExp;
3895 APFloat FrexpMant = frexp(X: U, Exp&: FrexpExp, RM: APFloat::rmNearestTiesToEven);
3896 Constant *Result0 = ConstantFP::get(Ty: ConstFP->getType(), V: FrexpMant);
3897
3898 // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
3899 // using undef.
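 // For example, frexp(+inf) folds to { +inf, i32 0 } here.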
3900 Constant *Result1 = FrexpMant.isFinite()
3901 ? ConstantInt::getSigned(Ty: IntTy, V: FrexpExp)
3902 : ConstantInt::getNullValue(Ty: IntTy);
3903 return {Result0, Result1};
3904}
3905
3906/// Handle intrinsics that return tuples, which may be tuples of vectors.
3907static Constant *
3908ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
3909 StructType *StTy, ArrayRef<Constant *> Operands,
3910 const DataLayout &DL, const TargetLibraryInfo *TLI,
3911 const CallBase *Call) {
3912
3913 switch (IntrinsicID) {
3914 case Intrinsic::frexp: {
3915 Type *Ty0 = StTy->getContainedType(i: 0);
3916 Type *Ty1 = StTy->getContainedType(i: 1)->getScalarType();
3917
3918 if (auto *FVTy0 = dyn_cast<FixedVectorType>(Val: Ty0)) {
3919 SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
3920 SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());
3921
3922 for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
3923 Constant *Lane = Operands[0]->getAggregateElement(Elt: I);
3924 std::tie(args&: Results0[I], args&: Results1[I]) =
3925 ConstantFoldScalarFrexpCall(Op: Lane, IntTy: Ty1);
3926 if (!Results0[I])
3927 return nullptr;
3928 }
3929
3930 return ConstantStruct::get(T: StTy, Vs: ConstantVector::get(V: Results0),
3931 Vs: ConstantVector::get(V: Results1));
3932 }
3933
3934 auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Op: Operands[0], IntTy: Ty1);
3935 if (!Result0)
3936 return nullptr;
3937 return ConstantStruct::get(T: StTy, Vs: Result0, Vs: Result1);
3938 }
3939 case Intrinsic::sincos: {
3940 Type *Ty = StTy->getContainedType(i: 0);
3941 Type *TyScalar = Ty->getScalarType();
3942
3943 auto ConstantFoldScalarSincosCall =
3944 [&](Constant *Op) -> std::pair<Constant *, Constant *> {
3945 Constant *SinResult =
3946 ConstantFoldScalarCall(Name, IntrinsicID: Intrinsic::sin, Ty: TyScalar, Operands: Op, TLI, Call);
3947 Constant *CosResult =
3948 ConstantFoldScalarCall(Name, IntrinsicID: Intrinsic::cos, Ty: TyScalar, Operands: Op, TLI, Call);
3949 return std::make_pair(x&: SinResult, y&: CosResult);
3950 };
3951
3952 if (auto *FVTy = dyn_cast<FixedVectorType>(Val: Ty)) {
3953 SmallVector<Constant *> SinResults(FVTy->getNumElements());
3954 SmallVector<Constant *> CosResults(FVTy->getNumElements());
3955
3956 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3957 Constant *Lane = Operands[0]->getAggregateElement(Elt: I);
3958 std::tie(args&: SinResults[I], args&: CosResults[I]) =
3959 ConstantFoldScalarSincosCall(Lane);
3960 if (!SinResults[I] || !CosResults[I])
3961 return nullptr;
3962 }
3963
3964 return ConstantStruct::get(T: StTy, Vs: ConstantVector::get(V: SinResults),
3965 Vs: ConstantVector::get(V: CosResults));
3966 }
3967
3968 auto [SinResult, CosResult] = ConstantFoldScalarSincosCall(Operands[0]);
3969 if (!SinResult || !CosResult)
3970 return nullptr;
3971 return ConstantStruct::get(T: StTy, Vs: SinResult, Vs: CosResult);
3972 }
3973 case Intrinsic::vector_deinterleave2: {
3974 auto *Vec = Operands[0];
3975 auto *VecTy = cast<VectorType>(Val: Vec->getType());
3976
3977 if (auto *EltC = Vec->getSplatValue()) {
3978 ElementCount HalfEC = VecTy->getElementCount().divideCoefficientBy(RHS: 2);
3979 auto *HalfVec = ConstantVector::getSplat(EC: HalfEC, Elt: EltC);
3980 return ConstantStruct::get(T: StTy, Vs: HalfVec, Vs: HalfVec);
3981 }
3982
3983 if (!isa<FixedVectorType>(Val: Vec->getType()))
3984 return nullptr;
3985
3986 unsigned NumElements = VecTy->getElementCount().getFixedValue() / 2;
3987 SmallVector<Constant *, 4> Res0(NumElements), Res1(NumElements);
3988 for (unsigned I = 0; I < NumElements; ++I) {
3989 Constant *Elt0 = Vec->getAggregateElement(Elt: 2 * I);
3990 Constant *Elt1 = Vec->getAggregateElement(Elt: 2 * I + 1);
3991 if (!Elt0 || !Elt1)
3992 return nullptr;
3993 Res0[I] = Elt0;
3994 Res1[I] = Elt1;
3995 }
3996 return ConstantStruct::get(T: StTy, Vs: ConstantVector::get(V: Res0),
3997 Vs: ConstantVector::get(V: Res1));
3998 }
3999 default:
4000 // TODO: Constant folding of vector intrinsics that fall through here does
4001 // not work (e.g. overflow intrinsics)
4002 return ConstantFoldScalarCall(Name, IntrinsicID, Ty: StTy, Operands, TLI, Call);
4003 }
4004
4005 return nullptr;
4006}
4007
4008} // end anonymous namespace
4009
4010Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
4011 Constant *RHS, Type *Ty,
4012 Instruction *FMFSource) {
4013 auto *Call = dyn_cast_if_present<CallBase>(Val: FMFSource);
4014 // Ensure we check flags like StrictFP that might prevent this from getting
4015 // folded before generating a result.
4016 if (Call && !canConstantFoldCallTo(Call, F: Call->getCalledFunction()))
4017 return nullptr;
4018 return ConstantFoldIntrinsicCall2(IntrinsicID: ID, Ty, Operands: {LHS, RHS}, Call);
4019}
4020
4021Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
4022 ArrayRef<Constant *> Operands,
4023 const TargetLibraryInfo *TLI,
4024 bool AllowNonDeterministic) {
4025 if (Call->isNoBuiltin())
4026 return nullptr;
4027 if (!F->hasName())
4028 return nullptr;
4029
4030 // If this is not an intrinsic and not recognized as a library call, bail out.
4031 Intrinsic::ID IID = F->getIntrinsicID();
4032 if (IID == Intrinsic::not_intrinsic) {
4033 if (!TLI)
4034 return nullptr;
4035 LibFunc LibF;
4036 if (!TLI->getLibFunc(FDecl: *F, F&: LibF))
4037 return nullptr;
4038 }
4039
4040 // Conservatively assume that floating-point libcalls may be
4041 // non-deterministic.
4042 Type *Ty = F->getReturnType();
4043 if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
4044 return nullptr;
4045
4046 StringRef Name = F->getName();
4047 if (auto *FVTy = dyn_cast<FixedVectorType>(Val: Ty))
4048 return ConstantFoldFixedVectorCall(
4049 Name, IntrinsicID: IID, FVTy, Operands, DL: F->getDataLayout(), TLI, Call);
4050
4051 if (auto *SVTy = dyn_cast<ScalableVectorType>(Val: Ty))
4052 return ConstantFoldScalableVectorCall(
4053 Name, IntrinsicID: IID, SVTy, Operands, DL: F->getDataLayout(), TLI, Call);
4054
4055 if (auto *StTy = dyn_cast<StructType>(Val: Ty))
4056 return ConstantFoldStructCall(Name, IntrinsicID: IID, StTy, Operands,
4057 DL: F->getDataLayout(), TLI, Call);
4058
4059 // TODO: If this is a library function, we already discovered that above,
4060 // so we should pass the LibFunc, not the name (and it might be better
4061 // still to separate intrinsic handling from libcalls).
4062 return ConstantFoldScalarCall(Name, IntrinsicID: IID, Ty, Operands, TLI, Call);
4063}
4064
4065bool llvm::isMathLibCallNoop(const CallBase *Call,
4066 const TargetLibraryInfo *TLI) {
4067 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
4068 // (and to some extent ConstantFoldScalarCall).
4069 if (Call->isNoBuiltin() || Call->isStrictFP())
4070 return false;
4071 Function *F = Call->getCalledFunction();
4072 if (!F)
4073 return false;
4074
4075 LibFunc Func;
4076 if (!TLI || !TLI->getLibFunc(FDecl: *F, F&: Func))
4077 return false;
4078
4079 if (Call->arg_size() == 1) {
4080 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Val: Call->getArgOperand(i: 0))) {
4081 const APFloat &Op = OpC->getValueAPF();
4082 switch (Func) {
4083 case LibFunc_logl:
4084 case LibFunc_log:
4085 case LibFunc_logf:
4086 case LibFunc_log2l:
4087 case LibFunc_log2:
4088 case LibFunc_log2f:
4089 case LibFunc_log10l:
4090 case LibFunc_log10:
4091 case LibFunc_log10f:
4092 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
4093
4094 case LibFunc_ilogb:
4095 return !Op.isNaN() && !Op.isZero() && !Op.isInfinity();
4096
4097 case LibFunc_expl:
4098 case LibFunc_exp:
4099 case LibFunc_expf:
4100 // FIXME: These boundaries are slightly conservative.
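 // (For reference, exp(709.0) is roughly 8.2e307, still below DBL_MAX,
 // while exp(710.0) overflows.)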
4101 if (OpC->getType()->isDoubleTy())
4102 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
4103 if (OpC->getType()->isFloatTy())
4104 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
4105 break;
4106
4107 case LibFunc_exp2l:
4108 case LibFunc_exp2:
4109 case LibFunc_exp2f:
4110 // FIXME: These boundaries are slightly conservative.
4111 if (OpC->getType()->isDoubleTy())
4112 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
4113 if (OpC->getType()->isFloatTy())
4114 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
4115 break;
4116
4117 case LibFunc_sinl:
4118 case LibFunc_sin:
4119 case LibFunc_sinf:
4120 case LibFunc_cosl:
4121 case LibFunc_cos:
4122 case LibFunc_cosf:
4123 return !Op.isInfinity();
4124
4125 case LibFunc_tanl:
4126 case LibFunc_tan:
4127 case LibFunc_tanf: {
4128 // FIXME: Stop using the host math library.
4129 // FIXME: The computation isn't done in the right precision.
4130 Type *Ty = OpC->getType();
4131 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
4132 return ConstantFoldFP(NativeFP: tan, V: OpC->getValueAPF(), Ty) != nullptr;
4133 break;
4134 }
4135
4136 case LibFunc_atan:
4137 case LibFunc_atanf:
4138 case LibFunc_atanl:
4139 // Per POSIX, this MAY fail if Op is denormal. We choose not to fail.
4140 return true;
4141
4142 case LibFunc_asinl:
4143 case LibFunc_asin:
4144 case LibFunc_asinf:
4145 case LibFunc_acosl:
4146 case LibFunc_acos:
4147 case LibFunc_acosf:
4148 return !(Op < APFloat::getOne(Sem: Op.getSemantics(), Negative: true) ||
4149 Op > APFloat::getOne(Sem: Op.getSemantics()));
4150
4151 case LibFunc_sinh:
4152 case LibFunc_cosh:
4153 case LibFunc_sinhf:
4154 case LibFunc_coshf:
4155 case LibFunc_sinhl:
4156 case LibFunc_coshl:
4157 // FIXME: These boundaries are slightly conservative.
4158 if (OpC->getType()->isDoubleTy())
4159 return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
4160 if (OpC->getType()->isFloatTy())
4161 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
4162 break;
4163
4164 case LibFunc_sqrtl:
4165 case LibFunc_sqrt:
4166 case LibFunc_sqrtf:
4167 return Op.isNaN() || Op.isZero() || !Op.isNegative();
4168
4169 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
4170 // maybe others?
4171 default:
4172 break;
4173 }
4174 }
4175 }
4176
4177 if (Call->arg_size() == 2) {
4178 ConstantFP *Op0C = dyn_cast<ConstantFP>(Val: Call->getArgOperand(i: 0));
4179 ConstantFP *Op1C = dyn_cast<ConstantFP>(Val: Call->getArgOperand(i: 1));
4180 if (Op0C && Op1C) {
4181 const APFloat &Op0 = Op0C->getValueAPF();
4182 const APFloat &Op1 = Op1C->getValueAPF();
4183
4184 switch (Func) {
4185 case LibFunc_powl:
4186 case LibFunc_pow:
4187 case LibFunc_powf: {
4188 // FIXME: Stop using the host math library.
4189 // FIXME: The computation isn't done in the right precision.
4190 Type *Ty = Op0C->getType();
4191 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
4192 if (Ty == Op1C->getType())
4193 return ConstantFoldBinaryFP(NativeFP: pow, V: Op0, W: Op1, Ty) != nullptr;
4194 }
4195 break;
4196 }
4197
4198 case LibFunc_fmodl:
4199 case LibFunc_fmod:
4200 case LibFunc_fmodf:
4201 case LibFunc_remainderl:
4202 case LibFunc_remainder:
4203 case LibFunc_remainderf:
4204 return Op0.isNaN() || Op1.isNaN() ||
4205 (!Op0.isInfinity() && !Op1.isZero());
4206
4207 case LibFunc_atan2:
4208 case LibFunc_atan2f:
4209 case LibFunc_atan2l:
4210 // Although IEEE-754 says the atan2(+/-0.0, +/-0.0) cases are well-defined,
4211 // and GLIBC and MSVC do not appear to raise an error for them, we
4212 // cannot rely on that behavior. POSIX and C11 say that a domain error
4213 // may occur, so allow for that possibility.
4214 return !Op0.isZero() || !Op1.isZero();
4215
4216 default:
4217 break;
4218 }
4219 }
4220 }
4221
4222 return false;
4223}
4224
4225void TargetFolder::anchor() {}
4226