//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/FloatingPointPredicateUtils.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OverflowInstAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantFPRange.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/KnownFPClass.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *simplifyCmpInst(CmpPredicate, Value *, Value *,
                              const SimplifyQuery &, unsigned);
static Value *simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>,
                              GEPNoWrapFlags, const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse);

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }

/// Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
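
// For example, because of the swapped-predicate check above, a query such as
// isSameCompare(%c, ICMP_SLT, %b, %a) returns true for:
//   %c = icmp sgt i32 %a, %b
// since "icmp sgt %a, %b" and "icmp slt %b, %a" are the same comparison.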

/// Simplify comparison with true or false branch of select:
/// %sel = select i1 %cond, i32 %tv, i32 %fv
/// %cmp = icmp sle i32 %sel, %rhs
/// Compose a new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                 Value *Cond, const SimplifyQuery &Q,
                                 unsigned MaxRecurse, Constant *TrueOrFalse) {
  Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select.
static Value *simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                     Value *Cond, const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select.
static Value *simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                      Value *Cond, const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = simplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}
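
// Illustrative example: for "icmp ult (select i1 %c, i32 0, i32 4), 4", the
// true arm gives "0 ult 4" == true and the false arm gives "4 ult 4" ==
// false, so the first case above folds the whole comparison to %c.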

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke
  // or a callbr, then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L =
      simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!L)
    return nullptr;
  Value *R =
      simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}
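
// Illustrative example: with Opcode == Mul and OpcodeToExpand == Add, this
// treats "(B0 + B1) * C" as "(B0 * C) + (B1 * C)". For "(X + 1) * 0" both
// products fold to 0 and their sum folds to 0, so the whole expression
// simplifies to 0.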

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
                                     Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B)
        return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B)
        return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A)
        return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C)
        return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
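
// Illustrative example: the commuted transform above folds
// "and (and i32 %x, %y), %x". With A == %x, B == %y and C == %x, "C op A"
// simplifies to %x, which equals A, so the expression simplifies back to
// "and i32 %x, %y".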

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
        !Simplified->hasPoisonGeneratingFlags()) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}
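
// Illustrative example: "and (select i1 %c, i32 %x, i32 0), 0" evaluates to
// "%x & 0" on the true arm and "0 & 0" on the false arm; both simplify to 0,
// so the TV == FV case above returns 0.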

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
/// %tmp = select i1 %cmp, i32 1, i32 2
/// %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose a new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS,
                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI
/// instruction, try to simplify the binop by seeing whether evaluating it on
/// the incoming phi values yields the same result for every value. If so
/// returns the common value, otherwise returns null.
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Use &Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
    Value *V = PI == LHS
                   ? simplifyBinOp(Opcode, Incoming, RHS,
                                   Q.getWithInstruction(InTI), MaxRecurse)
                   : simplifyBinOp(Opcode, LHS, Incoming,
                                   Q.getWithInstruction(InTI), MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
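
// Illustrative example (assuming %y is defined in a block that dominates the
// phi): "or (phi i32 [ %y, %bb0 ], [ 0, %bb1 ]), %y" evaluates to "%y | %y"
// and "0 | %y" on the two incoming edges, both of which simplify to %y.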

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where the incoming value is actually
    // "evaluated" even though it is used later somewhere else.
    Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the comparison failed to simplify, or simplified to a different value
    // than previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1)) {
      switch (Opcode) {
      default:
        break;
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        if (Q.CxtI != nullptr)
          return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
      }
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
    }

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}
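
// For example, "add i32 1, %x" is canonicalized here to "add i32 %x, 1", so
// the simplification routines below only need to match a constant on the RHS
// of a commutative operation.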

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // E.g., X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}
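
// Usage sketch (illustrative; assumes the caller has built a SimplifyQuery
// SQ, e.g. via getBestSimplifyQuery(), and is visiting an instruction I):
//   if (auto *Add = dyn_cast<BinaryOperator>(&I))
//     if (Value *V = simplifyAddInst(Add->getOperand(0), Add->getOperand(1),
//                                    Add->hasNoSignedWrap(),
//                                    Add->hasNoUnsignedWrap(), SQ))
//       Add->replaceAllUsesWith(V);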

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns zero if there are no constant offsets applied.
///
/// This is very similar to stripAndAccumulateConstantOffsets(), except it
/// normalizes the offset bitwidth to the stripped pointer type, not the
/// original pointer type.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
  V = V->stripAndAccumulateConstantOffsets(DL, Offset,
                                           /*AllowNonInbounds=*/true);
  // Because the stripping may look through `addrspacecast`, sext or trunc the
  // computed offset to the index width of the stripped pointer type.
  return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
  return Res;
}
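
// For example, with a common base pointer %p:
//   %a = getelementptr i8, ptr %p, i64 8
//   %x = ptrtoint ptr %a to i64
//   %y = ptrtoint ptr %p to i64
//   %d = sub i64 %x, %y        ; the difference folds to 8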

/// Test if there is a dominating equivalence condition for the
/// two operands. If there is, try to reduce the binary operation
/// between the two operands.
/// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // A recursive invocation cannot gain any benefit, so only try this at the
  // top level.
  if (MaxRecurse != RecursionLimit)
    return nullptr;

  std::optional<bool> Imp =
      isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
  if (Imp && *Imp) {
    Type *Ty = Op0->getType();
    switch (Opcode) {
    case Instruction::Sub:
    case Instruction::Xor:
    case Instruction::URem:
    case Instruction::SRem:
      return Constant::getNullValue(Ty);

    case Instruction::SDiv:
    case Instruction::UDiv:
      return ConstantInt::get(Ty, 1);

    case Instruction::And:
    case Instruction::Or:
      // Could be either one - choose Op1 since that's more likely a constant.
      return Op1;
    default:
      break;
    }
  }
  return nullptr;
}
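
// Illustrative example (C-like pseudocode): inside a block guarded by
// "if (a == b)", the dominating equality lets "a - b" fold to 0, "a ^ b"
// fold to 0, and "a / b" fold to 1.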

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (IsNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (IsNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToIntOrAddr(m_Value(X))) &&
      match(Op1, m_PtrToIntOrAddr(m_Value(Y)))) {
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
                                     Q.DL);
  }

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
    return V;

  // (sub nuw C_Mask, (xor X, C_Mask)) -> X
  if (IsNUW) {
    Value *X;
    if (match(Op1, m_Xor(m_Value(X), m_Specific(Op0))) &&
        match(Op0, m_LowBitMask()))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
       match(Op1,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other
/// value when we can prove a relationship between the operands.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // (X srem Y) sdiv Y --> 0
    if (match(X, m_SRem(m_Value(), m_Specific(Y))))
      return true;

    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended
    // to 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.

  // Is the unsigned dividend known to be less than a constant divisor?
  // TODO: Convert this (and above) to range analysis
  //       ("computeConstantRangeIncludingKnownBits")?
  const APInt *C;
  if (match(Y, m_APInt(C)) && computeKnownBits(X, Q).getMaxValue().ult(*C))
    return true;

  // Try again for any divisor:
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
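
// For example, "udiv i8 %x, 16" simplifies to 0 whenever the four high bits
// of %x are known zero (so %x is at most 15), and "urem i8 %x, 16" likewise
// simplifies to %x.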

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  KnownBits Known = computeKnownBits(Op1, Q);
  // X / 0 -> poison
  // X % 0 -> poison
  // If the divisor is known to be zero, just return poison. This can happen in
  // some cases where it is provable indirectly that the denominator is zero
  // but not trivially so (e.g., known zero through a phi node).
  if (Known.isZero())
    return PoisonValue::get(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If the divisor can only be zero or one, we can't have division-by-zero
  // or remainder-by-zero, so assume the divisor is 1 (e.g. a divisor of 1,
  // "zext i1 X", or "and Y, 1").
  if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  //   X * Y / Y -> X
  //   X * Y % Y -> 0
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;

  if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          bool IsExact, const SimplifyQuery &Q,
                          unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  const APInt *DivC;
  if (IsExact && match(Op1, m_APInt(DivC))) {
    // If this is an exact divide by a constant, then the dividend (Op0) must
    // have at least as many trailing zeros as the divisor to divide evenly. If
    // it has fewer trailing zeros, then the result must be poison.
    if (DivC->countr_zero()) {
      KnownBits KnownOp0 = computeKnownBits(Op0, Q);
      if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
        return PoisonValue::get(Op0->getType());
    }

    // udiv exact (mul nsw X, C), C --> X
    // sdiv exact (mul nuw X, C), C --> X
    // where C is not a power of 2.
    Value *X;
    if (!DivC->isPowerOf2() &&
        (Opcode == Instruction::UDiv
             ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
             : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
      return X;
  }

  return nullptr;
}
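
// For example, "udiv exact i8 %x, 4" requires the two low bits of %x to be
// zero to divide evenly; if %x is known to be odd, the division folds to
// poison.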

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo) {
    if ((Opcode == Instruction::SRem &&
         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
        (Opcode == Instruction::URem &&
         match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
      return Constant::getNullValue(Op0->getType());

    const APInt *C0;
    if (match(Op1, m_APInt(C0))) {
      // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
      // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
      if (Opcode == Instruction::SRem
              ? match(Op0,
                      m_NSWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.srem(*C0).isZero();
                               })))
              : match(Op0,
                      m_NUWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.urem(*C0).isZero();
                               }))))
        return Constant::getNullValue(Op0->getType());
    }
  }
  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // If the operands are negations of each other with no signed overflow,
  // return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is poison. This covers scalars and
  // fixed/scalable vectors with splat constants.
  const APInt *AmountC;
  if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
    return true;

  // Try harder for fixed-length vectors:
  // If all lanes of a vector shift are poison, the whole shift is poison.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}
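
// For example, "shl i8 %x, 8" always yields poison because the shift amount
// equals the bit width; the same holds for a <2 x i8> shift by a splat of 8.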

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // poison shift by X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isPoisonShift(Op1, Q))
    return PoisonValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift produces poison.
  KnownBits KnownAmt = computeKnownBits(Op1, Q);
  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
    return PoisonValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  // Check for nsw shl leading to a poison value.
  if (IsNSW) {
    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
    KnownBits KnownVal = computeKnownBits(Op0, Q);
    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);

    if (KnownVal.Zero.isSignBitSet())
      KnownShl.Zero.setSignBit();
    if (KnownVal.One.isSignBitSet())
      KnownShl.One.setSignBit();

    if (KnownShl.hasConflict())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can fold the result. If not,
/// this returns null.
static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool IsExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (Q.isUndefValue(Op0))
    return IsExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  // TODO: Generalize by counting trailing zeros (see fold for exact division).
  if (IsExact) {
    KnownBits Op0Known = computeKnownBits(Op0, Q);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
    return V;

  Type *Ty = Op0->getType();
  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (Q.isUndefValue(Op0))
    return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (IsNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
  // that the sign-bit does not change, so the only input that does not
  // produce poison is 0, and "0 << (bitwidth-1) --> 0".
  if (IsNSW && IsNUW &&
      match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
    return Constant::getNullValue(Ty);

  return nullptr;
}

Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}
1439
1440/// Given operands for an LShr, see if we can fold the result.
1441/// If not, this returns null.
1442static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1443 const SimplifyQuery &Q, unsigned MaxRecurse) {
1444 if (Value *V = simplifyRightShift(Opcode: Instruction::LShr, Op0, Op1, IsExact, Q,
1445 MaxRecurse))
1446 return V;
1447
1448 // (X << A) >> A -> X
1449 Value *X;
1450 if (Q.IIQ.UseInstrInfo && match(V: Op0, P: m_NUWShl(L: m_Value(V&: X), R: m_Specific(V: Op1))))
1451 return X;
1452
1453 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1454 // We can return X as we do in the above case since OR alters no bits in X.
1455 // SimplifyDemandedBits in InstCombine can do more general optimization for
1456 // bit manipulation. This pattern aims to provide opportunities for other
1457 // optimizers by supporting a simple but common case in InstSimplify.
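  // For example (i8): "((X << 4) | Y) >> 4 --> X" when Y is known to need at
  // most 4 bits: the nuw shl guarantees no bits of X were shifted out, and
  // the or cannot disturb the bits above Y's effective width.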
  Value *Y;
  const APInt *ShRAmt, *ShLAmt;
  if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
      match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
      *ShRAmt == *ShLAmt) {
    const KnownBits YKnown = computeKnownBits(Y, Q);
    const unsigned EffWidthY = YKnown.countMaxActiveBits();
    if (ShRAmt->uge(EffWidthY))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // -1 >>a X --> -1
  // (-1 << X) a>> X --> -1
  // We could return the original -1 constant to preserve poison elements.
  if (match(Op0, m_AllOnes()) ||
      match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q) {
  Value *X, *Y;

  CmpPredicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  CmpPredicate UnsignedPred;

  Value *A, *B;
  // Y = (A - B);
  if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
    if (match(UnsignedICmp,
              m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
        ICmpInst::isUnsigned(UnsignedPred)) {
      // A >=/<= B || (A - B) != 0 <--> true
      if ((UnsignedPred == ICmpInst::ICMP_UGE ||
           UnsignedPred == ICmpInst::ICMP_ULE) &&
          EqPred == ICmpInst::ICMP_NE && !IsAnd)
        return ConstantInt::getTrue(UnsignedICmp->getType());
      // A </> B && (A - B) == 0 <--> false
      if ((UnsignedPred == ICmpInst::ICMP_ULT ||
           UnsignedPred == ICmpInst::ICMP_UGT) &&
          EqPred == ICmpInst::ICMP_EQ && IsAnd)
        return ConstantInt::getFalse(UnsignedICmp->getType());

      // A </> B && (A - B) != 0 <--> A </> B
      // A </> B || (A - B) != 0 <--> (A - B) != 0
      if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
                                          UnsignedPred == ICmpInst::ICMP_UGT))
        return IsAnd ? UnsignedICmp : ZeroICmp;

      // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
      // A <=/>= B || (A - B) == 0 <--> A <=/>= B
      if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
                                          UnsignedPred == ICmpInst::ICMP_UGE))
        return IsAnd ? ZeroICmp : UnsignedICmp;
    }

    // Given Y = (A - B)
    // Y >= A && Y != 0 --> Y >= A iff B != 0
    // Y < A || Y == 0 --> Y < A iff B != 0
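    // (With B != 0, "Y == 0" would force A == B != 0; then "Y uge A" is
    // impossible and "Y ult A" holds automatically, so the zero-check is
    // redundant in both folds.)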
    if (match(UnsignedICmp,
              m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
      if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
          EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
        return UnsignedICmp;
      if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
          EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
        return UnsignedICmp;
    }
  }

  if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
      ICmpInst::isUnsigned(UnsignedPred))
    ;
  else if (match(UnsignedICmp,
                 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
           ICmpInst::isUnsigned(UnsignedPred))
    UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
  else
    return nullptr;

  // X > Y && Y == 0 --> Y == 0 iff X != 0
  // X > Y || Y == 0 --> X > Y iff X != 0
  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
      isKnownNonZero(X, Q))
    return IsAnd ? ZeroICmp : UnsignedICmp;

  // X <= Y && Y != 0 --> X <= Y iff X != 0
  // X <= Y || Y != 0 --> Y != 0 iff X != 0
  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
      isKnownNonZero(X, Q))
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // The transforms below here are expected to be handled more generally with
  // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
  // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
  // these are candidates for removal.

  // X < Y && Y != 0 --> X < Y
  // X < Y || Y != 0 --> Y != 0
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // X >= Y && Y == 0 --> Y == 0
  // X >= Y || Y == 0 --> X >= Y
  if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
    return IsAnd ? ZeroICmp : UnsignedICmp;

  // X < Y && Y == 0 --> false
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
      IsAnd)
    return getFalse(UnsignedICmp->getType());

  // X >= Y || Y != 0 --> true
  if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
      !IsAnd)
    return getTrue(UnsignedICmp->getType());

  return nullptr;
}

/// Test if a pair of compares with a shared operand and 2 constants has an
/// empty set intersection, full set union, or if one compare is a superset of
/// the other.
static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                                bool IsAnd) {
  // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
  if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
    return nullptr;

  const APInt *C0, *C1;
  if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
      !match(Cmp1->getOperand(1), m_APInt(C1)))
    return nullptr;

  auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
  auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);

  // For and-of-compares, check if the intersection is empty:
  // (icmp X, C0) && (icmp X, C1) --> empty set --> false
  if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
    return getFalse(Cmp0->getType());

  // For or-of-compares, check if the union is full:
  // (icmp X, C0) || (icmp X, C1) --> full set --> true
  if (!IsAnd && Range0.unionWith(Range1).isFullSet())
    return getTrue(Cmp0->getType());

  // Is one range a superset of the other?
  // If this is and-of-compares, take the smaller set:
  // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
  // If this is or-of-compares, take the larger set:
  // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
  if (Range0.contains(Range1))
    return IsAnd ? Cmp1 : Cmp0;
  if (Range1.contains(Range0))
    return IsAnd ? Cmp0 : Cmp1;

  return nullptr;
}

static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
                                        const InstrInfoQuery &IIQ) {
  // (icmp (add V, C0), C1) & (icmp V, C0)
  CmpPredicate Pred0, Pred1;
  const APInt *C0, *C1;
  Value *V;
  if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
    return nullptr;

  if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
    return nullptr;

  auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
  if (AddInst->getOperand(1) != Op1->getOperand(1))
    return nullptr;

  Type *ITy = Op0->getType();
  bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
  bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);

  const APInt Delta = *C1 - *C0;
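  // For example, with C0 == 1 and C1 == 3 (Delta == 2) on i8:
  // "(V + 1) <u 3" holds only for V in {-1, 0, 1}, while "V >s 1" requires
  // V >= 2, so the conjunction below folds to false.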
  if (C0->isStrictlyPositive()) {
    if (Delta == 2) {
      if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
        return getFalse(ITy);
      if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
        return getFalse(ITy);
    }
    if (Delta == 1) {
      if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
        return getFalse(ITy);
      if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
        return getFalse(ITy);
    }
  }
  if (C0->getBoolValue() && IsNUW) {
    if (Delta == 2)
      if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
        return getFalse(ITy);
    if (Delta == 1)
      if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
        return getFalse(ITy);
  }

  return nullptr;
}

/// Try to simplify and/or of icmp with ctpop intrinsic.
static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool IsAnd) {
  CmpPredicate Pred0, Pred1;
  Value *X;
  const APInt *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_APInt(C))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
    return nullptr;

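  // In both folds below, C > 0 means "ctpop(X) == C" implies "X != 0" (and,
  // equivalently, "X == 0" implies "ctpop(X) != C"), so the ctpop compare
  // adds nothing to the other check.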
  // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
    return Cmp1;
  // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
    return Cmp1;

  return nullptr;
}

static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
                                 const SimplifyQuery &Q) {
  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
    return X;
  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
    return X;
  if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
    return X;

  if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
    return X;
  if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
    return X;

  return nullptr;
}

static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
                                       const InstrInfoQuery &IIQ) {
  // (icmp (add V, C0), C1) | (icmp V, C0)
  CmpPredicate Pred0, Pred1;
  const APInt *C0, *C1;
  Value *V;
  if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
    return nullptr;

  if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
    return nullptr;

  auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
  if (AddInst->getOperand(1) != Op1->getOperand(1))
    return nullptr;

  Type *ITy = Op0->getType();
  bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
  bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);

  const APInt Delta = *C1 - *C0;
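  // For example, with C0 == 1 and C1 == 3 (Delta == 2) on i8:
  // "(V + 1) >=u 3" fails only for V in {-1, 0, 1}, and each of those
  // satisfies "V <=s 1", so the disjunction below folds to true.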
  if (C0->isStrictlyPositive()) {
    if (Delta == 2) {
      if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
        return getTrue(ITy);
      if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
        return getTrue(ITy);
    }
    if (Delta == 1) {
      if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
        return getTrue(ITy);
      if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
        return getTrue(ITy);
    }
  }
  if (C0->getBoolValue() && IsNUW) {
    if (Delta == 2)
      if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
        return getTrue(ITy);
    if (Delta == 1)
      if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
        return getTrue(ITy);
  }

  return nullptr;
}

static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
                                const SimplifyQuery &Q) {
  if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
    return X;
  if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
    return X;

  if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
    return X;
  if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
    return X;

  if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
    return X;
  if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
    return X;

  return nullptr;
}

/// Test if a pair of compares with a shared operand and 2 constants has an
/// empty set intersection, full set union, or if one compare is a superset of
/// the other.
static Value *simplifyAndOrOfFCmpsWithConstants(FCmpInst *Cmp0, FCmpInst *Cmp1,
                                                bool IsAnd) {
  // Look for this pattern: {and/or} (fcmp X, C0), (fcmp X, C1)).
  if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
    return nullptr;

  const APFloat *C0, *C1;
  if (!match(Cmp0->getOperand(1), m_APFloat(C0)) ||
      !match(Cmp1->getOperand(1), m_APFloat(C1)))
    return nullptr;

  auto Range0 = ConstantFPRange::makeExactFCmpRegion(
      IsAnd ? Cmp0->getPredicate() : Cmp0->getInversePredicate(), *C0);
  auto Range1 = ConstantFPRange::makeExactFCmpRegion(
      IsAnd ? Cmp1->getPredicate() : Cmp1->getInversePredicate(), *C1);

  if (!Range0 || !Range1)
    return nullptr;

  // For and-of-compares, check if the intersection is empty:
  // (fcmp X, C0) && (fcmp X, C1) --> empty set --> false
  if (Range0->intersectWith(*Range1).isEmptySet())
    return ConstantInt::getBool(Cmp0->getType(), !IsAnd);

  // Is one range a superset of the other?
  // If this is and-of-compares, take the smaller set:
  // (fcmp ogt X, 4) && (fcmp ogt X, 42) --> fcmp ogt X, 42
  // If this is or-of-compares, take the larger set:
  // (fcmp ogt X, 4) || (fcmp ogt X, 42) --> fcmp ogt X, 4
  if (Range0->contains(*Range1))
    return Cmp1;
  if (Range1->contains(*Range0))
    return Cmp0;

  return nullptr;
}

static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
                                   FCmpInst *RHS, bool IsAnd) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  if (LHS0->getType() != RHS0->getType())
    return nullptr;

  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  auto AbsOrSelfLHS0 = m_CombineOr(m_Specific(LHS0), m_FAbs(m_Specific(LHS0)));
  if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
      ((FCmpInst::isOrdered(PredR) && IsAnd) ||
       (FCmpInst::isUnordered(PredR) && !IsAnd))) {
    // (fcmp ord X, 0) & (fcmp o** X/abs(X), Y) --> fcmp o** X/abs(X), Y
    // (fcmp uno X, 0) & (fcmp o** X/abs(X), Y) --> false
    // (fcmp uno X, 0) | (fcmp u** X/abs(X), Y) --> fcmp u** X/abs(X), Y
    // (fcmp ord X, 0) | (fcmp u** X/abs(X), Y) --> true
    if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
        match(LHS1, m_PosZeroFP()))
      return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
                 ? static_cast<Value *>(RHS)
                 : ConstantInt::getBool(LHS->getType(), !IsAnd);
  }

  auto AbsOrSelfRHS0 = m_CombineOr(m_Specific(RHS0), m_FAbs(m_Specific(RHS0)));
  if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
      ((FCmpInst::isOrdered(PredL) && IsAnd) ||
       (FCmpInst::isUnordered(PredL) && !IsAnd))) {
    // (fcmp o** X/abs(X), Y) & (fcmp ord X, 0) --> fcmp o** X/abs(X), Y
    // (fcmp o** X/abs(X), Y) & (fcmp uno X, 0) --> false
    // (fcmp u** X/abs(X), Y) | (fcmp uno X, 0) --> fcmp u** X/abs(X), Y
    // (fcmp u** X/abs(X), Y) | (fcmp ord X, 0) --> true
    if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
        match(RHS1, m_PosZeroFP()))
      return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
                 ? static_cast<Value *>(LHS)
                 : ConstantInt::getBool(LHS->getType(), !IsAnd);
  }

  if (auto *V = simplifyAndOrOfFCmpsWithConstants(LHS, RHS, IsAnd))
    return V;

  return nullptr;
}

static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
                                  Value *Op1, bool IsAnd) {
  // Look through casts of the 'and' operands to find compares.
  auto *Cast0 = dyn_cast<CastInst>(Op0);
  auto *Cast1 = dyn_cast<CastInst>(Op1);
  if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
      Cast0->getSrcTy() == Cast1->getSrcTy()) {
    Op0 = Cast0->getOperand(0);
    Op1 = Cast1->getOperand(0);
  }

  Value *V = nullptr;
  auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
  auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
  if (ICmp0 && ICmp1)
    V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
              : simplifyOrOfICmps(ICmp0, ICmp1, Q);

  auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
  auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
  if (FCmp0 && FCmp1)
    V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);

  if (!V)
    return nullptr;
  if (!Cast0)
    return V;

  // If we looked through casts, we can only handle a constant simplification
  // because we are not allowed to create a cast instruction here.
  if (auto *C = dyn_cast<Constant>(V))
    return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
                                   Q.DL);

  return nullptr;
}

static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                     const SimplifyQuery &Q,
                                     bool AllowRefinement,
                                     SmallVectorImpl<Instruction *> *DropFlags,
                                     unsigned MaxRecurse);

static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Must be and/or");
  CmpPredicate Pred;
  Value *A, *B;
  if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
      !ICmpInst::isEquality(Pred))
    return nullptr;

  auto Simplify = [&](Value *Res) -> Value * {
    Constant *Absorber =
        ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());

    // and (icmp eq a, b), x implies (a==b) inside x.
    // or (icmp ne a, b), x implies (a==b) inside x.
    // If x simplifies to true/false, we can simplify the and/or.
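    // For example, "and (icmp eq %a, %b), (icmp ult %a, %b)" folds to false:
    // substituting %a == %b into the second compare yields the absorber.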
    if (Pred ==
        (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
      if (Res == Absorber)
        return Absorber;
      if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
        return Op0;
      return nullptr;
    }

    // If we have and (icmp ne a, b), x and for a==b we can simplify x to
    // false, then we can drop the icmp, as x will already be false in the
    // case where the icmp is false. Similar for or and true.
    if (Res == Absorber)
      return Op1;
    return nullptr;
  };

  // In the final case (Res == Absorber with inverted predicate), it is safe to
  // refine poison during simplification, but not undef. For simplicity always
  // disable undef-based folds here.
  if (Value *Res = simplifyWithOpReplaced(Op1, A, B, Q.getWithoutUndef(),
                                          /* AllowRefinement */ true,
                                          /* DropFlags */ nullptr, MaxRecurse))
    return Simplify(Res);
  if (Value *Res = simplifyWithOpReplaced(Op1, B, A, Q.getWithoutUndef(),
                                          /* AllowRefinement */ true,
                                          /* DropFlags */ nullptr, MaxRecurse))
    return Simplify(Res);

  return nullptr;
}

/// Given a bitwise logic op, check if the operands are add/sub with a common
/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
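/// (Derivation: ~Z == -Z - 1, so ~(X + ~C) == -X - ~C - 1 == C - X.)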
static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
                                    Instruction::BinaryOps Opcode) {
  assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
  assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
  Value *X;
  Constant *C1, *C2;
  if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
       match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
      (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
       match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
    if (ConstantExpr::getNot(C1) == C2) {
      // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
      // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
      // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
      Type *Ty = Op0->getType();
      return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
                                        : ConstantInt::getAllOnesValue(Ty);
    }
  }
  return nullptr;
}

// Commutative patterns for and that will be tried with both operand orders.
static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // ~A & A = 0
  if (match(Op0, m_Not(m_Specific(Op1))))
    return Constant::getNullValue(Op0->getType());

  // (A | ?) & A = A
  if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
    return Op1;

  // (X | ~Y) & (X | Y) --> X
  Value *X, *Y;
  if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
      match(Op1, m_c_Or(m_Specific(X), m_Specific(Y))))
    return X;

  // If we have a multiplication overflow check that is being 'and'ed with a
  // check that one of the multipliers is not zero, we can omit the 'and', and
  // only keep the overflow check.
  if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
    return Op1;

  // -A & A = A if A is a power of two or zero.
  if (match(Op0, m_Neg(m_Specific(Op1))) &&
      isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
    return Op1;

  // This is a similar pattern used for checking if a value is a power-of-2:
  // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
  if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
      isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
    return Constant::getNullValue(Op1->getType());

  // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
  // M <= N.
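  // For example, with x == 4 (i8), N == 2, M == 1:
  // (4 << 2) & ((4 << 1) - 1) == 16 & 7 == 0, because "x << N" has its single
  // set bit at or above the highest set bit of "(x << M) - 1".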
  const APInt *Shift1, *Shift2;
  if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
      match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
      isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI) &&
      Shift1->uge(*Shift2))
    return Constant::getNullValue(Op0->getType());

  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
    return C;

  // X & poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X & undef -> 0
  if (Q.isUndefValue(Op1))
    return Constant::getNullValue(Op0->getType());

  // X & X = X
  if (Op0 == Op1)
    return Op0;

  // X & 0 = 0
  if (match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X & -1 = X
  if (match(Op1, m_AllOnes()))
    return Op0;

  if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
    return Res;
  if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
    return Res;

  if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
    return V;

  // A mask that only clears known zeros of a shifted value is a no-op.
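  // For example, "and (shl i8 %x, 4), -16" is a no-op: the shl already
  // cleared the low 4 bits, which are the only bits the mask would clear.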
  const APInt *Mask;
  const APInt *ShAmt;
  Value *X, *Y;
  if (match(Op1, m_APInt(Mask))) {
    // If all bits in the inverted and shifted mask are clear:
    // and (shl X, ShAmt), Mask --> shl X, ShAmt
    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
        (~(*Mask)).lshr(*ShAmt).isZero())
      return Op0;

    // If all bits in the inverted and shifted mask are clear:
    // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
    if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
        (~(*Mask)).shl(*ShAmt).isZero())
      return Op0;
  }

  // and 2^x-1, 2^C --> 0 where x <= C.
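  // For example, with the shifted value known to be 8: (8 - 1) & 8 == 7 & 8
  // == 0; the mask 2^x - 1 has no bits at or above the single bit of 2^C
  // when x <= C.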
  const APInt *PowerC;
  Value *Shift;
  if (match(Op1, m_Power2(PowerC)) &&
      match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
      isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI,
                             Q.DT)) {
    KnownBits Known = computeKnownBits(Shift, Q);
    // Use getActiveBits() to make use of the additional power of two knowledge
    if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
      return ConstantInt::getNullValue(Op1->getType());
  }

  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
    return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
    return V;

  // And distributes over Or. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
                                        Instruction::Or, Q, MaxRecurse))
    return V;

  // And distributes over Xor. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
                                        Instruction::Xor, Q, MaxRecurse))
    return V;

  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
    if (Op0->getType()->isIntOrIntVectorTy(1)) {
      // A & (A && B) -> A && B
      if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
        return Op1;
      if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
        return Op0;
    }
    // If the operation is with the result of a select instruction, check
    // whether operating on either branch of the select always yields the same
    // value.
    if (Value *V =
            threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
      return V;
  }

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
      return V;

  // Assuming the effective width of Y is not larger than A, i.e. all bits
  // from X and Y are disjoint in (X << A) | Y,
  // if the mask of this AND op covers all bits of X or Y, while it covers
  // no bits from the other, we can bypass this AND op. E.g.,
  // ((X << A) | Y) & Mask -> Y,
  // if Mask = ((1 << effective_width_of(Y)) - 1)
  // ((X << A) | Y) & Mask -> X << A,
  // if Mask = ((1 << effective_width_of(X)) - 1) << A
  // SimplifyDemandedBits in InstCombine can optimize the general case.
  // This pattern aims to help other passes for a common case.
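  // For example (i8): "((X << 4) | Y) & 15 --> Y" when Y fits in 4 bits, and
  // "((X << 4) | Y) & -16 --> X << 4" when X also fits in 4 bits.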
  Value *XShifted;
  if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
      match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
                                     m_Value(XShifted)),
                        m_Value(Y)))) {
    const unsigned Width = Op0->getType()->getScalarSizeInBits();
    const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
    const KnownBits YKnown = computeKnownBits(Y, Q);
    const unsigned EffWidthY = YKnown.countMaxActiveBits();
    if (EffWidthY <= ShftCnt) {
      const KnownBits XKnown = computeKnownBits(X, Q);
      const unsigned EffWidthX = XKnown.countMaxActiveBits();
      const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
      const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
      // If the mask is extracting all bits from X or Y as is, we can skip
      // this AND op.
      if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
        return Y;
      if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
        return XShifted;
    }
  }

  // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
  // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
  BinaryOperator *Or;
  if (match(Op0, m_c_Xor(m_Value(X),
                         m_CombineAnd(m_BinOp(Or),
                                      m_c_Or(m_Deferred(X), m_Value(Y))))) &&
      match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
    return Constant::getNullValue(Op0->getType());

  const APInt *C1;
  Value *A;
  // (A ^ C) & (A ^ ~C) -> 0
  if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
      match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
    return Constant::getNullValue(Op0->getType());

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
      // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
      if (*Implied == true)
        return Op0;
      // If Op0 is true implies Op1 is false, then they are not true together.
      if (*Implied == false)
        return ConstantInt::getFalse(Op0->getType());
    }
    if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
      // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
      if (*Implied)
        return Op1;
      // If Op1 is true implies Op0 is false, then they are not true together.
      if (!*Implied)
        return ConstantInt::getFalse(Op1->getType());
    }
  }

  if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
}

// TODO: Many of these folds could use LogicalAnd/LogicalOr.
static Value *simplifyOrLogic(Value *X, Value *Y) {
  assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
  Type *Ty = X->getType();

  // X | ~X --> -1
  if (match(Y, m_Not(m_Specific(X))))
    return ConstantInt::getAllOnesValue(Ty);

  // X | ~(X & ?) = -1
  if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
    return ConstantInt::getAllOnesValue(Ty);

  // X | (X & ?) --> X
  if (match(Y, m_c_And(m_Specific(X), m_Value())))
    return X;

  Value *A, *B;

  // (A ^ B) | (A | B) --> A | B
  // (A ^ B) | (B | A) --> B | A
  if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
      match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
    return Y;

  // ~(A ^ B) | (A | B) --> -1
  // ~(A ^ B) | (B | A) --> -1
  if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
      match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
    return ConstantInt::getAllOnesValue(Ty);

  // (A & ~B) | (A ^ B) --> A ^ B
  // (~B & A) | (A ^ B) --> A ^ B
  // (A & ~B) | (B ^ A) --> B ^ A
  // (~B & A) | (B ^ A) --> B ^ A
  if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
    return Y;

  // (~A ^ B) | (A & B) --> ~A ^ B
  // (B ^ ~A) | (A & B) --> B ^ ~A
  // (~A ^ B) | (B & A) --> ~A ^ B
  // (B ^ ~A) | (B & A) --> B ^ ~A
  if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
      match(Y, m_c_And(m_Specific(A), m_Specific(B))))
    return X;

  // (~A | B) | (A ^ B) --> -1
  // (~A | B) | (B ^ A) --> -1
  // (B | ~A) | (A ^ B) --> -1
  // (B | ~A) | (B ^ A) --> -1
  if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
      match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
    return ConstantInt::getAllOnesValue(Ty);

  // (~A & B) | ~(A | B) --> ~A
  // (~A & B) | ~(B | A) --> ~A
  // (B & ~A) | ~(A | B) --> ~A
  // (B & ~A) | ~(B | A) --> ~A
  Value *NotA;
  if (match(X, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
                       m_Value(B))) &&
      match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
    return NotA;
  // The same is true of Logical And
  // TODO: This could share the logic of the version above if there was a
  // version of LogicalAnd that allowed more than just i1 types.
  if (match(X, m_c_LogicalAnd(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
                              m_Value(B))) &&
      match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
    return NotA;

  // ~(A ^ B) | (A & B) --> ~(A ^ B)
  // ~(A ^ B) | (B & A) --> ~(A ^ B)
  Value *NotAB;
  if (match(X, m_CombineAnd(m_Not(m_Xor(m_Value(A), m_Value(B))),
                            m_Value(NotAB))) &&
      match(Y, m_c_And(m_Specific(A), m_Specific(B))))
    return NotAB;

  // ~(A & B) | (A ^ B) --> ~(A & B)
  // ~(A & B) | (B ^ A) --> ~(A & B)
  if (match(X, m_CombineAnd(m_Not(m_And(m_Value(A), m_Value(B))),
                            m_Value(NotAB))) &&
      match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
    return NotAB;

  return nullptr;
}

/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
    return C;

  // X | poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X | undef -> -1
  // X | -1 = -1
  // Do not return Op1 because it may contain undef elements if it's a vector.
  if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
    return Constant::getAllOnesValue(Op0->getType());

  // X | X = X
  // X | 0 = X
  if (Op0 == Op1 || match(Op1, m_Zero()))
    return Op0;

  if (Value *R = simplifyOrLogic(Op0, Op1))
    return R;
  if (Value *R = simplifyOrLogic(Op1, Op0))
    return R;

  if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
    return V;

  // Rotated -1 is still -1:
  // (-1 << X) | (-1 >> (C - X)) --> -1
  // (-1 >> X) | (-1 << (C - X)) --> -1
  // ...with C <= bitwidth (and commuted variants).
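  // For example (i8): (-1 << 3) | (-1 >> 5) == 0xF8 | 0x07 == -1.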
  Value *X, *Y;
  if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
       match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
      (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
       match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
    const APInt *C;
    if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
         match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
        C->ule(X->getType()->getScalarSizeInBits())) {
      return ConstantInt::getAllOnesValue(X->getType());
    }
  }

  // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
  // are mixing in another shift that is redundant with the funnel shift.

  // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
  // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
  if (match(Op0,
            m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
      match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
    return Op0;
  if (match(Op1,
            m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
      match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
    return Op1;

  // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
  // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
  if (match(Op0,
            m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
      match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
    return Op0;
  if (match(Op1,
            m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
      match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
    return Op1;

  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
    return V;
  if (Value *V =
          simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
    return V;

  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
    return V;

  // If we have a multiplication overflow check that is being 'or'ed with a
  // check that one of the multipliers is not zero, we can omit the 'or', and
  // only keep the overflow check.
  if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
    return Op1;
  if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
    return Op0;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
    return V;

  // Or distributes over And. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
                                        Instruction::And, Q, MaxRecurse))
    return V;

  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
    if (Op0->getType()->isIntOrIntVectorTy(1)) {
      // A | (A || B) -> A || B
      if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
        return Op1;
      if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
        return Op0;
    }
    // If the operation is with the result of a select instruction, check
    // whether operating on either branch of the select always yields the same
    // value.
    if (Value *V =
            threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
      return V;
  }

  // (A & C1)|(B & C2)
  Value *A, *B;
  const APInt *C1, *C2;
  if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
      match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
    if (*C1 == ~*C2) {
      // (A & C1)|(B & C2)
      // If we have: ((V + N) & C1) | (V & C2)
      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
      // replace with V+N.
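      // For example (i8, C1 == 0xF0, C2 == 0x0F, N == 0x20):
      // ((V + 0x20) & 0xF0) | (V & 0x0F) == V + 0x20, because N has no bits
      // in C2, so the add can neither change nor carry out of V's low nibble.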
      Value *N;
      if (C2->isMask() && // C2 == 0+1+
          match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
        // Add commutes, try both ways.
        if (MaskedValueIsZero(N, *C2, Q))
          return A;
      }
      // Or commutes, try both ways.
      if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
        // Add commutes, try both ways.
        if (MaskedValueIsZero(N, *C1, Q))
          return B;
      }
    }
  }

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
      return V;

  // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
  if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
      match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
    return Constant::getAllOnesValue(Op0->getType());

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    if (std::optional<bool> Implied =
            isImpliedCondition(Op0, Op1, Q.DL, false)) {
      // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
      if (*Implied == false)
        return Op0;
      // If Op0 is false implies Op1 is true, then at least one is always true.
      if (*Implied == true)
        return ConstantInt::getTrue(Op0->getType());
    }
    if (std::optional<bool> Implied =
            isImpliedCondition(Op1, Op0, Q.DL, false)) {
      // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
      if (*Implied == false)
        return Op1;
      // If Op1 is false implies Op0 is true, then at least one is always true.
      if (*Implied == true)
        return ConstantInt::getTrue(Op1->getType());
    }
  }

  if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a Xor, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
    return C;

  // X ^ poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // A ^ undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // A ^ 0 = A
  if (match(Op1, m_Zero()))
    return Op0;

  // A ^ A = 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // A ^ ~A = ~A ^ A = -1
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Op0->getType());

  auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
    Value *A, *B;
    // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
    if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
        match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
      return A;

    // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
    // The 'not' op must contain a complete -1 operand (no undef elements for
    // vector) for the transform to be safe.
    Value *NotA;
    if (match(X, m_c_Or(m_CombineAnd(m_Not(m_Value(A)), m_Value(NotA)),
                        m_Value(B))) &&
        match(Y, m_c_And(m_Specific(A), m_Specific(B))))
      return NotA;

    return nullptr;
  };
  if (Value *R = foldAndOrNot(Op0, Op1))
    return R;
  if (Value *R = foldAndOrNot(Op1, Op0))
    return R;

  if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
    return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Xor over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A ^ select(cond, B, C)" means evaluating
  // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
    return V;

  // (xor (sub nuw C_Mask, X), C_Mask) -> X
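  // Subtracting X from a low-bit mask without unsigned wrap flips exactly
  // X's bits within the mask, so xor'ing with the mask again restores X.
  // For example (i8): ((7 - X) ^ 7) == X whenever X <=u 7.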
  {
    Value *X;
    if (match(Op0, m_NUWSub(m_Specific(Op1), m_Value(X))) &&
        match(Op1, m_LowBitMask()))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
}

static Type *getCompareTy(Value *Op) {
  return CmpInst::makeCmpResultType(Op->getType());
}

/// Rummage around inside V looking for something equivalent to the comparison
/// "LHS Pred RHS". Return such a value if found, otherwise return null.
/// Helper function for analyzing max/min idioms.
static Value *extractEquivalentCondition(Value *V, CmpPredicate Pred,
                                         Value *LHS, Value *RHS) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI)
    return nullptr;
  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
  if (!Cmp)
    return nullptr;
  Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
  if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
    return Cmp;
  if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
      LHS == CmpRHS && RHS == CmpLHS)
    return Cmp;
  return nullptr;
}

/// Return true if the underlying object (storage) must be disjoint from
/// storage returned by any noalias return call.
static bool isAllocDisjoint(const Value *V) {
  // For allocas, we consider only static ones (dynamic
  // allocas might be transformed into calls to malloc not simultaneously
  // live with the compared-to allocation). For globals, we exclude symbols
  // that might be resolved lazily to symbols in another dynamically-loaded
  // library (and, thus, could be malloc'ed by the implementation).
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
    return AI->isStaticAlloca();
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
            GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
           !GV->isThreadLocal();
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValAttr();
  return false;
}

/// Return true if V1 and V2 are each the base of some distinct storage region
/// [V, object_size(V)] which do not overlap. Note that zero sized regions
/// *are* possible, and that zero sized regions do not overlap with any other.
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
  // Global variables always exist, so they always exist during the lifetime
  // of each other and all allocas. Global variables themselves usually have
  // non-overlapping storage, but since their addresses are constants, the
  // case involving two globals does not reach here and is instead handled in
  // constant folding.
  //
  // Two different allocas usually have different addresses...
  //
  // However, if there's an @llvm.stackrestore dynamically in between two
  // allocas, they may have the same address. It's tempting to reduce the
  // scope of the problem by only looking at *static* allocas here. That would
  // cover the majority of allocas while significantly reducing the likelihood
  // of having an @llvm.stackrestore pop up in the middle. However, it's not
  // actually impossible for an @llvm.stackrestore to pop up in the middle of
  // an entry block. Also, if we have a block that's not attached to a
  // function, we can't tell if it's "static" under the current definition.
  // Theoretically, this problem could be fixed by creating a new kind of
  // instruction kind specifically for static allocas. Such a new instruction
  // could be required to be at the top of the entry block, thus preventing it
  // from being subject to a @llvm.stackrestore. Instcombine could even
  // convert regular allocas into these special allocas. It'd be nifty.
  // However, until then, this problem remains open.
  //
  // So, we'll assume that two non-empty allocas have different addresses
  // for now.
  auto isByValArg = [](const Value *V) {
    const Argument *A = dyn_cast<Argument>(V);
    return A && A->hasByValAttr();
  };

  // Byval args are backed by storage that does not overlap with each other,
  // with allocas, or with globals.
  if (isByValArg(V1))
    return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
  if (isByValArg(V2))
    return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);

  return isa<AllocaInst>(V1) &&
         (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
}

// A significant optimization not implemented here is assuming that alloca
// addresses are not equal to incoming argument values. They don't *alias*,
// as we say, but that doesn't mean they aren't equal, so we take a
// conservative approach.
//
// This is inspired in part by C++11 5.10p1:
//   "Two pointers of the same type compare equal if and only if they are both
//    null, both point to the same function, or both represent the same
//    address."
//
// This is pretty permissive.
//
// It's also partly due to C11 6.5.9p6:
//   "Two pointers compare equal if and only if both are null pointers, both
//    are pointers to the same object (including a pointer to an object and a
//    subobject at its beginning) or function, both are pointers to one past
//    the last element of the same array object, or one is a pointer to one
//    past the end of one array object and the other is a pointer to the start
//    of a different array object that happens to immediately follow the first
//    array object in the address space."
//
// C11's version is more restrictive; however, there's no reason why an
// argument couldn't be a one-past-the-end value for a stack object in the
// caller and be equal to the beginning of a stack object in the callee.
//
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and
// reinstate this optimization.
static Constant *computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS,
                                    const SimplifyQuery &Q) {
  assert(LHS->getType() == RHS->getType() && "Must have same types");
  const DataLayout &DL = Q.DL;
  const TargetLibraryInfo *TLI = Q.TLI;

  // We fold equality and unsigned predicates on pointer comparisons, but
  // forbid signed predicates since a GEP with inbounds could cross the sign
  // boundary.
  if (CmpInst::isSigned(Pred))
    return nullptr;

  // We have to switch to a signed predicate to handle negative indices from
  // the base pointer.
  Pred = ICmpInst::getSignedPredicate(Pred);

  // Strip off any constant offsets so that we can reason about them.
  // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
  // here and compare base addresses like AliasAnalysis does, however there are
  // numerous hazards. AliasAnalysis and its utilities rely on special rules
  // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
  // doesn't need to guarantee pointer inequality when it says NoAlias.

  // Even if a non-inbounds GEP occurs along the path we can still optimize
  // equality comparisons concerning the result.
  bool AllowNonInbounds = ICmpInst::isEquality(Pred);
  unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
  APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
  LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
  RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);

  // If LHS and RHS are related via constant offsets to the same base
  // value, we can replace it with an icmp which just compares the offsets.
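  // For example, "icmp eq (gep i8, ptr %P, i64 4), (gep i8, ptr %P, i64 8)"
  // strips both sides to the same base %P and folds to "4 == 8", i.e. false.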
2762 if (LHS == RHS)
2763 return ConstantInt::get(Ty: getCompareTy(Op: LHS),
2764 V: ICmpInst::compare(LHS: LHSOffset, RHS: RHSOffset, Pred));
2765
2766 // Various optimizations for (in)equality comparisons.
2767 if (ICmpInst::isEquality(P: Pred)) {
2768 // Different non-empty allocations that exist at the same time have
2769 // different addresses (if the program can tell). If the offsets are
2770 // within the bounds of their allocations (and not one-past-the-end!
2771 // so we can't use inbounds!), and their allocations aren't the same,
2772 // the pointers are not equal.
2773 if (haveNonOverlappingStorage(V1: LHS, V2: RHS)) {
2774 uint64_t LHSSize, RHSSize;
2775 ObjectSizeOpts Opts;
2776 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2777 auto *F = [](Value *V) -> Function * {
2778 if (auto *I = dyn_cast<Instruction>(Val: V))
2779 return I->getFunction();
2780 if (auto *A = dyn_cast<Argument>(Val: V))
2781 return A->getParent();
2782 return nullptr;
2783 }(LHS);
2784 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2785 if (getObjectSize(Ptr: LHS, Size&: LHSSize, DL, TLI, Opts) && LHSSize != 0 &&
2786 getObjectSize(Ptr: RHS, Size&: RHSSize, DL, TLI, Opts) && RHSSize != 0) {
2787 APInt Dist = LHSOffset - RHSOffset;
2788 if (Dist.isNonNegative() ? Dist.ult(RHS: LHSSize) : (-Dist).ult(RHS: RHSSize))
2789 return ConstantInt::get(Ty: getCompareTy(Op: LHS),
2790 V: !CmpInst::isTrueWhenEqual(predicate: Pred));
2791 }
2792 }
2793
2794 // If one side of the equality comparison must come from a noalias call
2795 // (meaning a system memory allocation function), and the other side must
2796 // come from a pointer that cannot overlap with dynamically-allocated
2797 // memory within the lifetime of the current function (allocas, byval
2798 // arguments, globals), then determine the comparison result here.
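    // For example (illustrative), a noalias call result cannot compare equal
    // to a stack allocation live in the same function:
    //   %m = call noalias ptr @malloc(i64 8)
    //   %a = alloca i64
    //   %c = icmp eq ptr %m, %a   ; folds to false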
2799 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2800 getUnderlyingObjects(V: LHS, Objects&: LHSUObjs);
2801 getUnderlyingObjects(V: RHS, Objects&: RHSUObjs);
2802
2803 // Is the set of underlying objects all noalias calls?
2804 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2805 return all_of(Range&: Objects, P: isNoAliasCall);
2806 };
2807
2808 // Is the set of underlying objects all things which must be disjoint from
2809 // noalias calls. We assume that indexing from such disjoint storage
2810 // into the heap is undefined, and thus offsets can be safely ignored.
2811 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2812 return all_of(Range&: Objects, P: ::isAllocDisjoint);
2813 };
2814
2815 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2816 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2817 return ConstantInt::get(Ty: getCompareTy(Op: LHS),
2818 V: !CmpInst::isTrueWhenEqual(predicate: Pred));
2819
    // Fold comparisons for a non-escaping pointer even if the allocation call
    // cannot be elided. We cannot fold a malloc comparison to null. Also, the
    // dynamic allocation call could be either of the operands. Note that
    // the other operand cannot be based on the alloc - if it were, then
    // the cmp itself would be a capture.
2825 Value *MI = nullptr;
2826 if (isAllocLikeFn(V: LHS, TLI) && llvm::isKnownNonZero(V: RHS, Q))
2827 MI = LHS;
2828 else if (isAllocLikeFn(V: RHS, TLI) && llvm::isKnownNonZero(V: LHS, Q))
2829 MI = RHS;
2830 if (MI) {
2831 // FIXME: This is incorrect, see PR54002. While we can assume that the
2832 // allocation is at an address that makes the comparison false, this
2833 // requires that *all* comparisons to that address be false, which
2834 // InstSimplify cannot guarantee.
2835 struct CustomCaptureTracker : public CaptureTracker {
2836 bool Captured = false;
2837 void tooManyUses() override { Captured = true; }
2838 Action captured(const Use *U, UseCaptureInfo CI) override {
2839 // TODO(captures): Use UseCaptureInfo.
2840 if (auto *ICmp = dyn_cast<ICmpInst>(Val: U->getUser())) {
          // Comparison against a value loaded from a global variable. Given
          // that the pointer does not escape, its value cannot have been
          // guessed and stored separately in a global variable.
2844 unsigned OtherIdx = 1 - U->getOperandNo();
2845 auto *LI = dyn_cast<LoadInst>(Val: ICmp->getOperand(i_nocapture: OtherIdx));
2846 if (LI && isa<GlobalVariable>(Val: LI->getPointerOperand()))
2847 return Continue;
2848 }
2849
2850 Captured = true;
2851 return Stop;
2852 }
2853 };
2854 CustomCaptureTracker Tracker;
2855 PointerMayBeCaptured(V: MI, Tracker: &Tracker);
2856 if (!Tracker.Captured)
2857 return ConstantInt::get(Ty: getCompareTy(Op: LHS),
2858 V: CmpInst::isFalseWhenEqual(predicate: Pred));
2859 }
2860 }
2861
2862 // Otherwise, fail.
2863 return nullptr;
2864}
2865
2866/// Fold an icmp when its operands have i1 scalar type.
2867static Value *simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS,
2868 const SimplifyQuery &Q) {
2869 Type *ITy = getCompareTy(Op: LHS); // The return type.
2870 Type *OpTy = LHS->getType(); // The operand type.
2871 if (!OpTy->isIntOrIntVectorTy(BitWidth: 1))
2872 return nullptr;
2873
2874 // A boolean compared to true/false can be reduced in 14 out of the 20
2875 // (10 predicates * 2 constants) possible combinations. The other
2876 // 6 cases require a 'not' of the LHS.
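  // For example (illustrative), "icmp ne i1 %x, false" folds directly to %x,
  // while "icmp eq i1 %x, false" only folds when %x is itself a 'not':
  //   %n = xor i1 %y, true
  //   %c = icmp eq i1 %n, false   ; folds to %y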
2877
2878 auto ExtractNotLHS = [](Value *V) -> Value * {
2879 Value *X;
2880 if (match(V, P: m_Not(V: m_Value(V&: X))))
2881 return X;
2882 return nullptr;
2883 };
2884
2885 if (match(V: RHS, P: m_Zero())) {
2886 switch (Pred) {
2887 case CmpInst::ICMP_NE: // X != 0 -> X
2888 case CmpInst::ICMP_UGT: // X >u 0 -> X
2889 case CmpInst::ICMP_SLT: // X <s 0 -> X
2890 return LHS;
2891
2892 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2893 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2894 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2895 if (Value *X = ExtractNotLHS(LHS))
2896 return X;
2897 break;
2898
2899 case CmpInst::ICMP_ULT: // X <u 0 -> false
2900 case CmpInst::ICMP_SGT: // X >s 0 -> false
2901 return getFalse(Ty: ITy);
2902
2903 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2904 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2905 return getTrue(Ty: ITy);
2906
2907 default:
2908 break;
2909 }
2910 } else if (match(V: RHS, P: m_One())) {
2911 switch (Pred) {
2912 case CmpInst::ICMP_EQ: // X == 1 -> X
2913 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2914 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2915 return LHS;
2916
2917 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
    case CmpInst::ICMP_ULT: // not(X) <u 1 -> X >=u 1 -> X
2919 case CmpInst::ICMP_SGT: // not(X) >s 1 -> X <=s -1 -> X
2920 if (Value *X = ExtractNotLHS(LHS))
2921 return X;
2922 break;
2923
2924 case CmpInst::ICMP_UGT: // X >u 1 -> false
2925 case CmpInst::ICMP_SLT: // X <s -1 -> false
2926 return getFalse(Ty: ITy);
2927
2928 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2929 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2930 return getTrue(Ty: ITy);
2931
2932 default:
2933 break;
2934 }
2935 }
2936
2937 switch (Pred) {
2938 default:
2939 break;
2940 case ICmpInst::ICMP_UGE:
2941 if (isImpliedCondition(LHS: RHS, RHS: LHS, DL: Q.DL).value_or(u: false))
2942 return getTrue(Ty: ITy);
2943 break;
2944 case ICmpInst::ICMP_SGE:
2945 /// For signed comparison, the values for an i1 are 0 and -1
2946 /// respectively. This maps into a truth table of:
2947 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2948 /// 0 | 0 | 1 (0 >= 0) | 1
2949 /// 0 | 1 | 1 (0 >= -1) | 1
2950 /// 1 | 0 | 0 (-1 >= 0) | 0
2951 /// 1 | 1 | 1 (-1 >= -1) | 1
2952 if (isImpliedCondition(LHS, RHS, DL: Q.DL).value_or(u: false))
2953 return getTrue(Ty: ITy);
2954 break;
2955 case ICmpInst::ICMP_ULE:
2956 if (isImpliedCondition(LHS, RHS, DL: Q.DL).value_or(u: false))
2957 return getTrue(Ty: ITy);
2958 break;
2959 case ICmpInst::ICMP_SLE:
2960 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2961 if (isImpliedCondition(LHS: RHS, RHS: LHS, DL: Q.DL).value_or(u: false))
2962 return getTrue(Ty: ITy);
2963 break;
2964 }
2965
2966 return nullptr;
2967}
2968
2969/// Try hard to fold icmp with zero RHS because this is a common case.
2970static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
2971 const SimplifyQuery &Q) {
2972 if (!match(V: RHS, P: m_Zero()))
2973 return nullptr;
2974
2975 Type *ITy = getCompareTy(Op: LHS); // The return type.
2976 switch (Pred) {
2977 default:
2978 llvm_unreachable("Unknown ICmp predicate!");
2979 case ICmpInst::ICMP_ULT:
2980 return getFalse(Ty: ITy);
2981 case ICmpInst::ICMP_UGE:
2982 return getTrue(Ty: ITy);
2983 case ICmpInst::ICMP_EQ:
2984 case ICmpInst::ICMP_ULE:
2985 if (isKnownNonZero(V: LHS, Q))
2986 return getFalse(Ty: ITy);
2987 break;
2988 case ICmpInst::ICMP_NE:
2989 case ICmpInst::ICMP_UGT:
2990 if (isKnownNonZero(V: LHS, Q))
2991 return getTrue(Ty: ITy);
2992 break;
2993 case ICmpInst::ICMP_SLT: {
2994 KnownBits LHSKnown = computeKnownBits(V: LHS, Q);
2995 if (LHSKnown.isNegative())
2996 return getTrue(Ty: ITy);
2997 if (LHSKnown.isNonNegative())
2998 return getFalse(Ty: ITy);
2999 break;
3000 }
3001 case ICmpInst::ICMP_SLE: {
3002 KnownBits LHSKnown = computeKnownBits(V: LHS, Q);
3003 if (LHSKnown.isNegative())
3004 return getTrue(Ty: ITy);
3005 if (LHSKnown.isNonNegative() && isKnownNonZero(V: LHS, Q))
3006 return getFalse(Ty: ITy);
3007 break;
3008 }
3009 case ICmpInst::ICMP_SGE: {
3010 KnownBits LHSKnown = computeKnownBits(V: LHS, Q);
3011 if (LHSKnown.isNegative())
3012 return getFalse(Ty: ITy);
3013 if (LHSKnown.isNonNegative())
3014 return getTrue(Ty: ITy);
3015 break;
3016 }
3017 case ICmpInst::ICMP_SGT: {
3018 KnownBits LHSKnown = computeKnownBits(V: LHS, Q);
3019 if (LHSKnown.isNegative())
3020 return getFalse(Ty: ITy);
3021 if (LHSKnown.isNonNegative() && isKnownNonZero(V: LHS, Q))
3022 return getTrue(Ty: ITy);
3023 break;
3024 }
3025 }
3026
3027 return nullptr;
3028}
3029
3030static Value *simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS,
3031 Value *RHS, const SimplifyQuery &Q) {
3032 Type *ITy = getCompareTy(Op: RHS); // The return type.
3033
3034 Value *X;
3035 const APInt *C;
3036 if (!match(V: RHS, P: m_APIntAllowPoison(Res&: C)))
3037 return nullptr;
3038
3039 // Sign-bit checks can be optimized to true/false after unsigned
3040 // floating-point casts:
3041 // icmp slt (bitcast (uitofp X)), 0 --> false
3042 // icmp sgt (bitcast (uitofp X)), -1 --> true
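  // For example (illustrative), the sign bit of a uitofp result is always 0:
  //   %f = uitofp i32 %x to float
  //   %i = bitcast float %f to i32
  //   %c = icmp slt i32 %i, 0   ; folds to false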
3043 if (match(V: LHS, P: m_ElementWiseBitCast(Op: m_UIToFP(Op: m_Value(V&: X))))) {
3044 bool TrueIfSigned;
3045 if (isSignBitCheck(Pred, RHS: *C, TrueIfSigned))
3046 return ConstantInt::getBool(Ty: ITy, V: !TrueIfSigned);
3047 }
3048
  // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3050 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, Other: *C);
3051 if (RHS_CR.isEmptySet())
3052 return ConstantInt::getFalse(Ty: ITy);
3053 if (RHS_CR.isFullSet())
3054 return ConstantInt::getTrue(Ty: ITy);
3055
3056 ConstantRange LHS_CR =
3057 computeConstantRange(V: LHS, ForSigned: CmpInst::isSigned(predicate: Pred), UseInstrInfo: Q.IIQ.UseInstrInfo);
3058 if (!LHS_CR.isFullSet()) {
3059 if (RHS_CR.contains(CR: LHS_CR))
3060 return ConstantInt::getTrue(Ty: ITy);
3061 if (RHS_CR.inverse().contains(CR: LHS_CR))
3062 return ConstantInt::getFalse(Ty: ITy);
3063 }
3064
3065 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3066 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
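  // For example (illustrative), "mul nuw i8 %x, 4" only produces multiples of
  // 4, so "icmp eq i8 (mul nuw i8 %x, 4), 9" folds to false.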
3067 const APInt *MulC;
3068 if (Q.IIQ.UseInstrInfo && ICmpInst::isEquality(P: Pred) &&
3069 ((match(V: LHS, P: m_NUWMul(L: m_Value(), R: m_APIntAllowPoison(Res&: MulC))) &&
3070 *MulC != 0 && C->urem(RHS: *MulC) != 0) ||
3071 (match(V: LHS, P: m_NSWMul(L: m_Value(), R: m_APIntAllowPoison(Res&: MulC))) &&
3072 *MulC != 0 && C->srem(RHS: *MulC) != 0)))
3073 return ConstantInt::get(Ty: ITy, V: Pred == ICmpInst::ICMP_NE);
3074
3075 if (Pred == ICmpInst::ICMP_UGE && C->isOne() && isKnownNonZero(V: LHS, Q))
3076 return ConstantInt::getTrue(Ty: ITy);
3077
3078 return nullptr;
3079}
3080
3081enum class MonotonicType { GreaterEq, LowerEq };
3082
3083/// Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
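/// For example (illustrative), for V = (or %a, %b) with GreaterEq, both %a and
/// %b are collected, since (or %a, %b) uge %a and (or %a, %b) uge %b.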
3084static void getUnsignedMonotonicValues(SmallPtrSetImpl<Value *> &Res, Value *V,
3085 MonotonicType Type,
3086 const SimplifyQuery &Q,
3087 unsigned Depth = 0) {
3088 if (!Res.insert(Ptr: V).second)
3089 return;
3090
3091 // Can be increased if useful.
3092 if (++Depth > 1)
3093 return;
3094
3095 auto *I = dyn_cast<Instruction>(Val: V);
3096 if (!I)
3097 return;
3098
3099 Value *X, *Y;
3100 if (Type == MonotonicType::GreaterEq) {
3101 if (match(V: I, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) ||
3102 match(V: I, P: m_Intrinsic<Intrinsic::uadd_sat>(Op0: m_Value(V&: X), Op1: m_Value(V&: Y)))) {
3103 getUnsignedMonotonicValues(Res, V: X, Type, Q, Depth);
3104 getUnsignedMonotonicValues(Res, V: Y, Type, Q, Depth);
3105 }
    // X *nuw Y is uge X when Y is non-zero (and uge Y when X is non-zero).
3107 if (match(V: I, P: m_NUWMul(L: m_Value(V&: X), R: m_Value(V&: Y)))) {
3108 if (isKnownNonZero(V: X, Q))
3109 getUnsignedMonotonicValues(Res, V: Y, Type, Q, Depth);
3110 if (isKnownNonZero(V: Y, Q))
3111 getUnsignedMonotonicValues(Res, V: X, Type, Q, Depth);
3112 }
3113 } else {
3114 assert(Type == MonotonicType::LowerEq);
3115 switch (I->getOpcode()) {
3116 case Instruction::And:
3117 getUnsignedMonotonicValues(Res, V: I->getOperand(i: 0), Type, Q, Depth);
3118 getUnsignedMonotonicValues(Res, V: I->getOperand(i: 1), Type, Q, Depth);
3119 break;
3120 case Instruction::URem:
3121 case Instruction::UDiv:
3122 case Instruction::LShr:
3123 getUnsignedMonotonicValues(Res, V: I->getOperand(i: 0), Type, Q, Depth);
3124 break;
3125 case Instruction::Call:
3126 if (match(V: I, P: m_Intrinsic<Intrinsic::usub_sat>(Op0: m_Value(V&: X))))
3127 getUnsignedMonotonicValues(Res, V: X, Type, Q, Depth);
3128 break;
3129 default:
3130 break;
3131 }
3132 }
3133}
3134
3135static Value *simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS,
3136 Value *RHS,
3137 const SimplifyQuery &Q) {
3138 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3139 return nullptr;
3140
3141 // We have LHS uge GreaterValues and LowerValues uge RHS. If any of the
3142 // GreaterValues and LowerValues are the same, it follows that LHS uge RHS.
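  // For example (illustrative), in "icmp uge (or %x, %y), (lshr %x, %z)",
  // %x lands in both sets, so the compare folds to true.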
3143 SmallPtrSet<Value *, 4> GreaterValues;
3144 SmallPtrSet<Value *, 4> LowerValues;
3145 getUnsignedMonotonicValues(Res&: GreaterValues, V: LHS, Type: MonotonicType::GreaterEq, Q);
3146 getUnsignedMonotonicValues(Res&: LowerValues, V: RHS, Type: MonotonicType::LowerEq, Q);
3147 for (Value *GV : GreaterValues)
3148 if (LowerValues.contains(Ptr: GV))
3149 return ConstantInt::getBool(Ty: getCompareTy(Op: LHS),
3150 V: Pred == ICmpInst::ICMP_UGE);
3151 return nullptr;
3152}
3153
3154static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
3155 Value *RHS, const SimplifyQuery &Q,
3156 unsigned MaxRecurse) {
3157 Type *ITy = getCompareTy(Op: RHS); // The return type.
3158
3159 Value *Y = nullptr;
3160 // icmp pred (or X, Y), X
3161 if (match(V: LBO, P: m_c_Or(L: m_Value(V&: Y), R: m_Specific(V: RHS)))) {
3162 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3163 KnownBits RHSKnown = computeKnownBits(V: RHS, Q);
3164 KnownBits YKnown = computeKnownBits(V: Y, Q);
3165 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3166 return Pred == ICmpInst::ICMP_SLT ? getTrue(Ty: ITy) : getFalse(Ty: ITy);
3167 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3168 return Pred == ICmpInst::ICMP_SLT ? getFalse(Ty: ITy) : getTrue(Ty: ITy);
3169 }
3170 }
3171
3172 // icmp pred (urem X, Y), Y
3173 if (match(V: LBO, P: m_URem(L: m_Value(), R: m_Specific(V: RHS)))) {
3174 switch (Pred) {
3175 default:
3176 break;
3177 case ICmpInst::ICMP_SGT:
3178 case ICmpInst::ICMP_SGE: {
3179 KnownBits Known = computeKnownBits(V: RHS, Q);
3180 if (!Known.isNonNegative())
3181 break;
3182 [[fallthrough]];
3183 }
3184 case ICmpInst::ICMP_EQ:
3185 case ICmpInst::ICMP_UGT:
3186 case ICmpInst::ICMP_UGE:
3187 return getFalse(Ty: ITy);
3188 case ICmpInst::ICMP_SLT:
3189 case ICmpInst::ICMP_SLE: {
3190 KnownBits Known = computeKnownBits(V: RHS, Q);
3191 if (!Known.isNonNegative())
3192 break;
3193 [[fallthrough]];
3194 }
3195 case ICmpInst::ICMP_NE:
3196 case ICmpInst::ICMP_ULT:
3197 case ICmpInst::ICMP_ULE:
3198 return getTrue(Ty: ITy);
3199 }
3200 }
3201
3202 // If x is nonzero:
3203 // x >>u C <u x --> true for C != 0.
3204 // x >>u C != x --> true for C != 0.
3205 // x >>u C >=u x --> false for C != 0.
3206 // x >>u C == x --> false for C != 0.
3207 // x udiv C <u x --> true for C != 1.
3208 // x udiv C != x --> true for C != 1.
3209 // x udiv C >=u x --> false for C != 1.
3210 // x udiv C == x --> false for C != 1.
3211 // TODO: allow non-constant shift amount/divisor
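  // For example (illustrative), with %x known non-zero:
  //   %s = lshr i8 %x, 3
  //   %c = icmp ult i8 %s, %x   ; folds to true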
3212 const APInt *C;
3213 if ((match(V: LBO, P: m_LShr(L: m_Specific(V: RHS), R: m_APInt(Res&: C))) && *C != 0) ||
3214 (match(V: LBO, P: m_UDiv(L: m_Specific(V: RHS), R: m_APInt(Res&: C))) && *C != 1)) {
3215 if (isKnownNonZero(V: RHS, Q)) {
3216 switch (Pred) {
3217 default:
3218 break;
3219 case ICmpInst::ICMP_EQ:
3220 case ICmpInst::ICMP_UGE:
3221 case ICmpInst::ICMP_UGT:
3222 return getFalse(Ty: ITy);
3223 case ICmpInst::ICMP_NE:
3224 case ICmpInst::ICMP_ULT:
3225 case ICmpInst::ICMP_ULE:
3226 return getTrue(Ty: ITy);
3227 }
3228 }
3229 }
3230
3231 // (x*C1)/C2 <= x for C1 <= C2.
3232 // This holds even if the multiplication overflows: Assume that x != 0 and
3233 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3234 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3235 //
  // Additionally, the multiplication or the division might be represented as
  // a shift:
  //   (x*C1)>>C2 <= x for C1 <= 2**C2.
  //   (x<<C1)/C2 <= x for 2**C1 <= C2.
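  // A worked instance (illustrative) of the shift form in i8: with C1 = 3 and
  // C2 = 2, C1 <= 2**C2 holds, and for x = 255 the product wraps to 253,
  // giving (253 >> 2) = 63 <= 255.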
3240 const APInt *C1, *C2;
3241 if ((match(V: LBO, P: m_UDiv(L: m_Mul(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2))) &&
3242 C1->ule(RHS: *C2)) ||
3243 (match(V: LBO, P: m_LShr(L: m_Mul(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2))) &&
3244 C1->ule(RHS: APInt(C2->getBitWidth(), 1) << *C2)) ||
3245 (match(V: LBO, P: m_UDiv(L: m_Shl(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2))) &&
3246 (APInt(C1->getBitWidth(), 1) << *C1).ule(RHS: *C2))) {
3247 if (Pred == ICmpInst::ICMP_UGT)
3248 return getFalse(Ty: ITy);
3249 if (Pred == ICmpInst::ICMP_ULE)
3250 return getTrue(Ty: ITy);
3251 }
3252
3253 // (sub C, X) == X, C is odd --> false
3254 // (sub C, X) != X, C is odd --> true
3255 if (match(V: LBO, P: m_Sub(L: m_APIntAllowPoison(Res&: C), R: m_Specific(V: RHS))) &&
3256 (*C & 1) == 1 && ICmpInst::isEquality(P: Pred))
3257 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(Ty: ITy) : getTrue(Ty: ITy);
3258
3259 return nullptr;
3260}
3261
3262// If only one of the icmp's operands has NSW flags, try to prove that:
3263//
3264// icmp slt/sgt/sle/sge (x + C1), (x +nsw C2)
3265//
3266// is equivalent to:
3267//
3268// icmp slt/sgt/sle/sge C1, C2
3269//
3270// which is true if x + C2 has the NSW flags set and:
3271// *) C1 <= C2 && C1 >= 0, or
3272// *) C2 <= C1 && C1 <= 0.
3273//
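// For example (illustrative):
//   icmp slt (add i8 %x, 1), (add nsw i8 %x, 2)
// satisfies C1 = 1 <= C2 = 2 with C1 >= 0, so it simplifies like
//   icmp slt i8 1, 2 --> true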
3274static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS,
3275 const InstrInfoQuery &IIQ) {
3276 // TODO: support other predicates.
3277 if (!ICmpInst::isSigned(predicate: Pred) || !IIQ.UseInstrInfo)
3278 return false;
3279
3280 // Canonicalize nsw add as RHS.
3281 if (!match(V: RHS, P: m_NSWAdd(L: m_Value(), R: m_Value())))
3282 std::swap(a&: LHS, b&: RHS);
3283 if (!match(V: RHS, P: m_NSWAdd(L: m_Value(), R: m_Value())))
3284 return false;
3285
3286 Value *X;
3287 const APInt *C1, *C2;
3288 if (!match(V: LHS, P: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: C1))) ||
3289 !match(V: RHS, P: m_Add(L: m_Specific(V: X), R: m_APInt(Res&: C2))))
3290 return false;
3291
3292 return (C1->sle(RHS: *C2) && C1->isNonNegative()) ||
3293 (C2->sle(RHS: *C1) && C1->isNonPositive());
3294}
3295
3296/// TODO: A large part of this logic is duplicated in InstCombine's
3297/// foldICmpBinOp(). We should be able to share that and avoid the code
3298/// duplication.
3299static Value *simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS,
3300 const SimplifyQuery &Q,
3301 unsigned MaxRecurse) {
3302 BinaryOperator *LBO = dyn_cast<BinaryOperator>(Val: LHS);
3303 BinaryOperator *RBO = dyn_cast<BinaryOperator>(Val: RHS);
3304 if (MaxRecurse && (LBO || RBO)) {
3305 // Analyze the case when either LHS or RHS is an add instruction.
3306 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3307 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3308 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3309 if (LBO && LBO->getOpcode() == Instruction::Add) {
3310 A = LBO->getOperand(i_nocapture: 0);
3311 B = LBO->getOperand(i_nocapture: 1);
3312 NoLHSWrapProblem =
3313 ICmpInst::isEquality(P: Pred) ||
3314 (CmpInst::isUnsigned(predicate: Pred) &&
3315 Q.IIQ.hasNoUnsignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO))) ||
3316 (CmpInst::isSigned(predicate: Pred) &&
3317 Q.IIQ.hasNoSignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO)));
3318 }
3319 if (RBO && RBO->getOpcode() == Instruction::Add) {
3320 C = RBO->getOperand(i_nocapture: 0);
3321 D = RBO->getOperand(i_nocapture: 1);
3322 NoRHSWrapProblem =
3323 ICmpInst::isEquality(P: Pred) ||
3324 (CmpInst::isUnsigned(predicate: Pred) &&
3325 Q.IIQ.hasNoUnsignedWrap(Op: cast<OverflowingBinaryOperator>(Val: RBO))) ||
3326 (CmpInst::isSigned(predicate: Pred) &&
3327 Q.IIQ.hasNoSignedWrap(Op: cast<OverflowingBinaryOperator>(Val: RBO)));
3328 }
3329
3330 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3331 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3332 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: A == RHS ? B : A,
3333 RHS: Constant::getNullValue(Ty: RHS->getType()), Q,
3334 MaxRecurse: MaxRecurse - 1))
3335 return V;
3336
3337 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3338 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3339 if (Value *V =
3340 simplifyICmpInst(Predicate: Pred, LHS: Constant::getNullValue(Ty: LHS->getType()),
3341 RHS: C == LHS ? D : C, Q, MaxRecurse: MaxRecurse - 1))
3342 return V;
3343
3344 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3345 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3346 trySimplifyICmpWithAdds(Pred, LHS, RHS, IIQ: Q.IIQ);
3347 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3348 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3349 Value *Y, *Z;
3350 if (A == C) {
3351 // C + B == C + D -> B == D
3352 Y = B;
3353 Z = D;
3354 } else if (A == D) {
3355 // D + B == C + D -> B == C
3356 Y = B;
3357 Z = C;
3358 } else if (B == C) {
3359 // A + C == C + D -> A == D
3360 Y = A;
3361 Z = D;
3362 } else {
3363 assert(B == D);
3364 // A + D == C + D -> A == C
3365 Y = A;
3366 Z = C;
3367 }
3368 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: Y, RHS: Z, Q, MaxRecurse: MaxRecurse - 1))
3369 return V;
3370 }
3371 }
3372
3373 if (LBO)
3374 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3375 return V;
3376
3377 if (RBO)
3378 if (Value *V = simplifyICmpWithBinOpOnLHS(
3379 Pred: ICmpInst::getSwappedPredicate(pred: Pred), LBO: RBO, RHS: LHS, Q, MaxRecurse))
3380 return V;
3381
3382 // 0 - (zext X) pred C
3383 if (!CmpInst::isUnsigned(predicate: Pred) && match(V: LHS, P: m_Neg(V: m_ZExt(Op: m_Value())))) {
3384 const APInt *C;
3385 if (match(V: RHS, P: m_APInt(Res&: C))) {
3386 if (C->isStrictlyPositive()) {
3387 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3388 return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS));
3389 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3390 return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS));
3391 }
3392 if (C->isNonNegative()) {
3393 if (Pred == ICmpInst::ICMP_SLE)
3394 return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS));
3395 if (Pred == ICmpInst::ICMP_SGT)
3396 return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS));
3397 }
3398 }
3399 }
3400
3401 // If C2 is a power-of-2 and C is not:
3402 // (C2 << X) == C --> false
3403 // (C2 << X) != C --> true
3404 const APInt *C;
3405 if (match(V: LHS, P: m_Shl(L: m_Power2(), R: m_Value())) &&
3406 match(V: RHS, P: m_APIntAllowPoison(Res&: C)) && !C->isPowerOf2()) {
3407 // C2 << X can equal zero in some circumstances.
3408 // This simplification might be unsafe if C is zero.
3409 //
3410 // We know it is safe if:
3411 // - The shift is nsw. We can't shift out the one bit.
3412 // - The shift is nuw. We can't shift out the one bit.
3413 // - C2 is one.
3414 // - C isn't zero.
3415 if (Q.IIQ.hasNoSignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO)) ||
3416 Q.IIQ.hasNoUnsignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO)) ||
3417 match(V: LHS, P: m_Shl(L: m_One(), R: m_Value())) || !C->isZero()) {
3418 if (Pred == ICmpInst::ICMP_EQ)
3419 return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS));
3420 if (Pred == ICmpInst::ICMP_NE)
3421 return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS));
3422 }
3423 }
3424
3425 // If C is a power-of-2:
3426 // (C << X) >u 0x8000 --> false
3427 // (C << X) <=u 0x8000 --> true
3428 if (match(V: LHS, P: m_Shl(L: m_Power2(), R: m_Value())) && match(V: RHS, P: m_SignMask())) {
3429 if (Pred == ICmpInst::ICMP_UGT)
3430 return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS));
3431 if (Pred == ICmpInst::ICMP_ULE)
3432 return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS));
3433 }
3434
3435 if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3436 return nullptr;
3437
3438 if (LBO->getOperand(i_nocapture: 0) == RBO->getOperand(i_nocapture: 0)) {
3439 switch (LBO->getOpcode()) {
3440 default:
3441 break;
3442 case Instruction::Shl: {
3443 bool NUW = Q.IIQ.hasNoUnsignedWrap(Op: LBO) && Q.IIQ.hasNoUnsignedWrap(Op: RBO);
3444 bool NSW = Q.IIQ.hasNoSignedWrap(Op: LBO) && Q.IIQ.hasNoSignedWrap(Op: RBO);
3445 if (!NUW || (ICmpInst::isSigned(predicate: Pred) && !NSW) ||
3446 !isKnownNonZero(V: LBO->getOperand(i_nocapture: 0), Q))
3447 break;
3448 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 1),
3449 RHS: RBO->getOperand(i_nocapture: 1), Q, MaxRecurse: MaxRecurse - 1))
3450 return V;
3451 break;
3452 }
    // If C1 & C2 == C1 (C1 is a bitwise subset of C2), where A = X and/or C1
    // and B = X and/or C2:
3454 // icmp ule A, B -> true
3455 // icmp ugt A, B -> false
3456 // icmp sle A, B -> true (C1 and C2 are the same sign)
3457 // icmp sgt A, B -> false (C1 and C2 are the same sign)
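    // For example (illustrative), with C1 = 3 a bitwise subset of C2 = 7:
    //   icmp ule (and i8 %x, 3), (and i8 %x, 7) --> true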
3458 case Instruction::And:
3459 case Instruction::Or: {
3460 const APInt *C1, *C2;
3461 if (ICmpInst::isRelational(P: Pred) &&
3462 match(V: LBO->getOperand(i_nocapture: 1), P: m_APInt(Res&: C1)) &&
3463 match(V: RBO->getOperand(i_nocapture: 1), P: m_APInt(Res&: C2))) {
3464 if (!C1->isSubsetOf(RHS: *C2)) {
3465 std::swap(a&: C1, b&: C2);
3466 Pred = ICmpInst::getSwappedPredicate(pred: Pred);
3467 }
3468 if (C1->isSubsetOf(RHS: *C2)) {
3469 if (Pred == ICmpInst::ICMP_ULE)
3470 return ConstantInt::getTrue(Ty: getCompareTy(Op: LHS));
3471 if (Pred == ICmpInst::ICMP_UGT)
3472 return ConstantInt::getFalse(Ty: getCompareTy(Op: LHS));
3473 if (C1->isNonNegative() == C2->isNonNegative()) {
3474 if (Pred == ICmpInst::ICMP_SLE)
3475 return ConstantInt::getTrue(Ty: getCompareTy(Op: LHS));
3476 if (Pred == ICmpInst::ICMP_SGT)
3477 return ConstantInt::getFalse(Ty: getCompareTy(Op: LHS));
3478 }
3479 }
3480 }
3481 break;
3482 }
3483 }
3484 }
3485
3486 if (LBO->getOperand(i_nocapture: 1) == RBO->getOperand(i_nocapture: 1)) {
3487 switch (LBO->getOpcode()) {
3488 default:
3489 break;
3490 case Instruction::UDiv:
3491 case Instruction::LShr:
3492 if (ICmpInst::isSigned(predicate: Pred) || !Q.IIQ.isExact(Op: LBO) ||
3493 !Q.IIQ.isExact(Op: RBO))
3494 break;
3495 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0),
3496 RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1))
3497 return V;
3498 break;
3499 case Instruction::SDiv:
3500 if (!ICmpInst::isEquality(P: Pred) || !Q.IIQ.isExact(Op: LBO) ||
3501 !Q.IIQ.isExact(Op: RBO))
3502 break;
3503 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0),
3504 RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1))
3505 return V;
3506 break;
3507 case Instruction::AShr:
3508 if (!Q.IIQ.isExact(Op: LBO) || !Q.IIQ.isExact(Op: RBO))
3509 break;
3510 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0),
3511 RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1))
3512 return V;
3513 break;
3514 case Instruction::Shl: {
3515 bool NUW = Q.IIQ.hasNoUnsignedWrap(Op: LBO) && Q.IIQ.hasNoUnsignedWrap(Op: RBO);
3516 bool NSW = Q.IIQ.hasNoSignedWrap(Op: LBO) && Q.IIQ.hasNoSignedWrap(Op: RBO);
3517 if (!NUW && !NSW)
3518 break;
3519 if (!NSW && ICmpInst::isSigned(predicate: Pred))
3520 break;
3521 if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0),
3522 RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1))
3523 return V;
3524 break;
3525 }
3526 }
3527 }
3528 return nullptr;
3529}
3530
/// Simplify integer comparisons where at least one operand of the compare
/// matches an integer min/max idiom.
3533static Value *simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS,
3534 const SimplifyQuery &Q,
3535 unsigned MaxRecurse) {
3536 Type *ITy = getCompareTy(Op: LHS); // The return type.
3537 Value *A, *B;
3538 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3539 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3540
3541 // Signed variants on "max(a,b)>=a -> true".
3542 if (match(V: LHS, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))) && (A == RHS || B == RHS)) {
3543 if (A != RHS)
3544 std::swap(a&: A, b&: B); // smax(A, B) pred A.
3545 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3546 // We analyze this as smax(A, B) pred A.
3547 P = Pred;
3548 } else if (match(V: RHS, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3549 (A == LHS || B == LHS)) {
3550 if (A != LHS)
3551 std::swap(a&: A, b&: B); // A pred smax(A, B).
3552 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3553 // We analyze this as smax(A, B) swapped-pred A.
3554 P = CmpInst::getSwappedPredicate(pred: Pred);
3555 } else if (match(V: LHS, P: m_SMin(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3556 (A == RHS || B == RHS)) {
3557 if (A != RHS)
3558 std::swap(a&: A, b&: B); // smin(A, B) pred A.
3559 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3560 // We analyze this as smax(-A, -B) swapped-pred -A.
3561 // Note that we do not need to actually form -A or -B thanks to EqP.
3562 P = CmpInst::getSwappedPredicate(pred: Pred);
3563 } else if (match(V: RHS, P: m_SMin(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3564 (A == LHS || B == LHS)) {
3565 if (A != LHS)
3566 std::swap(a&: A, b&: B); // A pred smin(A, B).
3567 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3568 // We analyze this as smax(-A, -B) pred -A.
3569 // Note that we do not need to actually form -A or -B thanks to EqP.
3570 P = Pred;
3571 }
3572 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3573 // Cases correspond to "max(A, B) p A".
3574 switch (P) {
3575 default:
3576 break;
3577 case CmpInst::ICMP_EQ:
3578 case CmpInst::ICMP_SLE:
3579 // Equivalent to "A EqP B". This may be the same as the condition tested
3580 // in the max/min; if so, we can just return that.
3581 if (Value *V = extractEquivalentCondition(V: LHS, Pred: EqP, LHS: A, RHS: B))
3582 return V;
3583 if (Value *V = extractEquivalentCondition(V: RHS, Pred: EqP, LHS: A, RHS: B))
3584 return V;
3585 // Otherwise, see if "A EqP B" simplifies.
3586 if (MaxRecurse)
3587 if (Value *V = simplifyICmpInst(Predicate: EqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1))
3588 return V;
3589 break;
3590 case CmpInst::ICMP_NE:
3591 case CmpInst::ICMP_SGT: {
3592 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(pred: EqP);
3593 // Equivalent to "A InvEqP B". This may be the same as the condition
3594 // tested in the max/min; if so, we can just return that.
3595 if (Value *V = extractEquivalentCondition(V: LHS, Pred: InvEqP, LHS: A, RHS: B))
3596 return V;
3597 if (Value *V = extractEquivalentCondition(V: RHS, Pred: InvEqP, LHS: A, RHS: B))
3598 return V;
3599 // Otherwise, see if "A InvEqP B" simplifies.
3600 if (MaxRecurse)
3601 if (Value *V = simplifyICmpInst(Predicate: InvEqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1))
3602 return V;
3603 break;
3604 }
3605 case CmpInst::ICMP_SGE:
3606 // Always true.
3607 return getTrue(Ty: ITy);
3608 case CmpInst::ICMP_SLT:
3609 // Always false.
3610 return getFalse(Ty: ITy);
3611 }
3612 }
3613
3614 // Unsigned variants on "max(a,b)>=a -> true".
3615 P = CmpInst::BAD_ICMP_PREDICATE;
3616 if (match(V: LHS, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))) && (A == RHS || B == RHS)) {
3617 if (A != RHS)
3618 std::swap(a&: A, b&: B); // umax(A, B) pred A.
3619 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3620 // We analyze this as umax(A, B) pred A.
3621 P = Pred;
3622 } else if (match(V: RHS, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3623 (A == LHS || B == LHS)) {
3624 if (A != LHS)
3625 std::swap(a&: A, b&: B); // A pred umax(A, B).
3626 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3627 // We analyze this as umax(A, B) swapped-pred A.
3628 P = CmpInst::getSwappedPredicate(pred: Pred);
3629 } else if (match(V: LHS, P: m_UMin(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3630 (A == RHS || B == RHS)) {
3631 if (A != RHS)
3632 std::swap(a&: A, b&: B); // umin(A, B) pred A.
3633 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3634 // We analyze this as umax(-A, -B) swapped-pred -A.
3635 // Note that we do not need to actually form -A or -B thanks to EqP.
3636 P = CmpInst::getSwappedPredicate(pred: Pred);
3637 } else if (match(V: RHS, P: m_UMin(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3638 (A == LHS || B == LHS)) {
3639 if (A != LHS)
3640 std::swap(a&: A, b&: B); // A pred umin(A, B).
3641 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3642 // We analyze this as umax(-A, -B) pred -A.
3643 // Note that we do not need to actually form -A or -B thanks to EqP.
3644 P = Pred;
3645 }
3646 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3647 // Cases correspond to "max(A, B) p A".
3648 switch (P) {
3649 default:
3650 break;
3651 case CmpInst::ICMP_EQ:
3652 case CmpInst::ICMP_ULE:
3653 // Equivalent to "A EqP B". This may be the same as the condition tested
3654 // in the max/min; if so, we can just return that.
3655 if (Value *V = extractEquivalentCondition(V: LHS, Pred: EqP, LHS: A, RHS: B))
3656 return V;
3657 if (Value *V = extractEquivalentCondition(V: RHS, Pred: EqP, LHS: A, RHS: B))
3658 return V;
3659 // Otherwise, see if "A EqP B" simplifies.
3660 if (MaxRecurse)
3661 if (Value *V = simplifyICmpInst(Predicate: EqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1))
3662 return V;
3663 break;
3664 case CmpInst::ICMP_NE:
3665 case CmpInst::ICMP_UGT: {
3666 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(pred: EqP);
3667 // Equivalent to "A InvEqP B". This may be the same as the condition
3668 // tested in the max/min; if so, we can just return that.
3669 if (Value *V = extractEquivalentCondition(V: LHS, Pred: InvEqP, LHS: A, RHS: B))
3670 return V;
3671 if (Value *V = extractEquivalentCondition(V: RHS, Pred: InvEqP, LHS: A, RHS: B))
3672 return V;
3673 // Otherwise, see if "A InvEqP B" simplifies.
3674 if (MaxRecurse)
3675 if (Value *V = simplifyICmpInst(Predicate: InvEqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1))
3676 return V;
3677 break;
3678 }
3679 case CmpInst::ICMP_UGE:
3680 return getTrue(Ty: ITy);
3681 case CmpInst::ICMP_ULT:
3682 return getFalse(Ty: ITy);
3683 }
3684 }
3685
  // Comparing one min and one max with a common operand?
  // Canonicalize the min operand to the RHS.
3688 if (match(V: LHS, P: m_UMin(L: m_Value(), R: m_Value())) ||
3689 match(V: LHS, P: m_SMin(L: m_Value(), R: m_Value()))) {
3690 std::swap(a&: LHS, b&: RHS);
3691 Pred = ICmpInst::getSwappedPredicate(pred: Pred);
3692 }
3693
3694 Value *C, *D;
3695 if (match(V: LHS, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3696 match(V: RHS, P: m_SMin(L: m_Value(V&: C), R: m_Value(V&: D))) &&
3697 (A == C || A == D || B == C || B == D)) {
3698 // smax(A, B) >=s smin(A, D) --> true
3699 if (Pred == CmpInst::ICMP_SGE)
3700 return getTrue(Ty: ITy);
3701 // smax(A, B) <s smin(A, D) --> false
3702 if (Pred == CmpInst::ICMP_SLT)
3703 return getFalse(Ty: ITy);
3704 } else if (match(V: LHS, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))) &&
3705 match(V: RHS, P: m_UMin(L: m_Value(V&: C), R: m_Value(V&: D))) &&
3706 (A == C || A == D || B == C || B == D)) {
3707 // umax(A, B) >=u umin(A, D) --> true
3708 if (Pred == CmpInst::ICMP_UGE)
3709 return getTrue(Ty: ITy);
3710 // umax(A, B) <u umin(A, D) --> false
3711 if (Pred == CmpInst::ICMP_ULT)
3712 return getFalse(Ty: ITy);
3713 }
3714
3715 return nullptr;
3716}
3717
3718static Value *simplifyICmpWithDominatingAssume(CmpPredicate Predicate,
3719 Value *LHS, Value *RHS,
3720 const SimplifyQuery &Q) {
3721 // Gracefully handle instructions that have not been inserted yet.
3722 if (!Q.AC || !Q.CxtI)
3723 return nullptr;
3724
3725 for (Value *AssumeBaseOp : {LHS, RHS}) {
3726 for (auto &AssumeVH : Q.AC->assumptionsFor(V: AssumeBaseOp)) {
3727 if (!AssumeVH)
3728 continue;
3729
3730 CallInst *Assume = cast<CallInst>(Val&: AssumeVH);
3731 if (std::optional<bool> Imp = isImpliedCondition(
3732 LHS: Assume->getArgOperand(i: 0), RHSPred: Predicate, RHSOp0: LHS, RHSOp1: RHS, DL: Q.DL))
3733 if (isValidAssumeForContext(I: Assume, CxtI: Q.CxtI, DT: Q.DT))
3734 return ConstantInt::get(Ty: getCompareTy(Op: LHS), V: *Imp);
3735 }
3736 }
3737
3738 return nullptr;
3739}
3740
3741static Value *simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS,
3742 Value *RHS) {
3743 auto *II = dyn_cast<IntrinsicInst>(Val: LHS);
3744 if (!II)
3745 return nullptr;
3746
3747 switch (II->getIntrinsicID()) {
3748 case Intrinsic::uadd_sat:
3749 // uadd.sat(X, Y) uge X + Y
3750 if (match(V: RHS, P: m_c_Add(L: m_Specific(V: II->getArgOperand(i: 0)),
3751 R: m_Specific(V: II->getArgOperand(i: 1))))) {
3752 if (Pred == ICmpInst::ICMP_UGE)
3753 return ConstantInt::getTrue(Ty: getCompareTy(Op: II));
3754 if (Pred == ICmpInst::ICMP_ULT)
3755 return ConstantInt::getFalse(Ty: getCompareTy(Op: II));
3756 }
3757 return nullptr;
3758 case Intrinsic::usub_sat:
3759 // usub.sat(X, Y) ule X - Y
3760 if (match(V: RHS, P: m_Sub(L: m_Specific(V: II->getArgOperand(i: 0)),
3761 R: m_Specific(V: II->getArgOperand(i: 1))))) {
3762 if (Pred == ICmpInst::ICMP_ULE)
3763 return ConstantInt::getTrue(Ty: getCompareTy(Op: II));
3764 if (Pred == ICmpInst::ICMP_UGT)
3765 return ConstantInt::getFalse(Ty: getCompareTy(Op: II));
3766 }
3767 return nullptr;
3768 default:
3769 return nullptr;
3770 }
3771}
3772
3773/// Helper method to get range from metadata or attribute.
3774static std::optional<ConstantRange> getRange(Value *V,
3775 const InstrInfoQuery &IIQ) {
3776 if (Instruction *I = dyn_cast<Instruction>(Val: V))
3777 if (MDNode *MD = IIQ.getMetadata(I, KindID: LLVMContext::MD_range))
3778 return getConstantRangeFromMetadata(RangeMD: *MD);
3779
3780 if (const Argument *A = dyn_cast<Argument>(Val: V))
3781 return A->getRange();
3782 else if (const CallBase *CB = dyn_cast<CallBase>(Val: V))
3783 return CB->getRange();
3784
3785 return std::nullopt;
3786}
3787
3788/// Given operands for an ICmpInst, see if we can fold the result.
3789/// If not, this returns null.
3790static Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
3791 const SimplifyQuery &Q, unsigned MaxRecurse) {
3792 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3793
3794 if (Constant *CLHS = dyn_cast<Constant>(Val: LHS)) {
3795 if (Constant *CRHS = dyn_cast<Constant>(Val: RHS))
3796 return ConstantFoldCompareInstOperands(Predicate: Pred, LHS: CLHS, RHS: CRHS, DL: Q.DL, TLI: Q.TLI);
3797
3798 // If we have a constant, make sure it is on the RHS.
3799 std::swap(a&: LHS, b&: RHS);
3800 Pred = CmpInst::getSwappedPredicate(pred: Pred);
3801 }
3802 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3803
3804 Type *ITy = getCompareTy(Op: LHS); // The return type.
3805
3806 // icmp poison, X -> poison
3807 if (isa<PoisonValue>(Val: RHS))
3808 return PoisonValue::get(T: ITy);
3809
3810 // For EQ and NE, we can always pick a value for the undef to make the
3811 // predicate pass or fail, so we can return undef.
3812 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3813 if (Q.isUndefValue(V: RHS) && ICmpInst::isEquality(P: Pred))
3814 return UndefValue::get(T: ITy);
3815
3816 // icmp X, X -> true/false
3817 // icmp X, undef -> true/false because undef could be X.
3818 if (LHS == RHS || Q.isUndefValue(V: RHS))
3819 return ConstantInt::get(Ty: ITy, V: CmpInst::isTrueWhenEqual(predicate: Pred));
3820
3821 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3822 return V;
3823
3824 // TODO: Sink/common this with other potentially expensive calls that use
3825 // ValueTracking? See comment below for isKnownNonEqual().
3826 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3827 return V;
3828
3829 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q))
3830 return V;
3831
  // If both operands have a known constant range (from metadata or
  // attributes), use it to simplify the comparison.
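  // For example (illustrative), if %a carries !range [0, 10) and %b carries
  // !range [10, 20), then "icmp ult i32 %a, %b" folds to true.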
3834 if (std::optional<ConstantRange> RhsCr = getRange(V: RHS, IIQ: Q.IIQ))
3835 if (std::optional<ConstantRange> LhsCr = getRange(V: LHS, IIQ: Q.IIQ)) {
3836 if (LhsCr->icmp(Pred, Other: *RhsCr))
3837 return ConstantInt::getTrue(Ty: ITy);
3838
3839 if (LhsCr->icmp(Pred: CmpInst::getInversePredicate(pred: Pred), Other: *RhsCr))
3840 return ConstantInt::getFalse(Ty: ITy);
3841 }
3842
3843 // Compare of cast, for example (zext X) != 0 -> X != 0
3844 if (isa<CastInst>(Val: LHS) && (isa<Constant>(Val: RHS) || isa<CastInst>(Val: RHS))) {
3845 Instruction *LI = cast<CastInst>(Val: LHS);
3846 Value *SrcOp = LI->getOperand(i: 0);
3847 Type *SrcTy = SrcOp->getType();
3848 Type *DstTy = LI->getType();
3849
3850 // Turn icmp (ptrtoint/ptrtoaddr x), (ptrtoint/ptrtoaddr/constant) into a
3851 // compare of the input if the integer type is the same size as the
3852 // pointer address type (icmp only compares the address of the pointer).
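    // For example (illustrative), with a 64-bit index type:
    //   icmp eq (ptrtoint ptr %p to i64), 0 --> icmp eq ptr %p, null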
3853 if (MaxRecurse && (isa<PtrToIntInst, PtrToAddrInst>(Val: LI)) &&
3854 Q.DL.getAddressType(PtrTy: SrcTy) == DstTy) {
3855 if (Constant *RHSC = dyn_cast<Constant>(Val: RHS)) {
3856 // Transfer the cast to the constant.
3857 if (Value *V = simplifyICmpInst(Pred, LHS: SrcOp,
3858 RHS: ConstantExpr::getIntToPtr(C: RHSC, Ty: SrcTy),
3859 Q, MaxRecurse: MaxRecurse - 1))
3860 return V;
3861 } else if (isa<PtrToIntInst, PtrToAddrInst>(Val: RHS)) {
3862 auto *RI = cast<CastInst>(Val: RHS);
3863 if (RI->getOperand(i_nocapture: 0)->getType() == SrcTy)
3864 // Compare without the cast.
3865 if (Value *V = simplifyICmpInst(Pred, LHS: SrcOp, RHS: RI->getOperand(i_nocapture: 0), Q,
3866 MaxRecurse: MaxRecurse - 1))
3867 return V;
3868 }
3869 }
3870
3871 if (isa<ZExtInst>(Val: LHS)) {
3872 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3873 // same type.
3874 if (ZExtInst *RI = dyn_cast<ZExtInst>(Val: RHS)) {
3875 if (MaxRecurse && SrcTy == RI->getOperand(i_nocapture: 0)->getType())
3876 // Compare X and Y. Note that signed predicates become unsigned.
3877 if (Value *V =
3878 simplifyICmpInst(Pred: ICmpInst::getUnsignedPredicate(Pred), LHS: SrcOp,
3879 RHS: RI->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1))
3880 return V;
3881 }
3882 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3883 else if (SExtInst *RI = dyn_cast<SExtInst>(Val: RHS)) {
3884 if (SrcOp == RI->getOperand(i_nocapture: 0)) {
3885 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3886 return ConstantInt::getTrue(Ty: ITy);
3887 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3888 return ConstantInt::getFalse(Ty: ITy);
3889 }
3890 }
3891 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3892 // too. If not, then try to deduce the result of the comparison.
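    // For example (illustrative), in "icmp ult (zext i8 %x to i32), 256",
    // trunc+zext maps 256 to 0 != 256; since the zexted LHS is at most 255,
    // the compare folds to true.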
3893 else if (match(V: RHS, P: m_ImmConstant())) {
3894 Constant *C = dyn_cast<Constant>(Val: RHS);
3895 assert(C != nullptr);
3896
      // Compute the constant that would result if we truncated C to SrcTy and
      // then reextended it to DstTy.
3899 Constant *Trunc =
3900 ConstantFoldCastOperand(Opcode: Instruction::Trunc, C, DestTy: SrcTy, DL: Q.DL);
3901 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3902 Constant *RExt =
3903 ConstantFoldCastOperand(Opcode: CastInst::ZExt, C: Trunc, DestTy: DstTy, DL: Q.DL);
3904 assert(RExt && "Constant-fold of ImmConstant should not fail");
3905 Constant *AnyEq =
3906 ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_EQ, LHS: RExt, RHS: C, DL: Q.DL);
3907 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3908
3909 // If the re-extended constant didn't change any of the elements then
3910 // this is effectively also a case of comparing two zero-extended
3911 // values.
3912 if (AnyEq->isAllOnesValue() && MaxRecurse)
3913 if (Value *V = simplifyICmpInst(Pred: ICmpInst::getUnsignedPredicate(Pred),
3914 LHS: SrcOp, RHS: Trunc, Q, MaxRecurse: MaxRecurse - 1))
3915 return V;
3916
3917 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3918 // there. Use this to work out the result of the comparison.
3919 if (AnyEq->isNullValue()) {
3920 switch (Pred) {
3921 default:
3922 llvm_unreachable("Unknown ICmp predicate!");
3923 // LHS <u RHS.
3924 case ICmpInst::ICMP_EQ:
3925 case ICmpInst::ICMP_UGT:
3926 case ICmpInst::ICMP_UGE:
3927 return Constant::getNullValue(Ty: ITy);
3928
3929 case ICmpInst::ICMP_NE:
3930 case ICmpInst::ICMP_ULT:
3931 case ICmpInst::ICMP_ULE:
3932 return Constant::getAllOnesValue(Ty: ITy);
3933
        // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
        // is non-negative then LHS <s RHS.
3936 case ICmpInst::ICMP_SGT:
3937 case ICmpInst::ICMP_SGE:
3938 return ConstantFoldCompareInstOperands(
3939 Predicate: ICmpInst::ICMP_SLT, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()),
3940 DL: Q.DL);
3941 case ICmpInst::ICMP_SLT:
3942 case ICmpInst::ICMP_SLE:
3943 return ConstantFoldCompareInstOperands(
3944 Predicate: ICmpInst::ICMP_SGE, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()),
3945 DL: Q.DL);
3946 }
3947 }
3948 }
3949 }
3950
3951 if (isa<SExtInst>(Val: LHS)) {
3952 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3953 // same type.
3954 if (SExtInst *RI = dyn_cast<SExtInst>(Val: RHS)) {
3955 if (MaxRecurse && SrcTy == RI->getOperand(i_nocapture: 0)->getType())
3956 // Compare X and Y. Note that the predicate does not change.
3957 if (Value *V = simplifyICmpInst(Pred, LHS: SrcOp, RHS: RI->getOperand(i_nocapture: 0), Q,
3958 MaxRecurse: MaxRecurse - 1))
3959 return V;
3960 }
3961 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3962 else if (ZExtInst *RI = dyn_cast<ZExtInst>(Val: RHS)) {
3963 if (SrcOp == RI->getOperand(i_nocapture: 0)) {
3964 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3965 return ConstantInt::getTrue(Ty: ITy);
3966 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3967 return ConstantInt::getFalse(Ty: ITy);
3968 }
3969 }
3970 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3971 // too. If not, then try to deduce the result of the comparison.
3972 else if (match(V: RHS, P: m_ImmConstant())) {
3973 Constant *C = cast<Constant>(Val: RHS);
3974
      // Compute the constant that would result if we truncated C to SrcTy and
      // then reextended it to DstTy.
3977 Constant *Trunc =
3978 ConstantFoldCastOperand(Opcode: Instruction::Trunc, C, DestTy: SrcTy, DL: Q.DL);
3979 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3980 Constant *RExt =
3981 ConstantFoldCastOperand(Opcode: CastInst::SExt, C: Trunc, DestTy: DstTy, DL: Q.DL);
3982 assert(RExt && "Constant-fold of ImmConstant should not fail");
3983 Constant *AnyEq =
3984 ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_EQ, LHS: RExt, RHS: C, DL: Q.DL);
3985 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3986
3987 // If the re-extended constant didn't change then this is effectively
3988 // also a case of comparing two sign-extended values.
3989 if (AnyEq->isAllOnesValue() && MaxRecurse)
3990 if (Value *V =
3991 simplifyICmpInst(Pred, LHS: SrcOp, RHS: Trunc, Q, MaxRecurse: MaxRecurse - 1))
3992 return V;
3993
        // Otherwise the upper bits of LHS are all copies of its sign bit,
        // while RHS has differing bits there. Use this to work out the result
        // of the comparison.
3996 if (AnyEq->isNullValue()) {
3997 switch (Pred) {
3998 default:
3999 llvm_unreachable("Unknown ICmp predicate!");
4000 case ICmpInst::ICMP_EQ:
4001 return Constant::getNullValue(Ty: ITy);
4002 case ICmpInst::ICMP_NE:
4003 return Constant::getAllOnesValue(Ty: ITy);
4004
4005 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
4006 // LHS >s RHS.
4007 case ICmpInst::ICMP_SGT:
4008 case ICmpInst::ICMP_SGE:
4009 return ConstantFoldCompareInstOperands(
4010 Predicate: ICmpInst::ICMP_SLT, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()),
4011 DL: Q.DL);
4012 case ICmpInst::ICMP_SLT:
4013 case ICmpInst::ICMP_SLE:
4014 return ConstantFoldCompareInstOperands(
4015 Predicate: ICmpInst::ICMP_SGE, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()),
4016 DL: Q.DL);
4017
4018 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
4019 // LHS >u RHS.
4020 case ICmpInst::ICMP_UGT:
4021 case ICmpInst::ICMP_UGE:
4022 // Comparison is true iff the LHS <s 0.
4023 if (MaxRecurse)
4024 if (Value *V = simplifyICmpInst(Pred: ICmpInst::ICMP_SLT, LHS: SrcOp,
4025 RHS: Constant::getNullValue(Ty: SrcTy), Q,
4026 MaxRecurse: MaxRecurse - 1))
4027 return V;
4028 break;
4029 case ICmpInst::ICMP_ULT:
4030 case ICmpInst::ICMP_ULE:
4031 // Comparison is true iff the LHS >=s 0.
4032 if (MaxRecurse)
4033 if (Value *V = simplifyICmpInst(Pred: ICmpInst::ICMP_SGE, LHS: SrcOp,
4034 RHS: Constant::getNullValue(Ty: SrcTy), Q,
4035 MaxRecurse: MaxRecurse - 1))
4036 return V;
4037 break;
4038 }
4039 }
4040 }
4041 }
4042 }
4043
4044 // icmp eq|ne X, Y -> false|true if X != Y
  // This is potentially expensive, and we have already computed known bits for
  // compares with 0 above, so only try this for a non-zero compare.
4047 if (ICmpInst::isEquality(P: Pred) && !match(V: RHS, P: m_Zero()) &&
4048 isKnownNonEqual(V1: LHS, V2: RHS, SQ: Q)) {
4049 return Pred == ICmpInst::ICMP_NE ? getTrue(Ty: ITy) : getFalse(Ty: ITy);
4050 }
4051
4052 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
4053 return V;
4054
4055 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
4056 return V;
4057
4058 if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
4059 return V;
4060 if (Value *V = simplifyICmpWithIntrinsicOnLHS(
4061 Pred: ICmpInst::getSwappedPredicate(pred: Pred), LHS: RHS, RHS: LHS))
4062 return V;
4063
4064 if (Value *V = simplifyICmpUsingMonotonicValues(Pred, LHS, RHS, Q))
4065 return V;
4066 if (Value *V = simplifyICmpUsingMonotonicValues(
4067 Pred: ICmpInst::getSwappedPredicate(pred: Pred), LHS: RHS, RHS: LHS, Q))
4068 return V;
4069
4070 if (Value *V = simplifyICmpWithDominatingAssume(Predicate: Pred, LHS, RHS, Q))
4071 return V;
4072
4073 if (std::optional<bool> Res =
4074 isImpliedByDomCondition(Pred, LHS, RHS, ContextI: Q.CxtI, DL: Q.DL))
4075 return ConstantInt::getBool(Ty: ITy, V: *Res);
4076
  // Simplify comparisons of related pointers using a powerful, recursive
  // GEP-walk when we have target data available.
4079 if (LHS->getType()->isPointerTy())
4080 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4081 return C;
4082
4083 // If the comparison is with the result of a select instruction, check whether
4084 // comparing with either branch of the select always yields the same value.
4085 if (isa<SelectInst>(Val: LHS) || isa<SelectInst>(Val: RHS))
4086 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4087 return V;
4088
4089 // If the comparison is with the result of a phi instruction, check whether
4090 // doing the compare with each incoming phi value yields a common result.
4091 if (isa<PHINode>(Val: LHS) || isa<PHINode>(Val: RHS))
4092 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4093 return V;
4094
4095 return nullptr;
4096}
4097
4098Value *llvm::simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4099 const SimplifyQuery &Q) {
4100 return ::simplifyICmpInst(Pred: Predicate, LHS, RHS, Q, MaxRecurse: RecursionLimit);
4101}
4102
4103/// Given operands for an FCmpInst, see if we can fold the result.
4104/// If not, this returns null.
4105static Value *simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
4106 FastMathFlags FMF, const SimplifyQuery &Q,
4107 unsigned MaxRecurse) {
4108 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4109
4110 if (Constant *CLHS = dyn_cast<Constant>(Val: LHS)) {
4111 if (Constant *CRHS = dyn_cast<Constant>(Val: RHS)) {
      // If the folding isn't successful, fall back to the rest of the logic.
4113 if (auto *Result = ConstantFoldCompareInstOperands(Predicate: Pred, LHS: CLHS, RHS: CRHS, DL: Q.DL,
4114 TLI: Q.TLI, I: Q.CxtI))
4115 return Result;
4116 } else {
4117 // If we have a constant, make sure it is on the RHS.
4118 std::swap(a&: LHS, b&: RHS);
4119 Pred = CmpInst::getSwappedPredicate(pred: Pred);
4120 }
4121 }
4122
4123 // Fold trivial predicates.
4124 Type *RetTy = getCompareTy(Op: LHS);
4125 if (Pred == FCmpInst::FCMP_FALSE)
4126 return getFalse(Ty: RetTy);
4127 if (Pred == FCmpInst::FCMP_TRUE)
4128 return getTrue(Ty: RetTy);
4129
4130 // fcmp pred x, poison and fcmp pred poison, x
4131 // fold to poison
4132 if (isa<PoisonValue>(Val: LHS) || isa<PoisonValue>(Val: RHS))
4133 return PoisonValue::get(T: RetTy);
4134
4135 // fcmp pred x, undef and fcmp pred undef, x
4136 // fold to true if unordered, false if ordered
4137 if (Q.isUndefValue(V: LHS) || Q.isUndefValue(V: RHS)) {
    // Choosing NaN for the undef will always make an unordered comparison
    // succeed and an ordered comparison fail.
4140 return ConstantInt::get(Ty: RetTy, V: CmpInst::isUnordered(predicate: Pred));
4141 }
4142
4143 // fcmp x,x -> true/false. Not all compares are foldable.
4144 if (LHS == RHS) {
4145 if (CmpInst::isTrueWhenEqual(predicate: Pred))
4146 return getTrue(Ty: RetTy);
4147 if (CmpInst::isFalseWhenEqual(predicate: Pred))
4148 return getFalse(Ty: RetTy);
4149 }
4150
4151 // Fold (un)ordered comparison if we can determine there are no NaNs.
4152 //
  // This catches the two-variable input case; constants are handled below as a
  // class-like compare.
4155 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4156 KnownFPClass RHSClass = computeKnownFPClass(V: RHS, InterestedClasses: fcAllFlags, SQ: Q);
4157 KnownFPClass LHSClass = computeKnownFPClass(V: LHS, InterestedClasses: fcAllFlags, SQ: Q);
4158
4159 if (FMF.noNaNs() ||
4160 (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
4161 return ConstantInt::get(Ty: RetTy, V: Pred == FCmpInst::FCMP_ORD);
4162
4163 if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
4164 return ConstantInt::get(Ty: RetTy, V: Pred == CmpInst::FCMP_UNO);
4165 }
4166
4167 if (std::optional<bool> Res =
4168 isImpliedByDomCondition(Pred, LHS, RHS, ContextI: Q.CxtI, DL: Q.DL))
4169 return ConstantInt::getBool(Ty: RetTy, V: *Res);
4170
4171 const APFloat *C = nullptr;
4172 match(V: RHS, P: m_APFloatAllowPoison(Res&: C));
4173 std::optional<KnownFPClass> FullKnownClassLHS;
4174
4175 // Lazily compute the possible classes for LHS. Avoid computing it twice if
4176 // RHS is a 0.
4177 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4178 fcAllFlags) {
4179 if (FullKnownClassLHS)
4180 return *FullKnownClassLHS;
4181 return computeKnownFPClass(V: LHS, FMF, InterestedClasses: InterestedFlags, SQ: Q);
4182 };
4183
4184 if (C && Q.CxtI) {
4185 // Fold out compares that express a class test.
4186 //
4187 // FIXME: Should be able to perform folds without context
4188 // instruction. Always pass in the context function?
4189
4190 const Function *ParentF = Q.CxtI->getFunction();
4191 auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, F: *ParentF, LHS, ConstRHS: C);
4192 if (ClassVal) {
4193 FullKnownClassLHS = computeLHSClass();
4194 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4195 return getFalse(Ty: RetTy);
4196 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4197 return getTrue(Ty: RetTy);
4198 }
4199 }
4200
4201 // Handle fcmp with constant RHS.
4202 if (C) {
4203 // TODO: If we always required a context function, we wouldn't need to
4204 // special case nans.
4205 if (C->isNaN())
4206 return ConstantInt::get(Ty: RetTy, V: CmpInst::isUnordered(predicate: Pred));
4207
    // TODO: Need a version of fcmpToClassTest that returns the implied class
    // when the compare isn't a complete class test; e.g. > 1.0 implies
    // fcPositive, but isn't implementable as a class call.
4211 if (C->isNegative() && !C->isNegZero()) {
4212 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4213
4214 // TODO: We can catch more cases by using a range check rather than
4215 // relying on CannotBeOrderedLessThanZero.
4216 switch (Pred) {
4217 case FCmpInst::FCMP_UGE:
4218 case FCmpInst::FCMP_UGT:
4219 case FCmpInst::FCMP_UNE: {
4220 KnownFPClass KnownClass = computeLHSClass(Interested);
4221
4222 // (X >= 0) implies (X > C) when (C < 0)
4223 if (KnownClass.cannotBeOrderedLessThanZero())
4224 return getTrue(Ty: RetTy);
4225 break;
4226 }
4227 case FCmpInst::FCMP_OEQ:
4228 case FCmpInst::FCMP_OLE:
4229 case FCmpInst::FCMP_OLT: {
4230 KnownFPClass KnownClass = computeLHSClass(Interested);
4231
4232 // (X >= 0) implies !(X < C) when (C < 0)
4233 if (KnownClass.cannotBeOrderedLessThanZero())
4234 return getFalse(Ty: RetTy);
4235 break;
4236 }
4237 default:
4238 break;
4239 }
4240 }
4241 // Check comparison of [minnum/maxnum with constant] with other constant.
4242 const APFloat *C2;
4243 if ((match(V: LHS, P: m_Intrinsic<Intrinsic::minnum>(Op0: m_Value(), Op1: m_APFloat(Res&: C2))) &&
4244 *C2 < *C) ||
4245 (match(V: LHS, P: m_Intrinsic<Intrinsic::maxnum>(Op0: m_Value(), Op1: m_APFloat(Res&: C2))) &&
4246 *C2 > *C)) {
4247 bool IsMaxNum =
4248 cast<IntrinsicInst>(Val: LHS)->getIntrinsicID() == Intrinsic::maxnum;
4249 // The ordered relationship and minnum/maxnum guarantee that we do not
4250 // have NaN constants, so ordered/unordered preds are handled the same.
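      // e.g. (illustrative): fcmp olt (minnum %x, 1.0), 2.0 --> true, since
      // minnum(%x, 1.0) can never exceed 1.0, even when %x is NaN.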
4251 switch (Pred) {
4252 case FCmpInst::FCMP_OEQ:
4253 case FCmpInst::FCMP_UEQ:
4254 // minnum(X, LesserC) == C --> false
4255 // maxnum(X, GreaterC) == C --> false
4256 return getFalse(Ty: RetTy);
4257 case FCmpInst::FCMP_ONE:
4258 case FCmpInst::FCMP_UNE:
4259 // minnum(X, LesserC) != C --> true
4260 // maxnum(X, GreaterC) != C --> true
4261 return getTrue(Ty: RetTy);
4262 case FCmpInst::FCMP_OGE:
4263 case FCmpInst::FCMP_UGE:
4264 case FCmpInst::FCMP_OGT:
4265 case FCmpInst::FCMP_UGT:
4266 // minnum(X, LesserC) >= C --> false
4267 // minnum(X, LesserC) > C --> false
4268 // maxnum(X, GreaterC) >= C --> true
4269 // maxnum(X, GreaterC) > C --> true
4270 return ConstantInt::get(Ty: RetTy, V: IsMaxNum);
4271 case FCmpInst::FCMP_OLE:
4272 case FCmpInst::FCMP_ULE:
4273 case FCmpInst::FCMP_OLT:
4274 case FCmpInst::FCMP_ULT:
4275 // minnum(X, LesserC) <= C --> true
4276 // minnum(X, LesserC) < C --> true
4277 // maxnum(X, GreaterC) <= C --> false
4278 // maxnum(X, GreaterC) < C --> false
4279 return ConstantInt::get(Ty: RetTy, V: !IsMaxNum);
4280 default:
4281 // TRUE/FALSE/ORD/UNO should be handled before this.
4282 llvm_unreachable("Unexpected fcmp predicate");
4283 }
4284 }
4285 }
4286
  // TODO: Could fold this together with the above if there were a matcher that
  // returned all classes in a non-splat vector.
4289 if (match(V: RHS, P: m_AnyZeroFP())) {
4290 switch (Pred) {
4291 case FCmpInst::FCMP_OGE:
4292 case FCmpInst::FCMP_ULT: {
4293 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4294 if (!FMF.noNaNs())
4295 Interested |= fcNan;
4296
4297 KnownFPClass Known = computeLHSClass(Interested);
4298
4299 // Positive or zero X >= 0.0 --> true
4300 // Positive or zero X < 0.0 --> false
4301 if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4302 Known.cannotBeOrderedLessThanZero())
4303 return Pred == FCmpInst::FCMP_OGE ? getTrue(Ty: RetTy) : getFalse(Ty: RetTy);
4304 break;
4305 }
4306 case FCmpInst::FCMP_UGE:
4307 case FCmpInst::FCMP_OLT: {
4308 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4309 KnownFPClass Known = computeLHSClass(Interested);
4310
4311 // Positive or zero or nan X >= 0.0 --> true
4312 // Positive or zero or nan X < 0.0 --> false
4313 if (Known.cannotBeOrderedLessThanZero())
4314 return Pred == FCmpInst::FCMP_UGE ? getTrue(Ty: RetTy) : getFalse(Ty: RetTy);
4315 break;
4316 }
4317 default:
4318 break;
4319 }
4320 }
4321
4322 // If the comparison is with the result of a select instruction, check whether
4323 // comparing with either branch of the select always yields the same value.
4324 if (isa<SelectInst>(Val: LHS) || isa<SelectInst>(Val: RHS))
4325 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4326 return V;
4327
4328 // If the comparison is with the result of a phi instruction, check whether
4329 // doing the compare with each incoming phi value yields a common result.
4330 if (isa<PHINode>(Val: LHS) || isa<PHINode>(Val: RHS))
4331 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4332 return V;
4333
4334 return nullptr;
4335}
4336
4337Value *llvm::simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4338 FastMathFlags FMF, const SimplifyQuery &Q) {
4339 return ::simplifyFCmpInst(Pred: Predicate, LHS, RHS, FMF, Q, MaxRecurse: RecursionLimit);
4340}
4341
4342static Value *simplifyWithOpsReplaced(Value *V,
4343 ArrayRef<std::pair<Value *, Value *>> Ops,
4344 const SimplifyQuery &Q,
4345 bool AllowRefinement,
4346 SmallVectorImpl<Instruction *> *DropFlags,
4347 unsigned MaxRecurse) {
4348 assert((AllowRefinement || !Q.CanUseUndef) &&
4349 "If AllowRefinement=false then CanUseUndef=false");
4350 for (const auto &OpAndRepOp : Ops) {
4351 // We cannot replace a constant, and shouldn't even try.
4352 if (isa<Constant>(Val: OpAndRepOp.first))
4353 return nullptr;
4354
4355 // Trivial replacement.
4356 if (V == OpAndRepOp.first)
4357 return OpAndRepOp.second;
4358 }
4359
4360 if (!MaxRecurse--)
4361 return nullptr;
4362
4363 auto *I = dyn_cast<Instruction>(Val: V);
4364 if (!I)
4365 return nullptr;
4366
4367 // The arguments of a phi node might refer to a value from a previous
4368 // cycle iteration.
4369 if (isa<PHINode>(Val: I))
4370 return nullptr;
4371
4372 // Don't fold away llvm.is.constant checks based on assumptions.
4373 if (match(V: I, P: m_Intrinsic<Intrinsic::is_constant>()))
4374 return nullptr;
4375
4376 // Don't simplify freeze.
4377 if (isa<FreezeInst>(Val: I))
4378 return nullptr;
4379
4380 for (const auto &OpAndRepOp : Ops) {
4381 // For vector types, the simplification must hold per-lane, so forbid
4382 // potentially cross-lane operations like shufflevector.
4383 if (OpAndRepOp.first->getType()->isVectorTy() &&
4384 !isNotCrossLaneOperation(I))
4385 return nullptr;
4386 }
4387
4388 // Replace Op with RepOp in instruction operands.
4389 SmallVector<Value *, 8> NewOps;
4390 bool AnyReplaced = false;
4391 for (Value *InstOp : I->operands()) {
4392 if (Value *NewInstOp = simplifyWithOpsReplaced(
4393 V: InstOp, Ops, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4394 NewOps.push_back(Elt: NewInstOp);
4395 AnyReplaced = InstOp != NewInstOp;
4396 } else {
4397 NewOps.push_back(Elt: InstOp);
4398 }
4399
4400 // Bail out if any operand is undef and SimplifyQuery disables undef
4401 // simplification. Constant folding currently doesn't respect this option.
4402 if (isa<UndefValue>(Val: NewOps.back()) && !Q.CanUseUndef)
4403 return nullptr;
4404 }
4405
4406 if (!AnyReplaced)
4407 return nullptr;
4408
4409 if (!AllowRefinement) {
4410 // General InstSimplify functions may refine the result, e.g. by returning
4411 // a constant for a potentially poison value. To avoid this, implement only
4412 // a few non-refining but profitable transforms here.
4413
4414 if (auto *BO = dyn_cast<BinaryOperator>(Val: I)) {
4415 unsigned Opcode = BO->getOpcode();
4416 // id op x -> x, x op id -> x
4417 // Exclude floats, because x op id may produce a different NaN value.
4418 if (!BO->getType()->isFPOrFPVectorTy()) {
4419 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, Ty: I->getType()))
4420 return NewOps[1];
4421 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, Ty: I->getType(),
4422 /* RHS */ AllowRHSConstant: true))
4423 return NewOps[0];
4424 }
4425
4426 // x & x -> x, x | x -> x
4427 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4428 NewOps[0] == NewOps[1]) {
4429 // or disjoint x, x results in poison.
4430 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(Val: BO)) {
4431 if (PDI->isDisjoint()) {
4432 if (!DropFlags)
4433 return nullptr;
4434 DropFlags->push_back(Elt: BO);
4435 }
4436 }
4437 return NewOps[0];
4438 }
4439
4440 // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4441 // by assumption and this case never wraps, so nowrap flags can be
4442 // ignored.
4443 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4444 NewOps[0] == NewOps[1] &&
4445 any_of(Range&: Ops, P: [=](const auto &Rep) { return NewOps[0] == Rep.second; }))
4446 return Constant::getNullValue(Ty: I->getType());
4447
4448 // If we are substituting an absorber constant into a binop and extra
4449 // poison can't leak if we remove the select -- because both operands of
4450 // the binop are based on the same value -- then it may be safe to replace
4451 // the value with the absorber constant. Examples:
4452 // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4453 // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
      // (Op == -1) ? -1 : (Op | (binop C, Op)) --> Op | (binop C, Op)
4455 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Ty: I->getType());
4456 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4457 any_of(Range&: Ops,
4458 P: [=](const auto &Rep) { return impliesPoison(BO, Rep.first); }))
4459 return Absorber;
4460 }
4461
4462 if (auto *II = dyn_cast<IntrinsicInst>(Val: I)) {
      // Fold `x == y ? 0 : ucmp(x, y)`: under the replacement y -> x,
      // `ucmp(x, x)` becomes `0`.
4465 if ((II->getIntrinsicID() == Intrinsic::scmp ||
4466 II->getIntrinsicID() == Intrinsic::ucmp) &&
4467 NewOps[0] == NewOps[1]) {
4468 if (II->hasPoisonGeneratingAnnotations()) {
4469 if (!DropFlags)
4470 return nullptr;
4471
4472 DropFlags->push_back(Elt: II);
4473 }
4474
4475 return ConstantInt::get(Ty: I->getType(), V: 0);
4476 }
4477 }
4478
4479 if (isa<GetElementPtrInst>(Val: I)) {
4480 // getelementptr x, 0 -> x.
4481 // This never returns poison, even if inbounds is set.
4482 if (NewOps.size() == 2 && match(V: NewOps[1], P: m_Zero()))
4483 return NewOps[0];
4484 }
4485 } else {
4486 // The simplification queries below may return the original value. Consider:
4487 // %div = udiv i32 %arg, %arg2
4488 // %mul = mul nsw i32 %div, %arg2
4489 // %cmp = icmp eq i32 %mul, %arg
4490 // %sel = select i1 %cmp, i32 %div, i32 undef
4491 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4492 // simplifies back to %arg. This can only happen because %mul does not
4493 // dominate %div. To ensure a consistent return value contract, we make sure
4494 // that this case returns nullptr as well.
4495 auto PreventSelfSimplify = [V](Value *Simplified) {
4496 return Simplified != V ? Simplified : nullptr;
4497 };
4498
4499 return PreventSelfSimplify(
4500 ::simplifyInstructionWithOperands(I, NewOps, SQ: Q, MaxRecurse));
4501 }
4502
4503 // If all operands are constant after substituting Op for RepOp then we can
4504 // constant fold the instruction.
4505 SmallVector<Constant *, 8> ConstOps;
4506 for (Value *NewOp : NewOps) {
4507 if (Constant *ConstOp = dyn_cast<Constant>(Val: NewOp))
4508 ConstOps.push_back(Elt: ConstOp);
4509 else
4510 return nullptr;
4511 }
4512
4513 // Consider:
4514 // %cmp = icmp eq i32 %x, 2147483647
4515 // %add = add nsw i32 %x, 1
4516 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4517 //
4518 // We can't replace %sel with %add unless we strip away the flags (which
4519 // will be done in InstCombine).
4520 // TODO: This may be unsound, because it only catches some forms of
4521 // refinement.
4522 if (!AllowRefinement) {
4523 auto *II = dyn_cast<IntrinsicInst>(Val: I);
4524 if (canCreatePoison(Op: cast<Operator>(Val: I), ConsiderFlagsAndMetadata: !DropFlags)) {
4525 // abs cannot create poison if the value is known to never be int_min.
4526 if (II && II->getIntrinsicID() == Intrinsic::abs) {
4527 if (!ConstOps[0]->isNotMinSignedValue())
4528 return nullptr;
4529 } else
4530 return nullptr;
4531 }
4532
4533 if (DropFlags && II) {
4534 // If we're going to change the poison flag of abs/ctz to false, also
4535 // perform constant folding that way, so we get an integer instead of a
4536 // poison value here.
4537 switch (II->getIntrinsicID()) {
4538 case Intrinsic::abs:
4539 case Intrinsic::ctlz:
4540 case Intrinsic::cttz:
4541 ConstOps[1] = ConstantInt::getFalse(Context&: I->getContext());
4542 break;
4543 default:
4544 break;
4545 }
4546 }
4547
4548 Constant *Res = ConstantFoldInstOperands(I, Ops: ConstOps, DL: Q.DL, TLI: Q.TLI,
4549 /*AllowNonDeterministic=*/false);
4550 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4551 DropFlags->push_back(Elt: I);
4552 return Res;
4553 }
4554
4555 return ConstantFoldInstOperands(I, Ops: ConstOps, DL: Q.DL, TLI: Q.TLI,
4556 /*AllowNonDeterministic=*/false);
4557}
4558
4559static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4560 const SimplifyQuery &Q,
4561 bool AllowRefinement,
4562 SmallVectorImpl<Instruction *> *DropFlags,
4563 unsigned MaxRecurse) {
4564 return simplifyWithOpsReplaced(V, Ops: {{Op, RepOp}}, Q, AllowRefinement,
4565 DropFlags, MaxRecurse);
4566}
4567
4568Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4569 const SimplifyQuery &Q,
4570 bool AllowRefinement,
4571 SmallVectorImpl<Instruction *> *DropFlags) {
4572 // If refinement is disabled, also disable undef simplifications (which are
4573 // always refinements) in SimplifyQuery.
4574 if (!AllowRefinement)
4575 return ::simplifyWithOpReplaced(V, Op, RepOp, Q: Q.getWithoutUndef(),
4576 AllowRefinement, DropFlags, MaxRecurse: RecursionLimit);
4577 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4578 MaxRecurse: RecursionLimit);
4579}
4580
4581/// Try to simplify a select instruction when its condition operand is an
4582/// integer comparison where one operand of the compare is a constant.
4583static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4584 const APInt *Y, bool TrueWhenUnset) {
4585 const APInt *C;
4586
4587 // (X & Y) == 0 ? X & ~Y : X --> X
4588 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
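  // A concrete instance of the second form (illustrative; Y = 1, C = -2):
  //   select (icmp ne (and i8 %x, 1), 0), (and i8 %x, -2), i8 %x
  //     --> and i8 %x, -2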
4589 if (FalseVal == X && match(V: TrueVal, P: m_And(L: m_Specific(V: X), R: m_APInt(Res&: C))) &&
4590 *Y == ~*C)
4591 return TrueWhenUnset ? FalseVal : TrueVal;
4592
4593 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4594 // (X & Y) != 0 ? X : X & ~Y --> X
4595 if (TrueVal == X && match(V: FalseVal, P: m_And(L: m_Specific(V: X), R: m_APInt(Res&: C))) &&
4596 *Y == ~*C)
4597 return TrueWhenUnset ? FalseVal : TrueVal;
4598
4599 if (Y->isPowerOf2()) {
4600 // (X & Y) == 0 ? X | Y : X --> X | Y
4601 // (X & Y) != 0 ? X | Y : X --> X
4602 if (FalseVal == X && match(V: TrueVal, P: m_Or(L: m_Specific(V: X), R: m_APInt(Res&: C))) &&
4603 *Y == *C) {
4604 // We can't return the or if it has the disjoint flag.
4605 if (TrueWhenUnset && cast<PossiblyDisjointInst>(Val: TrueVal)->isDisjoint())
4606 return nullptr;
4607 return TrueWhenUnset ? TrueVal : FalseVal;
4608 }
4609
4610 // (X & Y) == 0 ? X : X | Y --> X
4611 // (X & Y) != 0 ? X : X | Y --> X | Y
4612 if (TrueVal == X && match(V: FalseVal, P: m_Or(L: m_Specific(V: X), R: m_APInt(Res&: C))) &&
4613 *Y == *C) {
4614 // We can't return the or if it has the disjoint flag.
4615 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(Val: FalseVal)->isDisjoint())
4616 return nullptr;
4617 return TrueWhenUnset ? TrueVal : FalseVal;
4618 }
4619 }
4620
4621 return nullptr;
4622}
4623
4624static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4625 CmpPredicate Pred, Value *TVal,
4626 Value *FVal) {
4627 // Canonicalize common cmp+sel operand as CmpLHS.
4628 if (CmpRHS == TVal || CmpRHS == FVal) {
4629 std::swap(a&: CmpLHS, b&: CmpRHS);
4630 Pred = ICmpInst::getSwappedPredicate(pred: Pred);
4631 }
4632
4633 // Canonicalize common cmp+sel operand as TVal.
4634 if (CmpLHS == FVal) {
4635 std::swap(a&: TVal, b&: FVal);
4636 Pred = ICmpInst::getInversePredicate(pred: Pred);
4637 }
4638
4639 // A vector select may be shuffling together elements that are equivalent
4640 // based on the max/min/select relationship.
4641 Value *X = CmpLHS, *Y = CmpRHS;
4642 bool PeekedThroughSelectShuffle = false;
4643 auto *Shuf = dyn_cast<ShuffleVectorInst>(Val: FVal);
4644 if (Shuf && Shuf->isSelect()) {
4645 if (Shuf->getOperand(i_nocapture: 0) == Y)
4646 FVal = Shuf->getOperand(i_nocapture: 1);
4647 else if (Shuf->getOperand(i_nocapture: 1) == Y)
4648 FVal = Shuf->getOperand(i_nocapture: 0);
4649 else
4650 return nullptr;
4651 PeekedThroughSelectShuffle = true;
4652 }
4653
4654 // (X pred Y) ? X : max/min(X, Y)
4655 auto *MMI = dyn_cast<MinMaxIntrinsic>(Val: FVal);
4656 if (!MMI || TVal != X ||
4657 !match(V: FVal, P: m_c_MaxOrMin(L: m_Specific(V: X), R: m_Specific(V: Y))))
4658 return nullptr;
4659
4660 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4661 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4662 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4663 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4664 //
4665 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4666 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4667 // If Z is true, this reduces as above, and if Z is false:
4668 // (X > Y) ? X : Y --> max(X, Y)
4669 ICmpInst::Predicate MMPred = MMI->getPredicate();
4670 if (MMPred == CmpInst::getStrictPredicate(pred: Pred))
4671 return MMI;
4672
4673 // Other transforms are not valid with a shuffle.
4674 if (PeekedThroughSelectShuffle)
4675 return nullptr;
4676
4677 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4678 if (Pred == CmpInst::ICMP_EQ)
4679 return MMI;
4680
4681 // (X != Y) ? X : max/min(X, Y) --> X
4682 if (Pred == CmpInst::ICMP_NE)
4683 return X;
4684
4685 // (X < Y) ? X : max(X, Y) --> X
4686 // (X <= Y) ? X : max(X, Y) --> X
4687 // (X > Y) ? X : min(X, Y) --> X
4688 // (X >= Y) ? X : min(X, Y) --> X
4689 ICmpInst::Predicate InvPred = CmpInst::getInversePredicate(pred: Pred);
4690 if (MMPred == CmpInst::getStrictPredicate(pred: InvPred))
4691 return X;
4692
4693 return nullptr;
4694}
4695
/// An alternative way to test whether a bit is set or not, using e.g. sgt/slt
/// or trunc instead of eq/ne.
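/// e.g. (sketch): "icmp slt i8 %x, 0" decomposes to a test of the sign bit, so
/// "select (icmp slt i8 %x, 0), (or i8 %x, -128), i8 %x" folds to %x.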
4698static Value *simplifySelectWithBitTest(Value *CondVal, Value *TrueVal,
4699 Value *FalseVal) {
4700 if (auto Res = decomposeBitTest(Cond: CondVal))
4701 return simplifySelectBitTest(TrueVal, FalseVal, X: Res->X, Y: &Res->Mask,
4702 TrueWhenUnset: Res->Pred == ICmpInst::ICMP_EQ);
4703
4704 return nullptr;
4705}
4706
4707/// Try to simplify a select instruction when its condition operand is an
4708/// integer equality or floating-point equivalence comparison.
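/// e.g. (sketch): "select (icmp eq i32 %x, %y), (sub i32 %x, %y), i32 0" folds
/// to 0, because substituting %y for %x makes the true arm match the false arm.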
4709static Value *simplifySelectWithEquivalence(
4710 ArrayRef<std::pair<Value *, Value *>> Replacements, Value *TrueVal,
4711 Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse) {
4712 Value *SimplifiedFalseVal =
4713 simplifyWithOpsReplaced(V: FalseVal, Ops: Replacements, Q: Q.getWithoutUndef(),
4714 /* AllowRefinement */ false,
4715 /* DropFlags */ nullptr, MaxRecurse);
4716 if (!SimplifiedFalseVal)
4717 SimplifiedFalseVal = FalseVal;
4718
4719 Value *SimplifiedTrueVal =
4720 simplifyWithOpsReplaced(V: TrueVal, Ops: Replacements, Q,
4721 /* AllowRefinement */ true,
4722 /* DropFlags */ nullptr, MaxRecurse);
4723 if (!SimplifiedTrueVal)
4724 SimplifiedTrueVal = TrueVal;
4725
4726 if (SimplifiedFalseVal == SimplifiedTrueVal)
4727 return FalseVal;
4728
4729 return nullptr;
4730}
4731
4732/// Try to simplify a select instruction when its condition operand is an
4733/// integer comparison.
4734static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4735 Value *FalseVal,
4736 const SimplifyQuery &Q,
4737 unsigned MaxRecurse) {
4738 CmpPredicate Pred;
4739 Value *CmpLHS, *CmpRHS;
4740 if (!match(V: CondVal, P: m_ICmp(Pred, L: m_Value(V&: CmpLHS), R: m_Value(V&: CmpRHS))))
4741 return nullptr;
4742
4743 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TVal: TrueVal, FVal: FalseVal))
4744 return V;
4745
4746 // Canonicalize ne to eq predicate.
4747 if (Pred == ICmpInst::ICMP_NE) {
4748 Pred = ICmpInst::ICMP_EQ;
4749 std::swap(a&: TrueVal, b&: FalseVal);
4750 }
4751
4752 // Check for integer min/max with a limit constant:
4753 // X > MIN_INT ? X : MIN_INT --> X
4754 // X < MAX_INT ? X : MAX_INT --> X
4755 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4756 Value *X, *Y;
4757 SelectPatternFlavor SPF =
4758 matchDecomposedSelectPattern(CmpI: cast<ICmpInst>(Val: CondVal), TrueVal, FalseVal,
4759 LHS&: X, RHS&: Y)
4760 .Flavor;
4761 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4762 APInt LimitC = getMinMaxLimit(SPF: getInverseMinMaxFlavor(SPF),
4763 BitWidth: X->getType()->getScalarSizeInBits());
4764 if (match(V: Y, P: m_SpecificInt(V: LimitC)))
4765 return X;
4766 }
4767 }
4768
4769 if (Pred == ICmpInst::ICMP_EQ && match(V: CmpRHS, P: m_Zero())) {
4770 Value *X;
4771 const APInt *Y;
4772 if (match(V: CmpLHS, P: m_And(L: m_Value(V&: X), R: m_APInt(Res&: Y))))
4773 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4774 /*TrueWhenUnset=*/true))
4775 return V;
4776
4777 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4778 Value *ShAmt;
4779 auto isFsh = m_CombineOr(L: m_FShl(Op0: m_Value(V&: X), Op1: m_Value(), Op2: m_Value(V&: ShAmt)),
4780 R: m_FShr(Op0: m_Value(), Op1: m_Value(V&: X), Op2: m_Value(V&: ShAmt)));
4781 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4782 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4783 if (match(V: TrueVal, P: isFsh) && FalseVal == X && CmpLHS == ShAmt)
4784 return X;
4785
4786 // Test for a zero-shift-guard-op around rotates. These are used to
4787 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4788 // intrinsics do not have that problem.
4789 // We do not allow this transform for the general funnel shift case because
4790 // that would not preserve the poison safety of the original code.
4791 auto isRotate =
4792 m_CombineOr(L: m_FShl(Op0: m_Value(V&: X), Op1: m_Deferred(V: X), Op2: m_Value(V&: ShAmt)),
4793 R: m_FShr(Op0: m_Value(V&: X), Op1: m_Deferred(V: X), Op2: m_Value(V&: ShAmt)));
4794 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4795 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4796 if (match(V: FalseVal, P: isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4797 Pred == ICmpInst::ICMP_EQ)
4798 return FalseVal;
4799
4800 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4801 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4802 if (match(V: TrueVal, P: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS))) &&
4803 match(V: FalseVal, P: m_Neg(V: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS)))))
4804 return FalseVal;
4805 if (match(V: TrueVal,
4806 P: m_Neg(V: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS)))) &&
4807 match(V: FalseVal, P: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS))))
4808 return FalseVal;
4809 }
4810
4811 // If we have a scalar equality comparison, then we know the value in one of
4812 // the arms of the select. See if substituting this value into the arm and
4813 // simplifying the result yields the same value as the other arm.
4814 if (Pred == ICmpInst::ICMP_EQ) {
4815 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4816 canReplacePointersIfEqual(From: CmpLHS, To: CmpRHS, DL: Q.DL))
4817 if (Value *V = simplifySelectWithEquivalence(Replacements: {{CmpLHS, CmpRHS}}, TrueVal,
4818 FalseVal, Q, MaxRecurse))
4819 return V;
4820 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4821 canReplacePointersIfEqual(From: CmpRHS, To: CmpLHS, DL: Q.DL))
4822 if (Value *V = simplifySelectWithEquivalence(Replacements: {{CmpRHS, CmpLHS}}, TrueVal,
4823 FalseVal, Q, MaxRecurse))
4824 return V;
4825
4826 Value *X;
4827 Value *Y;
4828 // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4829 if (match(V: CmpLHS, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
4830 match(V: CmpRHS, P: m_Zero())) {
4831 // (X | Y) == 0 implies X == 0 and Y == 0.
4832 if (Value *V = simplifySelectWithEquivalence(
4833 Replacements: {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4834 return V;
4835 }
4836
4837 // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4838 if (match(V: CmpLHS, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Y))) &&
4839 match(V: CmpRHS, P: m_AllOnes())) {
4840 // (X & Y) == -1 implies X == -1 and Y == -1.
4841 if (Value *V = simplifySelectWithEquivalence(
4842 Replacements: {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4843 return V;
4844 }
4845 }
4846
4847 return nullptr;
4848}
4849
4850/// Try to simplify a select instruction when its condition operand is a
4851/// floating-point comparison.
4852static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4853 const SimplifyQuery &Q,
4854 unsigned MaxRecurse) {
4855 CmpPredicate Pred;
4856 Value *CmpLHS, *CmpRHS;
4857 if (!match(V: Cond, P: m_FCmp(Pred, L: m_Value(V&: CmpLHS), R: m_Value(V&: CmpRHS))))
4858 return nullptr;
4859 FCmpInst *I = cast<FCmpInst>(Val: Cond);
4860
4861 bool IsEquiv = I->isEquivalence();
4862 if (I->isEquivalence(/*Invert=*/true)) {
4863 std::swap(a&: T, b&: F);
4864 Pred = FCmpInst::getInversePredicate(pred: Pred);
4865 IsEquiv = true;
4866 }
4867
  // This transform is safe if at least one operand is known not to be zero.
  // Otherwise, the select can change the sign of a zero operand.
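  // e.g. (sketch; 2.0 is not a zero, so the substitution is safe):
  //   select (fcmp oeq float %x, 2.0), (fmul float %x, 0.5), float 1.0 --> 1.0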
4870 if (IsEquiv) {
4871 if (Value *V = simplifySelectWithEquivalence(Replacements: {{CmpLHS, CmpRHS}}, TrueVal: T, FalseVal: F, Q,
4872 MaxRecurse))
4873 return V;
4874 if (Value *V = simplifySelectWithEquivalence(Replacements: {{CmpRHS, CmpLHS}}, TrueVal: T, FalseVal: F, Q,
4875 MaxRecurse))
4876 return V;
4877 }
4878
4879 // Canonicalize CmpLHS to be T, and CmpRHS to be F, if they're swapped.
4880 if (CmpLHS == F && CmpRHS == T)
4881 std::swap(a&: CmpLHS, b&: CmpRHS);
4882
4883 if (CmpLHS != T || CmpRHS != F)
4884 return nullptr;
4885
4886 // This transform is also safe if we do not have (do not care about) -0.0.
4887 if (Q.CxtI && isa<FPMathOperator>(Val: Q.CxtI) && Q.CxtI->hasNoSignedZeros()) {
4888 // (T == F) ? T : F --> F
4889 if (Pred == FCmpInst::FCMP_OEQ)
4890 return F;
4891
4892 // (T != F) ? T : F --> T
4893 if (Pred == FCmpInst::FCMP_UNE)
4894 return T;
4895 }
4896
4897 return nullptr;
4898}
4899
4900/// Look for the following pattern and simplify %to_fold to %identicalPhi.
/// Here %phi, %to_fold and %phi.next compute the same value as %identicalPhi,
/// and hence the select instruction %to_fold can be folded into
/// %identicalPhi.
4904///
4905/// BB1:
4906/// %identicalPhi = phi [ X, %BB0 ], [ %identicalPhi.next, %BB1 ]
4907/// %phi = phi [ X, %BB0 ], [ %phi.next, %BB1 ]
4908/// ...
4909/// %identicalPhi.next = select %cmp, %val, %identicalPhi
4910/// (or select %cmp, %identicalPhi, %val)
4911/// %to_fold = select %cmp2, %identicalPhi, %phi
4912/// %phi.next = select %cmp, %val, %to_fold
4913/// (or select %cmp, %to_fold, %val)
4914///
4915/// Prove that %phi and %identicalPhi are the same by induction:
4916///
4917/// Base case: Both %phi and %identicalPhi are equal on entry to the loop.
4918/// Inductive case:
4919/// Suppose %phi and %identicalPhi are equal at iteration i.
/// We look at their values at iteration i+1, which are %phi.next and
/// %identicalPhi.next. They could have become different only if %cmp is
/// false and the corresponding values %to_fold and %identicalPhi differ
/// (similarly for the other "or" form shown in brackets).
4924///
/// The only case in which %to_fold and %identicalPhi could differ is when %cmp2
/// is false and %to_fold is %phi, which contradicts our inductive hypothesis
/// that %phi and %identicalPhi are equal. Thus %phi and %identicalPhi are
4928/// always equal at iteration i+1.
static bool isSelectWithIdenticalPHI(PHINode &PN, PHINode &IdenticalPN) {
4930 if (PN.getParent() != IdenticalPN.getParent())
4931 return false;
4932 if (PN.getNumIncomingValues() != 2)
4933 return false;
4934
4935 // Check that only the backedge incoming value is different.
4936 unsigned DiffVals = 0;
4937 BasicBlock *DiffValBB = nullptr;
4938 for (unsigned i = 0; i < 2; i++) {
4939 BasicBlock *PredBB = PN.getIncomingBlock(i);
4940 if (PN.getIncomingValue(i) !=
4941 IdenticalPN.getIncomingValueForBlock(BB: PredBB)) {
4942 DiffVals++;
4943 DiffValBB = PredBB;
4944 }
4945 }
4946 if (DiffVals != 1)
4947 return false;
4948 // Now check that the backedge incoming values are two select
4949 // instructions with the same condition. Either their true
4950 // values are the same, or their false values are the same.
4951 auto *SI = dyn_cast<SelectInst>(Val: PN.getIncomingValueForBlock(BB: DiffValBB));
4952 auto *IdenticalSI =
4953 dyn_cast<SelectInst>(Val: IdenticalPN.getIncomingValueForBlock(BB: DiffValBB));
4954 if (!SI || !IdenticalSI)
4955 return false;
4956 if (SI->getCondition() != IdenticalSI->getCondition())
4957 return false;
4958
4959 SelectInst *SIOtherVal = nullptr;
4960 Value *IdenticalSIOtherVal = nullptr;
4961 if (SI->getTrueValue() == IdenticalSI->getTrueValue()) {
4962 SIOtherVal = dyn_cast<SelectInst>(Val: SI->getFalseValue());
4963 IdenticalSIOtherVal = IdenticalSI->getFalseValue();
4964 } else if (SI->getFalseValue() == IdenticalSI->getFalseValue()) {
4965 SIOtherVal = dyn_cast<SelectInst>(Val: SI->getTrueValue());
4966 IdenticalSIOtherVal = IdenticalSI->getTrueValue();
4967 } else {
4968 return false;
4969 }
4970
  // Now check that the other values in the selects, i.e., %to_fold and
  // %identicalPhi, are essentially the same value.
4973 if (!SIOtherVal || IdenticalSIOtherVal != &IdenticalPN)
4974 return false;
4975 if (!(SIOtherVal->getTrueValue() == &IdenticalPN &&
4976 SIOtherVal->getFalseValue() == &PN) &&
4977 !(SIOtherVal->getTrueValue() == &PN &&
4978 SIOtherVal->getFalseValue() == &IdenticalPN))
4979 return false;
4980 return true;
4981}
4982
4983/// Given operands for a SelectInst, see if we can fold the result.
4984/// If not, this returns null.
4985static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4986 const SimplifyQuery &Q, unsigned MaxRecurse) {
4987 if (auto *CondC = dyn_cast<Constant>(Val: Cond)) {
4988 if (auto *TrueC = dyn_cast<Constant>(Val: TrueVal))
4989 if (auto *FalseC = dyn_cast<Constant>(Val: FalseVal))
4990 if (Constant *C = ConstantFoldSelectInstruction(Cond: CondC, V1: TrueC, V2: FalseC))
4991 return C;
4992
4993 // select poison, X, Y -> poison
4994 if (isa<PoisonValue>(Val: CondC))
4995 return PoisonValue::get(T: TrueVal->getType());
4996
4997 // select undef, X, Y -> X or Y
4998 if (Q.isUndefValue(V: CondC))
4999 return isa<Constant>(Val: FalseVal) ? FalseVal : TrueVal;
5000
5001 // select true, X, Y --> X
5002 // select false, X, Y --> Y
5003 // For vectors, allow undef/poison elements in the condition to match the
5004 // defined elements, so we can eliminate the select.
5005 if (match(V: CondC, P: m_One()))
5006 return TrueVal;
5007 if (match(V: CondC, P: m_Zero()))
5008 return FalseVal;
5009 }
5010
5011 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
5012 "Select must have bool or bool vector condition");
5013 assert(TrueVal->getType() == FalseVal->getType() &&
5014 "Select must have same types for true/false ops");
5015
5016 if (Cond->getType() == TrueVal->getType()) {
5017 // select i1 Cond, i1 true, i1 false --> i1 Cond
5018 if (match(V: TrueVal, P: m_One()) && match(V: FalseVal, P: m_ZeroInt()))
5019 return Cond;
5020
5021 // (X && Y) ? X : Y --> Y (commuted 2 ways)
5022 if (match(V: Cond, P: m_c_LogicalAnd(L: m_Specific(V: TrueVal), R: m_Specific(V: FalseVal))))
5023 return FalseVal;
5024
5025 // (X || Y) ? X : Y --> X (commuted 2 ways)
5026 if (match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: TrueVal), R: m_Specific(V: FalseVal))))
5027 return TrueVal;
5028
5029 // (X || Y) ? false : X --> false (commuted 2 ways)
5030 if (match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: FalseVal), R: m_Value())) &&
5031 match(V: TrueVal, P: m_ZeroInt()))
5032 return ConstantInt::getFalse(Ty: Cond->getType());
5033
5034 // Match patterns that end in logical-and.
5035 if (match(V: FalseVal, P: m_ZeroInt())) {
5036 // !(X || Y) && X --> false (commuted 2 ways)
5037 if (match(V: Cond, P: m_Not(V: m_c_LogicalOr(L: m_Specific(V: TrueVal), R: m_Value()))))
5038 return ConstantInt::getFalse(Ty: Cond->getType());
5039 // X && !(X || Y) --> false (commuted 2 ways)
5040 if (match(V: TrueVal, P: m_Not(V: m_c_LogicalOr(L: m_Specific(V: Cond), R: m_Value()))))
5041 return ConstantInt::getFalse(Ty: Cond->getType());
5042
5043 // (X || Y) && Y --> Y (commuted 2 ways)
5044 if (match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: TrueVal), R: m_Value())))
5045 return TrueVal;
5046 // Y && (X || Y) --> Y (commuted 2 ways)
5047 if (match(V: TrueVal, P: m_c_LogicalOr(L: m_Specific(V: Cond), R: m_Value())))
5048 return Cond;
5049
5050 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
5051 Value *X, *Y;
5052 if (match(V: Cond, P: m_c_LogicalOr(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: Y)))) &&
5053 match(V: TrueVal, P: m_c_LogicalOr(L: m_Specific(V: X), R: m_Specific(V: Y))))
5054 return X;
5055 if (match(V: TrueVal, P: m_c_LogicalOr(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: Y)))) &&
5056 match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: X), R: m_Specific(V: Y))))
5057 return X;
5058 }
5059
5060 // Match patterns that end in logical-or.
5061 if (match(V: TrueVal, P: m_One())) {
5062 // !(X && Y) || X --> true (commuted 2 ways)
5063 if (match(V: Cond, P: m_Not(V: m_c_LogicalAnd(L: m_Specific(V: FalseVal), R: m_Value()))))
5064 return ConstantInt::getTrue(Ty: Cond->getType());
5065 // X || !(X && Y) --> true (commuted 2 ways)
5066 if (match(V: FalseVal, P: m_Not(V: m_c_LogicalAnd(L: m_Specific(V: Cond), R: m_Value()))))
5067 return ConstantInt::getTrue(Ty: Cond->getType());
5068
5069 // (X && Y) || Y --> Y (commuted 2 ways)
5070 if (match(V: Cond, P: m_c_LogicalAnd(L: m_Specific(V: FalseVal), R: m_Value())))
5071 return FalseVal;
5072 // Y || (X && Y) --> Y (commuted 2 ways)
5073 if (match(V: FalseVal, P: m_c_LogicalAnd(L: m_Specific(V: Cond), R: m_Value())))
5074 return Cond;
5075 }
5076 }
5077
5078 // select ?, X, X -> X
5079 if (TrueVal == FalseVal)
5080 return TrueVal;
5081
5082 if (Cond == TrueVal) {
5083 // select i1 X, i1 X, i1 false --> X (logical-and)
5084 if (match(V: FalseVal, P: m_ZeroInt()))
5085 return Cond;
5086 // select i1 X, i1 X, i1 true --> true
5087 if (match(V: FalseVal, P: m_One()))
5088 return ConstantInt::getTrue(Ty: Cond->getType());
5089 }
5090 if (Cond == FalseVal) {
5091 // select i1 X, i1 true, i1 X --> X (logical-or)
5092 if (match(V: TrueVal, P: m_One()))
5093 return Cond;
5094 // select i1 X, i1 false, i1 X --> false
5095 if (match(V: TrueVal, P: m_ZeroInt()))
5096 return ConstantInt::getFalse(Ty: Cond->getType());
5097 }
5098
5099 // If the true or false value is poison, we can fold to the other value.
5100 // If the true or false value is undef, we can fold to the other value as
5101 // long as the other value isn't poison.
5102 // select ?, poison, X -> X
5103 // select ?, undef, X -> X
5104 if (isa<PoisonValue>(Val: TrueVal) ||
5105 (Q.isUndefValue(V: TrueVal) && impliesPoison(ValAssumedPoison: FalseVal, V: Cond)))
5106 return FalseVal;
5107 // select ?, X, poison -> X
5108 // select ?, X, undef -> X
5109 if (isa<PoisonValue>(Val: FalseVal) ||
5110 (Q.isUndefValue(V: FalseVal) && impliesPoison(ValAssumedPoison: TrueVal, V: Cond)))
5111 return TrueVal;
5112
5113 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
5114 Constant *TrueC, *FalseC;
5115 if (isa<FixedVectorType>(Val: TrueVal->getType()) &&
5116 match(V: TrueVal, P: m_Constant(C&: TrueC)) &&
5117 match(V: FalseVal, P: m_Constant(C&: FalseC))) {
5118 unsigned NumElts =
5119 cast<FixedVectorType>(Val: TrueC->getType())->getNumElements();
5120 SmallVector<Constant *, 16> NewC;
5121 for (unsigned i = 0; i != NumElts; ++i) {
5122 // Bail out on incomplete vector constants.
5123 Constant *TEltC = TrueC->getAggregateElement(Elt: i);
5124 Constant *FEltC = FalseC->getAggregateElement(Elt: i);
5125 if (!TEltC || !FEltC)
5126 break;
5127
5128 // If the elements match (undef or not), that value is the result. If only
5129 // one element is undef, choose the defined element as the safe result.
5130 if (TEltC == FEltC)
5131 NewC.push_back(Elt: TEltC);
5132 else if (isa<PoisonValue>(Val: TEltC) ||
5133 (Q.isUndefValue(V: TEltC) && isGuaranteedNotToBePoison(V: FEltC)))
5134 NewC.push_back(Elt: FEltC);
5135 else if (isa<PoisonValue>(Val: FEltC) ||
5136 (Q.isUndefValue(V: FEltC) && isGuaranteedNotToBePoison(V: TEltC)))
5137 NewC.push_back(Elt: TEltC);
5138 else
5139 break;
5140 }
5141 if (NewC.size() == NumElts)
5142 return ConstantVector::get(V: NewC);
5143 }
5144
5145 if (Value *V =
5146 simplifySelectWithICmpCond(CondVal: Cond, TrueVal, FalseVal, Q, MaxRecurse))
5147 return V;
5148
5149 if (Value *V = simplifySelectWithBitTest(CondVal: Cond, TrueVal, FalseVal))
5150 return V;
5151
5152 if (Value *V = simplifySelectWithFCmp(Cond, T: TrueVal, F: FalseVal, Q, MaxRecurse))
5153 return V;
5154
5155 std::optional<bool> Imp = isImpliedByDomCondition(Cond, ContextI: Q.CxtI, DL: Q.DL);
5156 if (Imp)
5157 return *Imp ? TrueVal : FalseVal;
5158 // Look for same PHIs in the true and false values.
5159 if (auto *TruePHI = dyn_cast<PHINode>(Val: TrueVal))
5160 if (auto *FalsePHI = dyn_cast<PHINode>(Val: FalseVal)) {
5161 if (isSelectWithIdenticalPHI(PN&: *TruePHI, IdenticalPN&: *FalsePHI))
5162 return FalseVal;
5163 if (isSelectWithIdenticalPHI(PN&: *FalsePHI, IdenticalPN&: *TruePHI))
5164 return TrueVal;
5165 }
5166 return nullptr;
5167}
5168
5169Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
5170 const SimplifyQuery &Q) {
5171 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, MaxRecurse: RecursionLimit);
5172}
5173
/// Given operands for a GetElementPtrInst, see if we can fold the result.
5175/// If not, this returns null.
5176static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
5177 ArrayRef<Value *> Indices, GEPNoWrapFlags NW,
5178 const SimplifyQuery &Q, unsigned) {
  // The address space of the GEP pointer operand.
5180 unsigned AS =
5181 cast<PointerType>(Val: Ptr->getType()->getScalarType())->getAddressSpace();
5182
5183 // getelementptr P -> P.
5184 if (Indices.empty())
5185 return Ptr;
5186
5187 // Compute the (pointer) type returned by the GEP instruction.
5188 Type *LastType = GetElementPtrInst::getIndexedType(Ty: SrcTy, IdxList: Indices);
5189 Type *GEPTy = Ptr->getType();
5190 if (!GEPTy->isVectorTy()) {
5191 for (Value *Op : Indices) {
5192 // If one of the operands is a vector, the result type is a vector of
5193 // pointers. All vector operands must have the same number of elements.
5194 if (VectorType *VT = dyn_cast<VectorType>(Val: Op->getType())) {
5195 GEPTy = VectorType::get(ElementType: GEPTy, EC: VT->getElementCount());
5196 break;
5197 }
5198 }
5199 }
5200
5201 // All-zero GEP is a no-op, unless it performs a vector splat.
5202 if (Ptr->getType() == GEPTy && all_of(Range&: Indices, P: match_fn(P: m_Zero())))
5203 return Ptr;
5204
5205 // getelementptr poison, idx -> poison
5206 // getelementptr baseptr, poison -> poison
5207 if (isa<PoisonValue>(Val: Ptr) || any_of(Range&: Indices, P: IsaPred<PoisonValue>))
5208 return PoisonValue::get(T: GEPTy);
5209
5210 // getelementptr undef, idx -> undef
5211 if (Q.isUndefValue(V: Ptr))
5212 return UndefValue::get(T: GEPTy);
5213
5214 bool IsScalableVec =
5215 SrcTy->isScalableTy() || any_of(Range&: Indices, P: [](const Value *V) {
5216 return isa<ScalableVectorType>(Val: V->getType());
5217 });
5218
5219 if (Indices.size() == 1) {
5220 Type *Ty = SrcTy;
5221 if (!IsScalableVec && Ty->isSized()) {
5222 Value *P;
5223 uint64_t C;
5224 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5225 // getelementptr P, N -> P if P points to a type of zero size.
5226 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5227 return Ptr;
5228
5229 // The following transforms are only safe if the ptrtoint cast
5230 // doesn't truncate the address of the pointers. The non-address bits
5231 // must be the same, as the underlying objects are the same.
5232 if (Indices[0]->getType()->getScalarSizeInBits() >=
5233 Q.DL.getAddressSizeInBits(AS)) {
5234 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5235 return P->getType() == GEPTy &&
5236 getUnderlyingObject(V: P) == getUnderlyingObject(V: Ptr);
5237 };
5238 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
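        // e.g. (sketch; i8 has alloc size 1, and %p and %q point into the same
        // underlying object):
        //   %d = sub i64 (ptrtoint ptr %q to i64), (ptrtoint ptr %p to i64)
        //   getelementptr i8, ptr %p, i64 %d --> %q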
5239 if (TyAllocSize == 1 &&
5240 match(V: Indices[0], P: m_Sub(L: m_PtrToIntOrAddr(Op: m_Value(V&: P)),
5241 R: m_PtrToIntOrAddr(Op: m_Specific(V: Ptr)))) &&
5242 CanSimplify())
5243 return P;
5244
5245 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5246 // size 1 << C.
5247 if (match(V: Indices[0], P: m_AShr(L: m_Sub(L: m_PtrToIntOrAddr(Op: m_Value(V&: P)),
5248 R: m_PtrToIntOrAddr(Op: m_Specific(V: Ptr))),
5249 R: m_ConstantInt(V&: C))) &&
5250 TyAllocSize == 1ULL << C && CanSimplify())
5251 return P;
5252
5253 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5254 // size C.
5255 if (match(V: Indices[0], P: m_SDiv(L: m_Sub(L: m_PtrToIntOrAddr(Op: m_Value(V&: P)),
5256 R: m_PtrToIntOrAddr(Op: m_Specific(V: Ptr))),
5257 R: m_SpecificInt(V: TyAllocSize))) &&
5258 CanSimplify())
5259 return P;
5260 }
5261 }
5262 }
5263
5264 if (!IsScalableVec && Q.DL.getTypeAllocSize(Ty: LastType) == 1 &&
5265 all_of(Range: Indices.drop_back(N: 1), P: match_fn(P: m_Zero()))) {
5266 unsigned IdxWidth =
5267 Q.DL.getIndexSizeInBits(AS: Ptr->getType()->getPointerAddressSpace());
5268 if (Q.DL.getTypeSizeInBits(Ty: Indices.back()->getType()) == IdxWidth) {
5269 APInt BasePtrOffset(IdxWidth, 0);
5270 Value *StrippedBasePtr =
5271 Ptr->stripAndAccumulateInBoundsConstantOffsets(DL: Q.DL, Offset&: BasePtrOffset);
5272
      // Avoid creating inttoptr of zero here: while LLVM's treatment of
      // inttoptr is generally conservative, this particular case is folded to
      // a null pointer, which will have incorrect provenance.
5276
5277 // gep (gep V, C), (sub 0, V) -> C
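      // e.g. (sketch): with %g = getelementptr inbounds i8, ptr %v, i64 7,
      //   getelementptr i8, ptr %g, (sub i64 0, (ptrtoint ptr %v to i64))
      //     --> inttoptr (i64 7) to ptr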
5278 if (match(V: Indices.back(),
5279 P: m_Neg(V: m_PtrToInt(Op: m_Specific(V: StrippedBasePtr)))) &&
5280 !BasePtrOffset.isZero()) {
5281 auto *CI = ConstantInt::get(Context&: GEPTy->getContext(), V: BasePtrOffset);
5282 return ConstantExpr::getIntToPtr(C: CI, Ty: GEPTy);
5283 }
5284 // gep (gep V, C), (xor V, -1) -> C-1
5285 if (match(V: Indices.back(),
5286 P: m_Xor(L: m_PtrToInt(Op: m_Specific(V: StrippedBasePtr)), R: m_AllOnes())) &&
5287 !BasePtrOffset.isOne()) {
5288 auto *CI = ConstantInt::get(Context&: GEPTy->getContext(), V: BasePtrOffset - 1);
5289 return ConstantExpr::getIntToPtr(C: CI, Ty: GEPTy);
5290 }
5291 }
5292 }
5293
5294 // Check to see if this is constant foldable.
5295 if (!isa<Constant>(Val: Ptr) || !all_of(Range&: Indices, P: IsaPred<Constant>))
5296 return nullptr;
5297
5298 if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy: SrcTy))
5299 return ConstantFoldGetElementPtr(Ty: SrcTy, C: cast<Constant>(Val: Ptr), InRange: std::nullopt,
5300 Idxs: Indices);
5301
5302 auto *CE =
5303 ConstantExpr::getGetElementPtr(Ty: SrcTy, C: cast<Constant>(Val: Ptr), IdxList: Indices, NW);
5304 return ConstantFoldConstant(C: CE, DL: Q.DL);
5305}
5306
5307Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5308 GEPNoWrapFlags NW, const SimplifyQuery &Q) {
5309 return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit);
5310}
5311
5312/// Given operands for an InsertValueInst, see if we can fold the result.
5313/// If not, this returns null.
5314static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5315 ArrayRef<unsigned> Idxs,
5316 const SimplifyQuery &Q, unsigned) {
5317 if (Constant *CAgg = dyn_cast<Constant>(Val: Agg))
5318 if (Constant *CVal = dyn_cast<Constant>(Val))
5319 return ConstantFoldInsertValueInstruction(Agg: CAgg, Val: CVal, Idxs);
5320
5321 // insertvalue x, poison, n -> x
5322 // insertvalue x, undef, n -> x if x cannot be poison
5323 if (isa<PoisonValue>(Val) ||
5324 (Q.isUndefValue(V: Val) && isGuaranteedNotToBePoison(V: Agg)))
5325 return Agg;
5326
5327 // insertvalue x, (extractvalue y, n), n
5328 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5329 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5330 EV->getIndices() == Idxs) {
5331 // insertvalue poison, (extractvalue y, n), n -> y
5332 // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5333 if (isa<PoisonValue>(Val: Agg) ||
5334 (Q.isUndefValue(V: Agg) &&
5335 isGuaranteedNotToBePoison(V: EV->getAggregateOperand())))
5336 return EV->getAggregateOperand();
5337
5338 // insertvalue y, (extractvalue y, n), n -> y
5339 if (Agg == EV->getAggregateOperand())
5340 return Agg;
5341 }
5342
5343 return nullptr;
5344}
5345
5346Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5347 ArrayRef<unsigned> Idxs,
5348 const SimplifyQuery &Q) {
5349 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5350}
5351
5352Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5353 const SimplifyQuery &Q) {
5354 // Try to constant fold.
5355 auto *VecC = dyn_cast<Constant>(Val: Vec);
5356 auto *ValC = dyn_cast<Constant>(Val);
5357 auto *IdxC = dyn_cast<Constant>(Val: Idx);
5358 if (VecC && ValC && IdxC)
5359 return ConstantExpr::getInsertElement(Vec: VecC, Elt: ValC, Idx: IdxC);
5360
  // For fixed-length vectors, fold into poison if the index is out of bounds.
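  // e.g. (illustrative): insertelement <4 x i32> %v, i32 %x, i64 7 --> poison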
5362 if (auto *CI = dyn_cast<ConstantInt>(Val: Idx)) {
5363 if (isa<FixedVectorType>(Val: Vec->getType()) &&
5364 CI->uge(Num: cast<FixedVectorType>(Val: Vec->getType())->getNumElements()))
5365 return PoisonValue::get(T: Vec->getType());
5366 }
5367
  // If the index is undef, it might be out of bounds (see the case above).
5369 if (Q.isUndefValue(V: Idx))
5370 return PoisonValue::get(T: Vec->getType());
5371
5372 // If the scalar is poison, or it is undef and there is no risk of
5373 // propagating poison from the vector value, simplify to the vector value.
5374 if (isa<PoisonValue>(Val) ||
5375 (Q.isUndefValue(V: Val) && isGuaranteedNotToBePoison(V: Vec)))
5376 return Vec;
5377
5378 // Inserting the splatted value into a constant splat does nothing.
5379 if (VecC && ValC && VecC->getSplatValue() == ValC)
5380 return Vec;
5381
5382 // If we are extracting a value from a vector, then inserting it into the same
5383 // place, that's the input vector:
5384 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5385 if (match(V: Val, P: m_ExtractElt(Val: m_Specific(V: Vec), Idx: m_Specific(V: Idx))))
5386 return Vec;
5387
5388 return nullptr;
5389}
5390
5391/// Given operands for an ExtractValueInst, see if we can fold the result.
5392/// If not, this returns null.
5393static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5394 const SimplifyQuery &, unsigned) {
5395 if (auto *CAgg = dyn_cast<Constant>(Val: Agg))
5396 return ConstantFoldExtractValueInstruction(Agg: CAgg, Idxs);
5397
5398 // extractvalue x, (insertvalue y, elt, n), n -> elt
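  // e.g. (illustrative):
  //   %a = insertvalue { i32, i32 } poison, i32 %x, 0
  //   %b = insertvalue { i32, i32 } %a, i32 %y, 1
  //   extractvalue { i32, i32 } %b, 0 --> %x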
5399 unsigned NumIdxs = Idxs.size();
5400 for (auto *IVI = dyn_cast<InsertValueInst>(Val: Agg); IVI != nullptr;
5401 IVI = dyn_cast<InsertValueInst>(Val: IVI->getAggregateOperand())) {
5402 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5403 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5404 unsigned NumCommonIdxs = std::min(a: NumInsertValueIdxs, b: NumIdxs);
5405 if (InsertValueIdxs.slice(N: 0, M: NumCommonIdxs) ==
5406 Idxs.slice(N: 0, M: NumCommonIdxs)) {
5407 if (NumIdxs == NumInsertValueIdxs)
5408 return IVI->getInsertedValueOperand();
5409 break;
5410 }
5411 }
5412
5413 // Simplify umul_with_overflow where one operand is 1.
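  // e.g. (illustrative): for %r = umul.with.overflow(i32 %x, i32 1),
  //   extractvalue %r, 0 --> %x
  //   extractvalue %r, 1 --> false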
5414 Value *V;
5415 if (Idxs.size() == 1 &&
5416 (match(V: Agg,
5417 P: m_Intrinsic<Intrinsic::umul_with_overflow>(Op0: m_Value(V), Op1: m_One())) ||
5418 match(V: Agg, P: m_Intrinsic<Intrinsic::umul_with_overflow>(Op0: m_One(),
5419 Op1: m_Value(V))))) {
5420 if (Idxs[0] == 0)
5421 return V;
5422 assert(Idxs[0] == 1 && "invalid index");
5423 return getFalse(Ty: CmpInst::makeCmpResultType(opnd_type: V->getType()));
5424 }
5425
5426 return nullptr;
5427}
5428
5429Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5430 const SimplifyQuery &Q) {
5431 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5432}
5433
5434/// Given operands for an ExtractElementInst, see if we can fold the result.
5435/// If not, this returns null.
5436static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5437 const SimplifyQuery &Q, unsigned) {
5438 auto *VecVTy = cast<VectorType>(Val: Vec->getType());
5439 if (auto *CVec = dyn_cast<Constant>(Val: Vec)) {
5440 if (auto *CIdx = dyn_cast<Constant>(Val: Idx))
5441 return ConstantExpr::getExtractElement(Vec: CVec, Idx: CIdx);
5442
5443 if (Q.isUndefValue(V: Vec))
5444 return UndefValue::get(T: VecVTy->getElementType());
5445 }
5446
5447 // An undef extract index can be arbitrarily chosen to be an out-of-range
5448 // index value, which would result in the instruction being poison.
5449 if (Q.isUndefValue(V: Idx))
5450 return PoisonValue::get(T: VecVTy->getElementType());
5451
5452 // If extracting a specified index from the vector, see if we can recursively
5453 // find a previously computed scalar that was inserted into the vector.
5454 if (auto *IdxC = dyn_cast<ConstantInt>(Val: Idx)) {
    // For fixed-length vectors, fold into poison if the index is out of
    // bounds.
5456 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5457 if (isa<FixedVectorType>(Val: VecVTy) && IdxC->getValue().uge(RHS: MinNumElts))
5458 return PoisonValue::get(T: VecVTy->getElementType());
5459 // Handle case where an element is extracted from a splat.
5460 if (IdxC->getValue().ult(RHS: MinNumElts))
5461 if (auto *Splat = getSplatValue(V: Vec))
5462 return Splat;
5463 if (Value *Elt = findScalarElement(V: Vec, EltNo: IdxC->getZExtValue()))
5464 return Elt;
5465 } else {
5466 // extractelt x, (insertelt y, elt, n), n -> elt
5467 // If the possibly-variable indices are trivially known to be equal
5468 // (because they are the same operand) then use the value that was
5469 // inserted directly.
5470 auto *IE = dyn_cast<InsertElementInst>(Val: Vec);
5471 if (IE && IE->getOperand(i_nocapture: 2) == Idx)
5472 return IE->getOperand(i_nocapture: 1);
5473
5474 // The index is not relevant if our vector is a splat.
5475 if (Value *Splat = getSplatValue(V: Vec))
5476 return Splat;
5477 }
5478 return nullptr;
5479}
5480
5481Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5482 const SimplifyQuery &Q) {
5483 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5484}
5485
5486/// See if we can fold the given phi. If not, returns null.
5487static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5488 const SimplifyQuery &Q) {
  // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
  // here, because the PHI we may succeed in simplifying to was not
  // def-reachable from the original PHI!
5492
5493 // If all of the PHI's incoming values are the same then replace the PHI node
5494 // with the common value.
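  // e.g. (illustrative): phi i32 [ %x, %bb0 ], [ %x, %bb1 ], [ undef, %bb2 ]
  // folds to %x, subject to the dominance and no-poison checks below.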
5495 Value *CommonValue = nullptr;
5496 bool HasPoisonInput = false;
5497 bool HasUndefInput = false;
5498 for (Value *Incoming : IncomingValues) {
5499 // If the incoming value is the phi node itself, it can safely be skipped.
5500 if (Incoming == PN)
5501 continue;
5502 if (isa<PoisonValue>(Val: Incoming)) {
5503 HasPoisonInput = true;
5504 continue;
5505 }
5506 if (Q.isUndefValue(V: Incoming)) {
5507 // Remember that we saw an undef value, but otherwise ignore them.
5508 HasUndefInput = true;
5509 continue;
5510 }
5511 if (CommonValue && Incoming != CommonValue)
5512 return nullptr; // Not the same, bail out.
5513 CommonValue = Incoming;
5514 }
5515
5516 // If CommonValue is null then all of the incoming values were either undef,
5517 // poison or equal to the phi node itself.
5518 if (!CommonValue)
5519 return HasUndefInput ? UndefValue::get(T: PN->getType())
5520 : PoisonValue::get(T: PN->getType());
5521
5522 if (HasPoisonInput || HasUndefInput) {
5523 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5524 // instruction, we cannot return X as the result of the PHI node unless it
5525 // dominates the PHI block.
5526 if (!valueDominatesPHI(V: CommonValue, P: PN, DT: Q.DT))
5527 return nullptr;
5528
5529 // Make sure we do not replace an undef value with poison.
5530 if (HasUndefInput &&
5531 !isGuaranteedNotToBePoison(V: CommonValue, AC: Q.AC, CtxI: Q.CxtI, DT: Q.DT))
5532 return nullptr;
5533 return CommonValue;
5534 }
5535
5536 return CommonValue;
5537}
5538
5539static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5540 const SimplifyQuery &Q, unsigned MaxRecurse) {
5541 if (auto *C = dyn_cast<Constant>(Val: Op))
5542 return ConstantFoldCastOperand(Opcode: CastOpc, C, DestTy: Ty, DL: Q.DL);
5543
5544 if (auto *CI = dyn_cast<CastInst>(Val: Op)) {
5545 auto *Src = CI->getOperand(i_nocapture: 0);
5546 Type *SrcTy = Src->getType();
5547 Type *MidTy = CI->getType();
5548 Type *DstTy = Ty;
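    // e.g. (illustrative): bitcast (bitcast <4 x i32> %v to <2 x i64>) back to
    // <4 x i32> is an eliminable pair, so the outer cast folds to %v.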
5549 if (Src->getType() == Ty) {
5550 auto FirstOp = CI->getOpcode();
5551 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5552 if (CastInst::isEliminableCastPair(firstOpcode: FirstOp, secondOpcode: SecondOp, SrcTy, MidTy, DstTy,
5553 DL: &Q.DL) == Instruction::BitCast)
5554 return Src;
5555 }
5556 }
5557
5558 // bitcast x -> x
5559 if (CastOpc == Instruction::BitCast)
5560 if (Op->getType() == Ty)
5561 return Op;
5562
5563 // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
5564 Value *Ptr, *X;
5565 if ((CastOpc == Instruction::PtrToInt || CastOpc == Instruction::PtrToAddr) &&
5566 match(V: Op,
5567 P: m_PtrAdd(PointerOp: m_Value(V&: Ptr),
5568 OffsetOp: m_Sub(L: m_Value(V&: X), R: m_PtrToIntOrAddr(Op: m_Deferred(V: Ptr))))) &&
5569 X->getType() == Ty && Ty == Q.DL.getIndexType(PtrTy: Ptr->getType()))
5570 return X;
5571
5572 return nullptr;
5573}
5574
5575Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5576 const SimplifyQuery &Q) {
5577 return ::simplifyCastInst(CastOpc, Op, Ty, Q, MaxRecurse: RecursionLimit);
5578}
5579
/// For the given destination element of a shuffle, peek through shuffles to
/// match a root vector source operand that contains that element in the same
/// vector lane (i.e., the same mask index), so we can eliminate the shuffle(s).
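/// e.g. (sketch): for
///   %s = shufflevector <4 x i32> %v, <4 x i32> poison,
///                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>
/// every destination element maps back to the same lane of %v, so %s folds to
/// %v.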
5583static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5584 int MaskVal, Value *RootVec,
5585 unsigned MaxRecurse) {
5586 if (!MaxRecurse--)
5587 return nullptr;
5588
5589 // Bail out if any mask value is undefined. That kind of shuffle may be
5590 // simplified further based on demanded bits or other folds.
5591 if (MaskVal == -1)
5592 return nullptr;
5593
5594 // The mask value chooses which source operand we need to look at next.
5595 int InVecNumElts = cast<FixedVectorType>(Val: Op0->getType())->getNumElements();
5596 int RootElt = MaskVal;
5597 Value *SourceOp = Op0;
5598 if (MaskVal >= InVecNumElts) {
5599 RootElt = MaskVal - InVecNumElts;
5600 SourceOp = Op1;
5601 }
5602
5603 // If the source operand is a shuffle itself, look through it to find the
5604 // matching root vector.
5605 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(Val: SourceOp)) {
5606 return foldIdentityShuffles(
5607 DestElt, Op0: SourceShuf->getOperand(i_nocapture: 0), Op1: SourceShuf->getOperand(i_nocapture: 1),
5608 MaskVal: SourceShuf->getMaskValue(Elt: RootElt), RootVec, MaxRecurse);
5609 }
5610
5611 // The source operand is not a shuffle. Initialize the root vector value for
5612 // this shuffle if that has not been done yet.
5613 if (!RootVec)
5614 RootVec = SourceOp;
5615
5616 // Give up as soon as a source operand does not match the existing root value.
5617 if (RootVec != SourceOp)
5618 return nullptr;
5619
5620 // The element must be coming from the same lane in the source vector
5621 // (although it may have crossed lanes in intermediate shuffles).
5622 if (RootElt != DestElt)
5623 return nullptr;
5624
5625 return RootVec;
5626}
5627
5628static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5629 ArrayRef<int> Mask, Type *RetTy,
5630 const SimplifyQuery &Q,
5631 unsigned MaxRecurse) {
5632 if (all_of(Range&: Mask, P: equal_to(Arg: PoisonMaskElem)))
5633 return PoisonValue::get(T: RetTy);
5634
5635 auto *InVecTy = cast<VectorType>(Val: Op0->getType());
5636 unsigned MaskNumElts = Mask.size();
5637 ElementCount InVecEltCount = InVecTy->getElementCount();
5638
5639 bool Scalable = InVecEltCount.isScalable();
5640
5641 SmallVector<int, 32> Indices;
5642 Indices.assign(in_start: Mask.begin(), in_end: Mask.end());
5643
  // Canonicalization: If the mask does not select elements from an input
  // vector, replace that input vector with poison.
5646 if (!Scalable) {
5647 bool MaskSelects0 = false, MaskSelects1 = false;
5648 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5649 for (unsigned i = 0; i != MaskNumElts; ++i) {
5650 if (Indices[i] == -1)
5651 continue;
5652 if ((unsigned)Indices[i] < InVecNumElts)
5653 MaskSelects0 = true;
5654 else
5655 MaskSelects1 = true;
5656 }
5657 if (!MaskSelects0)
5658 Op0 = PoisonValue::get(T: InVecTy);
5659 if (!MaskSelects1)
5660 Op1 = PoisonValue::get(T: InVecTy);
5661 }
5662
5663 auto *Op0Const = dyn_cast<Constant>(Val: Op0);
5664 auto *Op1Const = dyn_cast<Constant>(Val: Op1);
5665
5666 // If all operands are constant, constant fold the shuffle. This
5667 // transformation depends on the value of the mask which is not known at
5668 // compile time for scalable vectors
5669 if (Op0Const && Op1Const)
5670 return ConstantExpr::getShuffleVector(V1: Op0Const, V2: Op1Const, Mask);
5671
5672 // Canonicalization: if only one input vector is constant, it shall be the
5673 // second one. This transformation depends on the value of the mask which
5674 // is not known at compile time for scalable vectors
5675 if (!Scalable && Op0Const && !Op1Const) {
5676 std::swap(a&: Op0, b&: Op1);
5677 ShuffleVectorInst::commuteShuffleMask(Mask: Indices,
5678 InVecNumElts: InVecEltCount.getKnownMinValue());
5679 }
5680
5681 // A splat of an inserted scalar constant becomes a vector constant:
5682 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5683 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5684 // original mask constant.
5685 // NOTE: This transformation depends on the value of the mask which is not
5686 // known at compile time for scalable vectors
5687 Constant *C;
5688 ConstantInt *IndexC;
5689 if (!Scalable && match(V: Op0, P: m_InsertElt(Val: m_Value(), Elt: m_Constant(C),
5690 Idx: m_ConstantInt(CI&: IndexC)))) {
5691 // Match a splat shuffle mask of the insert index allowing undef elements.
5692 int InsertIndex = IndexC->getZExtValue();
5693 if (all_of(Range&: Indices, P: [InsertIndex](int MaskElt) {
5694 return MaskElt == InsertIndex || MaskElt == -1;
5695 })) {
5696 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5697
5698 // Shuffle mask poisons become poison constant result elements.
5699 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5700 for (unsigned i = 0; i != MaskNumElts; ++i)
5701 if (Indices[i] == -1)
5702 VecC[i] = PoisonValue::get(T: C->getType());
5703 return ConstantVector::get(V: VecC);
5704 }
5705 }
5706
  // A shuffle of a splat is always the splat itself. This is legal if the
  // shuffle's value type is the same as the input vectors' type.
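  // For example:
  //   %splat = shufflevector <4 x i32> %v, <4 x i32> poison,
  //                          <4 x i32> zeroinitializer
  //   %shuf  = shufflevector <4 x i32> %splat, <4 x i32> undef,
  //                          <4 x i32> <i32 2, i32 2, i32 3, i32 1>
  // simplifies to %splat: every element of %shuf is the splatted value anyway.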
  if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
    if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
        all_equal(OpShuf->getShuffleMask()))
      return Op0;

  // All remaining transformations depend on the value of the mask, which is
  // not known at compile time for scalable vectors.
  if (Scalable)
    return nullptr;

  // Don't fold a shuffle with undef mask elements. This may get folded in a
  // better way using demanded bits or other analysis.
  // TODO: Should we allow this?
  if (is_contained(Indices, -1))
    return nullptr;

  // Check if every element of this shuffle can be mapped back to the
  // corresponding element of a single root vector. If so, we don't need this
  // shuffle. This handles simple identity shuffles as well as chains of
  // shuffles that may widen/narrow and/or move elements across lanes and back.
  Value *RootVec = nullptr;
  for (unsigned i = 0; i != MaskNumElts; ++i) {
    // Note that recursion is limited for each vector element, so if any element
    // exceeds the limit, this will fail to simplify.
    RootVec =
        foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);

    // We can't replace a widening/narrowing shuffle with one of its operands.
    if (!RootVec || RootVec->getType() != RetTy)
      return nullptr;
  }
  return RootVec;
}

/// Given operands for a ShuffleVectorInst, fold the result or return null.
Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
                                       ArrayRef<int> Mask, Type *RetTy,
                                       const SimplifyQuery &Q) {
  return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
}

static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
                              const SimplifyQuery &Q) {
  if (auto *C = dyn_cast<Constant>(Op))
    return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
  return nullptr;
}

/// Given the operand for an FNeg, see if we can fold the result. If not, this
/// returns null.
static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
    return C;

  Value *X;
  // fneg (fneg X) ==> X
  if (match(Op, m_FNeg(m_Value(X))))
    return X;

  return nullptr;
}

Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
}

/// Try to propagate existing NaN values when possible. If not, replace the
/// constant or elements in the constant with a canonical NaN.
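/// For example, the f64 sNaN 0x7FF4000000000000 is quieted to
/// 0x7FFC000000000000 (quiet bit set, sign and payload preserved), while an
/// unknown or undef element is replaced with the canonical quiet NaN
/// 0x7FF8000000000000.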
static Constant *propagateNaN(Constant *In) {
  Type *Ty = In->getType();
  if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
    unsigned NumElts = VecTy->getNumElements();
    SmallVector<Constant *, 32> NewC(NumElts);
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *EltC = In->getAggregateElement(i);
      // Poison elements propagate. NaN elements propagate, except that
      // signaling NaNs are quieted. Replace unknown or undef elements with a
      // canonical NaN.
      if (EltC && isa<PoisonValue>(EltC))
        NewC[i] = EltC;
      else if (EltC && EltC->isNaN())
        NewC[i] = ConstantFP::get(
            EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
      else
        NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
    }
    return ConstantVector::get(NewC);
  }

  // It is not a fixed vector; if it is not a NaN either, return a canonical
  // NaN.
  if (!In->isNaN())
    return ConstantFP::getNaN(Ty);

  // If we know this is a NaN and it is a scalable vector, we must have a
  // splat on our hands. Grab that before splatting a QNaN constant.
  if (isa<ScalableVectorType>(Ty)) {
    auto *Splat = In->getSplatValue();
    assert(Splat && Splat->isNaN() &&
           "Found a scalable-vector NaN but not a splat");
    In = Splat;
  }

  // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
  // preserve the sign/payload.
  return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
}

/// Perform folds that are common to any floating-point operation. This implies
/// transforms based on poison/undef/NaN because the operation itself makes no
/// difference to the result.
static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  // Poison is independent of anything else. It always propagates from an
  // operand to a math result.
  if (any_of(Ops, IsaPred<PoisonValue>))
    return PoisonValue::get(Ops[0]->getType());

  for (Value *V : Ops) {
    bool IsNan = match(V, m_NaN());
    bool IsInf = match(V, m_Inf());
    bool IsUndef = Q.isUndefValue(V);

    // If this operation has 'nnan' or 'ninf' and at least one disallowed
    // operand (an undef operand can be chosen to be NaN/Inf), then the result
    // of this operation is poison.
    if (FMF.noNaNs() && (IsNan || IsUndef))
      return PoisonValue::get(V->getType());
    if (FMF.noInfs() && (IsInf || IsUndef))
      return PoisonValue::get(V->getType());

    if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
      // Undef does not propagate because undef means that all bits can take on
      // any value. If this is undef * NaN for example, then the result values
      // (at least the exponent bits) are limited. Assume the undef is a
      // canonical NaN and propagate that.
      if (IsUndef)
        return ConstantFP::getNaN(V->getType());
      if (IsNan)
        return propagateNaN(cast<Constant>(V));
    } else if (ExBehavior != fp::ebStrict) {
      if (IsNan)
        return propagateNaN(cast<Constant>(V));
    }
  }
  return nullptr;
}

/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
static Value *
simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned MaxRecurse,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  // fadd X, -0 ==> X
  // With strict/constrained FP, we have these possible edge cases that do
  // not simplify to Op0:
  // fadd SNaN, -0.0 --> QNaN
  // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
  if (canIgnoreSNaN(ExBehavior, FMF) &&
      (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
       FMF.noSignedZeros()))
    if (match(Op1, m_NegZeroFP()))
      return Op0;

  // fadd X, 0 ==> X, when we know X is not -0
  if (canIgnoreSNaN(ExBehavior, FMF))
    if (match(Op1, m_PosZeroFP()) &&
        (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
      return Op0;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  if (FMF.noNaNs()) {
    // With nnan: X + {+/-}Inf --> {+/-}Inf
    if (match(Op1, m_Inf()))
      return Op1;

    // With nnan: -X + X --> 0.0 (and commuted variant)
    // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
    // Negative zeros are allowed because we always end up with positive zero:
    // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
    // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
    // X =  0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
    // X =  0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
    if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
        match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
      return ConstantFP::getZero(Op0->getType());

    if (match(Op0, m_FNeg(m_Specific(Op1))) ||
        match(Op1, m_FNeg(m_Specific(Op0))))
      return ConstantFP::getZero(Op0->getType());
  }

  // (X - Y) + Y --> X
  // Y + (X - Y) --> X
  Value *X;
  if (FMF.noSignedZeros() && FMF.allowReassoc() &&
      (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
       match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
    return X;

  return nullptr;
}

/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
static Value *
simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned MaxRecurse,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  // fsub X, +0 ==> X
  if (canIgnoreSNaN(ExBehavior, FMF) &&
      (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
       FMF.noSignedZeros()))
    if (match(Op1, m_PosZeroFP()))
      return Op0;

  // fsub X, -0 ==> X, when we know X is not -0
  if (canIgnoreSNaN(ExBehavior, FMF))
    if (match(Op1, m_NegZeroFP()) &&
        (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
      return Op0;

  // fsub -0.0, (fsub -0.0, X) ==> X
  // fsub -0.0, (fneg X) ==> X
  Value *X;
  if (canIgnoreSNaN(ExBehavior, FMF))
    if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
      return X;

  // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
  // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
  if (canIgnoreSNaN(ExBehavior, FMF))
    if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
        (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
         match(Op1, m_FNeg(m_Value(X)))))
      return X;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  if (FMF.noNaNs()) {
    // fsub nnan x, x ==> 0.0
    if (Op0 == Op1)
      return Constant::getNullValue(Op0->getType());

    // With nnan: {+/-}Inf - X --> {+/-}Inf
    if (match(Op0, m_Inf()))
      return Op0;

    // With nnan: X - {+/-}Inf --> {-/+}Inf
    if (match(Op1, m_Inf()))
      return foldConstant(Instruction::FNeg, Op1, Q);
  }

  // Y - (Y - X) --> X
  // (X + Y) - Y --> X
  if (FMF.noSignedZeros() && FMF.allowReassoc() &&
      (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
       match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
    return X;

  return nullptr;
}

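/// Simplifications shared by a plain fmul and the multiply step of an
/// fma/fmuladd; only folds that do not depend on the intermediate rounding of
/// the product belong here.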
static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q, unsigned MaxRecurse,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // Canonicalize special constants as operand 1.
  if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
    std::swap(Op0, Op1);

  // X * 1.0 --> X
  if (match(Op1, m_FPOne()))
    return Op0;

  if (match(Op1, m_AnyZeroFP())) {
    // X * 0.0 --> 0.0 (with nnan and nsz)
    if (FMF.noNaNs() && FMF.noSignedZeros())
      return ConstantFP::getZero(Op0->getType());

    KnownFPClass Known = computeKnownFPClass(Op0, FMF, fcInf | fcNan, Q);
    if (Known.isKnownNever(fcInf | fcNan)) {
      // If nsz is set, return 0.0.
      if (FMF.noSignedZeros())
        return ConstantFP::getZero(Op0->getType());
      // +normal number * (-)0.0 --> (-)0.0
      if (Known.SignBit == false)
        return Op1;
      // -normal number * (-)0.0 --> -(-)0.0
      if (Known.SignBit == true)
        return foldConstant(Instruction::FNeg, Op1, Q);
    }
  }

  // sqrt(X) * sqrt(X) --> X, if we can:
  // 1. Remove the intermediate rounding (reassociate).
  // 2. Ignore non-zero negative numbers because sqrt would produce NaN.
  // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
  Value *X;
  if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
      FMF.noNaNs() && FMF.noSignedZeros())
    return X;

  return nullptr;
}

/// Given the operands for an FMul, see if we can fold the result.
static Value *
simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned MaxRecurse,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
      return C;

  // Now apply simplifications that do not require rounding.
  return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
}

Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
                             const SimplifyQuery &Q,
                             fp::ExceptionBehavior ExBehavior,
                             RoundingMode Rounding) {
  return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                           Rounding);
}

static Value *
simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // X / 1.0 -> X
  if (match(Op1, m_FPOne()))
    return Op0;

  // 0 / X -> 0
  // Requires that NaNs are off (X could be zero) and signed zeroes are
  // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
    return ConstantFP::getZero(Op0->getType());

  if (FMF.noNaNs()) {
    // X / X -> 1.0 is legal when NaNs are ignored.
    // We can ignore infinities because INF/INF is NaN.
    if (Op0 == Op1)
      return ConstantFP::get(Op0->getType(), 1.0);

    // (X * Y) / Y --> X if we can reassociate to the above form.
    Value *X;
    if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
      return X;

    // -X / X -> -1.0 and
    // X / -X -> -1.0 are legal when NaNs are ignored.
    // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
    if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
        match(Op1, m_FNegNSZ(m_Specific(Op0))))
      return ConstantFP::get(Op0->getType(), -1.0);

    // nnan ninf X / [-]0.0 -> poison
    if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
      return PoisonValue::get(Op1->getType());
  }

  return nullptr;
}

Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

static Value *
simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // Unlike fdiv, the result of frem always matches the sign of the dividend.
  // The constant match may include undef elements in a vector, so return a
  // full zero constant as the result.
  if (FMF.noNaNs()) {
    // +0 % X -> 0
    if (match(Op0, m_PosZeroFP()))
      return ConstantFP::getZero(Op0->getType());
    // -0 % X -> -0
    if (match(Op0, m_NegZeroFP()))
      return ConstantFP::getNegativeZero(Op0->getType());
  }

  return nullptr;
}

Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

//=== Helper functions for higher up the class hierarchy.

/// Given the operand for a UnaryOperator, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
                           unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FNeg:
    return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

/// Given the operand for a UnaryOperator, see if we can fold the result.
/// If not, this returns null.
/// Try to use FastMathFlags when folding the result.
static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
                             const FastMathFlags &FMF, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FNeg:
    return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
  default:
    return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
  }
}

Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
  return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
}

Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
                          const SimplifyQuery &Q) {
  return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
}

/// Given operands for a BinaryOperator, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const SimplifyQuery &Q, unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::Add:
    return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
                           MaxRecurse);
  case Instruction::Sub:
    return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
                           MaxRecurse);
  case Instruction::Mul:
    return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
                           MaxRecurse);
  case Instruction::SDiv:
    return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
  case Instruction::UDiv:
    return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
  case Instruction::SRem:
    return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::URem:
    return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Shl:
    return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
                           MaxRecurse);
  case Instruction::LShr:
    return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
  case Instruction::AShr:
    return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
  case Instruction::And:
    return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Or:
    return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Xor:
    return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::FAdd:
    return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FSub:
    return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FMul:
    return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FDiv:
    return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FRem:
    return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

/// Given operands for a BinaryOperator, see if we can fold the result.
/// If not, this returns null.
/// Try to use FastMathFlags when folding the result.
static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const FastMathFlags &FMF, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FAdd:
    return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FSub:
    return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FMul:
    return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FDiv:
    return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
  default:
    return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
  }
}

Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           const SimplifyQuery &Q) {
  return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
}

Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           FastMathFlags FMF, const SimplifyQuery &Q) {
  return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
}

/// Given operands for a CmpInst, see if we can fold the result.
static Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (CmpInst::isIntPredicate(Predicate))
    return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
  return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
}

Value *llvm::simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
                             const SimplifyQuery &Q) {
  return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
}

static bool isIdempotent(Intrinsic::ID ID) {
  switch (ID) {
  default:
    return false;

  // Unary idempotent: f(f(x)) = f(x)
  case Intrinsic::fabs:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::canonicalize:
  case Intrinsic::arithmetic_fence:
    return true;
  }
}

/// Return true if the intrinsic rounds a floating-point value to an integral
/// floating-point value (not an integer type).
static bool removesFPFraction(Intrinsic::ID ID) {
  switch (ID) {
  default:
    return false;

  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
    return true;
  }
}

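/// Try to fold an llvm.load.relative call with constant operands: the
/// intrinsic computes Ptr + *(i32 *)(Ptr + Offset), so if the loaded i32 is
/// known to be the constant "ptrtoint(Target) - ptrtoint(Ptr)", the call
/// simplifies to Target.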
static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
                                   const DataLayout &DL) {
  GlobalValue *PtrSym;
  APInt PtrOffset;
  if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
    return nullptr;

  Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());

  auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
  if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
    return nullptr;

  APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
      DL.getIndexTypeSizeInBits(Ptr->getType()));
  if (OffsetInt.srem(4) != 0)
    return nullptr;

  Constant *Loaded =
      ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
  if (!Loaded)
    return nullptr;

  auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
  if (!LoadedCE)
    return nullptr;

  if (LoadedCE->getOpcode() == Instruction::Trunc) {
    LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
    if (!LoadedCE)
      return nullptr;
  }

  if (LoadedCE->getOpcode() != Instruction::Sub)
    return nullptr;

  auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
  if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
    return nullptr;
  auto *LoadedLHSPtr = LoadedLHS->getOperand(0);

  Constant *LoadedRHS = LoadedCE->getOperand(1);
  GlobalValue *LoadedRHSSym;
  APInt LoadedRHSOffset;
  if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
                                  DL) ||
      PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
    return nullptr;

  return LoadedLHSPtr;
}

// TODO: Need to pass in FastMathFlags
static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                            bool IsStrict) {
  // ldexp(poison, x) -> poison
  // ldexp(x, poison) -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return Op0;

  // ldexp(undef, x) -> nan
  if (Q.isUndefValue(Op0))
    return ConstantFP::getNaN(Op0->getType());

  if (!IsStrict) {
    // TODO: Could insert a canonicalize for strict

    // ldexp(x, undef) -> x
    if (Q.isUndefValue(Op1))
      return Op0;
  }

  const APFloat *C = nullptr;
  match(Op0, PatternMatch::m_APFloat(C));

  // These cases should be safe, even with strictfp.
  // ldexp(0.0, x) -> 0.0
  // ldexp(-0.0, x) -> -0.0
  // ldexp(inf, x) -> inf
  // ldexp(-inf, x) -> -inf
  if (C && (C->isZero() || C->isInfinity()))
    return Op0;

  // The remaining folds drop a canonicalization; we could perform them if we
  // knew how to ignore denormal flushes and target handling of NaN payload
  // bits.
  if (IsStrict)
    return nullptr;

  // TODO: Could quiet this with strictfp if the exception mode isn't strict.
  if (C && C->isNaN())
    return ConstantFP::get(Op0->getType(), C->makeQuiet());

  // ldexp(x, 0) -> x

  // TODO: Could fold this if we know the exception mode isn't
  // strict, we know the denormal mode and other target modes.
  if (match(Op1, PatternMatch::m_ZeroInt()))
    return Op0;

  return nullptr;
}

static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
                                     const SimplifyQuery &Q,
                                     const CallBase *Call) {
  // Idempotent functions return the same result when called repeatedly.
  Intrinsic::ID IID = F->getIntrinsicID();
  if (isIdempotent(IID))
    if (auto *II = dyn_cast<IntrinsicInst>(Op0))
      if (II->getIntrinsicID() == IID)
        return II;

  if (removesFPFraction(IID)) {
    // Converting from int or calling a rounding function always results in a
    // finite integral number or infinity. For those inputs, rounding functions
    // always return the same value, so the (2nd) rounding is eliminated. Ex:
    // floor (sitofp x) -> sitofp x
    // round (ceil x) -> ceil x
    auto *II = dyn_cast<IntrinsicInst>(Op0);
    if ((II && removesFPFraction(II->getIntrinsicID())) ||
        match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
      return Op0;
  }

  Value *X;
  switch (IID) {
  case Intrinsic::fabs: {
    KnownFPClass KnownClass = computeKnownFPClass(Op0, fcAllFlags, Q);
    if (KnownClass.SignBit == false)
      return Op0;

    if (KnownClass.cannotBeOrderedLessThanZero() &&
        KnownClass.isKnownNeverNaN() && Call->hasNoSignedZeros())
      return Op0;

    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (match(Op0, m_BSwap(m_Value(X))))
      return X;
    break;
  case Intrinsic::bitreverse:
    // bitreverse(bitreverse(x)) -> x
    if (match(Op0, m_BitReverse(m_Value(X))))
      return X;
    break;
  case Intrinsic::ctpop: {
    // ctpop(X) -> 1 iff X is a non-zero power of 2.
    if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI, Q.DT))
      return ConstantInt::get(Op0->getType(), 1);
    // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
    // ctpop(and X, 1) --> and X, 1
    unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
    if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
                          Q))
      return Op0;
    break;
  }
  case Intrinsic::exp:
    // exp(log(x)) -> x
    if (Call->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
      return X;
    break;
  case Intrinsic::exp2:
    // exp2(log2(x)) -> x
    if (Call->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
      return X;
    break;
  case Intrinsic::exp10:
    // exp10(log10(x)) -> x
    if (Call->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
      return X;
    break;
  case Intrinsic::log:
    // log(exp(x)) -> x
    if (Call->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
      return X;
    break;
  case Intrinsic::log2:
    // log2(exp2(x)) -> x
    if (Call->hasAllowReassoc() &&
        (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
         match(Op0,
               m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
      return X;
    break;
  case Intrinsic::log10:
    // log10(pow(10.0, x)) -> x
    // log10(exp10(x)) -> x
    if (Call->hasAllowReassoc() &&
        (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
         match(Op0,
               m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
      return X;
    break;
  case Intrinsic::vector_reverse:
    // vector.reverse(vector.reverse(x)) -> x
    if (match(Op0, m_VecReverse(m_Value(X))))
      return X;
    // vector.reverse(splat(X)) -> splat(X)
    if (isSplatValue(Op0))
      return Op0;
    break;
  default:
    break;
  }

  return nullptr;
}

/// Given a min/max intrinsic, see if it can be removed based on having an
/// operand that is another min/max intrinsic with shared operand(s). The caller
/// is expected to swap the operand arguments to handle commutation.
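/// For example:
///   umax(umax(X, Y), X) --> umax(X, Y)
///   umax(umin(X, Y), X) --> X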
static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
  Value *X, *Y;
  if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
    return nullptr;

  auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
  if (!MM0)
    return nullptr;
  Intrinsic::ID IID0 = MM0->getIntrinsicID();

  if (Op1 == X || Op1 == Y ||
      match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
    // max (max X, Y), X --> max X, Y
    if (IID0 == IID)
      return MM0;
    // max (min X, Y), X --> X
    if (IID0 == getInverseMinMaxIntrinsic(IID))
      return Op1;
  }
  return nullptr;
}

/// Given a min/max intrinsic, see if it can be removed based on having an
/// operand that is another min/max intrinsic with shared operand(s). The caller
/// is expected to swap the operand arguments to handle commutation.
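/// For example:
///   maxnum(maxnum(X, Y), Y) --> maxnum(X, Y)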
static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
                                         Value *Op1) {
  auto IsMinimumMaximumIntrinsic = [](Intrinsic::ID ID) {
    switch (ID) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
    case Intrinsic::maximumnum:
    case Intrinsic::minimumnum:
      return true;
    default:
      return false;
    }
  };

  assert(IsMinimumMaximumIntrinsic(IID) && "Unsupported intrinsic");

  auto *M0 = dyn_cast<IntrinsicInst>(Op0);
  // If Op0 is not the same intrinsic as IID, do not process it further. This
  // differs from the integer min/max handling: we do not process cases like
  // max(min(X,Y), min(X,Y)) => min(X,Y), but they can be handled by GVN.
  if (!M0 || M0->getIntrinsicID() != IID)
    return nullptr;
  Value *X0 = M0->getOperand(0);
  Value *Y0 = M0->getOperand(1);
  // Simple case: m(m(X,Y), X) => m(X, Y)
  //              m(m(X,Y), Y) => m(X, Y)
  // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
  // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
  // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
  // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
  if (X0 == Op1 || Y0 == Op1)
    return M0;

  auto *M1 = dyn_cast<IntrinsicInst>(Op1);
  if (!M1 || !IsMinimumMaximumIntrinsic(M1->getIntrinsicID()))
    return nullptr;
  Value *X1 = M1->getOperand(0);
  Value *Y1 = M1->getOperand(1);
  Intrinsic::ID IID1 = M1->getIntrinsicID();
  // We have the case m(m(X,Y), m'(X,Y)), taking into account that m' is
  // commutative. If m' is m or the inverse of m, then
  // m(m(X,Y), m'(X,Y)) == m(X,Y).
  // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
  // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
  // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
  // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
  if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
    if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
      return M0;

  return nullptr;
}

enum class MinMaxOptResult {
  CannotOptimize = 0,
  UseNewConstVal = 1,
  UseOtherVal = 2,
  // For undef/poison, we can choose to either propagate undef/poison or
  // use the LHS value, depending on what will allow more optimization.
  UseEither = 3
};
// Get the optimized value for a min/max intrinsic with a single constant
// input (either undef or a scalar ConstantFP). The result may indicate to
// use the non-const LHS value, to use a new constant value instead (with NaNs
// quieted), or to choose either option in the case of undef/poison.
static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst,
                                           const Intrinsic::ID IID,
                                           const CallBase *Call,
                                           Constant **OutNewConstVal) {
  assert(OutNewConstVal != nullptr);

  bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
  bool ReturnsOtherForAllNaNs =
      IID == Intrinsic::minimumnum || IID == Intrinsic::maximumnum;
  bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum ||
               IID == Intrinsic::minimumnum;

  // min/max(x, undef/poison) -> either x or the undef/poison value
  if (isa<UndefValue>(RHSConst)) {
    *OutNewConstVal = const_cast<Constant *>(RHSConst);
    return MinMaxOptResult::UseEither;
  }

  const ConstantFP *CFP = dyn_cast<ConstantFP>(RHSConst);
  if (!CFP)
    return MinMaxOptResult::CannotOptimize;
  APFloat CAPF = CFP->getValueAPF();

  // minnum(x, qnan) -> x
  // maxnum(x, qnan) -> x
  // minimum(X, nan) -> qnan
  // maximum(X, nan) -> qnan
  // minimumnum(X, nan) -> x
  // maximumnum(X, nan) -> x
  if (CAPF.isNaN()) {
    if (PropagateNaN) {
      *OutNewConstVal = ConstantFP::get(CFP->getType(), CAPF.makeQuiet());
      return MinMaxOptResult::UseNewConstVal;
    } else if (ReturnsOtherForAllNaNs || !CAPF.isSignaling()) {
      return MinMaxOptResult::UseOtherVal;
    }
    return MinMaxOptResult::CannotOptimize;
  }

  if (CAPF.isInfinity() || (Call && Call->hasNoInfs() && CAPF.isLargest())) {
    // minimum(X, -inf) -> -inf if nnan
    // maximum(X, +inf) -> +inf if nnan
    // minimumnum(X, -inf) -> -inf
    // maximumnum(X, +inf) -> +inf
    if (CAPF.isNegative() == IsMin &&
        (ReturnsOtherForAllNaNs || (Call && Call->hasNoNaNs()))) {
      *OutNewConstVal = const_cast<Constant *>(RHSConst);
      return MinMaxOptResult::UseNewConstVal;
    }

    // minnum(X, +inf) -> X if nnan
    // maxnum(X, -inf) -> X if nnan
    // minimum(X, +inf) -> X (ignoring quieting of sNaNs)
    // maximum(X, -inf) -> X (ignoring quieting of sNaNs)
    // minimumnum(X, +inf) -> X if nnan
    // maximumnum(X, -inf) -> X if nnan
    if (CAPF.isNegative() != IsMin &&
        (PropagateNaN || (Call && Call->hasNoNaNs())))
      return MinMaxOptResult::UseOtherVal;
  }
  return MinMaxOptResult::CannotOptimize;
}

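/// Simplify an AArch64 SVE integer reduction over a (predicate, vector)
/// operand pair when the predicate is a known constant (all-false or all-true)
/// or the vector is a constant splat of the reduction's neutral value.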
static Value *simplifySVEIntReduction(Intrinsic::ID IID, Type *ReturnType,
                                      Value *Op0, Value *Op1) {
  Constant *C0 = dyn_cast<Constant>(Op0);
  Constant *C1 = dyn_cast<Constant>(Op1);
  unsigned Width = ReturnType->getPrimitiveSizeInBits();

  // All false predicate or reduction of neutral values ==> neutral result.
  switch (IID) {
  case Intrinsic::aarch64_sve_eorv:
  case Intrinsic::aarch64_sve_orv:
  case Intrinsic::aarch64_sve_saddv:
  case Intrinsic::aarch64_sve_uaddv:
  case Intrinsic::aarch64_sve_umaxv:
    if ((C0 && C0->isNullValue()) || (C1 && C1->isNullValue()))
      return ConstantInt::get(ReturnType, 0);
    break;
  case Intrinsic::aarch64_sve_andv:
  case Intrinsic::aarch64_sve_uminv:
    if ((C0 && C0->isNullValue()) || (C1 && C1->isAllOnesValue()))
      return ConstantInt::get(ReturnType, APInt::getMaxValue(Width));
    break;
  case Intrinsic::aarch64_sve_smaxv:
    if ((C0 && C0->isNullValue()) || (C1 && C1->isMinSignedValue()))
      return ConstantInt::get(ReturnType, APInt::getSignedMinValue(Width));
    break;
  case Intrinsic::aarch64_sve_sminv:
    if ((C0 && C0->isNullValue()) || (C1 && C1->isMaxSignedValue()))
      return ConstantInt::get(ReturnType, APInt::getSignedMaxValue(Width));
    break;
  }

  switch (IID) {
  case Intrinsic::aarch64_sve_andv:
  case Intrinsic::aarch64_sve_orv:
  case Intrinsic::aarch64_sve_smaxv:
  case Intrinsic::aarch64_sve_sminv:
  case Intrinsic::aarch64_sve_umaxv:
  case Intrinsic::aarch64_sve_uminv:
    // sve_reduce_##(all, splat(X)) ==> X
    if (C0 && C0->isAllOnesValue()) {
      if (Value *SplatVal = getSplatValue(Op1)) {
        assert(SplatVal->getType() == ReturnType && "Unexpected result type!");
        return SplatVal;
      }
    }
    break;
  case Intrinsic::aarch64_sve_eorv:
    // sve_reduce_xor(all, splat(X)) ==> 0
    if (C0 && C0->isAllOnesValue())
      return ConstantInt::get(ReturnType, 0);
    break;
  }

  return nullptr;
}

Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
                                     Value *Op0, Value *Op1,
                                     const SimplifyQuery &Q,
                                     const CallBase *Call) {
  unsigned BitWidth = ReturnType->getScalarSizeInBits();
  switch (IID) {
  case Intrinsic::get_active_lane_mask: {
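    // A lane of get.active.lane.mask(base, n) is true iff base + lane < n, so
    // a trip count of zero leaves no active lanes.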
    if (match(Op1, m_Zero()))
      return ConstantInt::getFalse(ReturnType);

    const Function *F = Call->getFunction();
    auto *ScalableTy = dyn_cast<ScalableVectorType>(ReturnType);
    Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
    if (ScalableTy && Attr.isValid()) {
      std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
      if (!VScaleMax)
        break;
      uint64_t MaxPossibleMaskElements =
          (uint64_t)ScalableTy->getMinNumElements() * (*VScaleMax);

      const APInt *Op1Val;
      if (match(Op0, m_Zero()) && match(Op1, m_APInt(Op1Val)) &&
          Op1Val->uge(MaxPossibleMaskElements))
        return ConstantInt::getAllOnesValue(ReturnType);
    }
    break;
  }
  case Intrinsic::abs:
    // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
    // It is always ok to pick the earlier abs. We'll just lose nsw if it's
    // only on the outer abs.
    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
      return Op0;
    break;

  case Intrinsic::cttz: {
    Value *X;
    if (match(Op0, m_Shl(m_One(), m_Value(X))))
      return X;
    break;
  }
  case Intrinsic::ctlz: {
    Value *X;
    if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
      return X;
    if (match(Op0, m_AShr(m_Negative(), m_Value())))
      return Constant::getNullValue(ReturnType);
    break;
  }
  case Intrinsic::ptrmask: {
    // NOTE: We can't apply these simplifications based on the value of Op1
    // because we need to preserve provenance.
    if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
      return Constant::getNullValue(Op0->getType());

    assert(Op1->getType()->getScalarSizeInBits() ==
               Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
           "Invalid mask width");
    // If the index width (mask size) is less than the pointer size, then the
    // mask is 1-extended.
    if (match(Op1, m_PtrToIntOrAddr(m_Specific(Op0))))
      return Op0;

    // NOTE: We may have attributes associated with the return value of the
    // llvm.ptrmask intrinsic that will be lost when we just return the
    // operand. We should try to preserve them.
    if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
      return Op0;

    Constant *C;
    if (match(Op1, m_ImmConstant(C))) {
      KnownBits PtrKnown = computeKnownBits(Op0, Q);
      // See if we are only masking off bits we know are already zero due to
      // alignment.
      APInt IrrelevantPtrBits =
          PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
      C = ConstantFoldBinaryOpOperands(
          Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
          Q.DL);
      if (C != nullptr && C->isAllOnesValue())
        return Op0;
    }
    break;
  }
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin: {
    // If the arguments are the same, this is a no-op.
    if (Op0 == Op1)
      return Op0;

    // Canonicalize immediate constant operand as Op1.
    if (match(Op0, m_ImmConstant()))
      std::swap(Op0, Op1);

    // Assume undef is the limit value.
    if (Q.isUndefValue(Op1))
      return ConstantInt::get(
          ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));

    const APInt *C;
    if (match(Op1, m_APIntAllowPoison(C))) {
      // Clamp to limit value. For example:
      // umax(i8 %x, i8 255) --> 255
      if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
        return ConstantInt::get(ReturnType, *C);

      // If the constant op is the opposite of the limit value, the other must
      // be larger/smaller or equal. For example:
      // umin(i8 %x, i8 255) --> %x
      if (*C == MinMaxIntrinsic::getSaturationPoint(
                    getInverseMinMaxIntrinsic(IID), BitWidth))
        return Op0;

      // Remove nested call if constant operands allow it. Example:
      // max (max X, 7), 5 -> max X, 7
      auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
      if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
        // TODO: loosen undef/splat restrictions for vector constants.
        Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
        const APInt *InnerC;
        if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
            ICmpInst::compare(*InnerC, *C,
                              ICmpInst::getNonStrictPredicate(
                                  MinMaxIntrinsic::getPredicate(IID))))
          return Op0;
      }
    }

    if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
      return V;
    if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
      return V;

    ICmpInst::Predicate Pred =
        ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
    if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
      return Op0;
    if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
      return Op1;

    break;
  }
  case Intrinsic::scmp:
  case Intrinsic::ucmp: {
    // Fold to a constant if the relationship between operands can be
    // established with certainty.
    if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
      return Constant::getNullValue(ReturnType);

    ICmpInst::Predicate PredGT =
        IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
      return ConstantInt::get(ReturnType, 1);

    ICmpInst::Predicate PredLT =
        IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
      return ConstantInt::getSigned(ReturnType, -1);

    break;
  }
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // X - X -> { 0, false }
    // X - undef -> { 0, false }
    // undef - X -> { 0, false }
    if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
    break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
    // X + undef -> { -1, false }
    // undef + x -> { -1, false }
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
      return ConstantStruct::get(
          cast<StructType>(ReturnType),
          {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
           Constant::getNullValue(ReturnType->getStructElementType(1))});
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // 0 * X -> { 0, false }
    // X * 0 -> { 0, false }
    if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);
    // undef * X -> { 0, false }
    // X * undef -> { 0, false }
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
    break;
  case Intrinsic::uadd_sat:
    // sat(MAX + X) -> MAX
    // sat(X + MAX) -> MAX
    if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
      return Constant::getAllOnesValue(ReturnType);
    [[fallthrough]];
  case Intrinsic::sadd_sat:
    // sat(X + undef) -> -1
    // sat(undef + X) -> -1
    // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
    // For signed: Assume undef is ~X, in which case X + ~X = -1.
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getAllOnesValue(ReturnType);

    // X + 0 -> X
    if (match(Op1, m_Zero()))
      return Op0;
    // 0 + X -> X
    if (match(Op0, m_Zero()))
      return Op1;
    break;
  case Intrinsic::usub_sat:
    // sat(0 - X) -> 0, sat(X - MAX) -> 0
    if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
      return Constant::getNullValue(ReturnType);
    [[fallthrough]];
  case Intrinsic::ssub_sat:
    // X - X -> 0, X - undef -> 0, undef - X -> 0
    if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
    // X - 0 -> X
    if (match(Op1, m_Zero()))
      return Op0;
    break;
  case Intrinsic::load_relative:
    if (auto *C0 = dyn_cast<Constant>(Op0))
      if (auto *C1 = dyn_cast<Constant>(Op1))
        return simplifyRelativeLoad(C0, C1, Q.DL);
    break;
  case Intrinsic::powi:
    if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ConstantFP::get(Op0->getType(), 1.0);
      // powi(x, 1) -> x
      if (Power->isOne())
        return Op0;
    }
    break;
  case Intrinsic::ldexp:
    return simplifyLdexp(Op0, Op1, Q, false);
  case Intrinsic::copysign:
    // copysign X, X --> X
    if (Op0 == Op1)
      return Op0;
    // copysign -X, X --> X
    // copysign X, -X --> -X
    if (match(Op0, m_FNeg(m_Specific(Op1))) ||
        match(Op1, m_FNeg(m_Specific(Op0))))
      return Op1;
    break;
  case Intrinsic::is_fpclass: {
    uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
    // If all tests are made, it doesn't matter what the value is.
    if ((Mask & fcAllFlags) == fcAllFlags)
      return ConstantInt::get(ReturnType, true);
    if ((Mask & fcAllFlags) == 0)
      return ConstantInt::get(ReturnType, false);
    if (Q.isUndefValue(Op0))
      return UndefValue::get(ReturnType);
    break;
  }
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
  case Intrinsic::maximumnum:
  case Intrinsic::minimumnum: {
    // In some cases here, we deviate from exact IEEE-754 semantics to enable
    // optimizations (as allowed by the LLVM IR spec) by returning one of the
    // arguments unmodified instead of inserting an llvm.canonicalize to
    // transform input sNaNs into qNaNs.

    // If the arguments are the same, this is a no-op (ignoring NaN quieting).
    if (Op0 == Op1)
      return Op0;

    // Canonicalize constant operand as Op1.
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);

    if (Constant *C = dyn_cast<Constant>(Op1)) {
      MinMaxOptResult OptResult = MinMaxOptResult::CannotOptimize;
      Constant *NewConst = nullptr;

      if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) {
        ElementCount ElemCount = VTy->getElementCount();

        if (Constant *SplatVal = C->getSplatValue()) {
          // Handle splat vectors (including scalable vectors).
          OptResult = OptimizeConstMinMax(SplatVal, IID, Call, &NewConst);
          if (OptResult == MinMaxOptResult::UseNewConstVal)
            NewConst = ConstantVector::getSplat(ElemCount, NewConst);

        } else if (ElemCount.isFixed()) {
          // Storage to build up the new constant return value (with NaNs
          // quieted).
          SmallVector<Constant *, 16> NewC(ElemCount.getFixedValue());

          // Check elementwise whether we can optimize to either a constant
          // value or return the LHS value. We cannot mix and match LHS +
          // constant elements, as this would require inserting a new
          // VectorShuffle instruction, which is not allowed in simplifyBinOp.
          OptResult = MinMaxOptResult::UseEither;
          for (unsigned i = 0; i != ElemCount.getFixedValue(); ++i) {
            auto *Elt = C->getAggregateElement(i);
            if (!Elt) {
              OptResult = MinMaxOptResult::CannotOptimize;
              break;
            }
            auto ElemResult = OptimizeConstMinMax(Elt, IID, Call, &NewConst);
            if (ElemResult == MinMaxOptResult::CannotOptimize ||
                (ElemResult != OptResult &&
                 OptResult != MinMaxOptResult::UseEither &&
                 ElemResult != MinMaxOptResult::UseEither)) {
              OptResult = MinMaxOptResult::CannotOptimize;
              break;
            }
            NewC[i] = NewConst;
            if (ElemResult != MinMaxOptResult::UseEither)
              OptResult = ElemResult;
          }
          if (OptResult == MinMaxOptResult::UseNewConstVal)
            NewConst = ConstantVector::get(NewC);
        }
      } else {
        // Handle scalar inputs.
        OptResult = OptimizeConstMinMax(C, IID, Call, &NewConst);
      }

      if (OptResult == MinMaxOptResult::UseOtherVal ||
          OptResult == MinMaxOptResult::UseEither)
        return Op0; // Return the other arg (ignoring NaN quieting).
      else if (OptResult == MinMaxOptResult::UseNewConstVal)
        return NewConst;
    }

    // Min/max of the same operation with common operand:
    // m(m(X, Y), X) --> m(X, Y) (4 commuted variants)
    if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
      return V;
    if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
      return V;

    break;
  }
7131 case Intrinsic::vector_extract: {
7132 // (extract_vector (insert_vector _, X, 0), 0) -> X
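    // For example (illustrative IR, type-mangling suffixes elided):
    //   %v = call <4 x i32> @llvm.vector.insert(<4 x i32> poison,
    //                                           <4 x i32> %x, i64 0)
    //   %r = call <4 x i32> @llvm.vector.extract(<4 x i32> %v, i64 0)
    // folds %r to %x, provided the extracted type matches %x's type.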
    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
    Value *X = nullptr;
    if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
                                                         m_Zero())) &&
        IdxN == 0 && X->getType() == ReturnType)
      return X;

    break;
  }

  case Intrinsic::aarch64_sve_andv:
  case Intrinsic::aarch64_sve_eorv:
  case Intrinsic::aarch64_sve_orv:
  case Intrinsic::aarch64_sve_saddv:
  case Intrinsic::aarch64_sve_smaxv:
  case Intrinsic::aarch64_sve_sminv:
  case Intrinsic::aarch64_sve_uaddv:
  case Intrinsic::aarch64_sve_umaxv:
  case Intrinsic::aarch64_sve_uminv:
    return simplifySVEIntReduction(IID, ReturnType, Op0, Op1);
  default:
    break;
  }

  return nullptr;
}

static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
                                ArrayRef<Value *> Args,
                                const SimplifyQuery &Q) {
  // Operand bundles should not be in Args.
  assert(Call->arg_size() == Args.size());
  unsigned NumOperands = Args.size();
  Function *F = cast<Function>(Callee);
  Intrinsic::ID IID = F->getIntrinsicID();

  if (IID != Intrinsic::not_intrinsic && intrinsicPropagatesPoison(IID) &&
      any_of(Args, IsaPred<PoisonValue>))
    return PoisonValue::get(F->getReturnType());

  // Most of the intrinsics with no operands have some kind of side effect.
  // Don't simplify.
  if (!NumOperands) {
    switch (IID) {
    case Intrinsic::vscale: {
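      // If the enclosing function carries a vscale_range attribute whose min
      // and max agree (e.g. vscale_range(2,2)), the range below collapses to
      // a single value and vscale folds to that constant.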
      Type *RetTy = F->getReturnType();
      ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
      if (const APInt *C = CR.getSingleElement())
        return ConstantInt::get(RetTy, C->getZExtValue());
      return nullptr;
    }
    default:
      return nullptr;
    }
  }

  if (NumOperands == 1)
    return simplifyUnaryIntrinsic(F, Args[0], Q, Call);

  if (NumOperands == 2)
    return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
                                   Call);

  // Handle intrinsics with 3 or more arguments.
  switch (IID) {
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather: {
    Value *MaskArg = Args[1];
    Value *PassthruArg = Args[2];
    // If the mask is all zeros or undef, the "passthru" argument is the result.
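    // For example, a masked.load whose mask is zeroinitializer reads no lanes
    // at all, so it simplifies to its passthru operand without touching
    // memory.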
    if (maskIsAllZeroOrUndef(MaskArg))
      return PassthruArg;
    return nullptr;
  }
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];

    // If both operands are undef, the result is undef.
    if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
      return UndefValue::get(F->getReturnType());

    // If shift amount is undef, assume it is zero.
    if (Q.isUndefValue(ShAmtArg))
      return Args[IID == Intrinsic::fshl ? 0 : 1];

    const APInt *ShAmtC;
    if (match(ShAmtArg, m_APInt(ShAmtC))) {
      // If there's effectively no shift, return the 1st arg or 2nd arg.
      APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
      if (ShAmtC->urem(BitWidth).isZero())
        return Args[IID == Intrinsic::fshl ? 0 : 1];
    }
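    // For example, fshl i32 %x, i32 %y, i32 32 returns %x: the shift amount
    // is interpreted modulo the bit width, so 32 is effectively no shift.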

    // Rotating zero by anything is zero.
    if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
      return ConstantInt::getNullValue(F->getReturnType());

    // Rotating -1 by anything is -1.
    if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
      return ConstantInt::getAllOnesValue(F->getReturnType());

    return nullptr;
  }
  case Intrinsic::experimental_constrained_fma: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
                                *FPI->getRoundingMode()))
      return V;
    return nullptr;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd: {
    if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
                                RoundingMode::NearestTiesToEven))
      return V;
    return nullptr;
  }
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat: {
    Value *Op0 = Args[0];
    Value *Op1 = Args[1];
    Value *Op2 = Args[2];
    Type *ReturnType = F->getReturnType();

    // Canonicalize constant operand as Op1 (ConstantFolding handles the case
    // when both Op0 and Op1 are constant so we do not care about that special
    // case here).
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);

    // X * 0 -> 0
    if (match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);

    // X * undef -> 0
    if (Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);

    // X * (1 << Scale) -> X
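    // For example, at a scale of 16 the fixed-point encoding of 1.0 is
    // 1 << 16 = 65536, so multiplying by the constant 65536 is an identity.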
    APInt ScaledOne =
        APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
                            cast<ConstantInt>(Op2)->getZExtValue());
    if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
      return Op0;

    return nullptr;
  }
  case Intrinsic::vector_insert: {
    Value *Vec = Args[0];
    Value *SubVec = Args[1];
    Value *Idx = Args[2];
    Type *ReturnType = F->getReturnType();

    // (insert_vector Y, (extract_vector X, 0), 0) -> X
    // where: Y is X, or Y is undef
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
    Value *X = nullptr;
    if (match(SubVec,
              m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
        X->getType() == ReturnType)
      return X;

    return nullptr;
  }
  case Intrinsic::vector_splice_left:
  case Intrinsic::vector_splice_right: {
    Value *Offset = Args[2];
    auto *Ty = cast<VectorType>(F->getReturnType());
    if (Q.isUndefValue(Offset))
      return PoisonValue::get(Ty);

    unsigned BitWidth = Offset->getType()->getScalarSizeInBits();
    ConstantRange NumElts(
        APInt(BitWidth, Ty->getElementCount().getKnownMinValue()));
    if (Ty->isScalableTy())
      NumElts =
          NumElts.multiply(getVScaleRange(Call->getFunction(), BitWidth));

    // If we know Offset > NumElts, simplify to poison.
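    // For example, with a fixed <4 x i32> result NumElts is exactly 4, so an
    // offset whose computed range proves it is at least 5 yields poison.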
    ConstantRange CR = computeConstantRangeIncludingKnownBits(Offset, false, Q);
    if (CR.getUnsignedMin().ugt(NumElts.getUnsignedMax()))
      return PoisonValue::get(Ty);

    // splice.left(a, b, 0) --> a, splice.right(a, b, 0) --> b
    if (CR.isSingleElement() && CR.getSingleElement()->isZero())
      return IID == Intrinsic::vector_splice_left ? Args[0] : Args[1];

    return nullptr;
  }
  case Intrinsic::experimental_constrained_fadd: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
  }
  case Intrinsic::experimental_constrained_fsub: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
  }
  case Intrinsic::experimental_constrained_fmul: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
  }
  case Intrinsic::experimental_constrained_fdiv: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
  }
  case Intrinsic::experimental_constrained_frem: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
  }
  case Intrinsic::experimental_constrained_ldexp:
    return simplifyLdexp(Args[0], Args[1], Q, true);
  case Intrinsic::experimental_gc_relocate: {
    GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
    Value *DerivedPtr = GCR.getDerivedPtr();
    Value *BasePtr = GCR.getBasePtr();

    // Undef is undef, even after relocation.
    if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
      return UndefValue::get(GCR.getType());
    }

    if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
      // For now, the assumption is that the relocation of null will be null
      // for most any collector. If this ever changes, a corresponding hook
      // should be added to GCStrategy and this code should check it first.
      if (isa<ConstantPointerNull>(DerivedPtr)) {
        // Use null-pointer of gc_relocate's type to replace it.
        return ConstantPointerNull::get(PT);
      }
    }
    return nullptr;
  }
  case Intrinsic::experimental_vp_reverse: {
    Value *Vec = Call->getArgOperand(0);
    Value *EVL = Call->getArgOperand(2);

    Value *X;
    // vp.reverse(vp.reverse(X)) == X (mask doesn't matter)
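    // (Both reverses must use the same EVL, which m_Specific(EVL) enforces;
    // reversing a different number of lanes would not round-trip.)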
    if (match(Vec, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
                       m_Value(X), m_Value(), m_Specific(EVL))))
      return X;

    // vp.reverse(splat(X)) -> splat(X) (regardless of mask and EVL)
    if (isSplatValue(Vec))
      return Vec;
    return nullptr;
  }
  default:
    return nullptr;
  }
}

static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
                                  ArrayRef<Value *> Args,
                                  const SimplifyQuery &Q) {
  auto *F = dyn_cast<Function>(Callee);
  if (!F || !canConstantFoldCallTo(Call, F))
    return nullptr;

  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(Args.size());
  for (Value *Arg : Args) {
    Constant *C = dyn_cast<Constant>(Arg);
    if (!C) {
      if (isa<MetadataAsValue>(Arg))
        continue;
      return nullptr;
    }
    ConstantArgs.push_back(C);
  }

  return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
}

Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
                          const SimplifyQuery &Q) {
  // Args should not contain operand bundle operands.
  assert(Call->arg_size() == Args.size());

  // musttail calls can only be simplified if they are also DCEd.
  // As we can't guarantee this here, don't simplify them.
  if (Call->isMustTailCall())
    return nullptr;

  // call undef -> poison
  // call null -> poison
  if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
    return PoisonValue::get(Call->getType());

  if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
    return V;

  auto *F = dyn_cast<Function>(Callee);
  if (F && F->isIntrinsic())
    if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
      return Ret;

  return nullptr;
}

Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) {
  assert(isa<ConstrainedFPIntrinsic>(Call));
  SmallVector<Value *, 4> Args(Call->args());
  if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
    return V;
  if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
    return Ret;
  return nullptr;
}

/// Given operands for a Freeze, see if we can fold the result.
static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  // Use a utility function defined in ValueTracking.
  if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
    return Op0;
  // We have room for improvement.
  return nullptr;
}

Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  return ::simplifyFreezeInst(Op0, Q);
}

Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
                              const SimplifyQuery &Q) {
  if (LI->isVolatile())
    return nullptr;

  if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
    return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);

  // We can only fold the load if it is from a constant global with a
  // definitive initializer. Skip expensive logic if this is not the case.
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  // If the GlobalVariable's initializer is uniform, then return the constant
  // regardless of its offset.
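  // For example, every i32 load from a constant global whose initializer is
  // zeroinitializer folds to 0, no matter which offset is loaded.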
  if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
                                                     LI->getType(), Q.DL))
    return C;

  // Try to convert operand into a constant by stripping offsets while looking
  // through invariant.group intrinsics.
  APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
  PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
      Q.DL, Offset, /* AllowNonInbounds */ true,
      /* AllowInvariantGroup */ true);
  if (PtrOp == GV) {
    // Index size may have changed due to address space casts.
    Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
    return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
                                        Q.DL);
  }

  return nullptr;
}

/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse) {
  assert(I->getFunction() && "instruction should be inserted in a function");
  assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
         "context instruction should be in the same function");

  const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);

  switch (I->getOpcode()) {
  default:
    if (all_of(NewOps, IsaPred<Constant>)) {
      SmallVector<Constant *, 8> NewConstOps(NewOps.size());
      transform(NewOps, NewConstOps.begin(),
                [](Value *V) { return cast<Constant>(V); });
      return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
    }
    return nullptr;
  case Instruction::FNeg:
    return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
  case Instruction::FAdd:
    return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Add:
    return simplifyAddInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::FSub:
    return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Sub:
    return simplifySubInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::FMul:
    return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Mul:
    return simplifyMulInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::SDiv:
    return simplifySDivInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::UDiv:
    return simplifyUDivInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::FDiv:
    return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::SRem:
    return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::URem:
    return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::FRem:
    return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Shl:
    return simplifyShlInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::LShr:
    return simplifyLShrInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::AShr:
    return simplifyAShrInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::And:
    return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::Or:
    return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::Xor:
    return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::ICmp:
    return simplifyICmpInst(cast<ICmpInst>(I)->getCmpPredicate(), NewOps[0],
                            NewOps[1], Q, MaxRecurse);
  case Instruction::FCmp:
    return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
                            NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
  case Instruction::Select:
    return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
  case Instruction::GetElementPtr: {
    auto *GEPI = cast<GetElementPtrInst>(I);
    return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
                           ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
                           MaxRecurse);
  }
  case Instruction::InsertValue: {
    InsertValueInst *IV = cast<InsertValueInst>(I);
    return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
                                   MaxRecurse);
  }
  case Instruction::InsertElement:
    return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(I);
    return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
                                    MaxRecurse);
  }
  case Instruction::ExtractElement:
    return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::ShuffleVector: {
    auto *SVI = cast<ShuffleVectorInst>(I);
    return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
                                     SVI->getShuffleMask(), SVI->getType(), Q,
                                     MaxRecurse);
  }
  case Instruction::PHI:
    return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
  case Instruction::Call:
    return simplifyCall(
        cast<CallInst>(I), NewOps.back(),
        NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()),
        Q);
  case Instruction::Freeze:
    return llvm::simplifyFreezeInst(NewOps[0], Q);
#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
#include "llvm/IR/Instruction.def"
#undef HANDLE_CAST_INST
    return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
                            MaxRecurse);
  case Instruction::Alloca:
    // No simplifications for Alloca and it can't be constant folded.
    return nullptr;
  case Instruction::Load:
    return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
  }
}

Value *llvm::simplifyInstructionWithOperands(Instruction *I,
                                             ArrayRef<Value *> NewOps,
                                             const SimplifyQuery &SQ) {
  assert(NewOps.size() == I->getNumOperands() &&
         "Number of operands should match the instruction!");
  return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
}

Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
  SmallVector<Value *, 8> Ops(I->operands());
  Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);

  // If called on unreachable code, the instruction may simplify to itself.
  // Make life easier for users by detecting that case here, and returning a
  // safe value instead.
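  // (For example, "%a = or i32 %a, 0" can only occur in unreachable code, and
  // would otherwise "simplify" to %a, i.e. to the instruction itself.)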
  return Result == I ? PoisonValue::get(I->getType()) : Result;
}

/// Implementation of recursive simplification through an instruction's
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
/// InstructionSimplify. Recursively visited users which could not be
/// simplified themselves are added to the optional UnsimplifiedUsers set for
/// further processing by the caller.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
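///
/// For example, once a caller proves that an instruction computes the same
/// value as some existing 'SimpleV', invoking this with that value both does
/// the RAUW and re-simplifies the former users, which may now fold further.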
static bool replaceAndRecursivelySimplifyImpl(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
  bool Simplified = false;
  SmallSetVector<Instruction *, 8> Worklist;
  const DataLayout &DL = I->getDataLayout();

  // If we have an explicit value to collapse to, do that round of the
  // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }

  // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
    if (!SimpleV) {
      if (UnsimplifiedUsers)
        UnsimplifiedUsers->insert(I);
      continue;
    }

    Simplified = true;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the replacement value on the recursive step in most cases.
    for (User *U : I->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
      I->eraseFromParent();
  }
  return Simplified;
}

bool llvm::replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
                                           UnsimplifiedUsers);
}

namespace llvm {
const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
  auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
  auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
  auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
  return {F.getDataLayout(), TLI, DT, AC};
}

const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
                                         const DataLayout &DL) {
  return {DL, &AR.TLI, &AR.DT, &AR.AC};
}

template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
                                         Function &F) {
  auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
  auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
  auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
  return {F.getDataLayout(), TLI, DT, AC};
}
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
                                                  Function &);

bool SimplifyQuery::isUndefValue(Value *V) const {
  if (!CanUseUndef)
    return false;

  return match(V, m_Undef());
}

} // namespace llvm

void InstSimplifyFolder::anchor() {}