1 | //===- InstructionSimplify.cpp - Fold instruction operands ----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements routines for folding instructions into simpler forms |
10 | // that do not require creating new instructions. This does constant folding |
11 | // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either |
12 | // returning a constant ("and i32 %x, 0" -> "0") or an already existing value |
13 | // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been |
14 | // simplified: This is usually true and assuming it simplifies the logic (if |
15 | // they have not been simplified then results are correct but maybe suboptimal). |
16 | // |
17 | //===----------------------------------------------------------------------===// |
18 | |
19 | #include "llvm/Analysis/InstructionSimplify.h" |
20 | |
21 | #include "llvm/ADT/STLExtras.h" |
22 | #include "llvm/ADT/SetVector.h" |
23 | #include "llvm/ADT/Statistic.h" |
24 | #include "llvm/Analysis/AliasAnalysis.h" |
25 | #include "llvm/Analysis/AssumptionCache.h" |
26 | #include "llvm/Analysis/CaptureTracking.h" |
27 | #include "llvm/Analysis/CmpInstAnalysis.h" |
28 | #include "llvm/Analysis/ConstantFolding.h" |
29 | #include "llvm/Analysis/InstSimplifyFolder.h" |
30 | #include "llvm/Analysis/LoopAnalysisManager.h" |
31 | #include "llvm/Analysis/MemoryBuiltins.h" |
32 | #include "llvm/Analysis/OverflowInstAnalysis.h" |
33 | #include "llvm/Analysis/ValueTracking.h" |
34 | #include "llvm/Analysis/VectorUtils.h" |
35 | #include "llvm/IR/ConstantRange.h" |
36 | #include "llvm/IR/DataLayout.h" |
37 | #include "llvm/IR/Dominators.h" |
38 | #include "llvm/IR/InstrTypes.h" |
39 | #include "llvm/IR/Instructions.h" |
40 | #include "llvm/IR/Operator.h" |
41 | #include "llvm/IR/PatternMatch.h" |
42 | #include "llvm/IR/Statepoint.h" |
43 | #include "llvm/Support/KnownBits.h" |
44 | #include <algorithm> |
45 | #include <optional> |
46 | using namespace llvm; |
47 | using namespace llvm::PatternMatch; |
48 | |
49 | #define DEBUG_TYPE "instsimplify" |
50 | |
51 | enum { RecursionLimit = 3 }; |
52 | |
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");
55 | |
56 | static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &, |
57 | unsigned); |
58 | static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned); |
59 | static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, |
60 | const SimplifyQuery &, unsigned); |
61 | static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, |
62 | unsigned); |
63 | static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &, |
64 | const SimplifyQuery &, unsigned); |
65 | static Value *simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &, |
66 | unsigned); |
67 | static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
68 | const SimplifyQuery &Q, unsigned MaxRecurse); |
69 | static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned); |
70 | static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &, |
71 | unsigned); |
72 | static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, |
73 | unsigned); |
74 | static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>, |
75 | GEPNoWrapFlags, const SimplifyQuery &, unsigned); |
76 | static Value *simplifySelectInst(Value *, Value *, Value *, |
77 | const SimplifyQuery &, unsigned); |
78 | static Value *simplifyInstructionWithOperands(Instruction *I, |
79 | ArrayRef<Value *> NewOps, |
80 | const SimplifyQuery &SQ, |
81 | unsigned MaxRecurse); |
82 | |
83 | static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal, |
84 | Value *FalseVal) { |
85 | BinaryOperator::BinaryOps BinOpCode; |
86 | if (auto *BO = dyn_cast<BinaryOperator>(Val: Cond)) |
87 | BinOpCode = BO->getOpcode(); |
88 | else |
89 | return nullptr; |
90 | |
91 | CmpInst::Predicate ExpectedPred, Pred1, Pred2; |
92 | if (BinOpCode == BinaryOperator::Or) { |
93 | ExpectedPred = ICmpInst::ICMP_NE; |
94 | } else if (BinOpCode == BinaryOperator::And) { |
95 | ExpectedPred = ICmpInst::ICMP_EQ; |
96 | } else |
97 | return nullptr; |
98 | |
99 | // %A = icmp eq %TV, %FV |
100 | // %B = icmp eq %X, %Y (and one of these is a select operand) |
101 | // %C = and %A, %B |
102 | // %D = select %C, %TV, %FV |
103 | // --> |
104 | // %FV |
105 | |
106 | // %A = icmp ne %TV, %FV |
107 | // %B = icmp ne %X, %Y (and one of these is a select operand) |
108 | // %C = or %A, %B |
109 | // %D = select %C, %TV, %FV |
110 | // --> |
111 | // %TV |
112 | Value *X, *Y; |
113 | if (!match(V: Cond, P: m_c_BinOp(L: m_c_ICmp(Pred&: Pred1, L: m_Specific(V: TrueVal), |
114 | R: m_Specific(V: FalseVal)), |
115 | R: m_ICmp(Pred&: Pred2, L: m_Value(V&: X), R: m_Value(V&: Y)))) || |
116 | Pred1 != Pred2 || Pred1 != ExpectedPred) |
117 | return nullptr; |
118 | |
119 | if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal) |
120 | return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal; |
121 | |
122 | return nullptr; |
123 | } |
124 | |
125 | /// For a boolean type or a vector of boolean type, return false or a vector |
126 | /// with every element false. |
127 | static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); } |
128 | |
129 | /// For a boolean type or a vector of boolean type, return true or a vector |
130 | /// with every element true. |
131 | static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); } |
132 | |
133 | /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"? |
134 | static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, |
135 | Value *RHS) { |
136 | CmpInst *Cmp = dyn_cast<CmpInst>(Val: V); |
137 | if (!Cmp) |
138 | return false; |
139 | CmpInst::Predicate CPred = Cmp->getPredicate(); |
140 | Value *CLHS = Cmp->getOperand(i_nocapture: 0), *CRHS = Cmp->getOperand(i_nocapture: 1); |
141 | if (CPred == Pred && CLHS == LHS && CRHS == RHS) |
142 | return true; |
143 | return CPred == CmpInst::getSwappedPredicate(pred: Pred) && CLHS == RHS && |
144 | CRHS == LHS; |
145 | } |
146 | |
147 | /// Simplify comparison with true or false branch of select: |
148 | /// %sel = select i1 %cond, i32 %tv, i32 %fv |
149 | /// %cmp = icmp sle i32 %sel, %rhs |
150 | /// Compose new comparison by substituting %sel with either %tv or %fv |
151 | /// and see if it simplifies. |
152 | static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS, |
153 | Value *RHS, Value *Cond, |
154 | const SimplifyQuery &Q, unsigned MaxRecurse, |
155 | Constant *TrueOrFalse) { |
156 | Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse); |
157 | if (SimplifiedCmp == Cond) { |
158 | // %cmp simplified to the select condition (%cond). |
159 | return TrueOrFalse; |
160 | } else if (!SimplifiedCmp && isSameCompare(V: Cond, Pred, LHS, RHS)) { |
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond), then we can replace it.
163 | return TrueOrFalse; |
164 | } |
165 | return SimplifiedCmp; |
166 | } |
167 | |
168 | /// Simplify comparison with true branch of select |
169 | static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS, |
170 | Value *RHS, Value *Cond, |
171 | const SimplifyQuery &Q, |
172 | unsigned MaxRecurse) { |
173 | return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse, |
174 | TrueOrFalse: getTrue(Ty: Cond->getType())); |
175 | } |
176 | |
177 | /// Simplify comparison with false branch of select |
178 | static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS, |
179 | Value *RHS, Value *Cond, |
180 | const SimplifyQuery &Q, |
181 | unsigned MaxRecurse) { |
182 | return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse, |
183 | TrueOrFalse: getFalse(Ty: Cond->getType())); |
184 | } |
185 | |
/// We know the comparison with both branches of the select can be simplified,
/// but the two results are not equal. This routine handles some logical
/// simplifications.
188 | static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, |
189 | Value *Cond, |
190 | const SimplifyQuery &Q, |
191 | unsigned MaxRecurse) { |
192 | // If the false value simplified to false, then the result of the compare |
193 | // is equal to "Cond && TCmp". This also catches the case when the false |
194 | // value simplified to false and the true value to true, returning "Cond". |
195 | // Folding select to and/or isn't poison-safe in general; impliesPoison |
196 | // checks whether folding it does not convert a well-defined value into |
197 | // poison. |
198 | if (match(V: FCmp, P: m_Zero()) && impliesPoison(ValAssumedPoison: TCmp, V: Cond)) |
199 | if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse)) |
200 | return V; |
201 | // If the true value simplified to true, then the result of the compare |
202 | // is equal to "Cond || FCmp". |
203 | if (match(V: TCmp, P: m_One()) && impliesPoison(ValAssumedPoison: FCmp, V: Cond)) |
204 | if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse)) |
205 | return V; |
206 | // Finally, if the false value simplified to true and the true value to |
207 | // false, then the result of the compare is equal to "!Cond". |
208 | if (match(V: FCmp, P: m_One()) && match(V: TCmp, P: m_Zero())) |
209 | if (Value *V = simplifyXorInst( |
210 | Cond, Constant::getAllOnesValue(Ty: Cond->getType()), Q, MaxRecurse)) |
211 | return V; |
212 | return nullptr; |
213 | } |
214 | |
215 | /// Does the given value dominate the specified phi node? |
216 | static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) { |
217 | Instruction *I = dyn_cast<Instruction>(Val: V); |
218 | if (!I) |
219 | // Arguments and constants dominate all instructions. |
220 | return true; |
221 | |
222 | // If we have a DominatorTree then do a precise test. |
223 | if (DT) |
224 | return DT->dominates(Def: I, User: P); |
225 | |
226 | // Otherwise, if the instruction is in the entry block and is not an invoke, |
227 | // then it obviously dominates all phi nodes. |
228 | if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(Val: I) && |
229 | !isa<CallBrInst>(Val: I)) |
230 | return true; |
231 | |
232 | return false; |
233 | } |
234 | |
235 | /// Try to simplify a binary operator of form "V op OtherOp" where V is |
236 | /// "(B0 opex B1)" by distributing 'op' across 'opex' as |
237 | /// "(B0 op OtherOp) opex (B1 op OtherOp)". |
238 | static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V, |
239 | Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, |
240 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
241 | auto *B = dyn_cast<BinaryOperator>(Val: V); |
242 | if (!B || B->getOpcode() != OpcodeToExpand) |
243 | return nullptr; |
244 | Value *B0 = B->getOperand(i_nocapture: 0), *B1 = B->getOperand(i_nocapture: 1); |
245 | Value *L = |
246 | simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse); |
247 | if (!L) |
248 | return nullptr; |
249 | Value *R = |
250 | simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse); |
251 | if (!R) |
252 | return nullptr; |
253 | |
254 | // Does the expanded pair of binops simplify to the existing binop? |
255 | if ((L == B0 && R == B1) || |
256 | (Instruction::isCommutative(Opcode: OpcodeToExpand) && L == B1 && R == B0)) { |
257 | ++NumExpand; |
258 | return B; |
259 | } |
260 | |
261 | // Otherwise, return "L op' R" if it simplifies. |
262 | Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse); |
263 | if (!S) |
264 | return nullptr; |
265 | |
266 | ++NumExpand; |
267 | return S; |
268 | } |
269 | |
270 | /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by |
271 | /// distributing op over op'. |
272 | static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, |
273 | Value *R, |
274 | Instruction::BinaryOps OpcodeToExpand, |
275 | const SimplifyQuery &Q, |
276 | unsigned MaxRecurse) { |
277 | // Recursion is always used, so bail out at once if we already hit the limit. |
278 | if (!MaxRecurse--) |
279 | return nullptr; |
280 | |
281 | if (Value *V = expandBinOp(Opcode, V: L, OtherOp: R, OpcodeToExpand, Q, MaxRecurse)) |
282 | return V; |
283 | if (Value *V = expandBinOp(Opcode, V: R, OtherOp: L, OpcodeToExpand, Q, MaxRecurse)) |
284 | return V; |
285 | return nullptr; |
286 | } |
287 | |
288 | /// Generic simplifications for associative binary operations. |
289 | /// Returns the simpler value, or null if none was found. |
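/// For example, "(X + 1) + -1" reassociates to "X + (1 + -1)"; the inner
/// "1 + -1" folds to 0 and the whole expression simplifies to X.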
290 | static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, |
291 | Value *LHS, Value *RHS, |
292 | const SimplifyQuery &Q, |
293 | unsigned MaxRecurse) { |
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
295 | |
296 | // Recursion is always used, so bail out at once if we already hit the limit. |
297 | if (!MaxRecurse--) |
298 | return nullptr; |
299 | |
300 | BinaryOperator *Op0 = dyn_cast<BinaryOperator>(Val: LHS); |
301 | BinaryOperator *Op1 = dyn_cast<BinaryOperator>(Val: RHS); |
302 | |
303 | // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely. |
304 | if (Op0 && Op0->getOpcode() == Opcode) { |
305 | Value *A = Op0->getOperand(i_nocapture: 0); |
306 | Value *B = Op0->getOperand(i_nocapture: 1); |
307 | Value *C = RHS; |
308 | |
309 | // Does "B op C" simplify? |
310 | if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { |
311 | // It does! Return "A op V" if it simplifies or is already available. |
312 | // If V equals B then "A op V" is just the LHS. |
313 | if (V == B) |
314 | return LHS; |
315 | // Otherwise return "A op V" if it simplifies. |
316 | if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) { |
317 | ++NumReassoc; |
318 | return W; |
319 | } |
320 | } |
321 | } |
322 | |
323 | // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely. |
324 | if (Op1 && Op1->getOpcode() == Opcode) { |
325 | Value *A = LHS; |
326 | Value *B = Op1->getOperand(i_nocapture: 0); |
327 | Value *C = Op1->getOperand(i_nocapture: 1); |
328 | |
329 | // Does "A op B" simplify? |
330 | if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) { |
331 | // It does! Return "V op C" if it simplifies or is already available. |
332 | // If V equals B then "V op C" is just the RHS. |
333 | if (V == B) |
334 | return RHS; |
335 | // Otherwise return "V op C" if it simplifies. |
336 | if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) { |
337 | ++NumReassoc; |
338 | return W; |
339 | } |
340 | } |
341 | } |
342 | |
343 | // The remaining transforms require commutativity as well as associativity. |
344 | if (!Instruction::isCommutative(Opcode)) |
345 | return nullptr; |
346 | |
347 | // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely. |
348 | if (Op0 && Op0->getOpcode() == Opcode) { |
349 | Value *A = Op0->getOperand(i_nocapture: 0); |
350 | Value *B = Op0->getOperand(i_nocapture: 1); |
351 | Value *C = RHS; |
352 | |
353 | // Does "C op A" simplify? |
354 | if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { |
355 | // It does! Return "V op B" if it simplifies or is already available. |
356 | // If V equals A then "V op B" is just the LHS. |
357 | if (V == A) |
358 | return LHS; |
359 | // Otherwise return "V op B" if it simplifies. |
360 | if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) { |
361 | ++NumReassoc; |
362 | return W; |
363 | } |
364 | } |
365 | } |
366 | |
367 | // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely. |
368 | if (Op1 && Op1->getOpcode() == Opcode) { |
369 | Value *A = LHS; |
370 | Value *B = Op1->getOperand(i_nocapture: 0); |
371 | Value *C = Op1->getOperand(i_nocapture: 1); |
372 | |
373 | // Does "C op A" simplify? |
374 | if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { |
375 | // It does! Return "B op V" if it simplifies or is already available. |
376 | // If V equals C then "B op V" is just the RHS. |
377 | if (V == C) |
378 | return RHS; |
379 | // Otherwise return "B op V" if it simplifies. |
380 | if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) { |
381 | ++NumReassoc; |
382 | return W; |
383 | } |
384 | } |
385 | } |
386 | |
387 | return nullptr; |
388 | } |
389 | |
390 | /// In the case of a binary operation with a select instruction as an operand, |
391 | /// try to simplify the binop by seeing whether evaluating it on both branches |
392 | /// of the select results in the same value. Returns the common value if so, |
393 | /// otherwise returns null. |
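/// For example, "and (select i1 %c, i32 7, i32 15), 3" evaluates to 3 on both
/// branches of the select, so the whole binop simplifies to 3.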
394 | static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, |
395 | Value *RHS, const SimplifyQuery &Q, |
396 | unsigned MaxRecurse) { |
397 | // Recursion is always used, so bail out at once if we already hit the limit. |
398 | if (!MaxRecurse--) |
399 | return nullptr; |
400 | |
401 | SelectInst *SI; |
402 | if (isa<SelectInst>(Val: LHS)) { |
403 | SI = cast<SelectInst>(Val: LHS); |
404 | } else { |
405 | assert(isa<SelectInst>(RHS) && "No select instruction operand!" ); |
406 | SI = cast<SelectInst>(Val: RHS); |
407 | } |
408 | |
409 | // Evaluate the BinOp on the true and false branches of the select. |
410 | Value *TV; |
411 | Value *FV; |
412 | if (SI == LHS) { |
413 | TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse); |
414 | FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse); |
415 | } else { |
416 | TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse); |
417 | FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse); |
418 | } |
419 | |
420 | // If they simplified to the same value, then return the common value. |
421 | // If they both failed to simplify then return null. |
422 | if (TV == FV) |
423 | return TV; |
424 | |
425 | // If one branch simplified to undef, return the other one. |
426 | if (TV && Q.isUndefValue(V: TV)) |
427 | return FV; |
428 | if (FV && Q.isUndefValue(V: FV)) |
429 | return TV; |
430 | |
431 | // If applying the operation did not change the true and false select values, |
432 | // then the result of the binop is the select itself. |
433 | if (TV == SI->getTrueValue() && FV == SI->getFalseValue()) |
434 | return SI; |
435 | |
436 | // If one branch simplified and the other did not, and the simplified |
437 | // value is equal to the unsimplified one, return the simplified value. |
438 | // For example, select (cond, X, X & Z) & Z -> X & Z. |
439 | if ((FV && !TV) || (TV && !FV)) { |
440 | // Check that the simplified value has the form "X op Y" where "op" is the |
441 | // same as the original operation. |
442 | Instruction *Simplified = dyn_cast<Instruction>(Val: FV ? FV : TV); |
443 | if (Simplified && Simplified->getOpcode() == unsigned(Opcode) && |
444 | !Simplified->hasPoisonGeneratingFlags()) { |
445 | // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS". |
446 | // We already know that "op" is the same as for the simplified value. See |
447 | // if the operands match too. If so, return the simplified value. |
448 | Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue(); |
449 | Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS; |
450 | Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch; |
451 | if (Simplified->getOperand(i: 0) == UnsimplifiedLHS && |
452 | Simplified->getOperand(i: 1) == UnsimplifiedRHS) |
453 | return Simplified; |
454 | if (Simplified->isCommutative() && |
455 | Simplified->getOperand(i: 1) == UnsimplifiedLHS && |
456 | Simplified->getOperand(i: 0) == UnsimplifiedRHS) |
457 | return Simplified; |
458 | } |
459 | } |
460 | |
461 | return nullptr; |
462 | } |
463 | |
464 | /// In the case of a comparison with a select instruction, try to simplify the |
465 | /// comparison by seeing whether both branches of the select result in the same |
466 | /// value. Returns the common value if so, otherwise returns null. |
467 | /// For example, if we have: |
468 | /// %tmp = select i1 %cmp, i32 1, i32 2 |
469 | /// %cmp1 = icmp sle i32 %tmp, 3 |
/// We can simplify %cmp1 to true, because both branches of the select are
/// less than 3. We compose a new comparison by substituting %tmp with each
/// branch of the select and check whether it simplifies.
473 | static Value *threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, |
474 | Value *RHS, const SimplifyQuery &Q, |
475 | unsigned MaxRecurse) { |
476 | // Recursion is always used, so bail out at once if we already hit the limit. |
477 | if (!MaxRecurse--) |
478 | return nullptr; |
479 | |
480 | // Make sure the select is on the LHS. |
481 | if (!isa<SelectInst>(Val: LHS)) { |
482 | std::swap(a&: LHS, b&: RHS); |
483 | Pred = CmpInst::getSwappedPredicate(pred: Pred); |
484 | } |
485 | assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!" ); |
486 | SelectInst *SI = cast<SelectInst>(Val: LHS); |
487 | Value *Cond = SI->getCondition(); |
488 | Value *TV = SI->getTrueValue(); |
489 | Value *FV = SI->getFalseValue(); |
490 | |
491 | // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it. |
492 | // Does "cmp TV, RHS" simplify? |
493 | Value *TCmp = simplifyCmpSelTrueCase(Pred, LHS: TV, RHS, Cond, Q, MaxRecurse); |
494 | if (!TCmp) |
495 | return nullptr; |
496 | |
497 | // Does "cmp FV, RHS" simplify? |
498 | Value *FCmp = simplifyCmpSelFalseCase(Pred, LHS: FV, RHS, Cond, Q, MaxRecurse); |
499 | if (!FCmp) |
500 | return nullptr; |
501 | |
502 | // If both sides simplified to the same value, then use it as the result of |
503 | // the original comparison. |
504 | if (TCmp == FCmp) |
505 | return TCmp; |
506 | |
507 | // The remaining cases only make sense if the select condition has the same |
508 | // type as the result of the comparison, so bail out if this is not so. |
509 | if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy()) |
510 | return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse); |
511 | |
512 | return nullptr; |
513 | } |
514 | |
515 | /// In the case of a binary operation with an operand that is a PHI instruction, |
516 | /// try to simplify the binop by seeing whether evaluating it on the incoming |
/// phi values yields the same result for every value. If so, returns the
/// common value; otherwise returns null.
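/// For example, given "%p = phi i32 [ 4, %bb1 ], [ 8, %bb2 ]", the operation
/// "and i32 %p, 3" evaluates to 0 for every incoming value, so it folds to 0.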
519 | static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, |
520 | Value *RHS, const SimplifyQuery &Q, |
521 | unsigned MaxRecurse) { |
522 | // Recursion is always used, so bail out at once if we already hit the limit. |
523 | if (!MaxRecurse--) |
524 | return nullptr; |
525 | |
526 | PHINode *PI; |
527 | if (isa<PHINode>(Val: LHS)) { |
528 | PI = cast<PHINode>(Val: LHS); |
529 | // Bail out if RHS and the phi may be mutually interdependent due to a loop. |
530 | if (!valueDominatesPHI(V: RHS, P: PI, DT: Q.DT)) |
531 | return nullptr; |
532 | } else { |
533 | assert(isa<PHINode>(RHS) && "No PHI instruction operand!" ); |
534 | PI = cast<PHINode>(Val: RHS); |
535 | // Bail out if LHS and the phi may be mutually interdependent due to a loop. |
536 | if (!valueDominatesPHI(V: LHS, P: PI, DT: Q.DT)) |
537 | return nullptr; |
538 | } |
539 | |
540 | // Evaluate the BinOp on the incoming phi values. |
541 | Value *CommonValue = nullptr; |
542 | for (Use &Incoming : PI->incoming_values()) { |
543 | // If the incoming value is the phi node itself, it can safely be skipped. |
544 | if (Incoming == PI) |
545 | continue; |
546 | Instruction *InTI = PI->getIncomingBlock(U: Incoming)->getTerminator(); |
547 | Value *V = PI == LHS |
548 | ? simplifyBinOp(Opcode, Incoming, RHS, |
549 | Q.getWithInstruction(I: InTI), MaxRecurse) |
550 | : simplifyBinOp(Opcode, LHS, Incoming, |
551 | Q.getWithInstruction(I: InTI), MaxRecurse); |
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
554 | if (!V || (CommonValue && V != CommonValue)) |
555 | return nullptr; |
556 | CommonValue = V; |
557 | } |
558 | |
559 | return CommonValue; |
560 | } |
561 | |
562 | /// In the case of a comparison with a PHI instruction, try to simplify the |
563 | /// comparison by seeing whether comparing with all of the incoming phi values |
/// yields the same result every time. If so, returns the common result;
/// otherwise returns null.
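/// For example, given "%p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]", the comparison
/// "icmp ult i32 %p, 8" is true for every incoming value, so it folds to true.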
566 | static Value *threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, |
567 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
568 | // Recursion is always used, so bail out at once if we already hit the limit. |
569 | if (!MaxRecurse--) |
570 | return nullptr; |
571 | |
572 | // Make sure the phi is on the LHS. |
573 | if (!isa<PHINode>(Val: LHS)) { |
574 | std::swap(a&: LHS, b&: RHS); |
575 | Pred = CmpInst::getSwappedPredicate(pred: Pred); |
576 | } |
577 | assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!" ); |
578 | PHINode *PI = cast<PHINode>(Val: LHS); |
579 | |
580 | // Bail out if RHS and the phi may be mutually interdependent due to a loop. |
581 | if (!valueDominatesPHI(V: RHS, P: PI, DT: Q.DT)) |
582 | return nullptr; |
583 | |
  // Evaluate the comparison on the incoming phi values.
585 | Value *CommonValue = nullptr; |
586 | for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) { |
587 | Value *Incoming = PI->getIncomingValue(i: u); |
588 | Instruction *InTI = PI->getIncomingBlock(i: u)->getTerminator(); |
589 | // If the incoming value is the phi node itself, it can safely be skipped. |
590 | if (Incoming == PI) |
591 | continue; |
592 | // Change the context instruction to the "edge" that flows into the phi. |
    // This is important because that is where the incoming value is actually
    // "evaluated", even though it is used later somewhere else.
595 | Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(I: InTI), |
596 | MaxRecurse); |
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
599 | if (!V || (CommonValue && V != CommonValue)) |
600 | return nullptr; |
601 | CommonValue = V; |
602 | } |
603 | |
604 | return CommonValue; |
605 | } |
606 | |
607 | static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode, |
608 | Value *&Op0, Value *&Op1, |
609 | const SimplifyQuery &Q) { |
610 | if (auto *CLHS = dyn_cast<Constant>(Val: Op0)) { |
611 | if (auto *CRHS = dyn_cast<Constant>(Val: Op1)) { |
612 | switch (Opcode) { |
613 | default: |
614 | break; |
615 | case Instruction::FAdd: |
616 | case Instruction::FSub: |
617 | case Instruction::FMul: |
618 | case Instruction::FDiv: |
619 | case Instruction::FRem: |
620 | if (Q.CxtI != nullptr) |
621 | return ConstantFoldFPInstOperands(Opcode, LHS: CLHS, RHS: CRHS, DL: Q.DL, I: Q.CxtI); |
622 | } |
623 | return ConstantFoldBinaryOpOperands(Opcode, LHS: CLHS, RHS: CRHS, DL: Q.DL); |
624 | } |
625 | |
626 | // Canonicalize the constant to the RHS if this is a commutative operation. |
627 | if (Instruction::isCommutative(Opcode)) |
628 | std::swap(a&: Op0, b&: Op1); |
629 | } |
630 | return nullptr; |
631 | } |
632 | |
633 | /// Given operands for an Add, see if we can fold the result. |
634 | /// If not, this returns null. |
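/// For example, "add i32 %x, 0" folds to %x, and "%x + (%y - %x)" folds
/// to %y.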
635 | static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
636 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
637 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::Add, Op0, Op1, Q)) |
638 | return C; |
639 | |
640 | // X + poison -> poison |
641 | if (isa<PoisonValue>(Val: Op1)) |
642 | return Op1; |
643 | |
644 | // X + undef -> undef |
645 | if (Q.isUndefValue(V: Op1)) |
646 | return Op1; |
647 | |
648 | // X + 0 -> X |
649 | if (match(V: Op1, P: m_Zero())) |
650 | return Op0; |
651 | |
  // If the two operands are negations of each other, return 0.
653 | if (isKnownNegation(X: Op0, Y: Op1)) |
654 | return Constant::getNullValue(Ty: Op0->getType()); |
655 | |
656 | // X + (Y - X) -> Y |
657 | // (Y - X) + X -> Y |
658 | // Eg: X + -X -> 0 |
659 | Value *Y = nullptr; |
660 | if (match(V: Op1, P: m_Sub(L: m_Value(V&: Y), R: m_Specific(V: Op0))) || |
661 | match(V: Op0, P: m_Sub(L: m_Value(V&: Y), R: m_Specific(V: Op1)))) |
662 | return Y; |
663 | |
664 | // X + ~X -> -1 since ~X = -X-1 |
665 | Type *Ty = Op0->getType(); |
666 | if (match(V: Op0, P: m_Not(V: m_Specific(V: Op1))) || match(V: Op1, P: m_Not(V: m_Specific(V: Op0)))) |
667 | return Constant::getAllOnesValue(Ty); |
668 | |
669 | // add nsw/nuw (xor Y, signmask), signmask --> Y |
670 | // The no-wrapping add guarantees that the top bit will be set by the add. |
671 | // Therefore, the xor must be clearing the already set sign bit of Y. |
672 | if ((IsNSW || IsNUW) && match(V: Op1, P: m_SignMask()) && |
673 | match(V: Op0, P: m_Xor(L: m_Value(V&: Y), R: m_SignMask()))) |
674 | return Y; |
675 | |
676 | // add nuw %x, -1 -> -1, because %x can only be 0. |
677 | if (IsNUW && match(V: Op1, P: m_AllOnes())) |
678 | return Op1; // Which is -1. |
679 | |
  // i1 add -> xor.
681 | if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) |
682 | if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1)) |
683 | return V; |
684 | |
685 | // Try some generic simplifications for associative operations. |
686 | if (Value *V = |
687 | simplifyAssociativeBinOp(Opcode: Instruction::Add, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
688 | return V; |
689 | |
690 | // Threading Add over selects and phi nodes is pointless, so don't bother. |
691 | // Threading over the select in "A + select(cond, B, C)" means evaluating |
692 | // "A+B" and "A+C" and seeing if they are equal; but they are equal if and |
693 | // only if B and C are equal. If B and C are equal then (since we assume |
694 | // that operands have already been simplified) "select(cond, B, C)" should |
695 | // have been simplified to the common value of B and C already. Analysing |
696 | // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly |
697 | // for threading over phi nodes. |
698 | |
699 | return nullptr; |
700 | } |
701 | |
702 | Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
703 | const SimplifyQuery &Query) { |
704 | return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Q: Query, MaxRecurse: RecursionLimit); |
705 | } |
706 | |
707 | /// Compute the base pointer and cumulative constant offsets for V. |
708 | /// |
709 | /// This strips all constant offsets off of V, leaving it the base pointer, and |
710 | /// accumulates the total constant offset applied in the returned constant. |
711 | /// It returns zero if there are no constant offsets applied. |
712 | /// |
713 | /// This is very similar to stripAndAccumulateConstantOffsets(), except it |
714 | /// normalizes the offset bitwidth to the stripped pointer type, not the |
715 | /// original pointer type. |
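/// For example, for "getelementptr inbounds i32, ptr %base, i64 3" this
/// returns an offset of 12 (3 * 4 bytes) and rewrites V to %base.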
716 | static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, |
717 | bool AllowNonInbounds = false) { |
718 | assert(V->getType()->isPtrOrPtrVectorTy()); |
719 | |
720 | APInt Offset = APInt::getZero(numBits: DL.getIndexTypeSizeInBits(Ty: V->getType())); |
721 | V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds); |
  // Because the strip may trace through `addrspacecast`, the computed offset
  // may need to be sign-extended or truncated to the index width of the
  // stripped pointer type.
724 | return Offset.sextOrTrunc(width: DL.getIndexTypeSizeInBits(Ty: V->getType())); |
725 | } |
726 | |
/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
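/// For example, the difference between "getelementptr i8, ptr %p, i64 10" and
/// "getelementptr i8, ptr %p, i64 4" is the constant 6.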
729 | static Constant *computePointerDifference(const DataLayout &DL, Value *LHS, |
730 | Value *RHS) { |
731 | APInt LHSOffset = stripAndComputeConstantOffsets(DL, V&: LHS); |
732 | APInt RHSOffset = stripAndComputeConstantOffsets(DL, V&: RHS); |
733 | |
734 | // If LHS and RHS are not related via constant offsets to the same base |
735 | // value, there is nothing we can do here. |
736 | if (LHS != RHS) |
737 | return nullptr; |
738 | |
739 | // Otherwise, the difference of LHS - RHS can be computed as: |
740 | // LHS - RHS |
741 | // = (LHSOffset + Base) - (RHSOffset + Base) |
742 | // = LHSOffset - RHSOffset |
743 | Constant *Res = ConstantInt::get(Context&: LHS->getContext(), V: LHSOffset - RHSOffset); |
744 | if (auto *VecTy = dyn_cast<VectorType>(Val: LHS->getType())) |
745 | Res = ConstantVector::getSplat(EC: VecTy->getElementCount(), Elt: Res); |
746 | return Res; |
747 | } |
748 | |
749 | /// Test if there is a dominating equivalence condition for the |
750 | /// two operands. If there is, try to reduce the binary operation |
751 | /// between the two operands. |
752 | /// Example: Op0 - Op1 --> 0 when Op0 == Op1 |
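/// The same dominating equality also lets "Op0 udiv Op1" fold to 1 and
/// "Op0 xor Op1" fold to 0.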
753 | static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, |
754 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
  // A recursive invocation cannot gain any benefit from this fold, so only
  // try it at the top level of recursion.
756 | if (MaxRecurse != RecursionLimit) |
757 | return nullptr; |
758 | |
759 | std::optional<bool> Imp = |
760 | isImpliedByDomCondition(Pred: CmpInst::ICMP_EQ, LHS: Op0, RHS: Op1, ContextI: Q.CxtI, DL: Q.DL); |
761 | if (Imp && *Imp) { |
762 | Type *Ty = Op0->getType(); |
763 | switch (Opcode) { |
764 | case Instruction::Sub: |
765 | case Instruction::Xor: |
766 | case Instruction::URem: |
767 | case Instruction::SRem: |
768 | return Constant::getNullValue(Ty); |
769 | |
770 | case Instruction::SDiv: |
771 | case Instruction::UDiv: |
772 | return ConstantInt::get(Ty, V: 1); |
773 | |
774 | case Instruction::And: |
775 | case Instruction::Or: |
776 | // Could be either one - choose Op1 since that's more likely a constant. |
777 | return Op1; |
778 | default: |
779 | break; |
780 | } |
781 | } |
782 | return nullptr; |
783 | } |
784 | |
785 | /// Given operands for a Sub, see if we can fold the result. |
786 | /// If not, this returns null. |
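/// For example, "sub i32 %x, %x" folds to 0 and "sub i32 %x, 0" folds to %x.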
787 | static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
788 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
789 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::Sub, Op0, Op1, Q)) |
790 | return C; |
791 | |
792 | // X - poison -> poison |
793 | // poison - X -> poison |
794 | if (isa<PoisonValue>(Val: Op0) || isa<PoisonValue>(Val: Op1)) |
795 | return PoisonValue::get(T: Op0->getType()); |
796 | |
797 | // X - undef -> undef |
798 | // undef - X -> undef |
799 | if (Q.isUndefValue(V: Op0) || Q.isUndefValue(V: Op1)) |
800 | return UndefValue::get(T: Op0->getType()); |
801 | |
802 | // X - 0 -> X |
803 | if (match(V: Op1, P: m_Zero())) |
804 | return Op0; |
805 | |
806 | // X - X -> 0 |
807 | if (Op0 == Op1) |
808 | return Constant::getNullValue(Ty: Op0->getType()); |
809 | |
810 | // Is this a negation? |
811 | if (match(V: Op0, P: m_Zero())) { |
812 | // 0 - X -> 0 if the sub is NUW. |
813 | if (IsNUW) |
814 | return Constant::getNullValue(Ty: Op0->getType()); |
815 | |
816 | KnownBits Known = computeKnownBits(V: Op1, /* Depth */ 0, Q); |
817 | if (Known.Zero.isMaxSignedValue()) { |
818 | // Op1 is either 0 or the minimum signed value. If the sub is NSW, then |
819 | // Op1 must be 0 because negating the minimum signed value is undefined. |
820 | if (IsNSW) |
821 | return Constant::getNullValue(Ty: Op0->getType()); |
822 | |
823 | // 0 - X -> X if X is 0 or the minimum signed value. |
824 | return Op1; |
825 | } |
826 | } |
827 | |
828 | // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies. |
829 | // For example, (X + Y) - Y -> X; (Y + X) - Y -> X |
830 | Value *X = nullptr, *Y = nullptr, *Z = Op1; |
831 | if (MaxRecurse && match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_Value(V&: Y)))) { // (X + Y) - Z |
832 | // See if "V === Y - Z" simplifies. |
833 | if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1)) |
834 | // It does! Now see if "X + V" simplifies. |
835 | if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) { |
836 | // It does, we successfully reassociated! |
837 | ++NumReassoc; |
838 | return W; |
839 | } |
840 | // See if "V === X - Z" simplifies. |
841 | if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1)) |
842 | // It does! Now see if "Y + V" simplifies. |
843 | if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) { |
844 | // It does, we successfully reassociated! |
845 | ++NumReassoc; |
846 | return W; |
847 | } |
848 | } |
849 | |
850 | // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies. |
851 | // For example, X - (X + 1) -> -1 |
852 | X = Op0; |
853 | if (MaxRecurse && match(V: Op1, P: m_Add(L: m_Value(V&: Y), R: m_Value(V&: Z)))) { // X - (Y + Z) |
854 | // See if "V === X - Y" simplifies. |
855 | if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1)) |
856 | // It does! Now see if "V - Z" simplifies. |
857 | if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) { |
858 | // It does, we successfully reassociated! |
859 | ++NumReassoc; |
860 | return W; |
861 | } |
862 | // See if "V === X - Z" simplifies. |
863 | if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1)) |
864 | // It does! Now see if "V - Y" simplifies. |
865 | if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) { |
866 | // It does, we successfully reassociated! |
867 | ++NumReassoc; |
868 | return W; |
869 | } |
870 | } |
871 | |
872 | // Z - (X - Y) -> (Z - X) + Y if everything simplifies. |
873 | // For example, X - (X - Y) -> Y. |
874 | Z = Op0; |
875 | if (MaxRecurse && match(V: Op1, P: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y)))) // Z - (X - Y) |
876 | // See if "V === Z - X" simplifies. |
877 | if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1)) |
878 | // It does! Now see if "V + Y" simplifies. |
879 | if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) { |
880 | // It does, we successfully reassociated! |
881 | ++NumReassoc; |
882 | return W; |
883 | } |
884 | |
885 | // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies. |
886 | if (MaxRecurse && match(V: Op0, P: m_Trunc(Op: m_Value(V&: X))) && |
887 | match(V: Op1, P: m_Trunc(Op: m_Value(V&: Y)))) |
888 | if (X->getType() == Y->getType()) |
889 | // See if "V === X - Y" simplifies. |
890 | if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1)) |
891 | // It does! Now see if "trunc V" simplifies. |
892 | if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(), |
893 | Q, MaxRecurse - 1)) |
894 | // It does, return the simplified "trunc V". |
895 | return W; |
896 | |
897 | // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...). |
898 | if (match(V: Op0, P: m_PtrToInt(Op: m_Value(V&: X))) && match(V: Op1, P: m_PtrToInt(Op: m_Value(V&: Y)))) |
899 | if (Constant *Result = computePointerDifference(DL: Q.DL, LHS: X, RHS: Y)) |
900 | return ConstantFoldIntegerCast(C: Result, DestTy: Op0->getType(), /*IsSigned*/ true, |
901 | DL: Q.DL); |
902 | |
903 | // i1 sub -> xor. |
904 | if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) |
905 | if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1)) |
906 | return V; |
907 | |
908 | // Threading Sub over selects and phi nodes is pointless, so don't bother. |
909 | // Threading over the select in "A - select(cond, B, C)" means evaluating |
910 | // "A-B" and "A-C" and seeing if they are equal; but they are equal if and |
911 | // only if B and C are equal. If B and C are equal then (since we assume |
912 | // that operands have already been simplified) "select(cond, B, C)" should |
913 | // have been simplified to the common value of B and C already. Analysing |
914 | // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly |
915 | // for threading over phi nodes. |
916 | |
917 | if (Value *V = simplifyByDomEq(Opcode: Instruction::Sub, Op0, Op1, Q, MaxRecurse)) |
918 | return V; |
919 | |
920 | return nullptr; |
921 | } |
922 | |
923 | Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
924 | const SimplifyQuery &Q) { |
925 | return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, MaxRecurse: RecursionLimit); |
926 | } |
927 | |
928 | /// Given operands for a Mul, see if we can fold the result. |
929 | /// If not, this returns null. |
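/// For example, "mul i32 %x, 1" folds to %x and "mul i32 %x, 0" folds to 0.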
930 | static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
931 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
932 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::Mul, Op0, Op1, Q)) |
933 | return C; |
934 | |
935 | // X * poison -> poison |
936 | if (isa<PoisonValue>(Val: Op1)) |
937 | return Op1; |
938 | |
939 | // X * undef -> 0 |
940 | // X * 0 -> 0 |
941 | if (Q.isUndefValue(V: Op1) || match(V: Op1, P: m_Zero())) |
942 | return Constant::getNullValue(Ty: Op0->getType()); |
943 | |
944 | // X * 1 -> X |
945 | if (match(V: Op1, P: m_One())) |
946 | return Op0; |
947 | |
948 | // (X / Y) * Y -> X if the division is exact. |
949 | Value *X = nullptr; |
950 | if (Q.IIQ.UseInstrInfo && |
951 | (match(V: Op0, |
952 | P: m_Exact(SubPattern: m_IDiv(L: m_Value(V&: X), R: m_Specific(V: Op1)))) || // (X / Y) * Y |
953 | match(V: Op1, P: m_Exact(SubPattern: m_IDiv(L: m_Value(V&: X), R: m_Specific(V: Op0)))))) // Y * (X / Y) |
954 | return X; |
955 | |
956 | if (Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) { |
957 | // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not |
958 | // representable). All other cases reduce to 0, so just return 0. |
959 | if (IsNSW) |
960 | return ConstantInt::getNullValue(Ty: Op0->getType()); |
961 | |
962 | // Treat "mul i1" as "and i1". |
963 | if (MaxRecurse) |
964 | if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1)) |
965 | return V; |
966 | } |
967 | |
968 | // Try some generic simplifications for associative operations. |
969 | if (Value *V = |
970 | simplifyAssociativeBinOp(Opcode: Instruction::Mul, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
971 | return V; |
972 | |
973 | // Mul distributes over Add. Try some generic simplifications based on this. |
974 | if (Value *V = expandCommutativeBinOp(Opcode: Instruction::Mul, L: Op0, R: Op1, |
975 | OpcodeToExpand: Instruction::Add, Q, MaxRecurse)) |
976 | return V; |
977 | |
978 | // If the operation is with the result of a select instruction, check whether |
979 | // operating on either branch of the select always yields the same value. |
980 | if (isa<SelectInst>(Val: Op0) || isa<SelectInst>(Val: Op1)) |
981 | if (Value *V = |
982 | threadBinOpOverSelect(Opcode: Instruction::Mul, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
983 | return V; |
984 | |
985 | // If the operation is with the result of a phi instruction, check whether |
986 | // operating on all incoming values of the phi always yields the same value. |
987 | if (isa<PHINode>(Val: Op0) || isa<PHINode>(Val: Op1)) |
988 | if (Value *V = |
989 | threadBinOpOverPHI(Opcode: Instruction::Mul, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
990 | return V; |
991 | |
992 | return nullptr; |
993 | } |
994 | |
995 | Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
996 | const SimplifyQuery &Q) { |
997 | return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, MaxRecurse: RecursionLimit); |
998 | } |
999 | |
1000 | /// Given a predicate and two operands, return true if the comparison is true. |
1001 | /// This is a helper for div/rem simplification where we return some other value |
1002 | /// when we can prove a relationship between the operands. |
1003 | static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS, |
1004 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1005 | Value *V = simplifyICmpInst(Predicate: Pred, LHS, RHS, Q, MaxRecurse); |
1006 | Constant *C = dyn_cast_or_null<Constant>(Val: V); |
1007 | return (C && C->isAllOnesValue()); |
1008 | } |
1009 | |
1010 | /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer |
1011 | /// to simplify X % Y to X. |
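/// For example, "udiv i8 %x, 100" is 0 whenever %x is known to be less than
/// 100, and "urem i8 %x, 100" is then just %x.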
1012 | static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, |
1013 | unsigned MaxRecurse, bool IsSigned) { |
1014 | // Recursion is always used, so bail out at once if we already hit the limit. |
1015 | if (!MaxRecurse--) |
1016 | return false; |
1017 | |
1018 | if (IsSigned) { |
1019 | // (X srem Y) sdiv Y --> 0 |
1020 | if (match(V: X, P: m_SRem(L: m_Value(), R: m_Specific(V: Y)))) |
1021 | return true; |
1022 | |
1023 | // |X| / |Y| --> 0 |
1024 | // |
1025 | // We require that 1 operand is a simple constant. That could be extended to |
1026 | // 2 variables if we computed the sign bit for each. |
1027 | // |
1028 | // Make sure that a constant is not the minimum signed value because taking |
1029 | // the abs() of that is undefined. |
1030 | Type *Ty = X->getType(); |
1031 | const APInt *C; |
1032 | if (match(V: X, P: m_APInt(Res&: C)) && !C->isMinSignedValue()) { |
1033 | // Is the variable divisor magnitude always greater than the constant |
1034 | // dividend magnitude? |
1035 | // |Y| > |C| --> Y < -abs(C) or Y > abs(C) |
1036 | Constant *PosDividendC = ConstantInt::get(Ty, V: C->abs()); |
1037 | Constant *NegDividendC = ConstantInt::get(Ty, V: -C->abs()); |
1038 | if (isICmpTrue(Pred: CmpInst::ICMP_SLT, LHS: Y, RHS: NegDividendC, Q, MaxRecurse) || |
1039 | isICmpTrue(Pred: CmpInst::ICMP_SGT, LHS: Y, RHS: PosDividendC, Q, MaxRecurse)) |
1040 | return true; |
1041 | } |
1042 | if (match(V: Y, P: m_APInt(Res&: C))) { |
1043 | // Special-case: we can't take the abs() of a minimum signed value. If |
1044 | // that's the divisor, then all we have to do is prove that the dividend |
1045 | // is also not the minimum signed value. |
1046 | if (C->isMinSignedValue()) |
1047 | return isICmpTrue(Pred: CmpInst::ICMP_NE, LHS: X, RHS: Y, Q, MaxRecurse); |
1048 | |
1049 | // Is the variable dividend magnitude always less than the constant |
1050 | // divisor magnitude? |
1051 | // |X| < |C| --> X > -abs(C) and X < abs(C) |
1052 | Constant *PosDivisorC = ConstantInt::get(Ty, V: C->abs()); |
1053 | Constant *NegDivisorC = ConstantInt::get(Ty, V: -C->abs()); |
1054 | if (isICmpTrue(Pred: CmpInst::ICMP_SGT, LHS: X, RHS: NegDivisorC, Q, MaxRecurse) && |
1055 | isICmpTrue(Pred: CmpInst::ICMP_SLT, LHS: X, RHS: PosDivisorC, Q, MaxRecurse)) |
1056 | return true; |
1057 | } |
1058 | return false; |
1059 | } |
1060 | |
1061 | // IsSigned == false. |
1062 | |
1063 | // Is the unsigned dividend known to be less than a constant divisor? |
1064 | // TODO: Convert this (and above) to range analysis |
1065 | // ("computeConstantRangeIncludingKnownBits")? |
1066 | const APInt *C; |
1067 | if (match(V: Y, P: m_APInt(Res&: C)) && |
1068 | computeKnownBits(V: X, /* Depth */ 0, Q).getMaxValue().ult(RHS: *C)) |
1069 | return true; |
1070 | |
1071 | // Try again for any divisor: |
1072 | // Is the dividend unsigned less than the divisor? |
1073 | return isICmpTrue(Pred: ICmpInst::ICMP_ULT, LHS: X, RHS: Y, Q, MaxRecurse); |
1074 | } |
1075 | |
1076 | /// Check for common or similar folds of integer division or integer remainder. |
1077 | /// This applies to all 4 opcodes (sdiv/udiv/srem/urem). |
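/// For example, "X / 1" folds to X, "X % X" folds to 0, and division or
/// remainder by a divisor known to be zero folds to poison.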
1078 | static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, |
1079 | Value *Op1, const SimplifyQuery &Q, |
1080 | unsigned MaxRecurse) { |
1081 | bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv); |
1082 | bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem); |
1083 | |
1084 | Type *Ty = Op0->getType(); |
1085 | |
1086 | // X / undef -> poison |
1087 | // X % undef -> poison |
1088 | if (Q.isUndefValue(V: Op1) || isa<PoisonValue>(Val: Op1)) |
1089 | return PoisonValue::get(T: Ty); |
1090 | |
1091 | // X / 0 -> poison |
1092 | // X % 0 -> poison |
1093 | // We don't need to preserve faults! |
1094 | if (match(V: Op1, P: m_Zero())) |
1095 | return PoisonValue::get(T: Ty); |
1096 | |
  // If any element of a constant fixed-width vector divisor is zero or undef,
  // the behavior is undefined and we can fold the whole op to poison.
1099 | auto *Op1C = dyn_cast<Constant>(Val: Op1); |
1100 | auto *VTy = dyn_cast<FixedVectorType>(Val: Ty); |
1101 | if (Op1C && VTy) { |
1102 | unsigned NumElts = VTy->getNumElements(); |
1103 | for (unsigned i = 0; i != NumElts; ++i) { |
1104 | Constant *Elt = Op1C->getAggregateElement(Elt: i); |
1105 | if (Elt && (Elt->isNullValue() || Q.isUndefValue(V: Elt))) |
1106 | return PoisonValue::get(T: Ty); |
1107 | } |
1108 | } |
1109 | |
1110 | // poison / X -> poison |
1111 | // poison % X -> poison |
1112 | if (isa<PoisonValue>(Val: Op0)) |
1113 | return Op0; |
1114 | |
1115 | // undef / X -> 0 |
1116 | // undef % X -> 0 |
1117 | if (Q.isUndefValue(V: Op0)) |
1118 | return Constant::getNullValue(Ty); |
1119 | |
1120 | // 0 / X -> 0 |
1121 | // 0 % X -> 0 |
1122 | if (match(V: Op0, P: m_Zero())) |
1123 | return Constant::getNullValue(Ty: Op0->getType()); |
1124 | |
1125 | // X / X -> 1 |
1126 | // X % X -> 0 |
1127 | if (Op0 == Op1) |
1128 | return IsDiv ? ConstantInt::get(Ty, V: 1) : Constant::getNullValue(Ty); |
1129 | |
1130 | KnownBits Known = computeKnownBits(V: Op1, /* Depth */ 0, Q); |
1131 | // X / 0 -> poison |
1132 | // X % 0 -> poison |
  // If the divisor is known to be zero, just return poison. This can happen
  // when the denominator is provably zero only indirectly (e.g., known zero
  // through a phi node), so the division is not trivially simplifiable.
1136 | if (Known.isZero()) |
1137 | return PoisonValue::get(T: Ty); |
1138 | |
1139 | // X / 1 -> X |
1140 | // X % 1 -> 0 |
1141 | // If the divisor can only be zero or one, we can't have division-by-zero |
1142 | // or remainder-by-zero, so assume the divisor is 1. |
1143 | // e.g. 1, zext (i8 X), sdiv X (Y and 1) |
1144 | if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1) |
1145 | return IsDiv ? Op0 : Constant::getNullValue(Ty); |
1146 | |
1147 | // If X * Y does not overflow, then: |
1148 | // X * Y / Y -> X |
1149 | // X * Y % Y -> 0 |
1150 | Value *X; |
1151 | if (match(V: Op0, P: m_c_Mul(L: m_Value(V&: X), R: m_Specific(V: Op1)))) { |
1152 | auto *Mul = cast<OverflowingBinaryOperator>(Val: Op0); |
1153 | // The multiplication can't overflow if it is defined not to, or if |
1154 | // X == A / Y for some A. |
1155 | if ((IsSigned && Q.IIQ.hasNoSignedWrap(Op: Mul)) || |
1156 | (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Op: Mul)) || |
1157 | (IsSigned && match(V: X, P: m_SDiv(L: m_Value(), R: m_Specific(V: Op1)))) || |
1158 | (!IsSigned && match(V: X, P: m_UDiv(L: m_Value(), R: m_Specific(V: Op1))))) { |
1159 | return IsDiv ? X : Constant::getNullValue(Ty: Op0->getType()); |
1160 | } |
1161 | } |
1162 | |
1163 | if (isDivZero(X: Op0, Y: Op1, Q, MaxRecurse, IsSigned)) |
1164 | return IsDiv ? Constant::getNullValue(Ty: Op0->getType()) : Op0; |
1165 | |
1166 | if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse)) |
1167 | return V; |
1168 | |
1169 | // If the operation is with the result of a select instruction, check whether |
1170 | // operating on either branch of the select always yields the same value. |
1171 | if (isa<SelectInst>(Val: Op0) || isa<SelectInst>(Val: Op1)) |
1172 | if (Value *V = threadBinOpOverSelect(Opcode, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
1173 | return V; |
1174 | |
1175 | // If the operation is with the result of a phi instruction, check whether |
1176 | // operating on all incoming values of the phi always yields the same value. |
1177 | if (isa<PHINode>(Val: Op0) || isa<PHINode>(Val: Op1)) |
1178 | if (Value *V = threadBinOpOverPHI(Opcode, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
1179 | return V; |
1180 | |
1181 | return nullptr; |
1182 | } |
1183 | |
1184 | /// These are simplifications common to SDiv and UDiv. |
1185 | static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, |
1186 | bool IsExact, const SimplifyQuery &Q, |
1187 | unsigned MaxRecurse) { |
1188 | if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) |
1189 | return C; |
1190 | |
1191 | if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse)) |
1192 | return V; |
1193 | |
1194 | const APInt *DivC; |
1195 | if (IsExact && match(V: Op1, P: m_APInt(Res&: DivC))) { |
1196 | // If this is an exact divide by a constant, then the dividend (Op0) must |
1197 | // have at least as many trailing zeros as the divisor to divide evenly. If |
1198 | // it has less trailing zeros, then the result must be poison. |
1199 | if (DivC->countr_zero()) { |
1200 | KnownBits KnownOp0 = computeKnownBits(V: Op0, /* Depth */ 0, Q); |
1201 | if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero()) |
1202 | return PoisonValue::get(T: Op0->getType()); |
1203 | } |
1204 | |
1205 | // udiv exact (mul nsw X, C), C --> X |
1206 | // sdiv exact (mul nuw X, C), C --> X |
1207 | // where C is not a power of 2. |
1208 | Value *X; |
1209 | if (!DivC->isPowerOf2() && |
1210 | (Opcode == Instruction::UDiv |
1211 | ? match(V: Op0, P: m_NSWMul(L: m_Value(V&: X), R: m_Specific(V: Op1))) |
1212 | : match(V: Op0, P: m_NUWMul(L: m_Value(V&: X), R: m_Specific(V: Op1))))) |
1213 | return X; |
1214 | } |
1215 | |
1216 | return nullptr; |
1217 | } |
1218 | |
1219 | /// These are simplifications common to SRem and URem. |
1220 | static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, |
1221 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1222 | if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) |
1223 | return C; |
1224 | |
1225 | if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse)) |
1226 | return V; |
1227 | |
1228 | // (X << Y) % X -> 0 |
1229 | if (Q.IIQ.UseInstrInfo) { |
1230 | if ((Opcode == Instruction::SRem && |
1231 | match(V: Op0, P: m_NSWShl(L: m_Specific(V: Op1), R: m_Value()))) || |
1232 | (Opcode == Instruction::URem && |
1233 | match(V: Op0, P: m_NUWShl(L: m_Specific(V: Op1), R: m_Value())))) |
1234 | return Constant::getNullValue(Ty: Op0->getType()); |
1235 | |
1236 | const APInt *C0; |
1237 | if (match(V: Op1, P: m_APInt(Res&: C0))) { |
1238 | // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0 |
1239 | // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0 |
1240 | if (Opcode == Instruction::SRem |
1241 | ? match(V: Op0, |
1242 | P: m_NSWMul(L: m_Value(), R: m_CheckedInt(CheckFn: [C0](const APInt &C) { |
1243 | return C.srem(RHS: *C0).isZero(); |
1244 | }))) |
1245 | : match(V: Op0, |
1246 | P: m_NUWMul(L: m_Value(), R: m_CheckedInt(CheckFn: [C0](const APInt &C) { |
1247 | return C.urem(RHS: *C0).isZero(); |
1248 | })))) |
1249 | return Constant::getNullValue(Ty: Op0->getType()); |
1250 | } |
1251 | } |
1252 | return nullptr; |
1253 | } |
1254 | |
1255 | /// Given operands for an SDiv, see if we can fold the result. |
1256 | /// If not, this returns null. |
1257 | static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, |
1258 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1259 | // If two operands are negated and no signed overflow, return -1. |
1260 | if (isKnownNegation(X: Op0, Y: Op1, /*NeedNSW=*/true)) |
1261 | return Constant::getAllOnesValue(Ty: Op0->getType()); |
1262 | |
1263 | return simplifyDiv(Opcode: Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse); |
1264 | } |
1265 | |
1266 | Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, |
1267 | const SimplifyQuery &Q) { |
1268 | return ::simplifySDivInst(Op0, Op1, IsExact, Q, MaxRecurse: RecursionLimit); |
1269 | } |
1270 | |
1271 | /// Given operands for a UDiv, see if we can fold the result. |
1272 | /// If not, this returns null. |
1273 | static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, |
1274 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1275 | return simplifyDiv(Opcode: Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse); |
1276 | } |
1277 | |
1278 | Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, |
1279 | const SimplifyQuery &Q) { |
1280 | return ::simplifyUDivInst(Op0, Op1, IsExact, Q, MaxRecurse: RecursionLimit); |
1281 | } |
1282 | |
1283 | /// Given operands for an SRem, see if we can fold the result. |
1284 | /// If not, this returns null. |
1285 | static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
1286 | unsigned MaxRecurse) { |
1287 | // If the divisor is 0, the result is undefined, so assume the divisor is -1. |
1288 | // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0 |
1289 | Value *X; |
1290 | if (match(V: Op1, P: m_SExt(Op: m_Value(V&: X))) && X->getType()->isIntOrIntVectorTy(BitWidth: 1)) |
1291 | return ConstantInt::getNullValue(Ty: Op0->getType()); |
1292 | |
1293 | // If the two operands are negated, return 0. |
1294 | if (isKnownNegation(X: Op0, Y: Op1)) |
1295 | return ConstantInt::getNullValue(Ty: Op0->getType()); |
1296 | |
1297 | return simplifyRem(Opcode: Instruction::SRem, Op0, Op1, Q, MaxRecurse); |
1298 | } |
1299 | |
1300 | Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
1301 | return ::simplifySRemInst(Op0, Op1, Q, MaxRecurse: RecursionLimit); |
1302 | } |
1303 | |
1304 | /// Given operands for a URem, see if we can fold the result. |
1305 | /// If not, this returns null. |
1306 | static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
1307 | unsigned MaxRecurse) { |
1308 | return simplifyRem(Opcode: Instruction::URem, Op0, Op1, Q, MaxRecurse); |
1309 | } |
1310 | |
1311 | Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
1312 | return ::simplifyURemInst(Op0, Op1, Q, MaxRecurse: RecursionLimit); |
1313 | } |
1314 | |
1315 | /// Returns true if a shift by \c Amount always yields poison. |
1316 | static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) { |
1317 | Constant *C = dyn_cast<Constant>(Val: Amount); |
1318 | if (!C) |
1319 | return false; |
1320 | |
1321 | // X shift by undef -> poison because it may shift by the bitwidth. |
1322 | if (Q.isUndefValue(V: C)) |
1323 | return true; |
1324 | |
1325 | // Shifting by the bitwidth or more is poison. This covers scalars and |
1326 | // fixed/scalable vectors with splat constants. |
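// e.g. lshr i8 %x, 8 is poison, as is a <2 x i32> shift by a splat of 32.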
1327 | const APInt *AmountC; |
1328 | if (match(V: C, P: m_APInt(Res&: AmountC)) && AmountC->uge(RHS: AmountC->getBitWidth())) |
1329 | return true; |
1330 | |
1331 | // Try harder for fixed-length vectors: |
1332 | // If all lanes of a vector shift are poison, the whole shift is poison. |
1333 | if (isa<ConstantVector>(Val: C) || isa<ConstantDataVector>(Val: C)) { |
1334 | for (unsigned I = 0, |
1335 | E = cast<FixedVectorType>(Val: C->getType())->getNumElements(); |
1336 | I != E; ++I) |
1337 | if (!isPoisonShift(Amount: C->getAggregateElement(Elt: I), Q)) |
1338 | return false; |
1339 | return true; |
1340 | } |
1341 | |
1342 | return false; |
1343 | } |
1344 | |
1345 | /// Given operands for a Shl, LShr or AShr, see if we can fold the result. |
1346 | /// If not, this returns null. |
1347 | static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, |
1348 | Value *Op1, bool IsNSW, const SimplifyQuery &Q, |
1349 | unsigned MaxRecurse) { |
1350 | if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) |
1351 | return C; |
1352 | |
1353 | // poison shift by X -> poison |
1354 | if (isa<PoisonValue>(Val: Op0)) |
1355 | return Op0; |
1356 | |
1357 | // 0 shift by X -> 0 |
1358 | if (match(V: Op0, P: m_Zero())) |
1359 | return Constant::getNullValue(Ty: Op0->getType()); |
1360 | |
1361 | // X shift by 0 -> X |
1362 | // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones |
1363 | // would be poison. |
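// e.g. lshr i8 %x, (sext i1 %b): a true %b would mean a shift amount of
// 255 (poison), so %b must be false and the shift amount must be 0.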
1364 | Value *X; |
1365 | if (match(V: Op1, P: m_Zero()) || |
1366 | (match(V: Op1, P: m_SExt(Op: m_Value(V&: X))) && X->getType()->isIntOrIntVectorTy(BitWidth: 1))) |
1367 | return Op0; |
1368 | |
1369 | // Fold undefined shifts. |
1370 | if (isPoisonShift(Amount: Op1, Q)) |
1371 | return PoisonValue::get(T: Op0->getType()); |
1372 | |
1373 | // If the operation is with the result of a select instruction, check whether |
1374 | // operating on either branch of the select always yields the same value. |
1375 | if (isa<SelectInst>(Val: Op0) || isa<SelectInst>(Val: Op1)) |
1376 | if (Value *V = threadBinOpOverSelect(Opcode, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
1377 | return V; |
1378 | |
1379 | // If the operation is with the result of a phi instruction, check whether |
1380 | // operating on all incoming values of the phi always yields the same value. |
1381 | if (isa<PHINode>(Val: Op0) || isa<PHINode>(Val: Op1)) |
1382 | if (Value *V = threadBinOpOverPHI(Opcode, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
1383 | return V; |
1384 | |
1385 | // If any bits in the shift amount make that value greater than or equal to |
1386 | // the number of bits in the type, the shift is undefined. |
1387 | KnownBits KnownAmt = computeKnownBits(V: Op1, /* Depth */ 0, Q); |
1388 | if (KnownAmt.getMinValue().uge(RHS: KnownAmt.getBitWidth())) |
1389 | return PoisonValue::get(T: Op0->getType()); |
1390 | |
1391 | // If all valid bits in the shift amount are known zero, the first operand is |
1392 | // unchanged. |
1393 | unsigned NumValidShiftBits = Log2_32_Ceil(Value: KnownAmt.getBitWidth()); |
1394 | if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits) |
1395 | return Op0; |
1396 | |
1397 | // Check for nsw shl leading to a poison value. |
1398 | if (IsNSW) { |
1399 | assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction" ); |
1400 | KnownBits KnownVal = computeKnownBits(V: Op0, /* Depth */ 0, Q); |
1401 | KnownBits KnownShl = KnownBits::shl(LHS: KnownVal, RHS: KnownAmt); |
1402 | |
1403 | if (KnownVal.Zero.isSignBitSet()) |
1404 | KnownShl.Zero.setSignBit(); |
1405 | if (KnownVal.One.isSignBitSet()) |
1406 | KnownShl.One.setSignBit(); |
1407 | |
1408 | if (KnownShl.hasConflict()) |
1409 | return PoisonValue::get(T: Op0->getType()); |
1410 | } |
1411 | |
1412 | return nullptr; |
1413 | } |
1414 | |
1415 | /// Given operands for an LShr or AShr, see if we can fold the result. If not, |
1416 | /// this returns null. |
1417 | static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, |
1418 | Value *Op1, bool IsExact, |
1419 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1420 | if (Value *V = |
1421 | simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse)) |
1422 | return V; |
1423 | |
1424 | // X >> X -> 0 |
1425 | if (Op0 == Op1) |
1426 | return Constant::getNullValue(Ty: Op0->getType()); |
1427 | |
1428 | // undef >> X -> 0 |
1429 | // undef >> X -> undef (if it's exact) |
1430 | if (Q.isUndefValue(V: Op0)) |
1431 | return IsExact ? Op0 : Constant::getNullValue(Ty: Op0->getType()); |
1432 | |
1433 | // The low bit cannot be shifted out of an exact shift if it is set. |
1434 | // TODO: Generalize by counting trailing zeros (see fold for exact division). |
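// (If the low bit is set, any nonzero exact shift would shift out a one
// bit and yield poison, so the only non-poison result is Op0 itself.)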
1435 | if (IsExact) { |
1436 | KnownBits Op0Known = computeKnownBits(V: Op0, /* Depth */ 0, Q); |
1437 | if (Op0Known.One[0]) |
1438 | return Op0; |
1439 | } |
1440 | |
1441 | return nullptr; |
1442 | } |
1443 | |
1444 | /// Given operands for a Shl, see if we can fold the result. |
1445 | /// If not, this returns null. |
1446 | static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
1447 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1448 | if (Value *V = |
1449 | simplifyShift(Opcode: Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse)) |
1450 | return V; |
1451 | |
1452 | Type *Ty = Op0->getType(); |
1453 | // undef << X -> 0 |
1454 | // undef << X -> undef (if it's NSW/NUW) |
1455 | if (Q.isUndefValue(V: Op0)) |
1456 | return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty); |
1457 | |
1458 | // (X >> A) << A -> X |
1459 | Value *X; |
1460 | if (Q.IIQ.UseInstrInfo && |
1461 | match(V: Op0, P: m_Exact(SubPattern: m_Shr(L: m_Value(V&: X), R: m_Specific(V: Op1))))) |
1462 | return X; |
1463 | |
1464 | // shl nuw i8 C, %x -> C iff C has sign bit set. |
1465 | if (IsNUW && match(V: Op0, P: m_Negative())) |
1466 | return Op0; |
1467 | // NOTE: could use computeKnownBits() / LazyValueInfo, |
1468 | // but the cost-benefit analysis suggests it isn't worth it. |
1469 | |
1470 | // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees |
1471 | // that the sign-bit does not change, so the only input that does not |
1472 | // produce poison is 0, and "0 << (bitwidth-1) --> 0". |
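// e.g. shl nuw nsw i8 %x, 7 --> 0: any nonzero %x either shifts out a one
// bit (breaking nuw) or flips the sign bit (breaking nsw).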
1473 | if (IsNSW && IsNUW && |
1474 | match(V: Op1, P: m_SpecificInt(V: Ty->getScalarSizeInBits() - 1))) |
1475 | return Constant::getNullValue(Ty); |
1476 | |
1477 | return nullptr; |
1478 | } |
1479 | |
1480 | Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, |
1481 | const SimplifyQuery &Q) { |
1482 | return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, MaxRecurse: RecursionLimit); |
1483 | } |
1484 | |
1485 | /// Given operands for an LShr, see if we can fold the result. |
1486 | /// If not, this returns null. |
1487 | static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, |
1488 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1489 | if (Value *V = simplifyRightShift(Opcode: Instruction::LShr, Op0, Op1, IsExact, Q, |
1490 | MaxRecurse)) |
1491 | return V; |
1492 | |
1493 | // (X << A) >> A -> X |
1494 | Value *X; |
1495 | if (Q.IIQ.UseInstrInfo && match(V: Op0, P: m_NUWShl(L: m_Value(V&: X), R: m_Specific(V: Op1)))) |
1496 | return X; |
1497 | |
1498 | // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A. |
1499 | // We can return X as we do in the above case since OR alters no bits in X. |
1500 | // SimplifyDemandedBits in InstCombine can do more general optimization for |
1501 | // bit manipulation. This pattern aims to provide opportunities for other |
1502 | // optimizers by supporting a simple but common case in InstSimplify. |
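// e.g. lshr (or (shl nuw i32 %x, 8), %y), 8 --> %x when %y is known to
// fit in the low 8 bits.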
1503 | Value *Y; |
1504 | const APInt *ShRAmt, *ShLAmt; |
1505 | if (Q.IIQ.UseInstrInfo && match(V: Op1, P: m_APInt(Res&: ShRAmt)) && |
1506 | match(V: Op0, P: m_c_Or(L: m_NUWShl(L: m_Value(V&: X), R: m_APInt(Res&: ShLAmt)), R: m_Value(V&: Y))) && |
1507 | *ShRAmt == *ShLAmt) { |
1508 | const KnownBits YKnown = computeKnownBits(V: Y, /* Depth */ 0, Q); |
1509 | const unsigned EffWidthY = YKnown.countMaxActiveBits(); |
1510 | if (ShRAmt->uge(RHS: EffWidthY)) |
1511 | return X; |
1512 | } |
1513 | |
1514 | return nullptr; |
1515 | } |
1516 | |
1517 | Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, |
1518 | const SimplifyQuery &Q) { |
1519 | return ::simplifyLShrInst(Op0, Op1, IsExact, Q, MaxRecurse: RecursionLimit); |
1520 | } |
1521 | |
1522 | /// Given operands for an AShr, see if we can fold the result. |
1523 | /// If not, this returns null. |
1524 | static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, |
1525 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
1526 | if (Value *V = simplifyRightShift(Opcode: Instruction::AShr, Op0, Op1, IsExact, Q, |
1527 | MaxRecurse)) |
1528 | return V; |
1529 | |
1530 | // -1 >>a X --> -1 |
1531 | // (-1 << X) >>a X --> -1 |
1532 | // We could return the original -1 constant to preserve poison elements. |
1533 | if (match(V: Op0, P: m_AllOnes()) || |
1534 | match(V: Op0, P: m_Shl(L: m_AllOnes(), R: m_Specific(V: Op1)))) |
1535 | return Constant::getAllOnesValue(Ty: Op0->getType()); |
1536 | |
1537 | // (X << A) >> A -> X |
1538 | Value *X; |
1539 | if (Q.IIQ.UseInstrInfo && match(V: Op0, P: m_NSWShl(L: m_Value(V&: X), R: m_Specific(V: Op1)))) |
1540 | return X; |
1541 | |
1542 | // Arithmetic shifting an all-sign-bit value is a no-op. |
1543 | unsigned NumSignBits = ComputeNumSignBits(Op: Op0, DL: Q.DL, Depth: 0, AC: Q.AC, CxtI: Q.CxtI, DT: Q.DT); |
1544 | if (NumSignBits == Op0->getType()->getScalarSizeInBits()) |
1545 | return Op0; |
1546 | |
1547 | return nullptr; |
1548 | } |
1549 | |
1550 | Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, |
1551 | const SimplifyQuery &Q) { |
1552 | return ::simplifyAShrInst(Op0, Op1, IsExact, Q, MaxRecurse: RecursionLimit); |
1553 | } |
1554 | |
1555 | /// Commuted variants are assumed to be handled by calling this function again |
1556 | /// with the parameters swapped. |
1557 | static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, |
1558 | ICmpInst *UnsignedICmp, bool IsAnd, |
1559 | const SimplifyQuery &Q) { |
1560 | Value *X, *Y; |
1561 | |
1562 | ICmpInst::Predicate EqPred; |
1563 | if (!match(V: ZeroICmp, P: m_ICmp(Pred&: EqPred, L: m_Value(V&: Y), R: m_Zero())) || |
1564 | !ICmpInst::isEquality(P: EqPred)) |
1565 | return nullptr; |
1566 | |
1567 | ICmpInst::Predicate UnsignedPred; |
1568 | |
1569 | Value *A, *B; |
1570 | // Y = (A - B); |
1571 | if (match(V: Y, P: m_Sub(L: m_Value(V&: A), R: m_Value(V&: B)))) { |
1572 | if (match(V: UnsignedICmp, |
1573 | P: m_c_ICmp(Pred&: UnsignedPred, L: m_Specific(V: A), R: m_Specific(V: B))) && |
1574 | ICmpInst::isUnsigned(predicate: UnsignedPred)) { |
1575 | // A >=/<= B || (A - B) != 0 <--> true |
1576 | if ((UnsignedPred == ICmpInst::ICMP_UGE || |
1577 | UnsignedPred == ICmpInst::ICMP_ULE) && |
1578 | EqPred == ICmpInst::ICMP_NE && !IsAnd) |
1579 | return ConstantInt::getTrue(Ty: UnsignedICmp->getType()); |
1580 | // A </> B && (A - B) == 0 <--> false |
1581 | if ((UnsignedPred == ICmpInst::ICMP_ULT || |
1582 | UnsignedPred == ICmpInst::ICMP_UGT) && |
1583 | EqPred == ICmpInst::ICMP_EQ && IsAnd) |
1584 | return ConstantInt::getFalse(Ty: UnsignedICmp->getType()); |
1585 | |
1586 | // A </> B && (A - B) != 0 <--> A </> B |
1587 | // A </> B || (A - B) != 0 <--> (A - B) != 0 |
1588 | if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT || |
1589 | UnsignedPred == ICmpInst::ICMP_UGT)) |
1590 | return IsAnd ? UnsignedICmp : ZeroICmp; |
1591 | |
1592 | // A <=/>= B && (A - B) == 0 <--> (A - B) == 0 |
1593 | // A <=/>= B || (A - B) == 0 <--> A <=/>= B |
1594 | if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE || |
1595 | UnsignedPred == ICmpInst::ICMP_UGE)) |
1596 | return IsAnd ? ZeroICmp : UnsignedICmp; |
1597 | } |
1598 | |
1599 | // Given Y = (A - B) |
1600 | // Y >= A && Y != 0 --> Y >= A iff B != 0 |
1601 | // Y < A || Y == 0 --> Y < A iff B != 0 |
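// (With B != 0: Y == 0 would force A == B, and then Y u>= A gives
// A == 0 == B, a contradiction; so Y u>= A already implies Y != 0.)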
1602 | if (match(V: UnsignedICmp, |
1603 | P: m_c_ICmp(Pred&: UnsignedPred, L: m_Specific(V: Y), R: m_Specific(V: A)))) { |
1604 | if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd && |
1605 | EqPred == ICmpInst::ICMP_NE && isKnownNonZero(V: B, Q)) |
1606 | return UnsignedICmp; |
1607 | if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd && |
1608 | EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(V: B, Q)) |
1609 | return UnsignedICmp; |
1610 | } |
1611 | } |
1612 | |
1613 | if (match(V: UnsignedICmp, P: m_ICmp(Pred&: UnsignedPred, L: m_Value(V&: X), R: m_Specific(V: Y))) && |
1614 | ICmpInst::isUnsigned(predicate: UnsignedPred)) |
1615 | ; |
1616 | else if (match(V: UnsignedICmp, |
1617 | P: m_ICmp(Pred&: UnsignedPred, L: m_Specific(V: Y), R: m_Value(V&: X))) && |
1618 | ICmpInst::isUnsigned(predicate: UnsignedPred)) |
1619 | UnsignedPred = ICmpInst::getSwappedPredicate(pred: UnsignedPred); |
1620 | else |
1621 | return nullptr; |
1622 | |
1623 | // X > Y && Y == 0 --> Y == 0 iff X != 0 |
1624 | // X > Y || Y == 0 --> X > Y iff X != 0 |
1625 | if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && |
1626 | isKnownNonZero(V: X, Q)) |
1627 | return IsAnd ? ZeroICmp : UnsignedICmp; |
1628 | |
1629 | // X <= Y && Y != 0 --> X <= Y iff X != 0 |
1630 | // X <= Y || Y != 0 --> Y != 0 iff X != 0 |
1631 | if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && |
1632 | isKnownNonZero(V: X, Q)) |
1633 | return IsAnd ? UnsignedICmp : ZeroICmp; |
1634 | |
1635 | // The transforms below here are expected to be handled more generally with |
1636 | // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's |
1637 | // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap, |
1638 | // these are candidates for removal. |
1639 | |
1640 | // X < Y && Y != 0 --> X < Y |
1641 | // X < Y || Y != 0 --> Y != 0 |
1642 | if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE) |
1643 | return IsAnd ? UnsignedICmp : ZeroICmp; |
1644 | |
1645 | // X >= Y && Y == 0 --> Y == 0 |
1646 | // X >= Y || Y == 0 --> X >= Y |
1647 | if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ) |
1648 | return IsAnd ? ZeroICmp : UnsignedICmp; |
1649 | |
1650 | // X < Y && Y == 0 --> false |
1651 | if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ && |
1652 | IsAnd) |
1653 | return getFalse(Ty: UnsignedICmp->getType()); |
1654 | |
1655 | // X >= Y || Y != 0 --> true |
1656 | if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE && |
1657 | !IsAnd) |
1658 | return getTrue(Ty: UnsignedICmp->getType()); |
1659 | |
1660 | return nullptr; |
1661 | } |
1662 | |
1663 | /// Test if a pair of compares with a shared operand and 2 constants has an |
1664 | /// empty set intersection, full set union, or if one compare is a superset of |
1665 | /// the other. |
1666 | static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, |
1667 | bool IsAnd) { |
1668 | // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1). |
1669 | if (Cmp0->getOperand(i_nocapture: 0) != Cmp1->getOperand(i_nocapture: 0)) |
1670 | return nullptr; |
1671 | |
1672 | const APInt *C0, *C1; |
1673 | if (!match(V: Cmp0->getOperand(i_nocapture: 1), P: m_APInt(Res&: C0)) || |
1674 | !match(V: Cmp1->getOperand(i_nocapture: 1), P: m_APInt(Res&: C1))) |
1675 | return nullptr; |
1676 | |
1677 | auto Range0 = ConstantRange::makeExactICmpRegion(Pred: Cmp0->getPredicate(), Other: *C0); |
1678 | auto Range1 = ConstantRange::makeExactICmpRegion(Pred: Cmp1->getPredicate(), Other: *C1); |
1679 | |
1680 | // For and-of-compares, check if the intersection is empty: |
1681 | // (icmp X, C0) && (icmp X, C1) --> empty set --> false |
1682 | if (IsAnd && Range0.intersectWith(CR: Range1).isEmptySet()) |
1683 | return getFalse(Ty: Cmp0->getType()); |
1684 | |
1685 | // For or-of-compares, check if the union is full: |
1686 | // (icmp X, C0) || (icmp X, C1) --> full set --> true |
1687 | if (!IsAnd && Range0.unionWith(CR: Range1).isFullSet()) |
1688 | return getTrue(Ty: Cmp0->getType()); |
1689 | |
1690 | // Is one range a superset of the other? |
1691 | // If this is and-of-compares, take the smaller set: |
1692 | // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42 |
1693 | // If this is or-of-compares, take the larger set: |
1694 | // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4 |
1695 | if (Range0.contains(CR: Range1)) |
1696 | return IsAnd ? Cmp1 : Cmp0; |
1697 | if (Range1.contains(CR: Range0)) |
1698 | return IsAnd ? Cmp0 : Cmp1; |
1699 | |
1700 | return nullptr; |
1701 | } |
1702 | |
1703 | static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, |
1704 | const InstrInfoQuery &IIQ) { |
1705 | // (icmp (add V, C0), C1) & (icmp V, C0) |
1706 | ICmpInst::Predicate Pred0, Pred1; |
1707 | const APInt *C0, *C1; |
1708 | Value *V; |
1709 | if (!match(V: Op0, P: m_ICmp(Pred&: Pred0, L: m_Add(L: m_Value(V), R: m_APInt(Res&: C0)), R: m_APInt(Res&: C1)))) |
1710 | return nullptr; |
1711 | |
1712 | if (!match(V: Op1, P: m_ICmp(Pred&: Pred1, L: m_Specific(V), R: m_Value()))) |
1713 | return nullptr; |
1714 | |
1715 | auto *AddInst = cast<OverflowingBinaryOperator>(Val: Op0->getOperand(i_nocapture: 0)); |
1716 | if (AddInst->getOperand(i_nocapture: 1) != Op1->getOperand(i_nocapture: 1)) |
1717 | return nullptr; |
1718 | |
1719 | Type *ITy = Op0->getType(); |
1720 | bool IsNSW = IIQ.hasNoSignedWrap(Op: AddInst); |
1721 | bool IsNUW = IIQ.hasNoUnsignedWrap(Op: AddInst); |
1722 | |
1723 | const APInt Delta = *C1 - *C0; |
1724 | if (C0->isStrictlyPositive()) { |
1725 | if (Delta == 2) { |
1726 | if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) |
1727 | return getFalse(Ty: ITy); |
1728 | if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW) |
1729 | return getFalse(Ty: ITy); |
1730 | } |
1731 | if (Delta == 1) { |
1732 | if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) |
1733 | return getFalse(Ty: ITy); |
1734 | if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW) |
1735 | return getFalse(Ty: ITy); |
1736 | } |
1737 | } |
1738 | if (C0->getBoolValue() && IsNUW) { |
1739 | if (Delta == 2) |
1740 | if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) |
1741 | return getFalse(Ty: ITy); |
1742 | if (Delta == 1) |
1743 | if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) |
1744 | return getFalse(Ty: ITy); |
1745 | } |
1746 | |
1747 | return nullptr; |
1748 | } |
1749 | |
1750 | /// Try to simplify and/or of icmp with ctpop intrinsic. |
1751 | static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, |
1752 | bool IsAnd) { |
1753 | ICmpInst::Predicate Pred0, Pred1; |
1754 | Value *X; |
1755 | const APInt *C; |
1756 | if (!match(V: Cmp0, P: m_ICmp(Pred&: Pred0, L: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: X)), |
1757 | R: m_APInt(Res&: C))) || |
1758 | !match(V: Cmp1, P: m_ICmp(Pred&: Pred1, L: m_Specific(V: X), R: m_ZeroInt())) || C->isZero()) |
1759 | return nullptr; |
1760 | |
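// ctpop(X) == 0 iff X == 0, so with C != 0: (ctpop(X) == C) implies
// X != 0, and (X == 0) implies (ctpop(X) != C).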
1761 | // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0 |
1762 | if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE) |
1763 | return Cmp1; |
1764 | // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0 |
1765 | if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ) |
1766 | return Cmp1; |
1767 | |
1768 | return nullptr; |
1769 | } |
1770 | |
1771 | static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, |
1772 | const SimplifyQuery &Q) { |
1773 | if (Value *X = simplifyUnsignedRangeCheck(ZeroICmp: Op0, UnsignedICmp: Op1, /*IsAnd=*/true, Q)) |
1774 | return X; |
1775 | if (Value *X = simplifyUnsignedRangeCheck(ZeroICmp: Op1, UnsignedICmp: Op0, /*IsAnd=*/true, Q)) |
1776 | return X; |
1777 | |
1778 | if (Value *X = simplifyAndOrOfICmpsWithConstants(Cmp0: Op0, Cmp1: Op1, IsAnd: true)) |
1779 | return X; |
1780 | |
1781 | if (Value *X = simplifyAndOrOfICmpsWithCtpop(Cmp0: Op0, Cmp1: Op1, IsAnd: true)) |
1782 | return X; |
1783 | if (Value *X = simplifyAndOrOfICmpsWithCtpop(Cmp0: Op1, Cmp1: Op0, IsAnd: true)) |
1784 | return X; |
1785 | |
1786 | if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, IIQ: Q.IIQ)) |
1787 | return X; |
1788 | if (Value *X = simplifyAndOfICmpsWithAdd(Op0: Op1, Op1: Op0, IIQ: Q.IIQ)) |
1789 | return X; |
1790 | |
1791 | return nullptr; |
1792 | } |
1793 | |
1794 | static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, |
1795 | const InstrInfoQuery &IIQ) { |
1796 | // (icmp (add V, C0), C1) | (icmp V, C0) |
1797 | ICmpInst::Predicate Pred0, Pred1; |
1798 | const APInt *C0, *C1; |
1799 | Value *V; |
1800 | if (!match(V: Op0, P: m_ICmp(Pred&: Pred0, L: m_Add(L: m_Value(V), R: m_APInt(Res&: C0)), R: m_APInt(Res&: C1)))) |
1801 | return nullptr; |
1802 | |
1803 | if (!match(V: Op1, P: m_ICmp(Pred&: Pred1, L: m_Specific(V), R: m_Value()))) |
1804 | return nullptr; |
1805 | |
1806 | auto *AddInst = cast<BinaryOperator>(Val: Op0->getOperand(i_nocapture: 0)); |
1807 | if (AddInst->getOperand(i_nocapture: 1) != Op1->getOperand(i_nocapture: 1)) |
1808 | return nullptr; |
1809 | |
1810 | Type *ITy = Op0->getType(); |
1811 | bool IsNSW = IIQ.hasNoSignedWrap(Op: AddInst); |
1812 | bool IsNUW = IIQ.hasNoUnsignedWrap(Op: AddInst); |
1813 | |
1814 | const APInt Delta = *C1 - *C0; |
1815 | if (C0->isStrictlyPositive()) { |
1816 | if (Delta == 2) { |
1817 | if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) |
1818 | return getTrue(Ty: ITy); |
1819 | if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW) |
1820 | return getTrue(Ty: ITy); |
1821 | } |
1822 | if (Delta == 1) { |
1823 | if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) |
1824 | return getTrue(Ty: ITy); |
1825 | if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW) |
1826 | return getTrue(Ty: ITy); |
1827 | } |
1828 | } |
1829 | if (C0->getBoolValue() && IsNUW) { |
1830 | if (Delta == 2) |
1831 | if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) |
1832 | return getTrue(Ty: ITy); |
1833 | if (Delta == 1) |
1834 | if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) |
1835 | return getTrue(Ty: ITy); |
1836 | } |
1837 | |
1838 | return nullptr; |
1839 | } |
1840 | |
1841 | static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, |
1842 | const SimplifyQuery &Q) { |
1843 | if (Value *X = simplifyUnsignedRangeCheck(ZeroICmp: Op0, UnsignedICmp: Op1, /*IsAnd=*/false, Q)) |
1844 | return X; |
1845 | if (Value *X = simplifyUnsignedRangeCheck(ZeroICmp: Op1, UnsignedICmp: Op0, /*IsAnd=*/false, Q)) |
1846 | return X; |
1847 | |
1848 | if (Value *X = simplifyAndOrOfICmpsWithConstants(Cmp0: Op0, Cmp1: Op1, IsAnd: false)) |
1849 | return X; |
1850 | |
1851 | if (Value *X = simplifyAndOrOfICmpsWithCtpop(Cmp0: Op0, Cmp1: Op1, IsAnd: false)) |
1852 | return X; |
1853 | if (Value *X = simplifyAndOrOfICmpsWithCtpop(Cmp0: Op1, Cmp1: Op0, IsAnd: false)) |
1854 | return X; |
1855 | |
1856 | if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, IIQ: Q.IIQ)) |
1857 | return X; |
1858 | if (Value *X = simplifyOrOfICmpsWithAdd(Op0: Op1, Op1: Op0, IIQ: Q.IIQ)) |
1859 | return X; |
1860 | |
1861 | return nullptr; |
1862 | } |
1863 | |
1864 | static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, |
1865 | FCmpInst *RHS, bool IsAnd) { |
1866 | Value *LHS0 = LHS->getOperand(i_nocapture: 0), *LHS1 = LHS->getOperand(i_nocapture: 1); |
1867 | Value *RHS0 = RHS->getOperand(i_nocapture: 0), *RHS1 = RHS->getOperand(i_nocapture: 1); |
1868 | if (LHS0->getType() != RHS0->getType()) |
1869 | return nullptr; |
1870 | |
1871 | FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); |
1872 | if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) && |
1873 | ((FCmpInst::isOrdered(predicate: PredR) && IsAnd) || |
1874 | (FCmpInst::isUnordered(predicate: PredR) && !IsAnd))) { |
1875 | // (fcmp ord X, 0) & (fcmp o** X, Y) --> fcmp o** X, Y |
1876 | // (fcmp uno X, 0) & (fcmp o** X, Y) --> false |
1877 | // (fcmp uno X, 0) | (fcmp u** X, Y) --> fcmp u** X, Y |
1878 | // (fcmp ord X, 0) | (fcmp u** X, Y) --> true |
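// ("fcmp ord X, 0.0" is true iff X is not NaN and "fcmp uno X, 0.0" is
// true iff X is NaN, because +0.0 itself is never NaN.)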
1879 | if ((LHS0 == RHS0 || LHS0 == RHS1) && match(V: LHS1, P: m_PosZeroFP())) |
1880 | return FCmpInst::isOrdered(predicate: PredL) == FCmpInst::isOrdered(predicate: PredR) |
1881 | ? static_cast<Value *>(RHS) |
1882 | : ConstantInt::getBool(Ty: LHS->getType(), V: !IsAnd); |
1883 | } |
1884 | |
1885 | if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) && |
1886 | ((FCmpInst::isOrdered(predicate: PredL) && IsAnd) || |
1887 | (FCmpInst::isUnordered(predicate: PredL) && !IsAnd))) { |
1888 | // (fcmp o** X, Y) & (fcmp ord X, 0) --> fcmp o** X, Y |
1889 | // (fcmp o** X, Y) & (fcmp uno X, 0) --> false |
1890 | // (fcmp u** X, Y) | (fcmp uno X, 0) --> fcmp u** X, Y |
1891 | // (fcmp u** X, Y) | (fcmp ord X, 0) --> true |
1892 | if ((RHS0 == LHS0 || RHS0 == LHS1) && match(V: RHS1, P: m_PosZeroFP())) |
1893 | return FCmpInst::isOrdered(predicate: PredL) == FCmpInst::isOrdered(predicate: PredR) |
1894 | ? static_cast<Value *>(LHS) |
1895 | : ConstantInt::getBool(Ty: LHS->getType(), V: !IsAnd); |
1896 | } |
1897 | |
1898 | return nullptr; |
1899 | } |
1900 | |
1901 | static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, |
1902 | Value *Op1, bool IsAnd) { |
1903 | // Look through casts of the 'and'/'or' operands to find compares. |
1904 | auto *Cast0 = dyn_cast<CastInst>(Val: Op0); |
1905 | auto *Cast1 = dyn_cast<CastInst>(Val: Op1); |
1906 | if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && |
1907 | Cast0->getSrcTy() == Cast1->getSrcTy()) { |
1908 | Op0 = Cast0->getOperand(i_nocapture: 0); |
1909 | Op1 = Cast1->getOperand(i_nocapture: 0); |
1910 | } |
1911 | |
1912 | Value *V = nullptr; |
1913 | auto *ICmp0 = dyn_cast<ICmpInst>(Val: Op0); |
1914 | auto *ICmp1 = dyn_cast<ICmpInst>(Val: Op1); |
1915 | if (ICmp0 && ICmp1) |
1916 | V = IsAnd ? simplifyAndOfICmps(Op0: ICmp0, Op1: ICmp1, Q) |
1917 | : simplifyOrOfICmps(Op0: ICmp0, Op1: ICmp1, Q); |
1918 | |
1919 | auto *FCmp0 = dyn_cast<FCmpInst>(Val: Op0); |
1920 | auto *FCmp1 = dyn_cast<FCmpInst>(Val: Op1); |
1921 | if (FCmp0 && FCmp1) |
1922 | V = simplifyAndOrOfFCmps(Q, LHS: FCmp0, RHS: FCmp1, IsAnd); |
1923 | |
1924 | if (!V) |
1925 | return nullptr; |
1926 | if (!Cast0) |
1927 | return V; |
1928 | |
1929 | // If we looked through casts, we can only handle a constant simplification |
1930 | // because we are not allowed to create a cast instruction here. |
1931 | if (auto *C = dyn_cast<Constant>(Val: V)) |
1932 | return ConstantFoldCastOperand(Opcode: Cast0->getOpcode(), C, DestTy: Cast0->getType(), |
1933 | DL: Q.DL); |
1934 | |
1935 | return nullptr; |
1936 | } |
1937 | |
1938 | static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, |
1939 | const SimplifyQuery &Q, |
1940 | bool AllowRefinement, |
1941 | SmallVectorImpl<Instruction *> *DropFlags, |
1942 | unsigned MaxRecurse); |
1943 | |
1944 | static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, |
1945 | const SimplifyQuery &Q, |
1946 | unsigned MaxRecurse) { |
1947 | assert((Opcode == Instruction::And || Opcode == Instruction::Or) && |
1948 | "Must be and/or" ); |
1949 | ICmpInst::Predicate Pred; |
1950 | Value *A, *B; |
1951 | if (!match(V: Op0, P: m_ICmp(Pred, L: m_Value(V&: A), R: m_Value(V&: B))) || |
1952 | !ICmpInst::isEquality(P: Pred)) |
1953 | return nullptr; |
1954 | |
1955 | auto Simplify = [&](Value *Res) -> Value * { |
1956 | Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Ty: Res->getType()); |
1957 | |
1958 | // and (icmp eq a, b), x implies (a==b) inside x. |
1959 | // or (icmp ne a, b), x implies (a==b) inside x. |
1960 | // If x simplifies to true/false, we can simplify the and/or. |
1961 | if (Pred == |
1962 | (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) { |
1963 | if (Res == Absorber) |
1964 | return Absorber; |
1965 | if (Res == ConstantExpr::getBinOpIdentity(Opcode, Ty: Res->getType())) |
1966 | return Op0; |
1967 | return nullptr; |
1968 | } |
1969 | |
1970 | // If we have and (icmp ne a, b), x and for a==b we can simplify x to false, |
1971 | // then we can drop the icmp, as x will already be false in the case where |
1972 | // the icmp is false. Similar for or and true. |
1973 | if (Res == Absorber) |
1974 | return Op1; |
1975 | return nullptr; |
1976 | }; |
1977 | |
1978 | // In the final case (Res == Absorber with inverted predicate), it is safe to |
1979 | // refine poison during simplification, but not undef. For simplicity always |
1980 | // disable undef-based folds here. |
1981 | if (Value *Res = simplifyWithOpReplaced(V: Op1, Op: A, RepOp: B, Q: Q.getWithoutUndef(), |
1982 | /* AllowRefinement */ true, |
1983 | /* DropFlags */ nullptr, MaxRecurse)) |
1984 | return Simplify(Res); |
1985 | if (Value *Res = simplifyWithOpReplaced(V: Op1, Op: B, RepOp: A, Q: Q.getWithoutUndef(), |
1986 | /* AllowRefinement */ true, |
1987 | /* DropFlags */ nullptr, MaxRecurse)) |
1988 | return Simplify(Res); |
1989 | |
1990 | return nullptr; |
1991 | } |
1992 | |
1993 | /// Given a bitwise logic op, check if the operands are add/sub with a common |
1994 | /// source value and inverted constant (identity: C - X -> ~(X + ~C)). |
1995 | static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1, |
1996 | Instruction::BinaryOps Opcode) { |
1997 | assert(Op0->getType() == Op1->getType() && "Mismatched binop types" ); |
1998 | assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op" ); |
1999 | Value *X; |
2000 | Constant *C1, *C2; |
2001 | if ((match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_Constant(C&: C1))) && |
2002 | match(V: Op1, P: m_Sub(L: m_Constant(C&: C2), R: m_Specific(V: X)))) || |
2003 | (match(V: Op1, P: m_Add(L: m_Value(V&: X), R: m_Constant(C&: C1))) && |
2004 | match(V: Op0, P: m_Sub(L: m_Constant(C&: C2), R: m_Specific(V: X))))) { |
2005 | if (ConstantExpr::getNot(C: C1) == C2) { |
2006 | // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0 |
2007 | // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1 |
2008 | // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1 |
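// (Why: in two's complement ~C - X == -(X + C) - 1 == ~(X + C), so the
// two operands are always bitwise complements of each other.)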
2009 | Type *Ty = Op0->getType(); |
2010 | return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty) |
2011 | : ConstantInt::getAllOnesValue(Ty); |
2012 | } |
2013 | } |
2014 | return nullptr; |
2015 | } |
2016 | |
2017 | // Commutative patterns for and that will be tried with both operand orders. |
2018 | static Value *simplifyAndCommutative(Value *Op0, Value *Op1, |
2019 | const SimplifyQuery &Q, |
2020 | unsigned MaxRecurse) { |
2021 | // ~A & A = 0 |
2022 | if (match(V: Op0, P: m_Not(V: m_Specific(V: Op1)))) |
2023 | return Constant::getNullValue(Ty: Op0->getType()); |
2024 | |
2025 | // (A | ?) & A = A |
2026 | if (match(V: Op0, P: m_c_Or(L: m_Specific(V: Op1), R: m_Value()))) |
2027 | return Op1; |
2028 | |
2029 | // (X | ~Y) & (X | Y) --> X |
2030 | Value *X, *Y; |
2031 | if (match(V: Op0, P: m_c_Or(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: Y)))) && |
2032 | match(V: Op1, P: m_c_Or(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
2033 | return X; |
2034 | |
2035 | // If we have a multiplication overflow check that is being 'and'ed with a |
2036 | // check that one of the multipliers is not zero, we can omit the 'and', and |
2037 | // only keep the overflow check. |
2038 | if (isCheckForZeroAndMulWithOverflow(Op0, Op1, IsAnd: true)) |
2039 | return Op1; |
2040 | |
2041 | // -A & A = A if A is a power of two or zero. |
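// e.g. i8: A == 16 --> -A == 0xF0 and 0xF0 & 0x10 == 0x10 == A.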
2042 | if (match(V: Op0, P: m_Neg(V: m_Specific(V: Op1))) && |
2043 | isKnownToBeAPowerOfTwo(V: Op1, DL: Q.DL, /*OrZero*/ true, Depth: 0, AC: Q.AC, CxtI: Q.CxtI, DT: Q.DT)) |
2044 | return Op1; |
2045 | |
2046 | // This is a similar pattern used for checking if a value is a power-of-2: |
2047 | // (A - 1) & A --> 0 (if A is a power-of-2 or 0) |
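// e.g. A == 8 --> (8 - 1) & 8 == 7 & 8 == 0.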
2048 | if (match(V: Op0, P: m_Add(L: m_Specific(V: Op1), R: m_AllOnes())) && |
2049 | isKnownToBeAPowerOfTwo(V: Op1, DL: Q.DL, /*OrZero*/ true, Depth: 0, AC: Q.AC, CxtI: Q.CxtI, DT: Q.DT)) |
2050 | return Constant::getNullValue(Ty: Op1->getType()); |
2051 | |
2052 | // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and |
2053 | // M <= N. |
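// e.g. x == 4, M == 2, N == 3: (4 << 3) & ((4 << 2) - 1) == 32 & 15 == 0.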
2054 | const APInt *Shift1, *Shift2; |
2055 | if (match(V: Op0, P: m_Shl(L: m_Value(V&: X), R: m_APInt(Res&: Shift1))) && |
2056 | match(V: Op1, P: m_Add(L: m_Shl(L: m_Specific(V: X), R: m_APInt(Res&: Shift2)), R: m_AllOnes())) && |
2057 | isKnownToBeAPowerOfTwo(V: X, DL: Q.DL, /*OrZero*/ true, /*Depth*/ 0, AC: Q.AC, |
2058 | CxtI: Q.CxtI) && |
2059 | Shift1->uge(RHS: *Shift2)) |
2060 | return Constant::getNullValue(Ty: Op0->getType()); |
2061 | |
2062 | if (Value *V = |
2063 | simplifyAndOrWithICmpEq(Opcode: Instruction::And, Op0, Op1, Q, MaxRecurse)) |
2064 | return V; |
2065 | |
2066 | return nullptr; |
2067 | } |
2068 | |
2069 | /// Given operands for an And, see if we can fold the result. |
2070 | /// If not, this returns null. |
2071 | static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
2072 | unsigned MaxRecurse) { |
2073 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::And, Op0, Op1, Q)) |
2074 | return C; |
2075 | |
2076 | // X & poison -> poison |
2077 | if (isa<PoisonValue>(Val: Op1)) |
2078 | return Op1; |
2079 | |
2080 | // X & undef -> 0 |
2081 | if (Q.isUndefValue(V: Op1)) |
2082 | return Constant::getNullValue(Ty: Op0->getType()); |
2083 | |
2084 | // X & X = X |
2085 | if (Op0 == Op1) |
2086 | return Op0; |
2087 | |
2088 | // X & 0 = 0 |
2089 | if (match(V: Op1, P: m_Zero())) |
2090 | return Constant::getNullValue(Ty: Op0->getType()); |
2091 | |
2092 | // X & -1 = X |
2093 | if (match(V: Op1, P: m_AllOnes())) |
2094 | return Op0; |
2095 | |
2096 | if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse)) |
2097 | return Res; |
2098 | if (Value *Res = simplifyAndCommutative(Op0: Op1, Op1: Op0, Q, MaxRecurse)) |
2099 | return Res; |
2100 | |
2101 | if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Opcode: Instruction::And)) |
2102 | return V; |
2103 | |
2104 | // A mask that only clears known zeros of a shifted value is a no-op. |
2105 | const APInt *Mask; |
2106 | const APInt *ShAmt; |
2107 | Value *X, *Y; |
2108 | if (match(V: Op1, P: m_APInt(Res&: Mask))) { |
2109 | // If all bits in the inverted and shifted mask are clear: |
2110 | // and (shl X, ShAmt), Mask --> shl X, ShAmt |
2111 | if (match(V: Op0, P: m_Shl(L: m_Value(V&: X), R: m_APInt(Res&: ShAmt))) && |
2112 | (~(*Mask)).lshr(ShiftAmt: *ShAmt).isZero()) |
2113 | return Op0; |
2114 | |
2115 | // If all bits in the inverted and shifted mask are clear: |
2116 | // and (lshr X, ShAmt), Mask --> lshr X, ShAmt |
2117 | if (match(V: Op0, P: m_LShr(L: m_Value(V&: X), R: m_APInt(Res&: ShAmt))) && |
2118 | (~(*Mask)).shl(ShiftAmt: *ShAmt).isZero()) |
2119 | return Op0; |
2120 | } |
2121 | |
2122 | // and 2^x-1, 2^C --> 0 where x <= C. |
2123 | const APInt *PowerC; |
2124 | Value *Shift; |
2125 | if (match(V: Op1, P: m_Power2(V&: PowerC)) && |
2126 | match(V: Op0, P: m_Add(L: m_Value(V&: Shift), R: m_AllOnes())) && |
2127 | isKnownToBeAPowerOfTwo(V: Shift, DL: Q.DL, /*OrZero*/ false, Depth: 0, AC: Q.AC, CxtI: Q.CxtI, |
2128 | DT: Q.DT)) { |
2129 | KnownBits Known = computeKnownBits(V: Shift, /* Depth */ 0, Q); |
2130 | // Use getActiveBits() to make use of the additional power of two knowledge |
2131 | if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits()) |
2132 | return ConstantInt::getNullValue(Ty: Op1->getType()); |
2133 | } |
2134 | |
2135 | if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, IsAnd: true)) |
2136 | return V; |
2137 | |
2138 | // Try some generic simplifications for associative operations. |
2139 | if (Value *V = |
2140 | simplifyAssociativeBinOp(Opcode: Instruction::And, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2141 | return V; |
2142 | |
2143 | // And distributes over Or. Try some generic simplifications based on this. |
2144 | if (Value *V = expandCommutativeBinOp(Opcode: Instruction::And, L: Op0, R: Op1, |
2145 | OpcodeToExpand: Instruction::Or, Q, MaxRecurse)) |
2146 | return V; |
2147 | |
2148 | // And distributes over Xor. Try some generic simplifications based on this. |
2149 | if (Value *V = expandCommutativeBinOp(Opcode: Instruction::And, L: Op0, R: Op1, |
2150 | OpcodeToExpand: Instruction::Xor, Q, MaxRecurse)) |
2151 | return V; |
2152 | |
2153 | if (isa<SelectInst>(Val: Op0) || isa<SelectInst>(Val: Op1)) { |
2154 | if (Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) { |
2155 | // A & (A && B) -> A && B |
2156 | if (match(V: Op1, P: m_Select(C: m_Specific(V: Op0), L: m_Value(), R: m_Zero()))) |
2157 | return Op1; |
2158 | else if (match(V: Op0, P: m_Select(C: m_Specific(V: Op1), L: m_Value(), R: m_Zero()))) |
2159 | return Op0; |
2160 | } |
2161 | // If the operation is with the result of a select instruction, check |
2162 | // whether operating on either branch of the select always yields the same |
2163 | // value. |
2164 | if (Value *V = |
2165 | threadBinOpOverSelect(Opcode: Instruction::And, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2166 | return V; |
2167 | } |
2168 | |
2169 | // If the operation is with the result of a phi instruction, check whether |
2170 | // operating on all incoming values of the phi always yields the same value. |
2171 | if (isa<PHINode>(Val: Op0) || isa<PHINode>(Val: Op1)) |
2172 | if (Value *V = |
2173 | threadBinOpOverPHI(Opcode: Instruction::And, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2174 | return V; |
2175 | |
2176 | // Assuming the effective width of Y is not larger than A, i.e. all bits |
2177 | // from X and Y are disjoint in (X << A) | Y, |
2178 | // if the mask of this AND op covers all bits of X or Y, while it covers |
2179 | // no bits from the other, we can bypass this AND op. E.g., |
2180 | // ((X << A) | Y) & Mask -> Y, |
2181 | // if Mask = ((1 << effective_width_of(Y)) - 1) |
2182 | // ((X << A) | Y) & Mask -> X << A, |
2183 | // if Mask = ((1 << effective_width_of(X)) - 1) << A |
2184 | // SimplifyDemandedBits in InstCombine can optimize the general case. |
2185 | // This pattern aims to help other passes for a common case. |
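// e.g. ((%x << 8) | %y) & 255 --> %y when the shl is nuw and %y is known
// to be u< 256.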
2186 | Value *XShifted; |
2187 | if (Q.IIQ.UseInstrInfo && match(V: Op1, P: m_APInt(Res&: Mask)) && |
2188 | match(V: Op0, P: m_c_Or(L: m_CombineAnd(L: m_NUWShl(L: m_Value(V&: X), R: m_APInt(Res&: ShAmt)), |
2189 | R: m_Value(V&: XShifted)), |
2190 | R: m_Value(V&: Y)))) { |
2191 | const unsigned Width = Op0->getType()->getScalarSizeInBits(); |
2192 | const unsigned ShftCnt = ShAmt->getLimitedValue(Limit: Width); |
2193 | const KnownBits YKnown = computeKnownBits(V: Y, /* Depth */ 0, Q); |
2194 | const unsigned EffWidthY = YKnown.countMaxActiveBits(); |
2195 | if (EffWidthY <= ShftCnt) { |
2196 | const KnownBits XKnown = computeKnownBits(V: X, /* Depth */ 0, Q); |
2197 | const unsigned EffWidthX = XKnown.countMaxActiveBits(); |
2198 | const APInt EffBitsY = APInt::getLowBitsSet(numBits: Width, loBitsSet: EffWidthY); |
2199 | const APInt EffBitsX = APInt::getLowBitsSet(numBits: Width, loBitsSet: EffWidthX) << ShftCnt; |
2200 | // If the mask is extracting all bits from X or Y as is, we can skip |
2201 | // this AND op. |
2202 | if (EffBitsY.isSubsetOf(RHS: *Mask) && !EffBitsX.intersects(RHS: *Mask)) |
2203 | return Y; |
2204 | if (EffBitsX.isSubsetOf(RHS: *Mask) && !EffBitsY.intersects(RHS: *Mask)) |
2205 | return XShifted; |
2206 | } |
2207 | } |
2208 | |
2209 | // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0 |
2210 | // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0 |
2211 | BinaryOperator *Or; |
2212 | if (match(V: Op0, P: m_c_Xor(L: m_Value(V&: X), |
2213 | R: m_CombineAnd(L: m_BinOp(I&: Or), |
2214 | R: m_c_Or(L: m_Deferred(V: X), R: m_Value(V&: Y))))) && |
2215 | match(V: Op1, P: m_c_Xor(L: m_Specific(V: Or), R: m_Specific(V: Y)))) |
2216 | return Constant::getNullValue(Ty: Op0->getType()); |
2217 | |
2218 | const APInt *C1; |
2219 | Value *A; |
2220 | // (A ^ C) & (A ^ ~C) -> 0 |
2221 | if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_APInt(Res&: C1))) && |
2222 | match(V: Op1, P: m_Xor(L: m_Specific(V: A), R: m_SpecificInt(V: ~*C1)))) |
2223 | return Constant::getNullValue(Ty: Op0->getType()); |
2224 | |
2225 | if (Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) { |
2226 | if (std::optional<bool> Implied = isImpliedCondition(LHS: Op0, RHS: Op1, DL: Q.DL)) { |
2227 | // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1. |
2228 | if (*Implied == true) |
2229 | return Op0; |
2230 | // If Op0 is true implies Op1 is false, then they are not true together. |
2231 | if (*Implied == false) |
2232 | return ConstantInt::getFalse(Ty: Op0->getType()); |
2233 | } |
2234 | if (std::optional<bool> Implied = isImpliedCondition(LHS: Op1, RHS: Op0, DL: Q.DL)) { |
2235 | // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0. |
2236 | if (*Implied) |
2237 | return Op1; |
2238 | // If Op1 is true implies Op0 is false, then they are not true together. |
2239 | if (!*Implied) |
2240 | return ConstantInt::getFalse(Ty: Op1->getType()); |
2241 | } |
2242 | } |
2243 | |
2244 | if (Value *V = simplifyByDomEq(Opcode: Instruction::And, Op0, Op1, Q, MaxRecurse)) |
2245 | return V; |
2246 | |
2247 | return nullptr; |
2248 | } |
2249 | |
2250 | Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
2251 | return ::simplifyAndInst(Op0, Op1, Q, MaxRecurse: RecursionLimit); |
2252 | } |
2253 | |
2254 | // TODO: Many of these folds could use LogicalAnd/LogicalOr. |
2255 | static Value *simplifyOrLogic(Value *X, Value *Y) { |
2256 | assert(X->getType() == Y->getType() && "Expected same type for 'or' ops" ); |
2257 | Type *Ty = X->getType(); |
2258 | |
2259 | // X | ~X --> -1 |
2260 | if (match(V: Y, P: m_Not(V: m_Specific(V: X)))) |
2261 | return ConstantInt::getAllOnesValue(Ty); |
2262 | |
2263 | // X | ~(X & ?) = -1 |
2264 | if (match(V: Y, P: m_Not(V: m_c_And(L: m_Specific(V: X), R: m_Value())))) |
2265 | return ConstantInt::getAllOnesValue(Ty); |
2266 | |
2267 | // X | (X & ?) --> X |
2268 | if (match(V: Y, P: m_c_And(L: m_Specific(V: X), R: m_Value()))) |
2269 | return X; |
2270 | |
2271 | Value *A, *B; |
2272 | |
2273 | // (A ^ B) | (A | B) --> A | B |
2274 | // (A ^ B) | (B | A) --> B | A |
2275 | if (match(V: X, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2276 | match(V: Y, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2277 | return Y; |
2278 | |
2279 | // ~(A ^ B) | (A | B) --> -1 |
2280 | // ~(A ^ B) | (B | A) --> -1 |
2281 | if (match(V: X, P: m_Not(V: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B)))) && |
2282 | match(V: Y, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2283 | return ConstantInt::getAllOnesValue(Ty); |
2284 | |
2285 | // (A & ~B) | (A ^ B) --> A ^ B |
2286 | // (~B & A) | (A ^ B) --> A ^ B |
2287 | // (A & ~B) | (B ^ A) --> B ^ A |
2288 | // (~B & A) | (B ^ A) --> B ^ A |
2289 | if (match(V: X, P: m_c_And(L: m_Value(V&: A), R: m_Not(V: m_Value(V&: B)))) && |
2290 | match(V: Y, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2291 | return Y; |
2292 | |
2293 | // (~A ^ B) | (A & B) --> ~A ^ B |
2294 | // (B ^ ~A) | (A & B) --> B ^ ~A |
2295 | // (~A ^ B) | (B & A) --> ~A ^ B |
2296 | // (B ^ ~A) | (B & A) --> B ^ ~A |
2297 | if (match(V: X, P: m_c_Xor(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) && |
2298 | match(V: Y, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2299 | return X; |
2300 | |
2301 | // (~A | B) | (A ^ B) --> -1 |
2302 | // (~A | B) | (B ^ A) --> -1 |
2303 | // (B | ~A) | (A ^ B) --> -1 |
2304 | // (B | ~A) | (B ^ A) --> -1 |
2305 | if (match(V: X, P: m_c_Or(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) && |
2306 | match(V: Y, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2307 | return ConstantInt::getAllOnesValue(Ty); |
2308 | |
2309 | // (~A & B) | ~(A | B) --> ~A |
2310 | // (~A & B) | ~(B | A) --> ~A |
2311 | // (B & ~A) | ~(A | B) --> ~A |
2312 | // (B & ~A) | ~(B | A) --> ~A |
2313 | Value *NotA; |
2314 | if (match(V: X, P: m_c_And(L: m_CombineAnd(L: m_Value(V&: NotA), R: m_Not(V: m_Value(V&: A))), |
2315 | R: m_Value(V&: B))) && |
2316 | match(V: Y, P: m_Not(V: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))))) |
2317 | return NotA; |
2318 | // The same is true of Logical And |
2319 | // TODO: This could share the logic of the version above if there was a |
2320 | // version of LogicalAnd that allowed more than just i1 types. |
2321 | if (match(V: X, P: m_c_LogicalAnd(L: m_CombineAnd(L: m_Value(V&: NotA), R: m_Not(V: m_Value(V&: A))), |
2322 | R: m_Value(V&: B))) && |
2323 | match(V: Y, P: m_Not(V: m_c_LogicalOr(L: m_Specific(V: A), R: m_Specific(V: B))))) |
2324 | return NotA; |
2325 | |
2326 | // ~(A ^ B) | (A & B) --> ~(A ^ B) |
2327 | // ~(A ^ B) | (B & A) --> ~(A ^ B) |
2328 | Value *NotAB; |
2329 | if (match(V: X, P: m_CombineAnd(L: m_Not(V: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))), |
2330 | R: m_Value(V&: NotAB))) && |
2331 | match(V: Y, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2332 | return NotAB; |
2333 | |
2334 | // ~(A & B) | (A ^ B) --> ~(A & B) |
2335 | // ~(A & B) | (B ^ A) --> ~(A & B) |
2336 | if (match(V: X, P: m_CombineAnd(L: m_Not(V: m_And(L: m_Value(V&: A), R: m_Value(V&: B))), |
2337 | R: m_Value(V&: NotAB))) && |
2338 | match(V: Y, P: m_c_Xor(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2339 | return NotAB; |
2340 | |
2341 | return nullptr; |
2342 | } |
2343 | |
2344 | /// Given operands for an Or, see if we can fold the result. |
2345 | /// If not, this returns null. |
2346 | static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
2347 | unsigned MaxRecurse) { |
2348 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::Or, Op0, Op1, Q)) |
2349 | return C; |
2350 | |
2351 | // X | poison -> poison |
2352 | if (isa<PoisonValue>(Val: Op1)) |
2353 | return Op1; |
2354 | |
2355 | // X | undef -> -1 |
2356 | // X | -1 = -1 |
2357 | // Do not return Op1 because it may contain undef elements if it's a vector. |
2358 | if (Q.isUndefValue(V: Op1) || match(V: Op1, P: m_AllOnes())) |
2359 | return Constant::getAllOnesValue(Ty: Op0->getType()); |
2360 | |
2361 | // X | X = X |
2362 | // X | 0 = X |
2363 | if (Op0 == Op1 || match(V: Op1, P: m_Zero())) |
2364 | return Op0; |
2365 | |
2366 | if (Value *R = simplifyOrLogic(X: Op0, Y: Op1)) |
2367 | return R; |
2368 | if (Value *R = simplifyOrLogic(X: Op1, Y: Op0)) |
2369 | return R; |
2370 | |
2371 | if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Opcode: Instruction::Or)) |
2372 | return V; |
2373 | |
2374 | // Rotated -1 is still -1: |
2375 | // (-1 << X) | (-1 >> (C - X)) --> -1 |
2376 | // (-1 >> X) | (-1 << (C - X)) --> -1 |
2377 | // ...with C <= bitwidth (and commuted variants). |
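// e.g. i8: (-1 << %x) | (-1 >> (8 - %x)) --> -1 for any valid %x.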
2378 | Value *X, *Y; |
2379 | if ((match(V: Op0, P: m_Shl(L: m_AllOnes(), R: m_Value(V&: X))) && |
2380 | match(V: Op1, P: m_LShr(L: m_AllOnes(), R: m_Value(V&: Y)))) || |
2381 | (match(V: Op1, P: m_Shl(L: m_AllOnes(), R: m_Value(V&: X))) && |
2382 | match(V: Op0, P: m_LShr(L: m_AllOnes(), R: m_Value(V&: Y))))) { |
2383 | const APInt *C; |
2384 | if ((match(V: X, P: m_Sub(L: m_APInt(Res&: C), R: m_Specific(V: Y))) || |
2385 | match(V: Y, P: m_Sub(L: m_APInt(Res&: C), R: m_Specific(V: X)))) && |
2386 | C->ule(RHS: X->getType()->getScalarSizeInBits())) { |
2387 | return ConstantInt::getAllOnesValue(Ty: X->getType()); |
2388 | } |
2389 | } |
2390 | |
2391 | // A funnel shift (rotate) can be decomposed into simpler shifts. See if we |
2392 | // are mixing in another shift that is redundant with the funnel shift. |
2393 | |
2394 | // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y |
2395 | // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y |
2396 | if (match(V: Op0, |
2397 | P: m_Intrinsic<Intrinsic::fshl>(Op0: m_Value(V&: X), Op1: m_Value(), Op2: m_Value(V&: Y))) && |
2398 | match(V: Op1, P: m_Shl(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
2399 | return Op0; |
2400 | if (match(V: Op1, |
2401 | P: m_Intrinsic<Intrinsic::fshl>(Op0: m_Value(V&: X), Op1: m_Value(), Op2: m_Value(V&: Y))) && |
2402 | match(V: Op0, P: m_Shl(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
2403 | return Op1; |
2404 | |
2405 | // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y |
2406 | // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y |
2407 | if (match(V: Op0, |
2408 | P: m_Intrinsic<Intrinsic::fshr>(Op0: m_Value(), Op1: m_Value(V&: X), Op2: m_Value(V&: Y))) && |
2409 | match(V: Op1, P: m_LShr(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
2410 | return Op0; |
2411 | if (match(V: Op1, |
2412 | P: m_Intrinsic<Intrinsic::fshr>(Op0: m_Value(), Op1: m_Value(V&: X), Op2: m_Value(V&: Y))) && |
2413 | match(V: Op0, P: m_LShr(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
2414 | return Op1; |
2415 | |
2416 | if (Value *V = |
2417 | simplifyAndOrWithICmpEq(Opcode: Instruction::Or, Op0, Op1, Q, MaxRecurse)) |
2418 | return V; |
2419 | if (Value *V = |
2420 | simplifyAndOrWithICmpEq(Opcode: Instruction::Or, Op0: Op1, Op1: Op0, Q, MaxRecurse)) |
2421 | return V; |
2422 | |
2423 | if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, IsAnd: false)) |
2424 | return V; |
2425 | |
2426 | // If we have a multiplication overflow check that is being 'or'ed with a |
2427 | // check that one of the multipliers is not zero, we can omit the 'or', and |
2428 | // only keep the overflow check. |
2429 | if (isCheckForZeroAndMulWithOverflow(Op0, Op1, IsAnd: false)) |
2430 | return Op1; |
2431 | if (isCheckForZeroAndMulWithOverflow(Op0: Op1, Op1: Op0, IsAnd: false)) |
2432 | return Op0; |
2433 | |
2434 | // Try some generic simplifications for associative operations. |
2435 | if (Value *V = |
2436 | simplifyAssociativeBinOp(Opcode: Instruction::Or, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2437 | return V; |
2438 | |
2439 | // Or distributes over And. Try some generic simplifications based on this. |
2440 | if (Value *V = expandCommutativeBinOp(Opcode: Instruction::Or, L: Op0, R: Op1, |
2441 | OpcodeToExpand: Instruction::And, Q, MaxRecurse)) |
2442 | return V; |
2443 | |
2444 | if (isa<SelectInst>(Val: Op0) || isa<SelectInst>(Val: Op1)) { |
2445 | if (Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) { |
2446 | // A | (A || B) -> A || B |
2447 | if (match(V: Op1, P: m_Select(C: m_Specific(V: Op0), L: m_One(), R: m_Value()))) |
2448 | return Op1; |
2449 | else if (match(V: Op0, P: m_Select(C: m_Specific(V: Op1), L: m_One(), R: m_Value()))) |
2450 | return Op0; |
2451 | } |
2452 | // If the operation is with the result of a select instruction, check |
2453 | // whether operating on either branch of the select always yields the same |
2454 | // value. |
2455 | if (Value *V = |
2456 | threadBinOpOverSelect(Opcode: Instruction::Or, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2457 | return V; |
2458 | } |
2459 | |
2460 | // (A & C1)|(B & C2) |
2461 | Value *A, *B; |
2462 | const APInt *C1, *C2; |
2463 | if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_APInt(Res&: C1))) && |
2464 | match(V: Op1, P: m_And(L: m_Value(V&: B), R: m_APInt(Res&: C2)))) { |
2465 | if (*C1 == ~*C2) { |
2466 | // (A & C1)|(B & C2) |
2467 | // If we have: ((V + N) & C1) | (V & C2) |
2468 | // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0 |
2469 | // replace with V+N. |
2470 | Value *N; |
2471 | if (C2->isMask() && // C2 == 0+1+ |
2472 | match(V: A, P: m_c_Add(L: m_Specific(V: B), R: m_Value(V&: N)))) { |
2473 | // Add commutes, try both ways. |
2474 | if (MaskedValueIsZero(V: N, Mask: *C2, DL: Q)) |
2475 | return A; |
2476 | } |
2477 | // Or commutes, try both ways. |
2478 | if (C1->isMask() && match(V: B, P: m_c_Add(L: m_Specific(V: A), R: m_Value(V&: N)))) { |
2479 | // Add commutes, try both ways. |
2480 | if (MaskedValueIsZero(V: N, Mask: *C1, DL: Q)) |
2481 | return B; |
2482 | } |
2483 | } |
2484 | } |
2485 | |
2486 | // If the operation is with the result of a phi instruction, check whether |
2487 | // operating on all incoming values of the phi always yields the same value. |
2488 | if (isa<PHINode>(Val: Op0) || isa<PHINode>(Val: Op1)) |
2489 | if (Value *V = threadBinOpOverPHI(Opcode: Instruction::Or, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2490 | return V; |
2491 | |
2492 | // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one. |
2493 | if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_APInt(Res&: C1))) && |
2494 | match(V: Op1, P: m_Xor(L: m_Specific(V: A), R: m_SpecificInt(V: ~*C1)))) |
2495 | return Constant::getAllOnesValue(Ty: Op0->getType()); |
2496 | |
2497 | if (Op0->getType()->isIntOrIntVectorTy(BitWidth: 1)) { |
2498 | if (std::optional<bool> Implied = |
2499 | isImpliedCondition(LHS: Op0, RHS: Op1, DL: Q.DL, LHSIsTrue: false)) { |
2500 | // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0. |
2501 | if (*Implied == false) |
2502 | return Op0; |
2503 | // If Op0 is false implies Op1 is true, then at least one is always true. |
2504 | if (*Implied == true) |
2505 | return ConstantInt::getTrue(Ty: Op0->getType()); |
2506 | } |
2507 | if (std::optional<bool> Implied = |
2508 | isImpliedCondition(LHS: Op1, RHS: Op0, DL: Q.DL, LHSIsTrue: false)) { |
2509 | // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1. |
2510 | if (*Implied == false) |
2511 | return Op1; |
2512 | // If Op1 is false implies Op0 is true, then at least one is always true. |
2513 | if (*Implied == true) |
2514 | return ConstantInt::getTrue(Ty: Op1->getType()); |
2515 | } |
2516 | } |
2517 | |
2518 | if (Value *V = simplifyByDomEq(Opcode: Instruction::Or, Op0, Op1, Q, MaxRecurse)) |
2519 | return V; |
2520 | |
2521 | return nullptr; |
2522 | } |
2523 | |
2524 | Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
2525 | return ::simplifyOrInst(Op0, Op1, Q, MaxRecurse: RecursionLimit); |
2526 | } |
2527 | |
2528 | /// Given operands for a Xor, see if we can fold the result. |
2529 | /// If not, this returns null. |
2530 | static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
2531 | unsigned MaxRecurse) { |
2532 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::Xor, Op0, Op1, Q)) |
2533 | return C; |
2534 | |
2535 | // X ^ poison -> poison |
2536 | if (isa<PoisonValue>(Val: Op1)) |
2537 | return Op1; |
2538 | |
2539 | // A ^ undef -> undef |
2540 | if (Q.isUndefValue(V: Op1)) |
2541 | return Op1; |
2542 | |
2543 | // A ^ 0 = A |
2544 | if (match(V: Op1, P: m_Zero())) |
2545 | return Op0; |
2546 | |
2547 | // A ^ A = 0 |
2548 | if (Op0 == Op1) |
2549 | return Constant::getNullValue(Ty: Op0->getType()); |
2550 | |
2551 | // A ^ ~A = ~A ^ A = -1 |
2552 | if (match(V: Op0, P: m_Not(V: m_Specific(V: Op1))) || match(V: Op1, P: m_Not(V: m_Specific(V: Op0)))) |
2553 | return Constant::getAllOnesValue(Ty: Op0->getType()); |
2554 | |
2555 | auto foldAndOrNot = [](Value *X, Value *Y) -> Value * { |
2556 | Value *A, *B; |
2557 | // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants. |
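// (Per-bit: A == 0 gives B ^ B == 0 and A == 1 gives 0 ^ 1 == 1, so the
// result always equals A.)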
2558 | if (match(V: X, P: m_c_And(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: B))) && |
2559 | match(V: Y, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2560 | return A; |
2561 | |
2562 | // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants. |
2563 | // The 'not' op must contain a complete -1 operand (no undef elements for |
2564 | // vector) for the transform to be safe. |
2565 | Value *NotA; |
2566 | if (match(V: X, P: m_c_Or(L: m_CombineAnd(L: m_Not(V: m_Value(V&: A)), R: m_Value(V&: NotA)), |
2567 | R: m_Value(V&: B))) && |
2568 | match(V: Y, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2569 | return NotA; |
2570 | |
2571 | return nullptr; |
2572 | }; |
2573 | if (Value *R = foldAndOrNot(Op0, Op1)) |
2574 | return R; |
2575 | if (Value *R = foldAndOrNot(Op1, Op0)) |
2576 | return R; |
2577 | |
2578 | if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Opcode: Instruction::Xor)) |
2579 | return V; |
2580 | |
2581 | // Try some generic simplifications for associative operations. |
2582 | if (Value *V = |
2583 | simplifyAssociativeBinOp(Opcode: Instruction::Xor, LHS: Op0, RHS: Op1, Q, MaxRecurse)) |
2584 | return V; |
2585 | |
2586 | // Threading Xor over selects and phi nodes is pointless, so don't bother. |
2587 | // Threading over the select in "A ^ select(cond, B, C)" means evaluating |
2588 | // "A^B" and "A^C" and seeing if they are equal; but they are equal if and |
2589 | // only if B and C are equal. If B and C are equal then (since we assume |
2590 | // that operands have already been simplified) "select(cond, B, C)" should |
2591 | // have been simplified to the common value of B and C already. Analysing |
2592 | // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly |
2593 | // for threading over phi nodes. |
2594 | |
2595 | if (Value *V = simplifyByDomEq(Opcode: Instruction::Xor, Op0, Op1, Q, MaxRecurse)) |
2596 | return V; |
2597 | |
2598 | return nullptr; |
2599 | } |
2600 | |
2601 | Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { |
2602 | return ::simplifyXorInst(Op0, Op1, Q, MaxRecurse: RecursionLimit); |
2603 | } |
2604 | |
2605 | static Type *getCompareTy(Value *Op) { |
2606 | return CmpInst::makeCmpResultType(opnd_type: Op->getType()); |
2607 | } |
2608 | |
2609 | /// Rummage around inside V looking for something equivalent to the comparison |
2610 | /// "LHS Pred RHS". Return such a value if found, otherwise return null. |
2611 | /// Helper function for analyzing max/min idioms. |
2612 | static Value *extractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2613 |                                          Value *LHS, Value *RHS) {
2614 | SelectInst *SI = dyn_cast<SelectInst>(Val: V); |
2615 | if (!SI) |
2616 | return nullptr; |
2617 | CmpInst *Cmp = dyn_cast<CmpInst>(Val: SI->getCondition()); |
2618 | if (!Cmp) |
2619 | return nullptr; |
2620 | Value *CmpLHS = Cmp->getOperand(i_nocapture: 0), *CmpRHS = Cmp->getOperand(i_nocapture: 1); |
2621 | if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) |
2622 | return Cmp; |
2623 | if (Pred == CmpInst::getSwappedPredicate(pred: Cmp->getPredicate()) && |
2624 | LHS == CmpRHS && RHS == CmpLHS) |
2625 | return Cmp; |
2626 | return nullptr; |
2627 | } |
2628 | |
2629 | /// Return true if the underlying object (storage) must be disjoint from |
2630 | /// storage returned by any noalias return call. |
2631 | static bool isAllocDisjoint(const Value *V) { |
2632 | // For allocas, we consider only static ones (dynamic |
2633 | // allocas might be transformed into calls to malloc not simultaneously |
2634 | // live with the compared-to allocation). For globals, we exclude symbols |
2635 |   // that might be resolved lazily to symbols in another dynamically-loaded
2636 | // library (and, thus, could be malloc'ed by the implementation). |
2637 | if (const AllocaInst *AI = dyn_cast<AllocaInst>(Val: V)) |
2638 | return AI->isStaticAlloca(); |
2639 | if (const GlobalValue *GV = dyn_cast<GlobalValue>(Val: V)) |
2640 | return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() || |
2641 | GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) && |
2642 | !GV->isThreadLocal(); |
2643 | if (const Argument *A = dyn_cast<Argument>(Val: V)) |
2644 | return A->hasByValAttr(); |
2645 | return false; |
2646 | } |
2647 | |
2648 | /// Return true if V1 and V2 are each the base of some distinct storage region
2649 | /// [V, V + object_size(V)) which do not overlap. Note that zero-sized regions
2650 | /// *are* possible, and that zero-sized regions do not overlap with any other.
2651 | static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) { |
2652 | // Global variables always exist, so they always exist during the lifetime |
2653 | // of each other and all allocas. Global variables themselves usually have |
2654 | // non-overlapping storage, but since their addresses are constants, the |
2655 | // case involving two globals does not reach here and is instead handled in |
2656 | // constant folding. |
2657 | // |
2658 | // Two different allocas usually have different addresses... |
2659 | // |
2660 | // However, if there's an @llvm.stackrestore dynamically in between two |
2661 | // allocas, they may have the same address. It's tempting to reduce the |
2662 | // scope of the problem by only looking at *static* allocas here. That would |
2663 | // cover the majority of allocas while significantly reducing the likelihood |
2664 | // of having an @llvm.stackrestore pop up in the middle. However, it's not |
2665 | // actually impossible for an @llvm.stackrestore to pop up in the middle of |
2666 | // an entry block. Also, if we have a block that's not attached to a |
2667 | // function, we can't tell if it's "static" under the current definition. |
2668 |   // Theoretically, this problem could be fixed by creating a new
2669 | // instruction kind specifically for static allocas. Such a new instruction |
2670 | // could be required to be at the top of the entry block, thus preventing it |
2671 | // from being subject to a @llvm.stackrestore. Instcombine could even |
2672 | // convert regular allocas into these special allocas. It'd be nifty. |
2673 | // However, until then, this problem remains open. |
2674 | // |
2675 | // So, we'll assume that two non-empty allocas have different addresses |
2676 | // for now. |
2677 | auto isByValArg = [](const Value *V) { |
2678 | const Argument *A = dyn_cast<Argument>(Val: V); |
2679 | return A && A->hasByValAttr(); |
2680 | }; |
2681 | |
2682 |   // Byval args are each backed by storage that does not overlap with other
2683 |   // byval args, allocas, or globals.
2684 | if (isByValArg(V1)) |
2685 | return isa<AllocaInst>(Val: V2) || isa<GlobalVariable>(Val: V2) || isByValArg(V2); |
2686 | if (isByValArg(V2)) |
2687 | return isa<AllocaInst>(Val: V1) || isa<GlobalVariable>(Val: V1) || isByValArg(V1); |
2688 | |
2689 | return isa<AllocaInst>(Val: V1) && |
2690 | (isa<AllocaInst>(Val: V2) || isa<GlobalVariable>(Val: V2)); |
2691 | } |
2692 | |
2693 | // A significant optimization not implemented here is assuming that alloca |
2694 | // addresses are not equal to incoming argument values. They don't *alias*, |
2695 | // as we say, but that doesn't mean they aren't equal, so we take a |
2696 | // conservative approach. |
2697 | // |
2698 | // This is inspired in part by C++11 5.10p1: |
2699 | // "Two pointers of the same type compare equal if and only if they are both |
2700 | // null, both point to the same function, or both represent the same |
2701 | // address." |
2702 | // |
2703 | // This is pretty permissive. |
2704 | // |
2705 | // It's also partly due to C11 6.5.9p6: |
2706 | // "Two pointers compare equal if and only if both are null pointers, both are |
2707 | // pointers to the same object (including a pointer to an object and a |
2708 | // subobject at its beginning) or function, both are pointers to one past the |
2709 | // last element of the same array object, or one is a pointer to one past the |
2710 | // end of one array object and the other is a pointer to the start of a |
2711 | // different array object that happens to immediately follow the first array |
2712 | // object in the address space."
2713 | // |
2714 | // C11's version is more restrictive, however there's no reason why an argument |
2715 | // couldn't be a one-past-the-end value for a stack object in the caller and be |
2716 | // equal to the beginning of a stack object in the callee. |
2717 | // |
2718 | // If the C and C++ standards are ever made sufficiently restrictive in this |
2719 | // area, it may be possible to update LLVM's semantics accordingly and reinstate |
2720 | // this optimization. |
2721 | static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS, |
2722 | Value *RHS, const SimplifyQuery &Q) { |
2723 | assert(LHS->getType() == RHS->getType() && "Must have same types" ); |
2724 | const DataLayout &DL = Q.DL; |
2725 | const TargetLibraryInfo *TLI = Q.TLI; |
2726 | |
2727 | // We can only fold certain predicates on pointer comparisons. |
2728 | switch (Pred) { |
2729 | default: |
2730 | return nullptr; |
2731 | |
2732 | // Equality comparisons are easy to fold. |
2733 | case CmpInst::ICMP_EQ: |
2734 | case CmpInst::ICMP_NE: |
2735 | break; |
2736 | |
2737 | // We can only handle unsigned relational comparisons because 'inbounds' on |
2738 | // a GEP only protects against unsigned wrapping. |
2739 | case CmpInst::ICMP_UGT: |
2740 | case CmpInst::ICMP_UGE: |
2741 | case CmpInst::ICMP_ULT: |
2742 | case CmpInst::ICMP_ULE: |
2743 | // However, we have to switch them to their signed variants to handle |
2744 | // negative indices from the base pointer. |
2745 | Pred = ICmpInst::getSignedPredicate(pred: Pred); |
2746 | break; |
2747 | } |
2748 | |
2749 | // Strip off any constant offsets so that we can reason about them. |
2750 | // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets |
2751 | // here and compare base addresses like AliasAnalysis does, however there are |
2752 | // numerous hazards. AliasAnalysis and its utilities rely on special rules |
2753 | // governing loads and stores which don't apply to icmps. Also, AliasAnalysis |
2754 | // doesn't need to guarantee pointer inequality when it says NoAlias. |
2755 | |
2756 |   // Even if a non-inbounds GEP occurs along the path, we can still optimize
2757 | // equality comparisons concerning the result. |
2758 | bool AllowNonInbounds = ICmpInst::isEquality(P: Pred); |
2759 | unsigned IndexSize = DL.getIndexTypeSizeInBits(Ty: LHS->getType()); |
2760 | APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0); |
2761 | LHS = LHS->stripAndAccumulateConstantOffsets(DL, Offset&: LHSOffset, AllowNonInbounds); |
2762 | RHS = RHS->stripAndAccumulateConstantOffsets(DL, Offset&: RHSOffset, AllowNonInbounds); |
2763 | |
2764 | // If LHS and RHS are related via constant offsets to the same base |
2765 | // value, we can replace it with an icmp which just compares the offsets. |
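     |   // For example, "gep inbounds (%p, 4) ult gep inbounds (%p, 8)" strips
     |   // both sides down to %p and reduces to "4 slt 8" --> true (the
     |   // predicate was switched to its signed form above).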
2766 | if (LHS == RHS) |
2767 | return ConstantInt::get(Ty: getCompareTy(Op: LHS), |
2768 | V: ICmpInst::compare(LHS: LHSOffset, RHS: RHSOffset, Pred)); |
2769 | |
2770 | // Various optimizations for (in)equality comparisons. |
2771 | if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { |
2772 | // Different non-empty allocations that exist at the same time have |
2773 | // different addresses (if the program can tell). If the offsets are |
2774 | // within the bounds of their allocations (and not one-past-the-end! |
2775 | // so we can't use inbounds!), and their allocations aren't the same, |
2776 | // the pointers are not equal. |
2777 | if (haveNonOverlappingStorage(V1: LHS, V2: RHS)) { |
2778 | uint64_t LHSSize, RHSSize; |
2779 | ObjectSizeOpts Opts; |
2780 | Opts.EvalMode = ObjectSizeOpts::Mode::Min; |
2781 | auto *F = [](Value *V) -> Function * { |
2782 | if (auto *I = dyn_cast<Instruction>(Val: V)) |
2783 | return I->getFunction(); |
2784 | if (auto *A = dyn_cast<Argument>(Val: V)) |
2785 | return A->getParent(); |
2786 | return nullptr; |
2787 | }(LHS); |
2788 | Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true; |
2789 | if (getObjectSize(Ptr: LHS, Size&: LHSSize, DL, TLI, Opts) && |
2790 | getObjectSize(Ptr: RHS, Size&: RHSSize, DL, TLI, Opts)) { |
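     |         // If the pointer values were equal, one base would have to lie
     |         // inside the other object's region; an offset distance smaller
     |         // than that object's (minimum) size contradicts the non-overlap
     |         // established above.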
2791 | APInt Dist = LHSOffset - RHSOffset; |
2792 | if (Dist.isNonNegative() ? Dist.ult(RHS: LHSSize) : (-Dist).ult(RHS: RHSSize)) |
2793 | return ConstantInt::get(Ty: getCompareTy(Op: LHS), |
2794 | V: !CmpInst::isTrueWhenEqual(predicate: Pred)); |
2795 | } |
2796 | } |
2797 | |
2798 | // If one side of the equality comparison must come from a noalias call |
2799 | // (meaning a system memory allocation function), and the other side must |
2800 | // come from a pointer that cannot overlap with dynamically-allocated |
2801 | // memory within the lifetime of the current function (allocas, byval |
2802 | // arguments, globals), then determine the comparison result here. |
2803 | SmallVector<const Value *, 8> LHSUObjs, RHSUObjs; |
2804 | getUnderlyingObjects(V: LHS, Objects&: LHSUObjs); |
2805 | getUnderlyingObjects(V: RHS, Objects&: RHSUObjs); |
2806 | |
2807 | // Is the set of underlying objects all noalias calls? |
2808 | auto IsNAC = [](ArrayRef<const Value *> Objects) { |
2809 | return all_of(Range&: Objects, P: isNoAliasCall); |
2810 | }; |
2811 | |
2812 | // Is the set of underlying objects all things which must be disjoint from |
2813 |     // noalias calls? We assume that indexing from such disjoint storage
2814 | // into the heap is undefined, and thus offsets can be safely ignored. |
2815 | auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) { |
2816 | return all_of(Range&: Objects, P: ::isAllocDisjoint); |
2817 | }; |
2818 | |
2819 | if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) || |
2820 | (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs))) |
2821 | return ConstantInt::get(Ty: getCompareTy(Op: LHS), |
2822 | V: !CmpInst::isTrueWhenEqual(predicate: Pred)); |
2823 | |
2824 |     // Fold comparisons for a non-escaping pointer even if the allocation call
2825 |     // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2826 |     // dynamic allocation call could be either of the operands. Note that
2827 |     // the other operand cannot be based on the alloc - if it were, then
2828 | // the cmp itself would be a capture. |
2829 | Value *MI = nullptr; |
2830 | if (isAllocLikeFn(V: LHS, TLI) && llvm::isKnownNonZero(V: RHS, Q)) |
2831 | MI = LHS; |
2832 | else if (isAllocLikeFn(V: RHS, TLI) && llvm::isKnownNonZero(V: LHS, Q)) |
2833 | MI = RHS; |
2834 | if (MI) { |
2835 | // FIXME: This is incorrect, see PR54002. While we can assume that the |
2836 | // allocation is at an address that makes the comparison false, this |
2837 | // requires that *all* comparisons to that address be false, which |
2838 | // InstSimplify cannot guarantee. |
2839 | struct CustomCaptureTracker : public CaptureTracker { |
2840 | bool Captured = false; |
2841 | void tooManyUses() override { Captured = true; } |
2842 | bool captured(const Use *U) override { |
2843 | if (auto *ICmp = dyn_cast<ICmpInst>(Val: U->getUser())) { |
2844 | // Comparison against value stored in global variable. Given the |
2845 | // pointer does not escape, its value cannot be guessed and stored |
2846 | // separately in a global variable. |
2847 | unsigned OtherIdx = 1 - U->getOperandNo(); |
2848 | auto *LI = dyn_cast<LoadInst>(Val: ICmp->getOperand(i_nocapture: OtherIdx)); |
2849 | if (LI && isa<GlobalVariable>(Val: LI->getPointerOperand())) |
2850 | return false; |
2851 | } |
2852 | |
2853 | Captured = true; |
2854 | return true; |
2855 | } |
2856 | }; |
2857 | CustomCaptureTracker Tracker; |
2858 | PointerMayBeCaptured(V: MI, Tracker: &Tracker); |
2859 | if (!Tracker.Captured) |
2860 | return ConstantInt::get(Ty: getCompareTy(Op: LHS), |
2861 | V: CmpInst::isFalseWhenEqual(predicate: Pred)); |
2862 | } |
2863 | } |
2864 | |
2865 | // Otherwise, fail. |
2866 | return nullptr; |
2867 | } |
2868 | |
2869 | /// Fold an icmp when its operands have i1 scalar type. |
2870 | static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS, |
2871 | Value *RHS, const SimplifyQuery &Q) { |
2872 | Type *ITy = getCompareTy(Op: LHS); // The return type. |
2873 | Type *OpTy = LHS->getType(); // The operand type. |
2874 | if (!OpTy->isIntOrIntVectorTy(BitWidth: 1)) |
2875 | return nullptr; |
2876 | |
2877 | // A boolean compared to true/false can be reduced in 14 out of the 20 |
2878 | // (10 predicates * 2 constants) possible combinations. The other |
2879 | // 6 cases require a 'not' of the LHS. |
2880 | |
2881 |   auto ExtractNotLHS = [](Value *V) -> Value * {
2882 | Value *X; |
2883 | if (match(V, P: m_Not(V: m_Value(V&: X)))) |
2884 | return X; |
2885 | return nullptr; |
2886 | }; |
2887 | |
2888 | if (match(V: RHS, P: m_Zero())) { |
2889 | switch (Pred) { |
2890 | case CmpInst::ICMP_NE: // X != 0 -> X |
2891 | case CmpInst::ICMP_UGT: // X >u 0 -> X |
2892 | case CmpInst::ICMP_SLT: // X <s 0 -> X |
2893 | return LHS; |
2894 | |
2895 | case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X |
2896 | case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X |
2897 | case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X |
2898 | if (Value *X = ExtractNotLHS(LHS)) |
2899 | return X; |
2900 | break; |
2901 | |
2902 | case CmpInst::ICMP_ULT: // X <u 0 -> false |
2903 | case CmpInst::ICMP_SGT: // X >s 0 -> false |
2904 | return getFalse(Ty: ITy); |
2905 | |
2906 | case CmpInst::ICMP_UGE: // X >=u 0 -> true |
2907 | case CmpInst::ICMP_SLE: // X <=s 0 -> true |
2908 | return getTrue(Ty: ITy); |
2909 | |
2910 | default: |
2911 | break; |
2912 | } |
2913 | } else if (match(V: RHS, P: m_One())) { |
2914 | switch (Pred) { |
2915 | case CmpInst::ICMP_EQ: // X == 1 -> X |
2916 | case CmpInst::ICMP_UGE: // X >=u 1 -> X |
2917 | case CmpInst::ICMP_SLE: // X <=s -1 -> X |
2918 | return LHS; |
2919 | |
2920 | case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X |
2921 |     case CmpInst::ICMP_ULT: // not(X) <u 1 -> X >=u 1 -> X
2922 | case CmpInst::ICMP_SGT: // not(X) >s 1 -> X <=s -1 -> X |
2923 | if (Value *X = ExtractNotLHS(LHS)) |
2924 | return X; |
2925 | break; |
2926 | |
2927 | case CmpInst::ICMP_UGT: // X >u 1 -> false |
2928 | case CmpInst::ICMP_SLT: // X <s -1 -> false |
2929 | return getFalse(Ty: ITy); |
2930 | |
2931 | case CmpInst::ICMP_ULE: // X <=u 1 -> true |
2932 | case CmpInst::ICMP_SGE: // X >=s -1 -> true |
2933 | return getTrue(Ty: ITy); |
2934 | |
2935 | default: |
2936 | break; |
2937 | } |
2938 | } |
2939 | |
2940 | switch (Pred) { |
2941 | default: |
2942 | break; |
2943 | case ICmpInst::ICMP_UGE: |
2944 | if (isImpliedCondition(LHS: RHS, RHS: LHS, DL: Q.DL).value_or(u: false)) |
2945 | return getTrue(Ty: ITy); |
2946 | break; |
2947 | case ICmpInst::ICMP_SGE: |
2948 | /// For signed comparison, the values for an i1 are 0 and -1 |
2949 | /// respectively. This maps into a truth table of: |
2950 | /// LHS | RHS | LHS >=s RHS | LHS implies RHS |
2951 | /// 0 | 0 | 1 (0 >= 0) | 1 |
2952 | /// 0 | 1 | 1 (0 >= -1) | 1 |
2953 | /// 1 | 0 | 0 (-1 >= 0) | 0 |
2954 | /// 1 | 1 | 1 (-1 >= -1) | 1 |
2955 | if (isImpliedCondition(LHS, RHS, DL: Q.DL).value_or(u: false)) |
2956 | return getTrue(Ty: ITy); |
2957 | break; |
2958 | case ICmpInst::ICMP_ULE: |
2959 | if (isImpliedCondition(LHS, RHS, DL: Q.DL).value_or(u: false)) |
2960 | return getTrue(Ty: ITy); |
2961 | break; |
2962 | case ICmpInst::ICMP_SLE: |
2963 | /// SLE follows the same logic as SGE with the LHS and RHS swapped. |
2964 | if (isImpliedCondition(LHS: RHS, RHS: LHS, DL: Q.DL).value_or(u: false)) |
2965 | return getTrue(Ty: ITy); |
2966 | break; |
2967 | } |
2968 | |
2969 | return nullptr; |
2970 | } |
2971 | |
2972 | /// Try hard to fold icmp with zero RHS because this is a common case. |
2973 | static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, |
2974 | Value *RHS, const SimplifyQuery &Q) { |
2975 | if (!match(V: RHS, P: m_Zero())) |
2976 | return nullptr; |
2977 | |
2978 | Type *ITy = getCompareTy(Op: LHS); // The return type. |
2979 | switch (Pred) { |
2980 | default: |
2981 | llvm_unreachable("Unknown ICmp predicate!" ); |
2982 | case ICmpInst::ICMP_ULT: |
2983 | return getFalse(Ty: ITy); |
2984 | case ICmpInst::ICMP_UGE: |
2985 | return getTrue(Ty: ITy); |
2986 | case ICmpInst::ICMP_EQ: |
2987 | case ICmpInst::ICMP_ULE: |
2988 | if (isKnownNonZero(V: LHS, Q)) |
2989 | return getFalse(Ty: ITy); |
2990 | break; |
2991 | case ICmpInst::ICMP_NE: |
2992 | case ICmpInst::ICMP_UGT: |
2993 | if (isKnownNonZero(V: LHS, Q)) |
2994 | return getTrue(Ty: ITy); |
2995 | break; |
2996 | case ICmpInst::ICMP_SLT: { |
2997 | KnownBits LHSKnown = computeKnownBits(V: LHS, /* Depth */ 0, Q); |
2998 | if (LHSKnown.isNegative()) |
2999 | return getTrue(Ty: ITy); |
3000 | if (LHSKnown.isNonNegative()) |
3001 | return getFalse(Ty: ITy); |
3002 | break; |
3003 | } |
3004 | case ICmpInst::ICMP_SLE: { |
3005 | KnownBits LHSKnown = computeKnownBits(V: LHS, /* Depth */ 0, Q); |
3006 | if (LHSKnown.isNegative()) |
3007 | return getTrue(Ty: ITy); |
3008 | if (LHSKnown.isNonNegative() && isKnownNonZero(V: LHS, Q)) |
3009 | return getFalse(Ty: ITy); |
3010 | break; |
3011 | } |
3012 | case ICmpInst::ICMP_SGE: { |
3013 | KnownBits LHSKnown = computeKnownBits(V: LHS, /* Depth */ 0, Q); |
3014 | if (LHSKnown.isNegative()) |
3015 | return getFalse(Ty: ITy); |
3016 | if (LHSKnown.isNonNegative()) |
3017 | return getTrue(Ty: ITy); |
3018 | break; |
3019 | } |
3020 | case ICmpInst::ICMP_SGT: { |
3021 | KnownBits LHSKnown = computeKnownBits(V: LHS, /* Depth */ 0, Q); |
3022 | if (LHSKnown.isNegative()) |
3023 | return getFalse(Ty: ITy); |
3024 | if (LHSKnown.isNonNegative() && isKnownNonZero(V: LHS, Q)) |
3025 | return getTrue(Ty: ITy); |
3026 | break; |
3027 | } |
3028 | } |
3029 | |
3030 | return nullptr; |
3031 | } |
3032 | |
3033 | static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, |
3034 | Value *RHS, const InstrInfoQuery &IIQ) { |
3035 | Type *ITy = getCompareTy(Op: RHS); // The return type. |
3036 | |
3037 | Value *X; |
3038 | const APInt *C; |
3039 | if (!match(V: RHS, P: m_APIntAllowPoison(Res&: C))) |
3040 | return nullptr; |
3041 | |
3042 | // Sign-bit checks can be optimized to true/false after unsigned |
3043 | // floating-point casts: |
3044 | // icmp slt (bitcast (uitofp X)), 0 --> false |
3045 | // icmp sgt (bitcast (uitofp X)), -1 --> true |
3046 | if (match(V: LHS, P: m_ElementWiseBitCast(Op: m_UIToFP(Op: m_Value(V&: X))))) { |
3047 | bool TrueIfSigned; |
3048 | if (isSignBitCheck(Pred, RHS: *C, TrueIfSigned)) |
3049 | return ConstantInt::getBool(Ty: ITy, V: !TrueIfSigned); |
3050 | } |
3051 | |
3052 |   // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3053 | ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, Other: *C); |
3054 | if (RHS_CR.isEmptySet()) |
3055 | return ConstantInt::getFalse(Ty: ITy); |
3056 | if (RHS_CR.isFullSet()) |
3057 | return ConstantInt::getTrue(Ty: ITy); |
3058 | |
3059 | ConstantRange LHS_CR = |
3060 | computeConstantRange(V: LHS, ForSigned: CmpInst::isSigned(predicate: Pred), UseInstrInfo: IIQ.UseInstrInfo); |
3061 | if (!LHS_CR.isFullSet()) { |
3062 | if (RHS_CR.contains(CR: LHS_CR)) |
3063 | return ConstantInt::getTrue(Ty: ITy); |
3064 | if (RHS_CR.inverse().contains(CR: LHS_CR)) |
3065 | return ConstantInt::getFalse(Ty: ITy); |
3066 | } |
3067 | |
3068 | // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC) |
3069 | // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC) |
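     |   // For example, (mul nuw %x, 4) == 6 is always false: the product is a
     |   // multiple of 4 and 6 is not (6 urem 4 == 2).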
3070 | const APInt *MulC; |
3071 | if (IIQ.UseInstrInfo && ICmpInst::isEquality(P: Pred) && |
3072 | ((match(V: LHS, P: m_NUWMul(L: m_Value(), R: m_APIntAllowPoison(Res&: MulC))) && |
3073 | *MulC != 0 && C->urem(RHS: *MulC) != 0) || |
3074 | (match(V: LHS, P: m_NSWMul(L: m_Value(), R: m_APIntAllowPoison(Res&: MulC))) && |
3075 | *MulC != 0 && C->srem(RHS: *MulC) != 0))) |
3076 | return ConstantInt::get(Ty: ITy, V: Pred == ICmpInst::ICMP_NE); |
3077 | |
3078 | return nullptr; |
3079 | } |
3080 | |
3081 | static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred, |
3082 | BinaryOperator *LBO, Value *RHS, |
3083 | const SimplifyQuery &Q, |
3084 | unsigned MaxRecurse) { |
3085 | Type *ITy = getCompareTy(Op: RHS); // The return type. |
3086 | |
3087 | Value *Y = nullptr; |
3088 | // icmp pred (or X, Y), X |
3089 | if (match(V: LBO, P: m_c_Or(L: m_Value(V&: Y), R: m_Specific(V: RHS)))) { |
3090 | if (Pred == ICmpInst::ICMP_ULT) |
3091 | return getFalse(Ty: ITy); |
3092 | if (Pred == ICmpInst::ICMP_UGE) |
3093 | return getTrue(Ty: ITy); |
3094 | |
3095 | if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { |
3096 | KnownBits RHSKnown = computeKnownBits(V: RHS, /* Depth */ 0, Q); |
3097 | KnownBits YKnown = computeKnownBits(V: Y, /* Depth */ 0, Q); |
3098 | if (RHSKnown.isNonNegative() && YKnown.isNegative()) |
3099 | return Pred == ICmpInst::ICMP_SLT ? getTrue(Ty: ITy) : getFalse(Ty: ITy); |
3100 | if (RHSKnown.isNegative() || YKnown.isNonNegative()) |
3101 | return Pred == ICmpInst::ICMP_SLT ? getFalse(Ty: ITy) : getTrue(Ty: ITy); |
3102 | } |
3103 | } |
3104 | |
3105 | // icmp pred (and X, Y), X |
3106 | if (match(V: LBO, P: m_c_And(L: m_Value(), R: m_Specific(V: RHS)))) { |
3107 | if (Pred == ICmpInst::ICMP_UGT) |
3108 | return getFalse(Ty: ITy); |
3109 | if (Pred == ICmpInst::ICMP_ULE) |
3110 | return getTrue(Ty: ITy); |
3111 | } |
3112 | |
3113 | // icmp pred (urem X, Y), Y |
3114 | if (match(V: LBO, P: m_URem(L: m_Value(), R: m_Specific(V: RHS)))) { |
3115 | switch (Pred) { |
3116 | default: |
3117 | break; |
3118 | case ICmpInst::ICMP_SGT: |
3119 | case ICmpInst::ICMP_SGE: { |
3120 | KnownBits Known = computeKnownBits(V: RHS, /* Depth */ 0, Q); |
3121 | if (!Known.isNonNegative()) |
3122 | break; |
3123 | [[fallthrough]]; |
3124 | } |
3125 | case ICmpInst::ICMP_EQ: |
3126 | case ICmpInst::ICMP_UGT: |
3127 | case ICmpInst::ICMP_UGE: |
3128 | return getFalse(Ty: ITy); |
3129 | case ICmpInst::ICMP_SLT: |
3130 | case ICmpInst::ICMP_SLE: { |
3131 | KnownBits Known = computeKnownBits(V: RHS, /* Depth */ 0, Q); |
3132 | if (!Known.isNonNegative()) |
3133 | break; |
3134 | [[fallthrough]]; |
3135 | } |
3136 | case ICmpInst::ICMP_NE: |
3137 | case ICmpInst::ICMP_ULT: |
3138 | case ICmpInst::ICMP_ULE: |
3139 | return getTrue(Ty: ITy); |
3140 | } |
3141 | } |
3142 | |
3143 | // icmp pred (urem X, Y), X |
3144 | if (match(V: LBO, P: m_URem(L: m_Specific(V: RHS), R: m_Value()))) { |
3145 | if (Pred == ICmpInst::ICMP_ULE) |
3146 | return getTrue(Ty: ITy); |
3147 | if (Pred == ICmpInst::ICMP_UGT) |
3148 | return getFalse(Ty: ITy); |
3149 | } |
3150 | |
3151 | // x >>u y <=u x --> true. |
3152 | // x >>u y >u x --> false. |
3153 | // x udiv y <=u x --> true. |
3154 | // x udiv y >u x --> false. |
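     |   // Both lshr and udiv can only decrease (or preserve) an unsigned value,
     |   // so the result is always <=u the original left-hand operand.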
3155 | if (match(V: LBO, P: m_LShr(L: m_Specific(V: RHS), R: m_Value())) || |
3156 | match(V: LBO, P: m_UDiv(L: m_Specific(V: RHS), R: m_Value()))) { |
3157 | // icmp pred (X op Y), X |
3158 | if (Pred == ICmpInst::ICMP_UGT) |
3159 | return getFalse(Ty: ITy); |
3160 | if (Pred == ICmpInst::ICMP_ULE) |
3161 | return getTrue(Ty: ITy); |
3162 | } |
3163 | |
3164 | // If x is nonzero: |
3165 | // x >>u C <u x --> true for C != 0. |
3166 | // x >>u C != x --> true for C != 0. |
3167 | // x >>u C >=u x --> false for C != 0. |
3168 | // x >>u C == x --> false for C != 0. |
3169 | // x udiv C <u x --> true for C != 1. |
3170 | // x udiv C != x --> true for C != 1. |
3171 | // x udiv C >=u x --> false for C != 1. |
3172 | // x udiv C == x --> false for C != 1. |
3173 | // TODO: allow non-constant shift amount/divisor |
3174 | const APInt *C; |
3175 | if ((match(V: LBO, P: m_LShr(L: m_Specific(V: RHS), R: m_APInt(Res&: C))) && *C != 0) || |
3176 | (match(V: LBO, P: m_UDiv(L: m_Specific(V: RHS), R: m_APInt(Res&: C))) && *C != 1)) { |
3177 | if (isKnownNonZero(V: RHS, Q)) { |
3178 | switch (Pred) { |
3179 | default: |
3180 | break; |
3181 | case ICmpInst::ICMP_EQ: |
3182 | case ICmpInst::ICMP_UGE: |
3183 | return getFalse(Ty: ITy); |
3184 | case ICmpInst::ICMP_NE: |
3185 | case ICmpInst::ICMP_ULT: |
3186 | return getTrue(Ty: ITy); |
3187 | case ICmpInst::ICMP_UGT: |
3188 | case ICmpInst::ICMP_ULE: |
3189 | // UGT/ULE are handled by the more general case just above |
3190 | llvm_unreachable("Unexpected UGT/ULE, should have been handled" ); |
3191 | } |
3192 | } |
3193 | } |
3194 | |
3195 | // (x*C1)/C2 <= x for C1 <= C2. |
3196 | // This holds even if the multiplication overflows: Assume that x != 0 and |
3197 | // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and |
3198 | // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x. |
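     |   // Concrete i8 instance (M = 256): x = 200, C1 = C2 = 2. The product
     |   // wraps to 144, and 144 /u 2 = 72, which is still <=u 200.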
3199 | // |
3200 |   // Additionally, either the multiplication or the division might be
3201 |   // represented as a shift:
3202 | // (x*C1)>>C2 <= x for C1 < 2**C2. |
3203 | // (x<<C1)/C2 <= x for 2**C1 < C2. |
3204 | const APInt *C1, *C2; |
3205 | if ((match(V: LBO, P: m_UDiv(L: m_Mul(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2))) && |
3206 | C1->ule(RHS: *C2)) || |
3207 | (match(V: LBO, P: m_LShr(L: m_Mul(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2))) && |
3208 | C1->ule(RHS: APInt(C2->getBitWidth(), 1) << *C2)) || |
3209 | (match(V: LBO, P: m_UDiv(L: m_Shl(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2))) && |
3210 | (APInt(C1->getBitWidth(), 1) << *C1).ule(RHS: *C2))) { |
3211 | if (Pred == ICmpInst::ICMP_UGT) |
3212 | return getFalse(Ty: ITy); |
3213 | if (Pred == ICmpInst::ICMP_ULE) |
3214 | return getTrue(Ty: ITy); |
3215 | } |
3216 | |
3217 | // (sub C, X) == X, C is odd --> false |
3218 | // (sub C, X) != X, C is odd --> true |
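     |   // If (C - X) == X then C == 2 * X (mod 2**BitWidth), which is even,
     |   // so an odd C can never satisfy the equality.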
3219 | if (match(V: LBO, P: m_Sub(L: m_APIntAllowPoison(Res&: C), R: m_Specific(V: RHS))) && |
3220 | (*C & 1) == 1 && ICmpInst::isEquality(P: Pred)) |
3221 | return (Pred == ICmpInst::ICMP_EQ) ? getFalse(Ty: ITy) : getTrue(Ty: ITy); |
3222 | |
3223 | return nullptr; |
3224 | } |
3225 | |
3226 | // If only one of the icmp's operands has NSW flags, try to prove that: |
3227 | // |
3228 | // icmp slt (x + C1), (x +nsw C2) |
3229 | // |
3230 | // is equivalent to: |
3231 | // |
3232 | // icmp slt C1, C2 |
3233 | // |
3234 | // which is true if x + C2 has the NSW flags set and: |
3235 | // *) C1 < C2 && C1 >= 0, or |
3236 | // *) C2 < C1 && C1 <= 0. |
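     | //
     | // For example, with C1 == 1 and C2 == 2: "x +nsw 2" guarantees that
     | // x <= INT_MAX - 2, so "x + 1" cannot wrap either, and the compare
     | // reduces to "1 <s 2" --> true.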
3237 | // |
3238 | static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS, |
3239 | Value *RHS, const InstrInfoQuery &IIQ) { |
3240 | // TODO: only support icmp slt for now. |
3241 | if (Pred != CmpInst::ICMP_SLT || !IIQ.UseInstrInfo) |
3242 | return false; |
3243 | |
3244 | // Canonicalize nsw add as RHS. |
3245 | if (!match(V: RHS, P: m_NSWAdd(L: m_Value(), R: m_Value()))) |
3246 | std::swap(a&: LHS, b&: RHS); |
3247 | if (!match(V: RHS, P: m_NSWAdd(L: m_Value(), R: m_Value()))) |
3248 | return false; |
3249 | |
3250 | Value *X; |
3251 | const APInt *C1, *C2; |
3252 | if (!match(V: LHS, P: m_Add(L: m_Value(V&: X), R: m_APInt(Res&: C1))) || |
3253 | !match(V: RHS, P: m_Add(L: m_Specific(V: X), R: m_APInt(Res&: C2)))) |
3254 | return false; |
3255 | |
3256 | return (C1->slt(RHS: *C2) && C1->isNonNegative()) || |
3257 | (C2->slt(RHS: *C1) && C1->isNonPositive()); |
3258 | } |
3259 | |
3260 | /// TODO: A large part of this logic is duplicated in InstCombine's |
3261 | /// foldICmpBinOp(). We should be able to share that and avoid the code |
3262 | /// duplication. |
3263 | static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, |
3264 | Value *RHS, const SimplifyQuery &Q, |
3265 | unsigned MaxRecurse) { |
3266 | BinaryOperator *LBO = dyn_cast<BinaryOperator>(Val: LHS); |
3267 | BinaryOperator *RBO = dyn_cast<BinaryOperator>(Val: RHS); |
3268 | if (MaxRecurse && (LBO || RBO)) { |
3269 | // Analyze the case when either LHS or RHS is an add instruction. |
3270 | Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; |
3271 | // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). |
3272 | bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; |
3273 | if (LBO && LBO->getOpcode() == Instruction::Add) { |
3274 | A = LBO->getOperand(i_nocapture: 0); |
3275 | B = LBO->getOperand(i_nocapture: 1); |
3276 | NoLHSWrapProblem = |
3277 | ICmpInst::isEquality(P: Pred) || |
3278 | (CmpInst::isUnsigned(predicate: Pred) && |
3279 | Q.IIQ.hasNoUnsignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO))) || |
3280 | (CmpInst::isSigned(predicate: Pred) && |
3281 | Q.IIQ.hasNoSignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO))); |
3282 | } |
3283 | if (RBO && RBO->getOpcode() == Instruction::Add) { |
3284 | C = RBO->getOperand(i_nocapture: 0); |
3285 | D = RBO->getOperand(i_nocapture: 1); |
3286 | NoRHSWrapProblem = |
3287 | ICmpInst::isEquality(P: Pred) || |
3288 | (CmpInst::isUnsigned(predicate: Pred) && |
3289 | Q.IIQ.hasNoUnsignedWrap(Op: cast<OverflowingBinaryOperator>(Val: RBO))) || |
3290 | (CmpInst::isSigned(predicate: Pred) && |
3291 | Q.IIQ.hasNoSignedWrap(Op: cast<OverflowingBinaryOperator>(Val: RBO))); |
3292 | } |
3293 | |
3294 | // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. |
3295 | if ((A == RHS || B == RHS) && NoLHSWrapProblem) |
3296 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: A == RHS ? B : A, |
3297 | RHS: Constant::getNullValue(Ty: RHS->getType()), Q, |
3298 | MaxRecurse: MaxRecurse - 1)) |
3299 | return V; |
3300 | |
3301 | // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. |
3302 | if ((C == LHS || D == LHS) && NoRHSWrapProblem) |
3303 | if (Value *V = |
3304 | simplifyICmpInst(Predicate: Pred, LHS: Constant::getNullValue(Ty: LHS->getType()), |
3305 | RHS: C == LHS ? D : C, Q, MaxRecurse: MaxRecurse - 1)) |
3306 | return V; |
3307 | |
3308 | // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. |
3309 | bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) || |
3310 | trySimplifyICmpWithAdds(Pred, LHS, RHS, IIQ: Q.IIQ); |
3311 | if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) { |
3312 | // Determine Y and Z in the form icmp (X+Y), (X+Z). |
3313 | Value *Y, *Z; |
3314 | if (A == C) { |
3315 | // C + B == C + D -> B == D |
3316 | Y = B; |
3317 | Z = D; |
3318 | } else if (A == D) { |
3319 | // D + B == C + D -> B == C |
3320 | Y = B; |
3321 | Z = C; |
3322 | } else if (B == C) { |
3323 | // A + C == C + D -> A == D |
3324 | Y = A; |
3325 | Z = D; |
3326 | } else { |
3327 | assert(B == D); |
3328 | // A + D == C + D -> A == C |
3329 | Y = A; |
3330 | Z = C; |
3331 | } |
3332 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: Y, RHS: Z, Q, MaxRecurse: MaxRecurse - 1)) |
3333 | return V; |
3334 | } |
3335 | } |
3336 | |
3337 | if (LBO) |
3338 | if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse)) |
3339 | return V; |
3340 | |
3341 | if (RBO) |
3342 | if (Value *V = simplifyICmpWithBinOpOnLHS( |
3343 | Pred: ICmpInst::getSwappedPredicate(pred: Pred), LBO: RBO, RHS: LHS, Q, MaxRecurse)) |
3344 | return V; |
3345 | |
3346 | // 0 - (zext X) pred C |
3347 | if (!CmpInst::isUnsigned(predicate: Pred) && match(V: LHS, P: m_Neg(V: m_ZExt(Op: m_Value())))) { |
3348 | const APInt *C; |
3349 | if (match(V: RHS, P: m_APInt(Res&: C))) { |
3350 | if (C->isStrictlyPositive()) { |
3351 | if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE) |
3352 | return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS)); |
3353 | if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ) |
3354 | return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS)); |
3355 | } |
3356 | if (C->isNonNegative()) { |
3357 | if (Pred == ICmpInst::ICMP_SLE) |
3358 | return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS)); |
3359 | if (Pred == ICmpInst::ICMP_SGT) |
3360 | return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS)); |
3361 | } |
3362 | } |
3363 | } |
3364 | |
3365 | // If C2 is a power-of-2 and C is not: |
3366 | // (C2 << X) == C --> false |
3367 | // (C2 << X) != C --> true |
3368 | const APInt *C; |
3369 | if (match(V: LHS, P: m_Shl(L: m_Power2(), R: m_Value())) && |
3370 | match(V: RHS, P: m_APIntAllowPoison(Res&: C)) && !C->isPowerOf2()) { |
3371 | // C2 << X can equal zero in some circumstances. |
3372 | // This simplification might be unsafe if C is zero. |
3373 | // |
3374 | // We know it is safe if: |
3375 | // - The shift is nsw. We can't shift out the one bit. |
3376 | // - The shift is nuw. We can't shift out the one bit. |
3377 | // - C2 is one. |
3378 | // - C isn't zero. |
3379 | if (Q.IIQ.hasNoSignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO)) || |
3380 | Q.IIQ.hasNoUnsignedWrap(Op: cast<OverflowingBinaryOperator>(Val: LBO)) || |
3381 | match(V: LHS, P: m_Shl(L: m_One(), R: m_Value())) || !C->isZero()) { |
3382 | if (Pred == ICmpInst::ICMP_EQ) |
3383 | return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS)); |
3384 | if (Pred == ICmpInst::ICMP_NE) |
3385 | return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS)); |
3386 | } |
3387 | } |
3388 | |
3389 | // If C is a power-of-2: |
3390 | // (C << X) >u 0x8000 --> false |
3391 | // (C << X) <=u 0x8000 --> true |
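     |   // The shifted value either keeps a single bit set (at most the sign
     |   // bit) or becomes zero, so it can never be unsigned-greater than the
     |   // sign mask.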
3392 | if (match(V: LHS, P: m_Shl(L: m_Power2(), R: m_Value())) && match(V: RHS, P: m_SignMask())) { |
3393 | if (Pred == ICmpInst::ICMP_UGT) |
3394 | return ConstantInt::getFalse(Ty: getCompareTy(Op: RHS)); |
3395 | if (Pred == ICmpInst::ICMP_ULE) |
3396 | return ConstantInt::getTrue(Ty: getCompareTy(Op: RHS)); |
3397 | } |
3398 | |
3399 | if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode()) |
3400 | return nullptr; |
3401 | |
3402 | if (LBO->getOperand(i_nocapture: 0) == RBO->getOperand(i_nocapture: 0)) { |
3403 | switch (LBO->getOpcode()) { |
3404 | default: |
3405 | break; |
3406 | case Instruction::Shl: { |
3407 | bool NUW = Q.IIQ.hasNoUnsignedWrap(Op: LBO) && Q.IIQ.hasNoUnsignedWrap(Op: RBO); |
3408 | bool NSW = Q.IIQ.hasNoSignedWrap(Op: LBO) && Q.IIQ.hasNoSignedWrap(Op: RBO); |
3409 | if (!NUW || (ICmpInst::isSigned(predicate: Pred) && !NSW) || |
3410 | !isKnownNonZero(V: LBO->getOperand(i_nocapture: 0), Q)) |
3411 | break; |
3412 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 1), |
3413 | RHS: RBO->getOperand(i_nocapture: 1), Q, MaxRecurse: MaxRecurse - 1)) |
3414 | return V; |
3415 | break; |
3416 | } |
3417 | // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2: |
3418 | // icmp ule A, B -> true |
3419 | // icmp ugt A, B -> false |
3420 | // icmp sle A, B -> true (C1 and C2 are the same sign) |
3421 | // icmp sgt A, B -> false (C1 and C2 are the same sign) |
3422 | case Instruction::And: |
3423 | case Instruction::Or: { |
3424 | const APInt *C1, *C2; |
3425 | if (ICmpInst::isRelational(P: Pred) && |
3426 | match(V: LBO->getOperand(i_nocapture: 1), P: m_APInt(Res&: C1)) && |
3427 | match(V: RBO->getOperand(i_nocapture: 1), P: m_APInt(Res&: C2))) { |
3428 | if (!C1->isSubsetOf(RHS: *C2)) { |
3429 | std::swap(a&: C1, b&: C2); |
3430 | Pred = ICmpInst::getSwappedPredicate(pred: Pred); |
3431 | } |
3432 | if (C1->isSubsetOf(RHS: *C2)) { |
3433 | if (Pred == ICmpInst::ICMP_ULE) |
3434 | return ConstantInt::getTrue(Ty: getCompareTy(Op: LHS)); |
3435 | if (Pred == ICmpInst::ICMP_UGT) |
3436 | return ConstantInt::getFalse(Ty: getCompareTy(Op: LHS)); |
3437 | if (C1->isNonNegative() == C2->isNonNegative()) { |
3438 | if (Pred == ICmpInst::ICMP_SLE) |
3439 | return ConstantInt::getTrue(Ty: getCompareTy(Op: LHS)); |
3440 | if (Pred == ICmpInst::ICMP_SGT) |
3441 | return ConstantInt::getFalse(Ty: getCompareTy(Op: LHS)); |
3442 | } |
3443 | } |
3444 | } |
3445 | break; |
3446 | } |
3447 | } |
3448 | } |
3449 | |
3450 | if (LBO->getOperand(i_nocapture: 1) == RBO->getOperand(i_nocapture: 1)) { |
3451 | switch (LBO->getOpcode()) { |
3452 | default: |
3453 | break; |
3454 | case Instruction::UDiv: |
3455 | case Instruction::LShr: |
3456 | if (ICmpInst::isSigned(predicate: Pred) || !Q.IIQ.isExact(Op: LBO) || |
3457 | !Q.IIQ.isExact(Op: RBO)) |
3458 | break; |
3459 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0), |
3460 | RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1)) |
3461 | return V; |
3462 | break; |
3463 | case Instruction::SDiv: |
3464 | if (!ICmpInst::isEquality(P: Pred) || !Q.IIQ.isExact(Op: LBO) || |
3465 | !Q.IIQ.isExact(Op: RBO)) |
3466 | break; |
3467 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0), |
3468 | RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1)) |
3469 | return V; |
3470 | break; |
3471 | case Instruction::AShr: |
3472 | if (!Q.IIQ.isExact(Op: LBO) || !Q.IIQ.isExact(Op: RBO)) |
3473 | break; |
3474 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0), |
3475 | RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1)) |
3476 | return V; |
3477 | break; |
3478 | case Instruction::Shl: { |
3479 | bool NUW = Q.IIQ.hasNoUnsignedWrap(Op: LBO) && Q.IIQ.hasNoUnsignedWrap(Op: RBO); |
3480 | bool NSW = Q.IIQ.hasNoSignedWrap(Op: LBO) && Q.IIQ.hasNoSignedWrap(Op: RBO); |
3481 | if (!NUW && !NSW) |
3482 | break; |
3483 | if (!NSW && ICmpInst::isSigned(predicate: Pred)) |
3484 | break; |
3485 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: LBO->getOperand(i_nocapture: 0), |
3486 | RHS: RBO->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1)) |
3487 | return V; |
3488 | break; |
3489 | } |
3490 | } |
3491 | } |
3492 | return nullptr; |
3493 | } |
3494 | |
3495 | /// Simplify integer comparisons where at least one operand of the compare
3496 | /// matches an integer min/max idiom. |
3497 | static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, |
3498 | Value *RHS, const SimplifyQuery &Q, |
3499 | unsigned MaxRecurse) { |
3500 | Type *ITy = getCompareTy(Op: LHS); // The return type. |
3501 | Value *A, *B; |
3502 | CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; |
3503 | CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". |
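     |   // For example, "smax(A, B) sle A" holds exactly when "A sge B", which
     |   // may be the very condition the max idiom was selected on.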
3504 | |
3505 | // Signed variants on "max(a,b)>=a -> true". |
3506 | if (match(V: LHS, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))) && (A == RHS || B == RHS)) { |
3507 | if (A != RHS) |
3508 | std::swap(a&: A, b&: B); // smax(A, B) pred A. |
3509 | EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". |
3510 | // We analyze this as smax(A, B) pred A. |
3511 | P = Pred; |
3512 | } else if (match(V: RHS, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3513 | (A == LHS || B == LHS)) { |
3514 | if (A != LHS) |
3515 | std::swap(a&: A, b&: B); // A pred smax(A, B). |
3516 | EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". |
3517 | // We analyze this as smax(A, B) swapped-pred A. |
3518 | P = CmpInst::getSwappedPredicate(pred: Pred); |
3519 | } else if (match(V: LHS, P: m_SMin(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3520 | (A == RHS || B == RHS)) { |
3521 | if (A != RHS) |
3522 | std::swap(a&: A, b&: B); // smin(A, B) pred A. |
3523 | EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". |
3524 | // We analyze this as smax(-A, -B) swapped-pred -A. |
3525 | // Note that we do not need to actually form -A or -B thanks to EqP. |
3526 | P = CmpInst::getSwappedPredicate(pred: Pred); |
3527 | } else if (match(V: RHS, P: m_SMin(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3528 | (A == LHS || B == LHS)) { |
3529 | if (A != LHS) |
3530 | std::swap(a&: A, b&: B); // A pred smin(A, B). |
3531 | EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". |
3532 | // We analyze this as smax(-A, -B) pred -A. |
3533 | // Note that we do not need to actually form -A or -B thanks to EqP. |
3534 | P = Pred; |
3535 | } |
3536 | if (P != CmpInst::BAD_ICMP_PREDICATE) { |
3537 | // Cases correspond to "max(A, B) p A". |
3538 | switch (P) { |
3539 | default: |
3540 | break; |
3541 | case CmpInst::ICMP_EQ: |
3542 | case CmpInst::ICMP_SLE: |
3543 | // Equivalent to "A EqP B". This may be the same as the condition tested |
3544 | // in the max/min; if so, we can just return that. |
3545 | if (Value *V = extractEquivalentCondition(V: LHS, Pred: EqP, LHS: A, RHS: B)) |
3546 | return V; |
3547 | if (Value *V = extractEquivalentCondition(V: RHS, Pred: EqP, LHS: A, RHS: B)) |
3548 | return V; |
3549 | // Otherwise, see if "A EqP B" simplifies. |
3550 | if (MaxRecurse) |
3551 | if (Value *V = simplifyICmpInst(Predicate: EqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1)) |
3552 | return V; |
3553 | break; |
3554 | case CmpInst::ICMP_NE: |
3555 | case CmpInst::ICMP_SGT: { |
3556 | CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(pred: EqP); |
3557 | // Equivalent to "A InvEqP B". This may be the same as the condition |
3558 | // tested in the max/min; if so, we can just return that. |
3559 | if (Value *V = extractEquivalentCondition(V: LHS, Pred: InvEqP, LHS: A, RHS: B)) |
3560 | return V; |
3561 | if (Value *V = extractEquivalentCondition(V: RHS, Pred: InvEqP, LHS: A, RHS: B)) |
3562 | return V; |
3563 | // Otherwise, see if "A InvEqP B" simplifies. |
3564 | if (MaxRecurse) |
3565 | if (Value *V = simplifyICmpInst(Predicate: InvEqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1)) |
3566 | return V; |
3567 | break; |
3568 | } |
3569 | case CmpInst::ICMP_SGE: |
3570 | // Always true. |
3571 | return getTrue(Ty: ITy); |
3572 | case CmpInst::ICMP_SLT: |
3573 | // Always false. |
3574 | return getFalse(Ty: ITy); |
3575 | } |
3576 | } |
3577 | |
3578 | // Unsigned variants on "max(a,b)>=a -> true". |
3579 | P = CmpInst::BAD_ICMP_PREDICATE; |
3580 | if (match(V: LHS, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))) && (A == RHS || B == RHS)) { |
3581 | if (A != RHS) |
3582 | std::swap(a&: A, b&: B); // umax(A, B) pred A. |
3583 | EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". |
3584 | // We analyze this as umax(A, B) pred A. |
3585 | P = Pred; |
3586 | } else if (match(V: RHS, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3587 | (A == LHS || B == LHS)) { |
3588 | if (A != LHS) |
3589 | std::swap(a&: A, b&: B); // A pred umax(A, B). |
3590 | EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". |
3591 | // We analyze this as umax(A, B) swapped-pred A. |
3592 | P = CmpInst::getSwappedPredicate(pred: Pred); |
3593 | } else if (match(V: LHS, P: m_UMin(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3594 | (A == RHS || B == RHS)) { |
3595 | if (A != RHS) |
3596 | std::swap(a&: A, b&: B); // umin(A, B) pred A. |
3597 | EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". |
3598 | // We analyze this as umax(-A, -B) swapped-pred -A. |
3599 | // Note that we do not need to actually form -A or -B thanks to EqP. |
3600 | P = CmpInst::getSwappedPredicate(pred: Pred); |
3601 | } else if (match(V: RHS, P: m_UMin(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3602 | (A == LHS || B == LHS)) { |
3603 | if (A != LHS) |
3604 | std::swap(a&: A, b&: B); // A pred umin(A, B). |
3605 | EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". |
3606 | // We analyze this as umax(-A, -B) pred -A. |
3607 | // Note that we do not need to actually form -A or -B thanks to EqP. |
3608 | P = Pred; |
3609 | } |
3610 | if (P != CmpInst::BAD_ICMP_PREDICATE) { |
3611 | // Cases correspond to "max(A, B) p A". |
3612 | switch (P) { |
3613 | default: |
3614 | break; |
3615 | case CmpInst::ICMP_EQ: |
3616 | case CmpInst::ICMP_ULE: |
3617 | // Equivalent to "A EqP B". This may be the same as the condition tested |
3618 | // in the max/min; if so, we can just return that. |
3619 | if (Value *V = extractEquivalentCondition(V: LHS, Pred: EqP, LHS: A, RHS: B)) |
3620 | return V; |
3621 | if (Value *V = extractEquivalentCondition(V: RHS, Pred: EqP, LHS: A, RHS: B)) |
3622 | return V; |
3623 | // Otherwise, see if "A EqP B" simplifies. |
3624 | if (MaxRecurse) |
3625 | if (Value *V = simplifyICmpInst(Predicate: EqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1)) |
3626 | return V; |
3627 | break; |
3628 | case CmpInst::ICMP_NE: |
3629 | case CmpInst::ICMP_UGT: { |
3630 | CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(pred: EqP); |
3631 | // Equivalent to "A InvEqP B". This may be the same as the condition |
3632 | // tested in the max/min; if so, we can just return that. |
3633 | if (Value *V = extractEquivalentCondition(V: LHS, Pred: InvEqP, LHS: A, RHS: B)) |
3634 | return V; |
3635 | if (Value *V = extractEquivalentCondition(V: RHS, Pred: InvEqP, LHS: A, RHS: B)) |
3636 | return V; |
3637 | // Otherwise, see if "A InvEqP B" simplifies. |
3638 | if (MaxRecurse) |
3639 | if (Value *V = simplifyICmpInst(Predicate: InvEqP, LHS: A, RHS: B, Q, MaxRecurse: MaxRecurse - 1)) |
3640 | return V; |
3641 | break; |
3642 | } |
3643 | case CmpInst::ICMP_UGE: |
3644 | return getTrue(Ty: ITy); |
3645 | case CmpInst::ICMP_ULT: |
3646 | return getFalse(Ty: ITy); |
3647 | } |
3648 | } |
3649 | |
3650 | // Comparing 1 each of min/max with a common operand? |
3651 | // Canonicalize min operand to RHS. |
3652 | if (match(V: LHS, P: m_UMin(L: m_Value(), R: m_Value())) || |
3653 | match(V: LHS, P: m_SMin(L: m_Value(), R: m_Value()))) { |
3654 | std::swap(a&: LHS, b&: RHS); |
3655 | Pred = ICmpInst::getSwappedPredicate(pred: Pred); |
3656 | } |
3657 | |
3658 | Value *C, *D; |
3659 | if (match(V: LHS, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3660 | match(V: RHS, P: m_SMin(L: m_Value(V&: C), R: m_Value(V&: D))) && |
3661 | (A == C || A == D || B == C || B == D)) { |
3662 | // smax(A, B) >=s smin(A, D) --> true |
3663 | if (Pred == CmpInst::ICMP_SGE) |
3664 | return getTrue(Ty: ITy); |
3665 | // smax(A, B) <s smin(A, D) --> false |
3666 | if (Pred == CmpInst::ICMP_SLT) |
3667 | return getFalse(Ty: ITy); |
3668 | } else if (match(V: LHS, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))) && |
3669 | match(V: RHS, P: m_UMin(L: m_Value(V&: C), R: m_Value(V&: D))) && |
3670 | (A == C || A == D || B == C || B == D)) { |
3671 | // umax(A, B) >=u umin(A, D) --> true |
3672 | if (Pred == CmpInst::ICMP_UGE) |
3673 | return getTrue(Ty: ITy); |
3674 | // umax(A, B) <u umin(A, D) --> false |
3675 | if (Pred == CmpInst::ICMP_ULT) |
3676 | return getFalse(Ty: ITy); |
3677 | } |
3678 | |
3679 | return nullptr; |
3680 | } |
3681 | |
3682 | static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate, |
3683 | Value *LHS, Value *RHS, |
3684 | const SimplifyQuery &Q) { |
3685 | // Gracefully handle instructions that have not been inserted yet. |
3686 | if (!Q.AC || !Q.CxtI) |
3687 | return nullptr; |
3688 | |
3689 | for (Value *AssumeBaseOp : {LHS, RHS}) { |
3690 | for (auto &AssumeVH : Q.AC->assumptionsFor(V: AssumeBaseOp)) { |
3691 | if (!AssumeVH) |
3692 | continue; |
3693 | |
3694 | CallInst *Assume = cast<CallInst>(Val&: AssumeVH); |
3695 | if (std::optional<bool> Imp = isImpliedCondition( |
3696 | LHS: Assume->getArgOperand(i: 0), RHSPred: Predicate, RHSOp0: LHS, RHSOp1: RHS, DL: Q.DL)) |
3697 | if (isValidAssumeForContext(I: Assume, CxtI: Q.CxtI, DT: Q.DT)) |
3698 | return ConstantInt::get(Ty: getCompareTy(Op: LHS), V: *Imp); |
3699 | } |
3700 | } |
3701 | |
3702 | return nullptr; |
3703 | } |
3704 | |
3705 | static Value *simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred, |
3706 | Value *LHS, Value *RHS) { |
3707 | auto *II = dyn_cast<IntrinsicInst>(Val: LHS); |
3708 | if (!II) |
3709 | return nullptr; |
3710 | |
3711 | switch (II->getIntrinsicID()) { |
3712 | case Intrinsic::uadd_sat: |
3713 | // uadd.sat(X, Y) uge X, uadd.sat(X, Y) uge Y |
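     |     // A saturating unsigned add never yields a result below either
     |     // operand, so "uge" is a tautology and "ult" is impossible.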
3714 | if (II->getArgOperand(i: 0) == RHS || II->getArgOperand(i: 1) == RHS) { |
3715 | if (Pred == ICmpInst::ICMP_UGE) |
3716 | return ConstantInt::getTrue(Ty: getCompareTy(Op: II)); |
3717 | if (Pred == ICmpInst::ICMP_ULT) |
3718 | return ConstantInt::getFalse(Ty: getCompareTy(Op: II)); |
3719 | } |
3720 | return nullptr; |
3721 | case Intrinsic::usub_sat: |
3722 | // usub.sat(X, Y) ule X |
3723 | if (II->getArgOperand(i: 0) == RHS) { |
3724 | if (Pred == ICmpInst::ICMP_ULE) |
3725 | return ConstantInt::getTrue(Ty: getCompareTy(Op: II)); |
3726 | if (Pred == ICmpInst::ICMP_UGT) |
3727 | return ConstantInt::getFalse(Ty: getCompareTy(Op: II)); |
3728 | } |
3729 | return nullptr; |
3730 | default: |
3731 | return nullptr; |
3732 | } |
3733 | } |
3734 | |
3735 | /// Helper method to get range from metadata or attribute. |
3736 | static std::optional<ConstantRange> getRange(Value *V, |
3737 | const InstrInfoQuery &IIQ) { |
3738 | if (Instruction *I = dyn_cast<Instruction>(Val: V)) |
3739 | if (MDNode *MD = IIQ.getMetadata(I, KindID: LLVMContext::MD_range)) |
3740 | return getConstantRangeFromMetadata(RangeMD: *MD); |
3741 | |
3742 | if (const Argument *A = dyn_cast<Argument>(Val: V)) |
3743 | return A->getRange(); |
3744 |   if (const CallBase *CB = dyn_cast<CallBase>(Val: V))
3745 | return CB->getRange(); |
3746 | |
3747 | return std::nullopt; |
3748 | } |
3749 | |
3750 | /// Given operands for an ICmpInst, see if we can fold the result. |
3751 | /// If not, this returns null. |
3752 | static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
3753 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
3754 | CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; |
3755 | assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!" ); |
3756 | |
3757 | if (Constant *CLHS = dyn_cast<Constant>(Val: LHS)) { |
3758 | if (Constant *CRHS = dyn_cast<Constant>(Val: RHS)) |
3759 | return ConstantFoldCompareInstOperands(Predicate: Pred, LHS: CLHS, RHS: CRHS, DL: Q.DL, TLI: Q.TLI); |
3760 | |
3761 | // If we have a constant, make sure it is on the RHS. |
3762 | std::swap(a&: LHS, b&: RHS); |
3763 | Pred = CmpInst::getSwappedPredicate(pred: Pred); |
3764 | } |
3765 | assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X" ); |
3766 | |
3767 | Type *ITy = getCompareTy(Op: LHS); // The return type. |
3768 | |
3769 | // icmp poison, X -> poison |
3770 | if (isa<PoisonValue>(Val: RHS)) |
3771 | return PoisonValue::get(T: ITy); |
3772 | |
3773 | // For EQ and NE, we can always pick a value for the undef to make the |
3774 | // predicate pass or fail, so we can return undef. |
3775 | // Matches behavior in llvm::ConstantFoldCompareInstruction. |
3776 | if (Q.isUndefValue(V: RHS) && ICmpInst::isEquality(P: Pred)) |
3777 | return UndefValue::get(T: ITy); |
3778 | |
3779 | // icmp X, X -> true/false |
3780 | // icmp X, undef -> true/false because undef could be X. |
3781 | if (LHS == RHS || Q.isUndefValue(V: RHS)) |
3782 | return ConstantInt::get(Ty: ITy, V: CmpInst::isTrueWhenEqual(predicate: Pred)); |
3783 | |
3784 | if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) |
3785 | return V; |
3786 | |
3787 | // TODO: Sink/common this with other potentially expensive calls that use |
3788 | // ValueTracking? See comment below for isKnownNonEqual(). |
3789 | if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) |
3790 | return V; |
3791 | |
3792 | if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, IIQ: Q.IIQ)) |
3793 | return V; |
3794 | |
3795 | // If both operands have range metadata, use the metadata |
3796 | // to simplify the comparison. |
3797 | if (std::optional<ConstantRange> RhsCr = getRange(V: RHS, IIQ: Q.IIQ)) |
3798 | if (std::optional<ConstantRange> LhsCr = getRange(V: LHS, IIQ: Q.IIQ)) { |
3799 | if (LhsCr->icmp(Pred, Other: *RhsCr)) |
3800 | return ConstantInt::getTrue(Ty: ITy); |
3801 | |
3802 | if (LhsCr->icmp(Pred: CmpInst::getInversePredicate(pred: Pred), Other: *RhsCr)) |
3803 | return ConstantInt::getFalse(Ty: ITy); |
3804 | } |
3805 | |
3806 | // Compare of cast, for example (zext X) != 0 -> X != 0 |
3807 | if (isa<CastInst>(Val: LHS) && (isa<Constant>(Val: RHS) || isa<CastInst>(Val: RHS))) { |
3808 | Instruction *LI = cast<CastInst>(Val: LHS); |
3809 | Value *SrcOp = LI->getOperand(i: 0); |
3810 | Type *SrcTy = SrcOp->getType(); |
3811 | Type *DstTy = LI->getType(); |
3812 | |
3813 | // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input |
3814 | // if the integer type is the same size as the pointer type. |
3815 | if (MaxRecurse && isa<PtrToIntInst>(Val: LI) && |
3816 | Q.DL.getTypeSizeInBits(Ty: SrcTy) == DstTy->getPrimitiveSizeInBits()) { |
3817 | if (Constant *RHSC = dyn_cast<Constant>(Val: RHS)) { |
3818 | // Transfer the cast to the constant. |
3819 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: SrcOp, |
3820 | RHS: ConstantExpr::getIntToPtr(C: RHSC, Ty: SrcTy), |
3821 | Q, MaxRecurse: MaxRecurse - 1)) |
3822 | return V; |
3823 | } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(Val: RHS)) { |
3824 | if (RI->getOperand(i_nocapture: 0)->getType() == SrcTy) |
3825 | // Compare without the cast. |
3826 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: SrcOp, RHS: RI->getOperand(i_nocapture: 0), Q, |
3827 | MaxRecurse: MaxRecurse - 1)) |
3828 | return V; |
3829 | } |
3830 | } |
3831 | |
3832 | if (isa<ZExtInst>(Val: LHS)) { |
3833 | // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the |
3834 | // same type. |
3835 | if (ZExtInst *RI = dyn_cast<ZExtInst>(Val: RHS)) { |
3836 | if (MaxRecurse && SrcTy == RI->getOperand(i_nocapture: 0)->getType()) |
3837 | // Compare X and Y. Note that signed predicates become unsigned. |
3838 | if (Value *V = |
3839 | simplifyICmpInst(Predicate: ICmpInst::getUnsignedPredicate(pred: Pred), LHS: SrcOp, |
3840 | RHS: RI->getOperand(i_nocapture: 0), Q, MaxRecurse: MaxRecurse - 1)) |
3841 | return V; |
3842 | } |
3843 | // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true. |
3844 | else if (SExtInst *RI = dyn_cast<SExtInst>(Val: RHS)) { |
3845 | if (SrcOp == RI->getOperand(i_nocapture: 0)) { |
3846 | if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE) |
3847 | return ConstantInt::getTrue(Ty: ITy); |
3848 | if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT) |
3849 | return ConstantInt::getFalse(Ty: ITy); |
3850 | } |
3851 | } |
3852 | // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended |
3853 | // too. If not, then try to deduce the result of the comparison. |
3854 | else if (match(V: RHS, P: m_ImmConstant())) {
3855 | Constant *C = cast<Constant>(Val: RHS);
3857 | |
3858 | // Compute the constant that would happen if we truncated to SrcTy then |
3859 | // reextended to DstTy. |
3860 | Constant *Trunc = |
3861 | ConstantFoldCastOperand(Opcode: Instruction::Trunc, C, DestTy: SrcTy, DL: Q.DL); |
3862 | assert(Trunc && "Constant-fold of ImmConstant should not fail");
3863 | Constant *RExt = |
3864 | ConstantFoldCastOperand(Opcode: CastInst::ZExt, C: Trunc, DestTy: DstTy, DL: Q.DL); |
3865 | assert(RExt && "Constant-fold of ImmConstant should not fail");
3866 | Constant *AnyEq = |
3867 | ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_EQ, LHS: RExt, RHS: C, DL: Q.DL); |
3868 | assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3869 | |
3870 | // If the re-extended constant didn't change any of the elements then |
3871 | // this is effectively also a case of comparing two zero-extended |
3872 | // values. |
3873 | if (AnyEq->isAllOnesValue() && MaxRecurse) |
3874 | if (Value *V = simplifyICmpInst(Predicate: ICmpInst::getUnsignedPredicate(pred: Pred), |
3875 | LHS: SrcOp, RHS: Trunc, Q, MaxRecurse: MaxRecurse - 1)) |
3876 | return V; |
3877 | |
3878 | // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit |
3879 | // there. Use this to work out the result of the comparison. |
3880 | if (AnyEq->isNullValue()) { |
3881 | switch (Pred) { |
3882 | default: |
3883 | llvm_unreachable("Unknown ICmp predicate!");
3884 | // LHS <u RHS. |
3885 | case ICmpInst::ICMP_EQ: |
3886 | case ICmpInst::ICMP_UGT: |
3887 | case ICmpInst::ICMP_UGE: |
3888 | return Constant::getNullValue(Ty: ITy); |
3889 | |
3890 | case ICmpInst::ICMP_NE: |
3891 | case ICmpInst::ICMP_ULT: |
3892 | case ICmpInst::ICMP_ULE: |
3893 | return Constant::getAllOnesValue(Ty: ITy); |
3894 | |
3895 | // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3896 | // is non-negative then LHS <s RHS.
3897 | case ICmpInst::ICMP_SGT: |
3898 | case ICmpInst::ICMP_SGE: |
3899 | return ConstantFoldCompareInstOperands( |
3900 | Predicate: ICmpInst::ICMP_SLT, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()), |
3901 | DL: Q.DL); |
3902 | case ICmpInst::ICMP_SLT: |
3903 | case ICmpInst::ICMP_SLE: |
3904 | return ConstantFoldCompareInstOperands( |
3905 | Predicate: ICmpInst::ICMP_SGE, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()), |
3906 | DL: Q.DL); |
3907 | } |
3908 | } |
3909 | } |
3910 | } |
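
// Example of the zext-vs-constant path above (hypothetical IR):
//   %z = zext i8 %x to i32
//   icmp ult i32 %z, 256 --> true
// Truncating 256 to i8 and re-extending yields 0 != 256, so the constant does
// not survive the round-trip and the ult case folds to all-ones.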
3911 | |
3912 | if (isa<SExtInst>(Val: LHS)) { |
3913 | // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the |
3914 | // same type. |
3915 | if (SExtInst *RI = dyn_cast<SExtInst>(Val: RHS)) { |
3916 | if (MaxRecurse && SrcTy == RI->getOperand(i_nocapture: 0)->getType()) |
3917 | // Compare X and Y. Note that the predicate does not change. |
3918 | if (Value *V = simplifyICmpInst(Predicate: Pred, LHS: SrcOp, RHS: RI->getOperand(i_nocapture: 0), Q, |
3919 | MaxRecurse: MaxRecurse - 1)) |
3920 | return V; |
3921 | } |
3922 | // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true. |
3923 | else if (ZExtInst *RI = dyn_cast<ZExtInst>(Val: RHS)) { |
3924 | if (SrcOp == RI->getOperand(i_nocapture: 0)) { |
3925 | if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE) |
3926 | return ConstantInt::getTrue(Ty: ITy); |
3927 | if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT) |
3928 | return ConstantInt::getFalse(Ty: ITy); |
3929 | } |
3930 | } |
3931 | // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended |
3932 | // too. If not, then try to deduce the result of the comparison. |
3933 | else if (match(V: RHS, P: m_ImmConstant())) { |
3934 | Constant *C = cast<Constant>(Val: RHS); |
3935 | |
3936 | // Compute the constant that would happen if we truncated to SrcTy then |
3937 | // reextended to DstTy. |
3938 | Constant *Trunc = |
3939 | ConstantFoldCastOperand(Opcode: Instruction::Trunc, C, DestTy: SrcTy, DL: Q.DL); |
3940 | assert(Trunc && "Constant-fold of ImmConstant should not fail");
3941 | Constant *RExt = |
3942 | ConstantFoldCastOperand(Opcode: CastInst::SExt, C: Trunc, DestTy: DstTy, DL: Q.DL); |
3943 | assert(RExt && "Constant-fold of ImmConstant should not fail");
3944 | Constant *AnyEq = |
3945 | ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_EQ, LHS: RExt, RHS: C, DL: Q.DL); |
3946 | assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3947 | |
3948 | // If the re-extended constant didn't change then this is effectively |
3949 | // also a case of comparing two sign-extended values. |
3950 | if (AnyEq->isAllOnesValue() && MaxRecurse) |
3951 | if (Value *V = |
3952 | simplifyICmpInst(Predicate: Pred, LHS: SrcOp, RHS: Trunc, Q, MaxRecurse: MaxRecurse - 1)) |
3953 | return V; |
3954 | |
3955 | // Otherwise the upper bits of LHS are all equal, while RHS has varying |
3956 | // bits there. Use this to work out the result of the comparison. |
3957 | if (AnyEq->isNullValue()) { |
3958 | switch (Pred) { |
3959 | default: |
3960 | llvm_unreachable("Unknown ICmp predicate!");
3961 | case ICmpInst::ICMP_EQ: |
3962 | return Constant::getNullValue(Ty: ITy); |
3963 | case ICmpInst::ICMP_NE: |
3964 | return Constant::getAllOnesValue(Ty: ITy); |
3965 | |
3966 | // If RHS is non-negative then LHS <s RHS. If RHS is negative then |
3967 | // LHS >s RHS. |
3968 | case ICmpInst::ICMP_SGT: |
3969 | case ICmpInst::ICMP_SGE: |
3970 | return ConstantFoldCompareInstOperands( |
3971 | Predicate: ICmpInst::ICMP_SLT, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()), |
3972 | DL: Q.DL); |
3973 | case ICmpInst::ICMP_SLT: |
3974 | case ICmpInst::ICMP_SLE: |
3975 | return ConstantFoldCompareInstOperands( |
3976 | Predicate: ICmpInst::ICMP_SGE, LHS: C, RHS: Constant::getNullValue(Ty: C->getType()), |
3977 | DL: Q.DL); |
3978 | |
3979 | // If LHS is non-negative then LHS <u RHS. If LHS is negative then |
3980 | // LHS >u RHS. |
3981 | case ICmpInst::ICMP_UGT: |
3982 | case ICmpInst::ICMP_UGE: |
3983 | // Comparison is true iff the LHS <s 0. |
3984 | if (MaxRecurse) |
3985 | if (Value *V = simplifyICmpInst(Predicate: ICmpInst::ICMP_SLT, LHS: SrcOp, |
3986 | RHS: Constant::getNullValue(Ty: SrcTy), Q, |
3987 | MaxRecurse: MaxRecurse - 1)) |
3988 | return V; |
3989 | break; |
3990 | case ICmpInst::ICMP_ULT: |
3991 | case ICmpInst::ICMP_ULE: |
3992 | // Comparison is true iff the LHS >=s 0. |
3993 | if (MaxRecurse) |
3994 | if (Value *V = simplifyICmpInst(Predicate: ICmpInst::ICMP_SGE, LHS: SrcOp, |
3995 | RHS: Constant::getNullValue(Ty: SrcTy), Q, |
3996 | MaxRecurse: MaxRecurse - 1)) |
3997 | return V; |
3998 | break; |
3999 | } |
4000 | } |
4001 | } |
4002 | } |
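
// Example of the sext-vs-constant path above (hypothetical IR):
//   %s = sext i8 %x to i32
//   icmp slt i32 %s, 128 --> true
// 128 becomes -128 after the trunc/sext round-trip, and since 128 >=s 0 every
// sign-extended i8 value is smaller.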
4003 | } |
4004 | |
4005 | // icmp eq|ne X, Y -> false|true if X != Y |
4006 | // This is potentially expensive, and we have already computed known bits
4007 | // for compares with 0 above, so only try this for a non-zero compare.
4008 | if (ICmpInst::isEquality(P: Pred) && !match(V: RHS, P: m_Zero()) && |
4009 | isKnownNonEqual(V1: LHS, V2: RHS, DL: Q.DL, AC: Q.AC, CxtI: Q.CxtI, DT: Q.DT, UseInstrInfo: Q.IIQ.UseInstrInfo)) { |
4010 | return Pred == ICmpInst::ICMP_NE ? getTrue(Ty: ITy) : getFalse(Ty: ITy); |
4011 | } |
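
// For instance (hypothetical IR), with %y = add i32 %x, 1, isKnownNonEqual
// proves %x != %y, so "icmp eq i32 %x, %y" folds to false here.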
4012 | |
4013 | if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse)) |
4014 | return V; |
4015 | |
4016 | if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse)) |
4017 | return V; |
4018 | |
4019 | if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS)) |
4020 | return V; |
4021 | if (Value *V = simplifyICmpWithIntrinsicOnLHS( |
4022 | Pred: ICmpInst::getSwappedPredicate(pred: Pred), LHS: RHS, RHS: LHS)) |
4023 | return V; |
4024 | |
4025 | if (Value *V = simplifyICmpWithDominatingAssume(Predicate: Pred, LHS, RHS, Q)) |
4026 | return V; |
4027 | |
4028 | if (std::optional<bool> Res = |
4029 | isImpliedByDomCondition(Pred, LHS, RHS, ContextI: Q.CxtI, DL: Q.DL)) |
4030 | return ConstantInt::getBool(Ty: ITy, V: *Res); |
4031 | |
4032 | // Simplify comparisons of related pointers using a powerful, recursive
4033 | // GEP-walk when we have target data available.
4034 | if (LHS->getType()->isPointerTy()) |
4035 | if (auto *C = computePointerICmp(Pred, LHS, RHS, Q)) |
4036 | return C; |
4037 | if (auto *CLHS = dyn_cast<PtrToIntOperator>(Val: LHS)) |
4038 | if (auto *CRHS = dyn_cast<PtrToIntOperator>(Val: RHS)) |
4039 | if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() && |
4040 | Q.DL.getTypeSizeInBits(Ty: CLHS->getPointerOperandType()) == |
4041 | Q.DL.getTypeSizeInBits(Ty: CLHS->getType())) |
4042 | if (auto *C = computePointerICmp(Pred, LHS: CLHS->getPointerOperand(), |
4043 | RHS: CRHS->getPointerOperand(), Q)) |
4044 | return C; |
4045 | |
4046 | // If the comparison is with the result of a select instruction, check whether |
4047 | // comparing with either branch of the select always yields the same value. |
4048 | if (isa<SelectInst>(Val: LHS) || isa<SelectInst>(Val: RHS)) |
4049 | if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) |
4050 | return V; |
4051 | |
4052 | // If the comparison is with the result of a phi instruction, check whether |
4053 | // doing the compare with each incoming phi value yields a common result. |
4054 | if (isa<PHINode>(Val: LHS) || isa<PHINode>(Val: RHS)) |
4055 | if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) |
4056 | return V; |
4057 | |
4058 | return nullptr; |
4059 | } |
4060 | |
4061 | Value *llvm::simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
4062 | const SimplifyQuery &Q) { |
4063 | return ::simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse: RecursionLimit); |
4064 | } |
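
// Minimal usage sketch for the wrapper above (hypothetical caller, not part
// of this file):
//   if (Value *V = simplifyICmpInst(ICmpInst::ICMP_EQ, A, B, SQ))
//     Cmp->replaceAllUsesWith(V);
// where SQ is a SimplifyQuery assembled from whatever analyses are on hand.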
4065 | |
4066 | /// Given operands for an FCmpInst, see if we can fold the result. |
4067 | /// If not, this returns null. |
4068 | static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
4069 | FastMathFlags FMF, const SimplifyQuery &Q, |
4070 | unsigned MaxRecurse) { |
4071 | CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; |
4072 | assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4073 | |
4074 | if (Constant *CLHS = dyn_cast<Constant>(Val: LHS)) { |
4075 | if (Constant *CRHS = dyn_cast<Constant>(Val: RHS)) |
4076 | return ConstantFoldCompareInstOperands(Predicate: Pred, LHS: CLHS, RHS: CRHS, DL: Q.DL, TLI: Q.TLI, |
4077 | I: Q.CxtI); |
4078 | |
4079 | // If we have a constant, make sure it is on the RHS. |
4080 | std::swap(a&: LHS, b&: RHS); |
4081 | Pred = CmpInst::getSwappedPredicate(pred: Pred); |
4082 | } |
4083 | |
4084 | // Fold trivial predicates. |
4085 | Type *RetTy = getCompareTy(Op: LHS); |
4086 | if (Pred == FCmpInst::FCMP_FALSE) |
4087 | return getFalse(Ty: RetTy); |
4088 | if (Pred == FCmpInst::FCMP_TRUE) |
4089 | return getTrue(Ty: RetTy); |
4090 | |
4091 | // fcmp pred x, poison and fcmp pred poison, x |
4092 | // fold to poison |
4093 | if (isa<PoisonValue>(Val: LHS) || isa<PoisonValue>(Val: RHS)) |
4094 | return PoisonValue::get(T: RetTy); |
4095 | |
4096 | // fcmp pred x, undef and fcmp pred undef, x |
4097 | // fold to true if unordered, false if ordered |
4098 | if (Q.isUndefValue(V: LHS) || Q.isUndefValue(V: RHS)) { |
4099 | // Choosing NaN for the undef will always make unordered comparison succeed |
4100 | // and ordered comparison fail. |
4101 | return ConstantInt::get(Ty: RetTy, V: CmpInst::isUnordered(predicate: Pred)); |
4102 | } |
4103 | |
4104 | // fcmp x,x -> true/false. Not all compares are foldable. |
4105 | if (LHS == RHS) { |
4106 | if (CmpInst::isTrueWhenEqual(predicate: Pred)) |
4107 | return getTrue(Ty: RetTy); |
4108 | if (CmpInst::isFalseWhenEqual(predicate: Pred)) |
4109 | return getFalse(Ty: RetTy); |
4110 | } |
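
// Concretely: "fcmp ueq double %x, %x" folds to true and "fcmp one double
// %x, %x" folds to false, but "fcmp oeq double %x, %x" is left alone because
// it is false whenever %x is a NaN.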
4111 | |
4112 | // Fold (un)ordered comparison if we can determine there are no NaNs. |
4113 | // |
4114 | // This catches the 2 variable input case, constants are handled below as a |
4115 | // class-like compare. |
4116 | if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) { |
4117 | KnownFPClass RHSClass = |
4118 | computeKnownFPClass(V: RHS, InterestedClasses: fcAllFlags, /*Depth=*/0, SQ: Q); |
4119 | KnownFPClass LHSClass = |
4120 | computeKnownFPClass(V: LHS, InterestedClasses: fcAllFlags, /*Depth=*/0, SQ: Q); |
4121 | |
4122 | if (FMF.noNaNs() || |
4123 | (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN())) |
4124 | return ConstantInt::get(Ty: RetTy, V: Pred == FCmpInst::FCMP_ORD); |
4125 | |
4126 | if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN()) |
4127 | return ConstantInt::get(Ty: RetTy, V: Pred == CmpInst::FCMP_UNO); |
4128 | } |
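
// For example (hypothetical IR), if %a and %b are known never-NaN (or the
// compare carries nnan), "fcmp ord double %a, %b" folds to true and
// "fcmp uno double %a, %b" folds to false.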
4129 | |
4130 | const APFloat *C = nullptr; |
4131 | match(V: RHS, P: m_APFloatAllowPoison(Res&: C)); |
4132 | std::optional<KnownFPClass> FullKnownClassLHS; |
4133 | |
4134 | // Lazily compute the possible classes for LHS. Avoid computing it twice if |
4135 | // RHS is a 0. |
4136 | auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags = |
4137 | fcAllFlags) { |
4138 | if (FullKnownClassLHS) |
4139 | return *FullKnownClassLHS; |
4140 | return computeKnownFPClass(V: LHS, FMF, InterestedClasses: InterestedFlags, Depth: 0, SQ: Q); |
4141 | }; |
4142 | |
4143 | if (C && Q.CxtI) { |
4144 | // Fold out compares that express a class test. |
4145 | // |
4146 | // FIXME: Should be able to perform folds without context |
4147 | // instruction. Always pass in the context function? |
4148 | |
4149 | const Function *ParentF = Q.CxtI->getFunction(); |
4150 | auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, F: *ParentF, LHS, ConstRHS: C); |
4151 | if (ClassVal) { |
4152 | FullKnownClassLHS = computeLHSClass(); |
4153 | if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone) |
4154 | return getFalse(Ty: RetTy); |
4155 | if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone) |
4156 | return getTrue(Ty: RetTy); |
4157 | } |
4158 | } |
4159 | |
4160 | // Handle fcmp with constant RHS. |
4161 | if (C) { |
4162 | // TODO: If we always required a context function, we wouldn't need to |
4163 | // special case nans. |
4164 | if (C->isNaN()) |
4165 | return ConstantInt::get(Ty: RetTy, V: CmpInst::isUnordered(predicate: Pred)); |
4166 | |
4167 | // TODO: Need version fcmpToClassTest which returns implied class when the |
4168 | // compare isn't a complete class test. e.g. > 1.0 implies fcPositive, but |
4169 | // isn't implementable as a class call. |
4170 | if (C->isNegative() && !C->isNegZero()) { |
4171 | FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask; |
4172 | |
4173 | // TODO: We can catch more cases by using a range check rather than |
4174 | // relying on CannotBeOrderedLessThanZero. |
4175 | switch (Pred) { |
4176 | case FCmpInst::FCMP_UGE: |
4177 | case FCmpInst::FCMP_UGT: |
4178 | case FCmpInst::FCMP_UNE: { |
4179 | KnownFPClass KnownClass = computeLHSClass(Interested); |
4180 | |
4181 | // (X >= 0) implies (X > C) when (C < 0) |
4182 | if (KnownClass.cannotBeOrderedLessThanZero()) |
4183 | return getTrue(Ty: RetTy); |
4184 | break; |
4185 | } |
4186 | case FCmpInst::FCMP_OEQ: |
4187 | case FCmpInst::FCMP_OLE: |
4188 | case FCmpInst::FCMP_OLT: { |
4189 | KnownFPClass KnownClass = computeLHSClass(Interested); |
4190 | |
4191 | // (X >= 0) implies !(X < C) when (C < 0) |
4192 | if (KnownClass.cannotBeOrderedLessThanZero()) |
4193 | return getFalse(Ty: RetTy); |
4194 | break; |
4195 | } |
4196 | default: |
4197 | break; |
4198 | } |
4199 | } |
4200 | // Check comparison of [minnum/maxnum with constant] with other constant. |
4201 | const APFloat *C2; |
4202 | if ((match(V: LHS, P: m_Intrinsic<Intrinsic::minnum>(Op0: m_Value(), Op1: m_APFloat(Res&: C2))) && |
4203 | *C2 < *C) || |
4204 | (match(V: LHS, P: m_Intrinsic<Intrinsic::maxnum>(Op0: m_Value(), Op1: m_APFloat(Res&: C2))) && |
4205 | *C2 > *C)) { |
4206 | bool IsMaxNum = |
4207 | cast<IntrinsicInst>(Val: LHS)->getIntrinsicID() == Intrinsic::maxnum; |
4208 | // The ordered relationship and minnum/maxnum guarantee that we do not |
4209 | // have NaN constants, so ordered/unordered preds are handled the same. |
4210 | switch (Pred) { |
4211 | case FCmpInst::FCMP_OEQ: |
4212 | case FCmpInst::FCMP_UEQ: |
4213 | // minnum(X, LesserC) == C --> false |
4214 | // maxnum(X, GreaterC) == C --> false |
4215 | return getFalse(Ty: RetTy); |
4216 | case FCmpInst::FCMP_ONE: |
4217 | case FCmpInst::FCMP_UNE: |
4218 | // minnum(X, LesserC) != C --> true |
4219 | // maxnum(X, GreaterC) != C --> true |
4220 | return getTrue(Ty: RetTy); |
4221 | case FCmpInst::FCMP_OGE: |
4222 | case FCmpInst::FCMP_UGE: |
4223 | case FCmpInst::FCMP_OGT: |
4224 | case FCmpInst::FCMP_UGT: |
4225 | // minnum(X, LesserC) >= C --> false |
4226 | // minnum(X, LesserC) > C --> false |
4227 | // maxnum(X, GreaterC) >= C --> true |
4228 | // maxnum(X, GreaterC) > C --> true |
4229 | return ConstantInt::get(Ty: RetTy, V: IsMaxNum); |
4230 | case FCmpInst::FCMP_OLE: |
4231 | case FCmpInst::FCMP_ULE: |
4232 | case FCmpInst::FCMP_OLT: |
4233 | case FCmpInst::FCMP_ULT: |
4234 | // minnum(X, LesserC) <= C --> true |
4235 | // minnum(X, LesserC) < C --> true |
4236 | // maxnum(X, GreaterC) <= C --> false |
4237 | // maxnum(X, GreaterC) < C --> false |
4238 | return ConstantInt::get(Ty: RetTy, V: !IsMaxNum); |
4239 | default: |
4240 | // TRUE/FALSE/ORD/UNO should be handled before this. |
4241 | llvm_unreachable("Unexpected fcmp predicate");
4242 | } |
4243 | } |
4244 | } |
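
// Sketch of the minnum case above (hypothetical IR):
//   %m = call double @llvm.minnum.f64(double %x, double 1.0)
//   fcmp olt double %m, 2.0 --> true
// minnum(%x, 1.0) never returns NaN (1.0 is returned instead) and never
// exceeds 1.0, so it is always ordered-less-than 2.0.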
4245 | |
4246 | // TODO: Could fold this with above if there were a matcher which returned all |
4247 | // classes in a non-splat vector. |
4248 | if (match(V: RHS, P: m_AnyZeroFP())) { |
4249 | switch (Pred) { |
4250 | case FCmpInst::FCMP_OGE: |
4251 | case FCmpInst::FCMP_ULT: { |
4252 | FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask; |
4253 | if (!FMF.noNaNs()) |
4254 | Interested |= fcNan; |
4255 | |
4256 | KnownFPClass Known = computeLHSClass(Interested); |
4257 | |
4258 | // Positive or zero X >= 0.0 --> true |
4259 | // Positive or zero X < 0.0 --> false |
4260 | if ((FMF.noNaNs() || Known.isKnownNeverNaN()) && |
4261 | Known.cannotBeOrderedLessThanZero()) |
4262 | return Pred == FCmpInst::FCMP_OGE ? getTrue(Ty: RetTy) : getFalse(Ty: RetTy); |
4263 | break; |
4264 | } |
4265 | case FCmpInst::FCMP_UGE: |
4266 | case FCmpInst::FCMP_OLT: { |
4267 | FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask; |
4268 | KnownFPClass Known = computeLHSClass(Interested); |
4269 | |
4270 | // Positive or zero or nan X >= 0.0 --> true |
4271 | // Positive or zero or nan X < 0.0 --> false |
4272 | if (Known.cannotBeOrderedLessThanZero()) |
4273 | return Pred == FCmpInst::FCMP_UGE ? getTrue(Ty: RetTy) : getFalse(Ty: RetTy); |
4274 | break; |
4275 | } |
4276 | default: |
4277 | break; |
4278 | } |
4279 | } |
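
// For example (hypothetical IR):
//   %a = call float @llvm.fabs.f32(float %x)
//   fcmp uge float %a, 0.0 --> true
// fabs only produces non-negative values or NaN, and uge accepts both.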
4280 | |
4281 | // If the comparison is with the result of a select instruction, check whether |
4282 | // comparing with either branch of the select always yields the same value. |
4283 | if (isa<SelectInst>(Val: LHS) || isa<SelectInst>(Val: RHS)) |
4284 | if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) |
4285 | return V; |
4286 | |
4287 | // If the comparison is with the result of a phi instruction, check whether |
4288 | // doing the compare with each incoming phi value yields a common result. |
4289 | if (isa<PHINode>(Val: LHS) || isa<PHINode>(Val: RHS)) |
4290 | if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) |
4291 | return V; |
4292 | |
4293 | return nullptr; |
4294 | } |
4295 | |
4296 | Value *llvm::simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
4297 | FastMathFlags FMF, const SimplifyQuery &Q) { |
4298 | return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, MaxRecurse: RecursionLimit); |
4299 | } |
4300 | |
4301 | static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, |
4302 | const SimplifyQuery &Q, |
4303 | bool AllowRefinement, |
4304 | SmallVectorImpl<Instruction *> *DropFlags, |
4305 | unsigned MaxRecurse) { |
4306 | assert((AllowRefinement || !Q.CanUseUndef) && |
4307 | "If AllowRefinement=false then CanUseUndef=false" ); |
4308 | |
4309 | // Trivial replacement. |
4310 | if (V == Op) |
4311 | return RepOp; |
4312 | |
4313 | if (!MaxRecurse--) |
4314 | return nullptr; |
4315 | |
4316 | // We cannot replace a constant, and shouldn't even try. |
4317 | if (isa<Constant>(Val: Op)) |
4318 | return nullptr; |
4319 | |
4320 | auto *I = dyn_cast<Instruction>(Val: V); |
4321 | if (!I) |
4322 | return nullptr; |
4323 | |
4324 | // The arguments of a phi node might refer to a value from a previous |
4325 | // cycle iteration. |
4326 | if (isa<PHINode>(Val: I)) |
4327 | return nullptr; |
4328 | |
4329 | if (Op->getType()->isVectorTy()) { |
4330 | // For vector types, the simplification must hold per-lane, so forbid |
4331 | // potentially cross-lane operations like shufflevector. |
4332 | if (!I->getType()->isVectorTy() || isa<ShuffleVectorInst>(Val: I) || |
4333 | isa<CallBase>(Val: I) || isa<BitCastInst>(Val: I)) |
4334 | return nullptr; |
4335 | } |
4336 | |
4337 | // Don't fold away llvm.is.constant checks based on assumptions. |
4338 | if (match(V: I, P: m_Intrinsic<Intrinsic::is_constant>())) |
4339 | return nullptr; |
4340 | |
4341 | // Don't simplify freeze. |
4342 | if (isa<FreezeInst>(Val: I)) |
4343 | return nullptr; |
4344 | |
4345 | // Replace Op with RepOp in instruction operands. |
4346 | SmallVector<Value *, 8> NewOps; |
4347 | bool AnyReplaced = false; |
4348 | for (Value *InstOp : I->operands()) { |
4349 | if (Value *NewInstOp = simplifyWithOpReplaced( |
4350 | V: InstOp, Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) { |
4351 | NewOps.push_back(Elt: NewInstOp); |
4352 | AnyReplaced = InstOp != NewInstOp; |
4353 | } else { |
4354 | NewOps.push_back(Elt: InstOp); |
4355 | } |
4356 | |
4357 | // Bail out if any operand is undef and SimplifyQuery disables undef |
4358 | // simplification. Constant folding currently doesn't respect this option. |
4359 | if (isa<UndefValue>(Val: NewOps.back()) && !Q.CanUseUndef) |
4360 | return nullptr; |
4361 | } |
4362 | |
4363 | if (!AnyReplaced) |
4364 | return nullptr; |
4365 | |
4366 | if (!AllowRefinement) { |
4367 | // General InstSimplify functions may refine the result, e.g. by returning |
4368 | // a constant for a potentially poison value. To avoid this, implement only |
4369 | // a few non-refining but profitable transforms here. |
4370 | |
4371 | if (auto *BO = dyn_cast<BinaryOperator>(Val: I)) { |
4372 | unsigned Opcode = BO->getOpcode(); |
4373 | // id op x -> x, x op id -> x |
4374 | if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, Ty: I->getType())) |
4375 | return NewOps[1]; |
4376 | if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, Ty: I->getType(), |
4377 | /* RHS */ AllowRHSConstant: true)) |
4378 | return NewOps[0]; |
4379 | |
4380 | // x & x -> x, x | x -> x |
4381 | if ((Opcode == Instruction::And || Opcode == Instruction::Or) && |
4382 | NewOps[0] == NewOps[1]) { |
4383 | // or disjoint x, x results in poison. |
4384 | if (auto *PDI = dyn_cast<PossiblyDisjointInst>(Val: BO)) { |
4385 | if (PDI->isDisjoint()) { |
4386 | if (!DropFlags) |
4387 | return nullptr; |
4388 | DropFlags->push_back(Elt: BO); |
4389 | } |
4390 | } |
4391 | return NewOps[0]; |
4392 | } |
4393 | |
4394 | // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison |
4395 | // by assumption and this case never wraps, so nowrap flags can be |
4396 | // ignored. |
4397 | if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) && |
4398 | NewOps[0] == RepOp && NewOps[1] == RepOp) |
4399 | return Constant::getNullValue(Ty: I->getType()); |
4400 | |
4401 | // If we are substituting an absorber constant into a binop and extra |
4402 | // poison can't leak if we remove the select -- because both operands of |
4403 | // the binop are based on the same value -- then it may be safe to replace |
4404 | // the value with the absorber constant. Examples: |
4405 | // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op |
4406 | // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C) |
4407 | // (Op == -1) ? -1 : (Op | (binop C, Op) --> Op | (binop C, Op) |
4408 | Constant *Absorber = |
4409 | ConstantExpr::getBinOpAbsorber(Opcode, Ty: I->getType()); |
4410 | if ((NewOps[0] == Absorber || NewOps[1] == Absorber) && |
4411 | impliesPoison(ValAssumedPoison: BO, V: Op)) |
4412 | return Absorber; |
4413 | } |
4414 | |
4415 | if (isa<GetElementPtrInst>(Val: I)) { |
4416 | // getelementptr x, 0 -> x. |
4417 | // This never returns poison, even if inbounds is set. |
4418 | if (NewOps.size() == 2 && match(V: NewOps[1], P: m_Zero())) |
4419 | return NewOps[0]; |
4420 | } |
4421 | } else { |
4422 | // The simplification queries below may return the original value. Consider: |
4423 | // %div = udiv i32 %arg, %arg2 |
4424 | // %mul = mul nsw i32 %div, %arg2 |
4425 | // %cmp = icmp eq i32 %mul, %arg |
4426 | // %sel = select i1 %cmp, i32 %div, i32 undef |
4427 | // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which |
4428 | // simplifies back to %arg. This can only happen because %mul does not |
4429 | // dominate %div. To ensure a consistent return value contract, we make sure |
4430 | // that this case returns nullptr as well. |
4431 | auto PreventSelfSimplify = [V](Value *Simplified) { |
4432 | return Simplified != V ? Simplified : nullptr; |
4433 | }; |
4434 | |
4435 | return PreventSelfSimplify( |
4436 | ::simplifyInstructionWithOperands(I, NewOps, SQ: Q, MaxRecurse)); |
4437 | } |
4438 | |
4439 | // If all operands are constant after substituting Op for RepOp then we can |
4440 | // constant fold the instruction. |
4441 | SmallVector<Constant *, 8> ConstOps; |
4442 | for (Value *NewOp : NewOps) { |
4443 | if (Constant *ConstOp = dyn_cast<Constant>(Val: NewOp)) |
4444 | ConstOps.push_back(Elt: ConstOp); |
4445 | else |
4446 | return nullptr; |
4447 | } |
4448 | |
4449 | // Consider: |
4450 | // %cmp = icmp eq i32 %x, 2147483647 |
4451 | // %add = add nsw i32 %x, 1 |
4452 | // %sel = select i1 %cmp, i32 -2147483648, i32 %add |
4453 | // |
4454 | // We can't replace %sel with %add unless we strip away the flags (which |
4455 | // will be done in InstCombine). |
4456 | // TODO: This may be unsound, because it only catches some forms of |
4457 | // refinement. |
4458 | if (!AllowRefinement) { |
4459 | if (canCreatePoison(Op: cast<Operator>(Val: I), ConsiderFlagsAndMetadata: !DropFlags)) { |
4460 | // abs cannot create poison if the value is known to never be int_min. |
4461 | if (auto *II = dyn_cast<IntrinsicInst>(Val: I); |
4462 | II && II->getIntrinsicID() == Intrinsic::abs) { |
4463 | if (!ConstOps[0]->isNotMinSignedValue()) |
4464 | return nullptr; |
4465 | } else |
4466 | return nullptr; |
4467 | } |
4468 | Constant *Res = ConstantFoldInstOperands(I, Ops: ConstOps, DL: Q.DL, TLI: Q.TLI); |
4469 | if (DropFlags && Res && I->hasPoisonGeneratingAnnotations()) |
4470 | DropFlags->push_back(Elt: I); |
4471 | return Res; |
4472 | } |
4473 | |
4474 | return ConstantFoldInstOperands(I, Ops: ConstOps, DL: Q.DL, TLI: Q.TLI); |
4475 | } |
4476 | |
4477 | Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, |
4478 | const SimplifyQuery &Q, |
4479 | bool AllowRefinement, |
4480 | SmallVectorImpl<Instruction *> *DropFlags) { |
4481 | // If refinement is disabled, also disable undef simplifications (which are |
4482 | // always refinements) in SimplifyQuery. |
4483 | if (!AllowRefinement) |
4484 | return ::simplifyWithOpReplaced(V, Op, RepOp, Q: Q.getWithoutUndef(), |
4485 | AllowRefinement, DropFlags, MaxRecurse: RecursionLimit); |
4486 | return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags, |
4487 | MaxRecurse: RecursionLimit); |
4488 | } |
4489 | |
4490 | /// Try to simplify a select instruction when its condition operand is an |
4491 | /// integer comparison where one operand of the compare is a constant. |
4492 | static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, |
4493 | const APInt *Y, bool TrueWhenUnset) { |
4494 | const APInt *C; |
4495 | |
4496 | // (X & Y) == 0 ? X & ~Y : X --> X |
4497 | // (X & Y) != 0 ? X & ~Y : X --> X & ~Y |
4498 | if (FalseVal == X && match(V: TrueVal, P: m_And(L: m_Specific(V: X), R: m_APInt(Res&: C))) && |
4499 | *Y == ~*C) |
4500 | return TrueWhenUnset ? FalseVal : TrueVal; |
4501 | |
4502 | // (X & Y) == 0 ? X : X & ~Y --> X & ~Y |
4503 | // (X & Y) != 0 ? X : X & ~Y --> X |
4504 | if (TrueVal == X && match(V: FalseVal, P: m_And(L: m_Specific(V: X), R: m_APInt(Res&: C))) && |
4505 | *Y == ~*C) |
4506 | return TrueWhenUnset ? FalseVal : TrueVal; |
4507 | |
4508 | if (Y->isPowerOf2()) { |
4509 | // (X & Y) == 0 ? X | Y : X --> X | Y |
4510 | // (X & Y) != 0 ? X | Y : X --> X |
4511 | if (FalseVal == X && match(V: TrueVal, P: m_Or(L: m_Specific(V: X), R: m_APInt(Res&: C))) && |
4512 | *Y == *C) { |
4513 | // We can't return the or if it has the disjoint flag. |
4514 | if (TrueWhenUnset && cast<PossiblyDisjointInst>(Val: TrueVal)->isDisjoint()) |
4515 | return nullptr; |
4516 | return TrueWhenUnset ? TrueVal : FalseVal; |
4517 | } |
4518 | |
4519 | // (X & Y) == 0 ? X : X | Y --> X |
4520 | // (X & Y) != 0 ? X : X | Y --> X | Y |
4521 | if (TrueVal == X && match(V: FalseVal, P: m_Or(L: m_Specific(V: X), R: m_APInt(Res&: C))) && |
4522 | *Y == *C) { |
4523 | // We can't return the or if it has the disjoint flag. |
4524 | if (!TrueWhenUnset && cast<PossiblyDisjointInst>(Val: FalseVal)->isDisjoint()) |
4525 | return nullptr; |
4526 | return TrueWhenUnset ? TrueVal : FalseVal; |
4527 | } |
4528 | } |
4529 | |
4530 | return nullptr; |
4531 | } |
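
// Worked example of the power-of-two case above (hypothetical IR, Y == 4):
//   %and = and i32 %x, 4
//   %cmp = icmp eq i32 %and, 0
//   %or  = or i32 %x, 4
//   select i1 %cmp, i32 %or, i32 %x --> %or
// If bit 2 is clear the select picks %x | 4; if it is set, %x | 4 == %x
// anyway (which is also why a disjoint flag on the or blocks the fold).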
4532 | |
4533 | static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, |
4534 | ICmpInst::Predicate Pred, Value *TVal, |
4535 | Value *FVal) { |
4536 | // Canonicalize common cmp+sel operand as CmpLHS. |
4537 | if (CmpRHS == TVal || CmpRHS == FVal) { |
4538 | std::swap(a&: CmpLHS, b&: CmpRHS); |
4539 | Pred = ICmpInst::getSwappedPredicate(pred: Pred); |
4540 | } |
4541 | |
4542 | // Canonicalize common cmp+sel operand as TVal. |
4543 | if (CmpLHS == FVal) { |
4544 | std::swap(a&: TVal, b&: FVal); |
4545 | Pred = ICmpInst::getInversePredicate(pred: Pred); |
4546 | } |
4547 | |
4548 | // A vector select may be shuffling together elements that are equivalent |
4549 | // based on the max/min/select relationship. |
4550 | Value *X = CmpLHS, *Y = CmpRHS; |
4551 | bool PeekedThroughSelectShuffle = false; |
4552 | auto *Shuf = dyn_cast<ShuffleVectorInst>(Val: FVal); |
4553 | if (Shuf && Shuf->isSelect()) { |
4554 | if (Shuf->getOperand(i_nocapture: 0) == Y) |
4555 | FVal = Shuf->getOperand(i_nocapture: 1); |
4556 | else if (Shuf->getOperand(i_nocapture: 1) == Y) |
4557 | FVal = Shuf->getOperand(i_nocapture: 0); |
4558 | else |
4559 | return nullptr; |
4560 | PeekedThroughSelectShuffle = true; |
4561 | } |
4562 | |
4563 | // (X pred Y) ? X : max/min(X, Y) |
4564 | auto *MMI = dyn_cast<MinMaxIntrinsic>(Val: FVal); |
4565 | if (!MMI || TVal != X || |
4566 | !match(V: FVal, P: m_c_MaxOrMin(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
4567 | return nullptr; |
4568 | |
4569 | // (X > Y) ? X : max(X, Y) --> max(X, Y) |
4570 | // (X >= Y) ? X : max(X, Y) --> max(X, Y) |
4571 | // (X < Y) ? X : min(X, Y) --> min(X, Y) |
4572 | // (X <= Y) ? X : min(X, Y) --> min(X, Y) |
4573 | // |
4574 | // The equivalence allows a vector select (shuffle) of max/min and Y. Ex: |
4575 | // (X > Y) ? X : (Z ? max(X, Y) : Y) |
4576 | // If Z is true, this reduces as above, and if Z is false: |
4577 | // (X > Y) ? X : Y --> max(X, Y) |
4578 | ICmpInst::Predicate MMPred = MMI->getPredicate(); |
4579 | if (MMPred == CmpInst::getStrictPredicate(pred: Pred)) |
4580 | return MMI; |
4581 | |
4582 | // Other transforms are not valid with a shuffle. |
4583 | if (PeekedThroughSelectShuffle) |
4584 | return nullptr; |
4585 | |
4586 | // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y) |
4587 | if (Pred == CmpInst::ICMP_EQ) |
4588 | return MMI; |
4589 | |
4590 | // (X != Y) ? X : max/min(X, Y) --> X |
4591 | if (Pred == CmpInst::ICMP_NE) |
4592 | return X; |
4593 | |
4594 | // (X < Y) ? X : max(X, Y) --> X |
4595 | // (X <= Y) ? X : max(X, Y) --> X |
4596 | // (X > Y) ? X : min(X, Y) --> X |
4597 | // (X >= Y) ? X : min(X, Y) --> X |
4598 | ICmpInst::Predicate InvPred = CmpInst::getInversePredicate(pred: Pred); |
4599 | if (MMPred == CmpInst::getStrictPredicate(pred: InvPred)) |
4600 | return X; |
4601 | |
4602 | return nullptr; |
4603 | } |
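
// Worked example (hypothetical IR):
//   %c = icmp sgt i32 %x, %y
//   %m = call i32 @llvm.smax.i32(i32 %x, i32 %y)
//   select i1 %c, i32 %x, i32 %m --> %m
// When %x >s %y the smax is %x anyway, so both arms agree with %m.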
4604 | |
4605 | /// An alternative way to test if a bit is set or not uses sgt/slt instead of |
4606 | /// eq/ne. |
4607 | static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, |
4608 | ICmpInst::Predicate Pred, |
4609 | Value *TrueVal, Value *FalseVal) { |
4610 | Value *X; |
4611 | APInt Mask; |
4612 | if (!decomposeBitTestICmp(LHS: CmpLHS, RHS: CmpRHS, Pred, X, Mask)) |
4613 | return nullptr; |
4614 | |
4615 | return simplifySelectBitTest(TrueVal, FalseVal, X, Y: &Mask, |
4616 | TrueWhenUnset: Pred == ICmpInst::ICMP_EQ); |
4617 | } |
4618 | |
4619 | /// Try to simplify a select instruction when its condition operand is an |
4620 | /// integer equality comparison. |
4621 | static Value *simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS, |
4622 | Value *TrueVal, Value *FalseVal, |
4623 | const SimplifyQuery &Q, |
4624 | unsigned MaxRecurse) { |
4625 | if (simplifyWithOpReplaced(V: FalseVal, Op: CmpLHS, RepOp: CmpRHS, Q: Q.getWithoutUndef(), |
4626 | /* AllowRefinement */ false, |
4627 | /* DropFlags */ nullptr, MaxRecurse) == TrueVal) |
4628 | return FalseVal; |
4629 | if (simplifyWithOpReplaced(V: TrueVal, Op: CmpLHS, RepOp: CmpRHS, Q, |
4630 | /* AllowRefinement */ true, |
4631 | /* DropFlags */ nullptr, MaxRecurse) == FalseVal) |
4632 | return FalseVal; |
4633 | |
4634 | return nullptr; |
4635 | } |
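
// Worked example (hypothetical IR):
//   %c = icmp eq i32 %x, %y
//   %d = sub i32 %x, %y
//   select i1 %c, i32 0, i32 %d --> %d
// Substituting %y for %x in the false arm gives "sub %y, %y" == 0, matching
// the true arm, so the select is redundant.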
4636 | |
4637 | /// Try to simplify a select instruction when its condition operand is an |
4638 | /// integer comparison. |
4639 | static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, |
4640 | Value *FalseVal, |
4641 | const SimplifyQuery &Q, |
4642 | unsigned MaxRecurse) { |
4643 | ICmpInst::Predicate Pred; |
4644 | Value *CmpLHS, *CmpRHS; |
4645 | if (!match(V: CondVal, P: m_ICmp(Pred, L: m_Value(V&: CmpLHS), R: m_Value(V&: CmpRHS)))) |
4646 | return nullptr; |
4647 | |
4648 | if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TVal: TrueVal, FVal: FalseVal)) |
4649 | return V; |
4650 | |
4651 | // Canonicalize ne to eq predicate. |
4652 | if (Pred == ICmpInst::ICMP_NE) { |
4653 | Pred = ICmpInst::ICMP_EQ; |
4654 | std::swap(a&: TrueVal, b&: FalseVal); |
4655 | } |
4656 | |
4657 | // Check for integer min/max with a limit constant: |
4658 | // X > MIN_INT ? X : MIN_INT --> X |
4659 | // X < MAX_INT ? X : MAX_INT --> X |
4660 | if (TrueVal->getType()->isIntOrIntVectorTy()) { |
4661 | Value *X, *Y; |
4662 | SelectPatternFlavor SPF = |
4663 | matchDecomposedSelectPattern(CmpI: cast<ICmpInst>(Val: CondVal), TrueVal, FalseVal, |
4664 | LHS&: X, RHS&: Y) |
4665 | .Flavor; |
4666 | if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) { |
4667 | APInt LimitC = getMinMaxLimit(SPF: getInverseMinMaxFlavor(SPF), |
4668 | BitWidth: X->getType()->getScalarSizeInBits()); |
4669 | if (match(V: Y, P: m_SpecificInt(V: LimitC))) |
4670 | return X; |
4671 | } |
4672 | } |
4673 | |
4674 | if (Pred == ICmpInst::ICMP_EQ && match(V: CmpRHS, P: m_Zero())) { |
4675 | Value *X; |
4676 | const APInt *Y; |
4677 | if (match(V: CmpLHS, P: m_And(L: m_Value(V&: X), R: m_APInt(Res&: Y)))) |
4678 | if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, |
4679 | /*TrueWhenUnset=*/true)) |
4680 | return V; |
4681 | |
4682 | // Test for a bogus zero-shift-guard-op around funnel-shift or rotate. |
4683 | Value *ShAmt; |
4684 | auto isFsh = m_CombineOr(L: m_FShl(Op0: m_Value(V&: X), Op1: m_Value(), Op2: m_Value(V&: ShAmt)), |
4685 | R: m_FShr(Op0: m_Value(), Op1: m_Value(V&: X), Op2: m_Value(V&: ShAmt))); |
4686 | // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X |
4687 | // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X |
4688 | if (match(V: TrueVal, P: isFsh) && FalseVal == X && CmpLHS == ShAmt) |
4689 | return X; |
4690 | |
4691 | // Test for a zero-shift-guard-op around rotates. These are used to |
4692 | // avoid UB from oversized shifts in raw IR rotate patterns, but the |
4693 | // intrinsics do not have that problem. |
4694 | // We do not allow this transform for the general funnel shift case because |
4695 | // that would not preserve the poison safety of the original code. |
4696 | auto isRotate = |
4697 | m_CombineOr(L: m_FShl(Op0: m_Value(V&: X), Op1: m_Deferred(V: X), Op2: m_Value(V&: ShAmt)), |
4698 | R: m_FShr(Op0: m_Value(V&: X), Op1: m_Deferred(V: X), Op2: m_Value(V&: ShAmt))); |
4699 | // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt) |
4700 | // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt) |
4701 | if (match(V: FalseVal, P: isRotate) && TrueVal == X && CmpLHS == ShAmt && |
4702 | Pred == ICmpInst::ICMP_EQ) |
4703 | return FalseVal; |
4704 | |
4705 | // X == 0 ? abs(X) : -abs(X) --> -abs(X) |
4706 | // X == 0 ? -abs(X) : abs(X) --> abs(X) |
4707 | if (match(V: TrueVal, P: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS))) && |
4708 | match(V: FalseVal, P: m_Neg(V: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS))))) |
4709 | return FalseVal; |
4710 | if (match(V: TrueVal, |
4711 | P: m_Neg(V: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS)))) && |
4712 | match(V: FalseVal, P: m_Intrinsic<Intrinsic::abs>(Op0: m_Specific(V: CmpLHS)))) |
4713 | return FalseVal; |
4714 | } |
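
// Sketch of the rotate guard above (hypothetical IR):
//   %c = icmp eq i32 %amt, 0
//   %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %amt)
//   select i1 %c, i32 %x, i32 %r --> %r
// A rotate by zero returns %x itself, so the guard is redundant.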
4715 | |
4716 | // Check for other compares that behave like bit test. |
4717 | if (Value *V = |
4718 | simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal)) |
4719 | return V; |
4720 | |
4721 | // If we have a scalar equality comparison, then we know the value in one of |
4722 | // the arms of the select. See if substituting this value into the arm and |
4723 | // simplifying the result yields the same value as the other arm. |
4724 | if (Pred == ICmpInst::ICMP_EQ) { |
4725 | if (Value *V = simplifySelectWithICmpEq(CmpLHS, CmpRHS, TrueVal, FalseVal, |
4726 | Q, MaxRecurse)) |
4727 | return V; |
4728 | if (Value *V = simplifySelectWithICmpEq(CmpLHS: CmpRHS, CmpRHS: CmpLHS, TrueVal, FalseVal, |
4729 | Q, MaxRecurse)) |
4730 | return V; |
4731 | |
4732 | Value *X; |
4733 | Value *Y; |
4734 | // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways) |
4735 | if (match(V: CmpLHS, P: m_Or(L: m_Value(V&: X), R: m_Value(V&: Y))) && |
4736 | match(V: CmpRHS, P: m_Zero())) { |
4737 | // (X | Y) == 0 implies X == 0 and Y == 0. |
4738 | if (Value *V = simplifySelectWithICmpEq(CmpLHS: X, CmpRHS, TrueVal, FalseVal, Q, |
4739 | MaxRecurse)) |
4740 | return V; |
4741 | if (Value *V = simplifySelectWithICmpEq(CmpLHS: Y, CmpRHS, TrueVal, FalseVal, Q, |
4742 | MaxRecurse)) |
4743 | return V; |
4744 | } |
4745 | |
4746 | // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways) |
4747 | if (match(V: CmpLHS, P: m_And(L: m_Value(V&: X), R: m_Value(V&: Y))) && |
4748 | match(V: CmpRHS, P: m_AllOnes())) { |
4749 | // (X & Y) == -1 implies X == -1 and Y == -1. |
4750 | if (Value *V = simplifySelectWithICmpEq(CmpLHS: X, CmpRHS, TrueVal, FalseVal, Q, |
4751 | MaxRecurse)) |
4752 | return V; |
4753 | if (Value *V = simplifySelectWithICmpEq(CmpLHS: Y, CmpRHS, TrueVal, FalseVal, Q, |
4754 | MaxRecurse)) |
4755 | return V; |
4756 | } |
4757 | } |
4758 | |
4759 | return nullptr; |
4760 | } |
4761 | |
4762 | /// Try to simplify a select instruction when its condition operand is a |
4763 | /// floating-point comparison. |
4764 | static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, |
4765 | const SimplifyQuery &Q) { |
4766 | FCmpInst::Predicate Pred; |
4767 | if (!match(V: Cond, P: m_FCmp(Pred, L: m_Specific(V: T), R: m_Specific(V: F))) && |
4768 | !match(V: Cond, P: m_FCmp(Pred, L: m_Specific(V: F), R: m_Specific(V: T)))) |
4769 | return nullptr; |
4770 | |
4771 | // This transform is safe if we do not have (do not care about) -0.0 or if |
4772 | // at least one operand is known to not be -0.0. Otherwise, the select can |
4773 | // change the sign of a zero operand. |
4774 | bool HasNoSignedZeros = |
4775 | Q.CxtI && isa<FPMathOperator>(Val: Q.CxtI) && Q.CxtI->hasNoSignedZeros(); |
4776 | const APFloat *C; |
4777 | if (HasNoSignedZeros || (match(V: T, P: m_APFloat(Res&: C)) && C->isNonZero()) || |
4778 | (match(V: F, P: m_APFloat(Res&: C)) && C->isNonZero())) { |
4779 | // (T == F) ? T : F --> F |
4780 | // (F == T) ? T : F --> F |
4781 | if (Pred == FCmpInst::FCMP_OEQ) |
4782 | return F; |
4783 | |
4784 | // (T != F) ? T : F --> T |
4785 | // (F != T) ? T : F --> T |
4786 | if (Pred == FCmpInst::FCMP_UNE) |
4787 | return T; |
4788 | } |
4789 | |
4790 | return nullptr; |
4791 | } |
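
// For example (hypothetical IR; safe because the constant arm is non-zero):
//   %c = fcmp oeq float %t, 1.0
//   select i1 %c, float %t, float 1.0 --> 1.0
// If %t == 1.0 both arms agree, and otherwise (including NaN, where oeq is
// false) the false arm is taken anyway.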
4792 | |
4793 | /// Given operands for a SelectInst, see if we can fold the result. |
4794 | /// If not, this returns null. |
4795 | static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, |
4796 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
4797 | if (auto *CondC = dyn_cast<Constant>(Val: Cond)) { |
4798 | if (auto *TrueC = dyn_cast<Constant>(Val: TrueVal)) |
4799 | if (auto *FalseC = dyn_cast<Constant>(Val: FalseVal)) |
4800 | if (Constant *C = ConstantFoldSelectInstruction(Cond: CondC, V1: TrueC, V2: FalseC)) |
4801 | return C; |
4802 | |
4803 | // select poison, X, Y -> poison |
4804 | if (isa<PoisonValue>(Val: CondC)) |
4805 | return PoisonValue::get(T: TrueVal->getType()); |
4806 | |
4807 | // select undef, X, Y -> X or Y |
4808 | if (Q.isUndefValue(V: CondC)) |
4809 | return isa<Constant>(Val: FalseVal) ? FalseVal : TrueVal; |
4810 | |
4811 | // select true, X, Y --> X |
4812 | // select false, X, Y --> Y |
4813 | // For vectors, allow undef/poison elements in the condition to match the |
4814 | // defined elements, so we can eliminate the select. |
4815 | if (match(V: CondC, P: m_One())) |
4816 | return TrueVal; |
4817 | if (match(V: CondC, P: m_Zero())) |
4818 | return FalseVal; |
4819 | } |
4820 | |
4821 | assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4822 | "Select must have bool or bool vector condition");
4823 | assert(TrueVal->getType() == FalseVal->getType() &&
4824 | "Select must have same types for true/false ops");
4825 | |
4826 | if (Cond->getType() == TrueVal->getType()) { |
4827 | // select i1 Cond, i1 true, i1 false --> i1 Cond |
4828 | if (match(V: TrueVal, P: m_One()) && match(V: FalseVal, P: m_ZeroInt())) |
4829 | return Cond; |
4830 | |
4831 | // (X && Y) ? X : Y --> Y (commuted 2 ways) |
4832 | if (match(V: Cond, P: m_c_LogicalAnd(L: m_Specific(V: TrueVal), R: m_Specific(V: FalseVal)))) |
4833 | return FalseVal; |
4834 | |
4835 | // (X || Y) ? X : Y --> X (commuted 2 ways) |
4836 | if (match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: TrueVal), R: m_Specific(V: FalseVal)))) |
4837 | return TrueVal; |
4838 | |
4839 | // (X || Y) ? false : X --> false (commuted 2 ways) |
4840 | if (match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: FalseVal), R: m_Value())) && |
4841 | match(V: TrueVal, P: m_ZeroInt())) |
4842 | return ConstantInt::getFalse(Ty: Cond->getType()); |
4843 | |
4844 | // Match patterns that end in logical-and. |
4845 | if (match(V: FalseVal, P: m_ZeroInt())) { |
4846 | // !(X || Y) && X --> false (commuted 2 ways) |
4847 | if (match(V: Cond, P: m_Not(V: m_c_LogicalOr(L: m_Specific(V: TrueVal), R: m_Value())))) |
4848 | return ConstantInt::getFalse(Ty: Cond->getType()); |
4849 | // X && !(X || Y) --> false (commuted 2 ways) |
4850 | if (match(V: TrueVal, P: m_Not(V: m_c_LogicalOr(L: m_Specific(V: Cond), R: m_Value())))) |
4851 | return ConstantInt::getFalse(Ty: Cond->getType()); |
4852 | |
4853 | // (X || Y) && Y --> Y (commuted 2 ways) |
4854 | if (match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: TrueVal), R: m_Value()))) |
4855 | return TrueVal; |
4856 | // Y && (X || Y) --> Y (commuted 2 ways) |
4857 | if (match(V: TrueVal, P: m_c_LogicalOr(L: m_Specific(V: Cond), R: m_Value()))) |
4858 | return Cond; |
4859 | |
4860 | // (X || Y) && (X || !Y) --> X (commuted 8 ways) |
4861 | Value *X, *Y; |
4862 | if (match(V: Cond, P: m_c_LogicalOr(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: Y)))) && |
4863 | match(V: TrueVal, P: m_c_LogicalOr(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
4864 | return X; |
4865 | if (match(V: TrueVal, P: m_c_LogicalOr(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: Y)))) && |
4866 | match(V: Cond, P: m_c_LogicalOr(L: m_Specific(V: X), R: m_Specific(V: Y)))) |
4867 | return X; |
4868 | } |
4869 | |
4870 | // Match patterns that end in logical-or. |
4871 | if (match(V: TrueVal, P: m_One())) { |
4872 | // !(X && Y) || X --> true (commuted 2 ways) |
4873 | if (match(V: Cond, P: m_Not(V: m_c_LogicalAnd(L: m_Specific(V: FalseVal), R: m_Value())))) |
4874 | return ConstantInt::getTrue(Ty: Cond->getType()); |
4875 | // X || !(X && Y) --> true (commuted 2 ways) |
4876 | if (match(V: FalseVal, P: m_Not(V: m_c_LogicalAnd(L: m_Specific(V: Cond), R: m_Value())))) |
4877 | return ConstantInt::getTrue(Ty: Cond->getType()); |
4878 | |
4879 | // (X && Y) || Y --> Y (commuted 2 ways) |
4880 | if (match(V: Cond, P: m_c_LogicalAnd(L: m_Specific(V: FalseVal), R: m_Value()))) |
4881 | return FalseVal; |
4882 | // Y || (X && Y) --> Y (commuted 2 ways) |
4883 | if (match(V: FalseVal, P: m_c_LogicalAnd(L: m_Specific(V: Cond), R: m_Value()))) |
4884 | return Cond; |
4885 | } |
4886 | } |
4887 | |
4888 | // select ?, X, X -> X |
4889 | if (TrueVal == FalseVal) |
4890 | return TrueVal; |
4891 | |
4892 | if (Cond == TrueVal) { |
4893 | // select i1 X, i1 X, i1 false --> X (logical-and) |
4894 | if (match(V: FalseVal, P: m_ZeroInt())) |
4895 | return Cond; |
4896 | // select i1 X, i1 X, i1 true --> true |
4897 | if (match(V: FalseVal, P: m_One())) |
4898 | return ConstantInt::getTrue(Ty: Cond->getType()); |
4899 | } |
4900 | if (Cond == FalseVal) { |
4901 | // select i1 X, i1 true, i1 X --> X (logical-or) |
4902 | if (match(V: TrueVal, P: m_One())) |
4903 | return Cond; |
4904 | // select i1 X, i1 false, i1 X --> false |
4905 | if (match(V: TrueVal, P: m_ZeroInt())) |
4906 | return ConstantInt::getFalse(Ty: Cond->getType()); |
4907 | } |
4908 | |
4909 | // If the true or false value is poison, we can fold to the other value. |
4910 | // If the true or false value is undef, we can fold to the other value as |
4911 | // long as the other value isn't poison. |
4912 | // select ?, poison, X -> X |
4913 | // select ?, undef, X -> X |
4914 | if (isa<PoisonValue>(Val: TrueVal) || |
4915 | (Q.isUndefValue(V: TrueVal) && impliesPoison(ValAssumedPoison: FalseVal, V: Cond))) |
4916 | return FalseVal; |
4917 | // select ?, X, poison -> X |
4918 | // select ?, X, undef -> X |
4919 | if (isa<PoisonValue>(Val: FalseVal) || |
4920 | (Q.isUndefValue(V: FalseVal) && impliesPoison(ValAssumedPoison: TrueVal, V: Cond))) |
4921 | return TrueVal; |
4922 | |
4923 | // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC'' |
4924 | Constant *TrueC, *FalseC; |
4925 | if (isa<FixedVectorType>(Val: TrueVal->getType()) && |
4926 | match(V: TrueVal, P: m_Constant(C&: TrueC)) && |
4927 | match(V: FalseVal, P: m_Constant(C&: FalseC))) { |
4928 | unsigned NumElts = |
4929 | cast<FixedVectorType>(Val: TrueC->getType())->getNumElements(); |
4930 | SmallVector<Constant *, 16> NewC; |
4931 | for (unsigned i = 0; i != NumElts; ++i) { |
4932 | // Bail out on incomplete vector constants. |
4933 | Constant *TEltC = TrueC->getAggregateElement(Elt: i); |
4934 | Constant *FEltC = FalseC->getAggregateElement(Elt: i); |
4935 | if (!TEltC || !FEltC) |
4936 | break; |
4937 | |
4938 | // If the elements match (undef or not), that value is the result. If only |
4939 | // one element is undef, choose the defined element as the safe result. |
4940 | if (TEltC == FEltC) |
4941 | NewC.push_back(Elt: TEltC); |
4942 | else if (isa<PoisonValue>(Val: TEltC) || |
4943 | (Q.isUndefValue(V: TEltC) && isGuaranteedNotToBePoison(V: FEltC))) |
4944 | NewC.push_back(Elt: FEltC); |
4945 | else if (isa<PoisonValue>(Val: FEltC) || |
4946 | (Q.isUndefValue(V: FEltC) && isGuaranteedNotToBePoison(V: TEltC))) |
4947 | NewC.push_back(Elt: TEltC); |
4948 | else |
4949 | break; |
4950 | } |
4951 | if (NewC.size() == NumElts) |
4952 | return ConstantVector::get(V: NewC); |
4953 | } |
4954 | |
4955 | if (Value *V = |
4956 | simplifySelectWithICmpCond(CondVal: Cond, TrueVal, FalseVal, Q, MaxRecurse)) |
4957 | return V; |
4958 | |
4959 | if (Value *V = simplifySelectWithFCmp(Cond, T: TrueVal, F: FalseVal, Q)) |
4960 | return V; |
4961 | |
4962 | if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal)) |
4963 | return V; |
4964 | |
4965 | std::optional<bool> Imp = isImpliedByDomCondition(Cond, ContextI: Q.CxtI, DL: Q.DL); |
4966 | if (Imp) |
4967 | return *Imp ? TrueVal : FalseVal; |
4968 | |
4969 | return nullptr; |
4970 | } |
4971 | |
4972 | Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, |
4973 | const SimplifyQuery &Q) { |
4974 | return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, MaxRecurse: RecursionLimit); |
4975 | } |
4976 | |
4977 | /// Given operands for a GetElementPtrInst, see if we can fold the result.
4978 | /// If not, this returns null. |
4979 | static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr, |
4980 | ArrayRef<Value *> Indices, GEPNoWrapFlags NW, |
4981 | const SimplifyQuery &Q, unsigned) { |
4982 | // The address space of the GEP pointer operand.
4983 | unsigned AS = |
4984 | cast<PointerType>(Val: Ptr->getType()->getScalarType())->getAddressSpace(); |
4985 | |
4986 | // getelementptr P -> P. |
4987 | if (Indices.empty()) |
4988 | return Ptr; |
4989 | |
4990 | // Compute the (pointer) type returned by the GEP instruction. |
4991 | Type *LastType = GetElementPtrInst::getIndexedType(Ty: SrcTy, IdxList: Indices); |
4992 | Type *GEPTy = Ptr->getType(); |
4993 | if (!GEPTy->isVectorTy()) { |
4994 | for (Value *Op : Indices) { |
4995 | // If one of the operands is a vector, the result type is a vector of |
4996 | // pointers. All vector operands must have the same number of elements. |
4997 | if (VectorType *VT = dyn_cast<VectorType>(Val: Op->getType())) { |
4998 | GEPTy = VectorType::get(ElementType: GEPTy, EC: VT->getElementCount()); |
4999 | break; |
5000 | } |
5001 | } |
5002 | } |
5003 | |
5004 | // All-zero GEP is a no-op, unless it performs a vector splat. |
5005 | if (Ptr->getType() == GEPTy && |
5006 | all_of(Range&: Indices, P: [](const auto *V) { return match(V, m_Zero()); })) |
5007 | return Ptr; |
5008 | |
5009 | // getelementptr poison, idx -> poison |
5010 | // getelementptr baseptr, poison -> poison |
5011 | if (isa<PoisonValue>(Val: Ptr) || |
5012 | any_of(Range&: Indices, P: [](const auto *V) { return isa<PoisonValue>(V); })) |
5013 | return PoisonValue::get(T: GEPTy); |
5014 | |
5015 | // getelementptr undef, idx -> undef |
5016 | if (Q.isUndefValue(V: Ptr)) |
5017 | return UndefValue::get(T: GEPTy); |
5018 | |
5019 | bool IsScalableVec = |
5020 | SrcTy->isScalableTy() || any_of(Range&: Indices, P: [](const Value *V) { |
5021 | return isa<ScalableVectorType>(Val: V->getType()); |
5022 | }); |
5023 | |
5024 | if (Indices.size() == 1) { |
5025 | Type *Ty = SrcTy; |
5026 | if (!IsScalableVec && Ty->isSized()) { |
5027 | Value *P; |
5028 | uint64_t C; |
5029 | uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty); |
5030 | // getelementptr P, N -> P if P points to a type of zero size. |
5031 | if (TyAllocSize == 0 && Ptr->getType() == GEPTy) |
5032 | return Ptr; |
5033 | |
5034 | // The following transforms are only safe if the ptrtoint cast |
5035 | // doesn't truncate the pointers. |
5036 | if (Indices[0]->getType()->getScalarSizeInBits() == |
5037 | Q.DL.getPointerSizeInBits(AS)) { |
5038 | auto CanSimplify = [GEPTy, &P, Ptr]() -> bool { |
5039 | return P->getType() == GEPTy && |
5040 | getUnderlyingObject(V: P) == getUnderlyingObject(V: Ptr); |
5041 | }; |
5042 | // getelementptr V, (sub P, V) -> P if P points to a type of size 1. |
5043 | if (TyAllocSize == 1 && |
5044 | match(V: Indices[0], |
5045 | P: m_Sub(L: m_PtrToInt(Op: m_Value(V&: P)), R: m_PtrToInt(Op: m_Specific(V: Ptr)))) && |
5046 | CanSimplify()) |
5047 | return P; |
5048 | |
5049 | // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of |
5050 | // size 1 << C. |
5051 | if (match(V: Indices[0], P: m_AShr(L: m_Sub(L: m_PtrToInt(Op: m_Value(V&: P)), |
5052 | R: m_PtrToInt(Op: m_Specific(V: Ptr))), |
5053 | R: m_ConstantInt(V&: C))) && |
5054 | TyAllocSize == 1ULL << C && CanSimplify()) |
5055 | return P; |
5056 | |
5057 | // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of |
5058 | // size C. |
5059 | if (match(V: Indices[0], P: m_SDiv(L: m_Sub(L: m_PtrToInt(Op: m_Value(V&: P)), |
5060 | R: m_PtrToInt(Op: m_Specific(V: Ptr))), |
5061 | R: m_SpecificInt(V: TyAllocSize))) && |
5062 | CanSimplify()) |
5063 | return P; |
5064 | } |
5065 | } |
5066 | } |
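
// Sketch of the sub-of-ptrtoint fold above (hypothetical IR; assumes i64
// matches the pointer width and that %p and %v share an underlying object):
//   %pi = ptrtoint ptr %p to i64
//   %vi = ptrtoint ptr %v to i64
//   %d = sub i64 %pi, %vi
//   getelementptr i8, ptr %v, i64 %d --> %p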
5067 | |
5068 | if (!IsScalableVec && Q.DL.getTypeAllocSize(Ty: LastType) == 1 && |
5069 | all_of(Range: Indices.drop_back(N: 1), |
5070 | P: [](Value *Idx) { return match(V: Idx, P: m_Zero()); })) { |
5071 | unsigned IdxWidth = |
5072 | Q.DL.getIndexSizeInBits(AS: Ptr->getType()->getPointerAddressSpace()); |
5073 | if (Q.DL.getTypeSizeInBits(Ty: Indices.back()->getType()) == IdxWidth) { |
5074 | APInt BasePtrOffset(IdxWidth, 0); |
5075 | Value *StrippedBasePtr = |
5076 | Ptr->stripAndAccumulateInBoundsConstantOffsets(DL: Q.DL, Offset&: BasePtrOffset); |
5077 | |
5078 | // Avoid creating inttoptr of zero here: While LLVM's treatment of
5079 | // inttoptr is generally conservative, this particular case is folded to |
5080 | // a null pointer, which will have incorrect provenance. |
5081 | |
5082 | // gep (gep V, C), (sub 0, V) -> C |
5083 | if (match(V: Indices.back(), |
5084 | P: m_Neg(V: m_PtrToInt(Op: m_Specific(V: StrippedBasePtr)))) && |
5085 | !BasePtrOffset.isZero()) { |
5086 | auto *CI = ConstantInt::get(Context&: GEPTy->getContext(), V: BasePtrOffset); |
5087 | return ConstantExpr::getIntToPtr(C: CI, Ty: GEPTy); |
5088 | } |
5089 | // gep (gep V, C), (xor V, -1) -> C-1 |
5090 | if (match(V: Indices.back(), |
5091 | P: m_Xor(L: m_PtrToInt(Op: m_Specific(V: StrippedBasePtr)), R: m_AllOnes())) && |
5092 | !BasePtrOffset.isOne()) { |
5093 | auto *CI = ConstantInt::get(Context&: GEPTy->getContext(), V: BasePtrOffset - 1); |
5094 | return ConstantExpr::getIntToPtr(C: CI, Ty: GEPTy); |
5095 | } |
5096 | } |
5097 | } |
5098 | |
5099 | // Check to see if this is constant foldable. |
5100 | if (!isa<Constant>(Val: Ptr) || |
5101 | !all_of(Range&: Indices, P: [](Value *V) { return isa<Constant>(Val: V); })) |
5102 | return nullptr; |
5103 | |
5104 | if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy: SrcTy)) |
5105 | return ConstantFoldGetElementPtr(Ty: SrcTy, C: cast<Constant>(Val: Ptr), InRange: std::nullopt, |
5106 | Idxs: Indices); |
5107 | |
5108 | auto *CE = |
5109 | ConstantExpr::getGetElementPtr(Ty: SrcTy, C: cast<Constant>(Val: Ptr), IdxList: Indices, NW); |
5110 | return ConstantFoldConstant(C: CE, DL: Q.DL); |
5111 | } |
5112 | |
5113 | Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices, |
5114 | GEPNoWrapFlags NW, const SimplifyQuery &Q) { |
5115 | return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit); |
5116 | } |
5117 | |
5118 | /// Given operands for an InsertValueInst, see if we can fold the result. |
5119 | /// If not, this returns null. |
5120 | static Value *simplifyInsertValueInst(Value *Agg, Value *Val, |
5121 | ArrayRef<unsigned> Idxs, |
5122 | const SimplifyQuery &Q, unsigned) { |
5123 | if (Constant *CAgg = dyn_cast<Constant>(Val: Agg)) |
5124 | if (Constant *CVal = dyn_cast<Constant>(Val)) |
5125 | return ConstantFoldInsertValueInstruction(Agg: CAgg, Val: CVal, Idxs); |
5126 | |
5127 | // insertvalue x, poison, n -> x |
5128 | // insertvalue x, undef, n -> x if x cannot be poison |
5129 | if (isa<PoisonValue>(Val) || |
5130 | (Q.isUndefValue(V: Val) && isGuaranteedNotToBePoison(V: Agg))) |
5131 | return Agg; |
5132 | |
5133 | // insertvalue x, (extractvalue y, n), n |
5134 | if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val)) |
5135 | if (EV->getAggregateOperand()->getType() == Agg->getType() && |
5136 | EV->getIndices() == Idxs) { |
5137 | // insertvalue poison, (extractvalue y, n), n -> y |
5138 | // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison |
5139 | if (isa<PoisonValue>(Val: Agg) || |
5140 | (Q.isUndefValue(V: Agg) && |
5141 | isGuaranteedNotToBePoison(V: EV->getAggregateOperand()))) |
5142 | return EV->getAggregateOperand(); |
5143 | |
5144 | // insertvalue y, (extractvalue y, n), n -> y |
5145 | if (Agg == EV->getAggregateOperand()) |
5146 | return Agg; |
5147 | } |
5148 | |
5149 | return nullptr; |
5150 | } |
5151 | |
5152 | Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val, |
5153 | ArrayRef<unsigned> Idxs, |
5154 | const SimplifyQuery &Q) { |
5155 | return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit); |
5156 | } |
5157 | |
5158 | Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx, |
5159 | const SimplifyQuery &Q) { |
5160 | // Try to constant fold. |
5161 | auto *VecC = dyn_cast<Constant>(Val: Vec); |
5162 | auto *ValC = dyn_cast<Constant>(Val); |
5163 | auto *IdxC = dyn_cast<Constant>(Val: Idx); |
5164 | if (VecC && ValC && IdxC) |
5165 | return ConstantExpr::getInsertElement(Vec: VecC, Elt: ValC, Idx: IdxC); |
5166 | |
5167 | // For fixed-length vector, fold into poison if index is out of bounds. |
5168 | if (auto *CI = dyn_cast<ConstantInt>(Val: Idx)) { |
5169 | if (isa<FixedVectorType>(Val: Vec->getType()) && |
5170 | CI->uge(Num: cast<FixedVectorType>(Val: Vec->getType())->getNumElements())) |
5171 | return PoisonValue::get(T: Vec->getType()); |
5172 | } |
5173 | |
5174 | // If index is undef, it might be out of bounds (see above case) |
5175 | if (Q.isUndefValue(V: Idx)) |
5176 | return PoisonValue::get(T: Vec->getType()); |
5177 | |
5178 | // If the scalar is poison, or it is undef and there is no risk of |
5179 | // propagating poison from the vector value, simplify to the vector value. |
5180 | if (isa<PoisonValue>(Val) || |
5181 | (Q.isUndefValue(V: Val) && isGuaranteedNotToBePoison(V: Vec))) |
5182 | return Vec; |
5183 | |
5184 | // If we are extracting a value from a vector, then inserting it into the same |
5185 | // place, that's the input vector: |
5186 | // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec |
5187 | if (match(V: Val, P: m_ExtractElt(Val: m_Specific(V: Vec), Idx: m_Specific(V: Idx)))) |
5188 | return Vec; |
5189 | |
5190 | return nullptr; |
5191 | } |
5192 | |
5193 | /// Given operands for an ExtractValueInst, see if we can fold the result. |
5194 | /// If not, this returns null. |
static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                       const SimplifyQuery &, unsigned) {
5197 | if (auto *CAgg = dyn_cast<Constant>(Val: Agg)) |
5198 | return ConstantFoldExtractValueInstruction(Agg: CAgg, Idxs); |
5199 | |
5200 | // extractvalue x, (insertvalue y, elt, n), n -> elt |
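  // For example (illustrative):
  //   %agg = insertvalue {i32, i32} %y, i32 %elt, 1
  //   extractvalue {i32, i32} %agg, 1  -->  %elt
  // (insertions at unrelated indices in between are stepped over below).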
5201 | unsigned NumIdxs = Idxs.size(); |
5202 | for (auto *IVI = dyn_cast<InsertValueInst>(Val: Agg); IVI != nullptr; |
5203 | IVI = dyn_cast<InsertValueInst>(Val: IVI->getAggregateOperand())) { |
5204 | ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices(); |
5205 | unsigned NumInsertValueIdxs = InsertValueIdxs.size(); |
5206 | unsigned NumCommonIdxs = std::min(a: NumInsertValueIdxs, b: NumIdxs); |
5207 | if (InsertValueIdxs.slice(N: 0, M: NumCommonIdxs) == |
5208 | Idxs.slice(N: 0, M: NumCommonIdxs)) { |
5209 | if (NumIdxs == NumInsertValueIdxs) |
5210 | return IVI->getInsertedValueOperand(); |
5211 | break; |
5212 | } |
5213 | } |
5214 | |
5215 | return nullptr; |
5216 | } |
5217 | |
Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                      const SimplifyQuery &Q) {
5220 | return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit); |
5221 | } |
5222 | |
5223 | /// Given operands for an ExtractElementInst, see if we can fold the result. |
5224 | /// If not, this returns null. |
static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
                                         const SimplifyQuery &Q, unsigned) {
5227 | auto *VecVTy = cast<VectorType>(Val: Vec->getType()); |
5228 | if (auto *CVec = dyn_cast<Constant>(Val: Vec)) { |
5229 | if (auto *CIdx = dyn_cast<Constant>(Val: Idx)) |
5230 | return ConstantExpr::getExtractElement(Vec: CVec, Idx: CIdx); |
5231 | |
5232 | if (Q.isUndefValue(V: Vec)) |
5233 | return UndefValue::get(T: VecVTy->getElementType()); |
5234 | } |
5235 | |
5236 | // An undef extract index can be arbitrarily chosen to be an out-of-range |
5237 | // index value, which would result in the instruction being poison. |
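  // e.g. extractelement <4 x i32> %v, i64 undef --> poison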
5238 | if (Q.isUndefValue(V: Idx)) |
5239 | return PoisonValue::get(T: VecVTy->getElementType()); |
5240 | |
5241 | // If extracting a specified index from the vector, see if we can recursively |
5242 | // find a previously computed scalar that was inserted into the vector. |
5243 | if (auto *IdxC = dyn_cast<ConstantInt>(Val: Idx)) { |
5244 | // For fixed-length vector, fold into undef if index is out of bounds. |
5245 | unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue(); |
5246 | if (isa<FixedVectorType>(Val: VecVTy) && IdxC->getValue().uge(RHS: MinNumElts)) |
5247 | return PoisonValue::get(T: VecVTy->getElementType()); |
5248 | // Handle case where an element is extracted from a splat. |
5249 | if (IdxC->getValue().ult(RHS: MinNumElts)) |
5250 | if (auto *Splat = getSplatValue(V: Vec)) |
5251 | return Splat; |
5252 | if (Value *Elt = findScalarElement(V: Vec, EltNo: IdxC->getZExtValue())) |
5253 | return Elt; |
5254 | } else { |
5255 | // extractelt x, (insertelt y, elt, n), n -> elt |
5256 | // If the possibly-variable indices are trivially known to be equal |
5257 | // (because they are the same operand) then use the value that was |
5258 | // inserted directly. |
5259 | auto *IE = dyn_cast<InsertElementInst>(Val: Vec); |
5260 | if (IE && IE->getOperand(i_nocapture: 2) == Idx) |
5261 | return IE->getOperand(i_nocapture: 1); |
5262 | |
5263 | // The index is not relevant if our vector is a splat. |
5264 | if (Value *Splat = getSplatValue(V: Vec)) |
5265 | return Splat; |
5266 | } |
5267 | return nullptr; |
5268 | } |
5269 | |
Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
                                        const SimplifyQuery &Q) {
5272 | return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit); |
5273 | } |
5274 | |
5275 | /// See if we can fold the given phi. If not, returns null. |
5276 | static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues, |
5277 | const SimplifyQuery &Q) { |
  // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
  //          here, because the PHI we may succeed simplifying to was not
  //          def-reachable from the original PHI!
5281 | |
5282 | // If all of the PHI's incoming values are the same then replace the PHI node |
5283 | // with the common value. |
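  // For example (illustrative):
  //   phi i32 [ %x, %bb0 ], [ %x, %bb1 ], [ undef, %bb2 ] --> %x
  // (subject to the dominance check for the undef case below).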
5284 | Value *CommonValue = nullptr; |
5285 | bool HasPoisonInput = false; |
5286 | bool HasUndefInput = false; |
5287 | for (Value *Incoming : IncomingValues) { |
5288 | // If the incoming value is the phi node itself, it can safely be skipped. |
5289 | if (Incoming == PN) |
5290 | continue; |
5291 | if (isa<PoisonValue>(Val: Incoming)) { |
5292 | HasPoisonInput = true; |
5293 | continue; |
5294 | } |
5295 | if (Q.isUndefValue(V: Incoming)) { |
5296 | // Remember that we saw an undef value, but otherwise ignore them. |
5297 | HasUndefInput = true; |
5298 | continue; |
5299 | } |
5300 | if (CommonValue && Incoming != CommonValue) |
5301 | return nullptr; // Not the same, bail out. |
5302 | CommonValue = Incoming; |
5303 | } |
5304 | |
5305 | // If CommonValue is null then all of the incoming values were either undef, |
5306 | // poison or equal to the phi node itself. |
5307 | if (!CommonValue) |
5308 | return HasUndefInput ? UndefValue::get(T: PN->getType()) |
5309 | : PoisonValue::get(T: PN->getType()); |
5310 | |
5311 | if (HasPoisonInput || HasUndefInput) { |
5312 | // If we have a PHI node like phi(X, undef, X), where X is defined by some |
5313 | // instruction, we cannot return X as the result of the PHI node unless it |
5314 | // dominates the PHI block. |
5315 | return valueDominatesPHI(V: CommonValue, P: PN, DT: Q.DT) ? CommonValue : nullptr; |
5316 | } |
5317 | |
5318 | return CommonValue; |
5319 | } |
5320 | |
5321 | static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, |
5322 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
5323 | if (auto *C = dyn_cast<Constant>(Val: Op)) |
5324 | return ConstantFoldCastOperand(Opcode: CastOpc, C, DestTy: Ty, DL: Q.DL); |
5325 | |
5326 | if (auto *CI = dyn_cast<CastInst>(Val: Op)) { |
5327 | auto *Src = CI->getOperand(i_nocapture: 0); |
5328 | Type *SrcTy = Src->getType(); |
5329 | Type *MidTy = CI->getType(); |
5330 | Type *DstTy = Ty; |
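    // If the cast pair cancels out (isEliminableCastPair classifies it as a
    // no-op "bitcast" between identical types), the result is just Src.
    // For example (illustrative): trunc (zext i32 %x to i64) to i32 --> %x.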
5331 | if (Src->getType() == Ty) { |
5332 | auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); |
5333 | auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); |
5334 | Type *SrcIntPtrTy = |
5335 | SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; |
5336 | Type *MidIntPtrTy = |
5337 | MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; |
5338 | Type *DstIntPtrTy = |
5339 | DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; |
5340 | if (CastInst::isEliminableCastPair(firstOpcode: FirstOp, secondOpcode: SecondOp, SrcTy, MidTy, DstTy, |
5341 | SrcIntPtrTy, MidIntPtrTy, |
5342 | DstIntPtrTy) == Instruction::BitCast) |
5343 | return Src; |
5344 | } |
5345 | } |
5346 | |
5347 | // bitcast x -> x |
5348 | if (CastOpc == Instruction::BitCast) |
5349 | if (Op->getType() == Ty) |
5350 | return Op; |
5351 | |
5352 | // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X |
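  // For example (illustrative IR, where i64 is the index type of %ptr):
  //   %pi = ptrtoint ptr %ptr to i64
  //   %d  = sub i64 %x, %pi
  //   %a  = getelementptr i8, ptr %ptr, i64 %d
  //   ptrtoint ptr %a to i64  -->  %x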
5353 | Value *Ptr, *X; |
5354 | if (CastOpc == Instruction::PtrToInt && |
5355 | match(V: Op, P: m_PtrAdd(PointerOp: m_Value(V&: Ptr), |
5356 | OffsetOp: m_Sub(L: m_Value(V&: X), R: m_PtrToInt(Op: m_Deferred(V: Ptr))))) && |
5357 | X->getType() == Ty && Ty == Q.DL.getIndexType(PtrTy: Ptr->getType())) |
5358 | return X; |
5359 | |
5360 | return nullptr; |
5361 | } |
5362 | |
5363 | Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, |
5364 | const SimplifyQuery &Q) { |
5365 | return ::simplifyCastInst(CastOpc, Op, Ty, Q, MaxRecurse: RecursionLimit); |
5366 | } |
5367 | |
5368 | /// For the given destination element of a shuffle, peek through shuffles to |
5369 | /// match a root vector source operand that contains that element in the same |
5370 | /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). |
5371 | static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, |
5372 | int MaskVal, Value *RootVec, |
5373 | unsigned MaxRecurse) { |
5374 | if (!MaxRecurse--) |
5375 | return nullptr; |
5376 | |
5377 | // Bail out if any mask value is undefined. That kind of shuffle may be |
5378 | // simplified further based on demanded bits or other folds. |
5379 | if (MaskVal == -1) |
5380 | return nullptr; |
5381 | |
5382 | // The mask value chooses which source operand we need to look at next. |
5383 | int InVecNumElts = cast<FixedVectorType>(Val: Op0->getType())->getNumElements(); |
5384 | int RootElt = MaskVal; |
5385 | Value *SourceOp = Op0; |
5386 | if (MaskVal >= InVecNumElts) { |
5387 | RootElt = MaskVal - InVecNumElts; |
5388 | SourceOp = Op1; |
5389 | } |
5390 | |
5391 | // If the source operand is a shuffle itself, look through it to find the |
5392 | // matching root vector. |
5393 | if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(Val: SourceOp)) { |
5394 | return foldIdentityShuffles( |
5395 | DestElt, Op0: SourceShuf->getOperand(i_nocapture: 0), Op1: SourceShuf->getOperand(i_nocapture: 1), |
5396 | MaskVal: SourceShuf->getMaskValue(Elt: RootElt), RootVec, MaxRecurse); |
5397 | } |
5398 | |
5399 | // The source operand is not a shuffle. Initialize the root vector value for |
5400 | // this shuffle if that has not been done yet. |
5401 | if (!RootVec) |
5402 | RootVec = SourceOp; |
5403 | |
5404 | // Give up as soon as a source operand does not match the existing root value. |
5405 | if (RootVec != SourceOp) |
5406 | return nullptr; |
5407 | |
5408 | // The element must be coming from the same lane in the source vector |
5409 | // (although it may have crossed lanes in intermediate shuffles). |
5410 | if (RootElt != DestElt) |
5411 | return nullptr; |
5412 | |
5413 | return RootVec; |
5414 | } |
5415 | |
5416 | static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, |
5417 | ArrayRef<int> Mask, Type *RetTy, |
5418 | const SimplifyQuery &Q, |
5419 | unsigned MaxRecurse) { |
5420 | if (all_of(Range&: Mask, P: [](int Elem) { return Elem == PoisonMaskElem; })) |
5421 | return PoisonValue::get(T: RetTy); |
5422 | |
5423 | auto *InVecTy = cast<VectorType>(Val: Op0->getType()); |
5424 | unsigned MaskNumElts = Mask.size(); |
5425 | ElementCount InVecEltCount = InVecTy->getElementCount(); |
5426 | |
5427 | bool Scalable = InVecEltCount.isScalable(); |
5428 | |
5429 | SmallVector<int, 32> Indices; |
5430 | Indices.assign(in_start: Mask.begin(), in_end: Mask.end()); |
5431 | |
5432 | // Canonicalization: If mask does not select elements from an input vector, |
5433 | // replace that input vector with poison. |
5434 | if (!Scalable) { |
5435 | bool MaskSelects0 = false, MaskSelects1 = false; |
5436 | unsigned InVecNumElts = InVecEltCount.getKnownMinValue(); |
5437 | for (unsigned i = 0; i != MaskNumElts; ++i) { |
5438 | if (Indices[i] == -1) |
5439 | continue; |
5440 | if ((unsigned)Indices[i] < InVecNumElts) |
5441 | MaskSelects0 = true; |
5442 | else |
5443 | MaskSelects1 = true; |
5444 | } |
5445 | if (!MaskSelects0) |
5446 | Op0 = PoisonValue::get(T: InVecTy); |
5447 | if (!MaskSelects1) |
5448 | Op1 = PoisonValue::get(T: InVecTy); |
5449 | } |
5450 | |
5451 | auto *Op0Const = dyn_cast<Constant>(Val: Op0); |
5452 | auto *Op1Const = dyn_cast<Constant>(Val: Op1); |
5453 | |
5454 | // If all operands are constant, constant fold the shuffle. This |
5455 | // transformation depends on the value of the mask which is not known at |
5456 | // compile time for scalable vectors |
5457 | if (Op0Const && Op1Const) |
5458 | return ConstantExpr::getShuffleVector(V1: Op0Const, V2: Op1Const, Mask); |
5459 | |
5460 | // Canonicalization: if only one input vector is constant, it shall be the |
5461 | // second one. This transformation depends on the value of the mask which |
5462 | // is not known at compile time for scalable vectors |
5463 | if (!Scalable && Op0Const && !Op1Const) { |
5464 | std::swap(a&: Op0, b&: Op1); |
5465 | ShuffleVectorInst::commuteShuffleMask(Mask: Indices, |
5466 | InVecNumElts: InVecEltCount.getKnownMinValue()); |
5467 | } |
5468 | |
5469 | // A splat of an inserted scalar constant becomes a vector constant: |
5470 | // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...> |
5471 | // NOTE: We may have commuted above, so analyze the updated Indices, not the |
5472 | // original mask constant. |
5473 | // NOTE: This transformation depends on the value of the mask which is not |
5474 | // known at compile time for scalable vectors |
5475 | Constant *C; |
5476 | ConstantInt *IndexC; |
5477 | if (!Scalable && match(V: Op0, P: m_InsertElt(Val: m_Value(), Elt: m_Constant(C), |
5478 | Idx: m_ConstantInt(CI&: IndexC)))) { |
5479 | // Match a splat shuffle mask of the insert index allowing undef elements. |
5480 | int InsertIndex = IndexC->getZExtValue(); |
5481 | if (all_of(Range&: Indices, P: [InsertIndex](int MaskElt) { |
5482 | return MaskElt == InsertIndex || MaskElt == -1; |
5483 | })) { |
      assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5485 | |
5486 | // Shuffle mask poisons become poison constant result elements. |
5487 | SmallVector<Constant *, 16> VecC(MaskNumElts, C); |
5488 | for (unsigned i = 0; i != MaskNumElts; ++i) |
5489 | if (Indices[i] == -1) |
5490 | VecC[i] = PoisonValue::get(T: C->getType()); |
5491 | return ConstantVector::get(V: VecC); |
5492 | } |
5493 | } |
5494 | |
  // A shuffle of a splat is always the splat itself. Legal if the shuffle's
  // value type is the same as the input vectors' type.
5497 | if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Val: Op0)) |
5498 | if (Q.isUndefValue(V: Op1) && RetTy == InVecTy && |
5499 | all_equal(Range: OpShuf->getShuffleMask())) |
5500 | return Op0; |
5501 | |
  // All remaining transformations depend on the value of the mask, which is
  // not known at compile time for scalable vectors.
5504 | if (Scalable) |
5505 | return nullptr; |
5506 | |
5507 | // Don't fold a shuffle with undef mask elements. This may get folded in a |
5508 | // better way using demanded bits or other analysis. |
5509 | // TODO: Should we allow this? |
5510 | if (is_contained(Range&: Indices, Element: -1)) |
5511 | return nullptr; |
5512 | |
5513 | // Check if every element of this shuffle can be mapped back to the |
5514 | // corresponding element of a single root vector. If so, we don't need this |
5515 | // shuffle. This handles simple identity shuffles as well as chains of |
5516 | // shuffles that may widen/narrow and/or move elements across lanes and back. |
5517 | Value *RootVec = nullptr; |
5518 | for (unsigned i = 0; i != MaskNumElts; ++i) { |
5519 | // Note that recursion is limited for each vector element, so if any element |
5520 | // exceeds the limit, this will fail to simplify. |
5521 | RootVec = |
5522 | foldIdentityShuffles(DestElt: i, Op0, Op1, MaskVal: Indices[i], RootVec, MaxRecurse); |
5523 | |
5524 | // We can't replace a widening/narrowing shuffle with one of its operands. |
5525 | if (!RootVec || RootVec->getType() != RetTy) |
5526 | return nullptr; |
5527 | } |
5528 | return RootVec; |
5529 | } |
5530 | |
5531 | /// Given operands for a ShuffleVectorInst, fold the result or return null. |
5532 | Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1, |
5533 | ArrayRef<int> Mask, Type *RetTy, |
5534 | const SimplifyQuery &Q) { |
5535 | return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, MaxRecurse: RecursionLimit); |
5536 | } |
5537 | |
5538 | static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op, |
5539 | const SimplifyQuery &Q) { |
5540 | if (auto *C = dyn_cast<Constant>(Val: Op)) |
5541 | return ConstantFoldUnaryOpOperand(Opcode, Op: C, DL: Q.DL); |
5542 | return nullptr; |
5543 | } |
5544 | |
5545 | /// Given the operand for an FNeg, see if we can fold the result. If not, this |
5546 | /// returns null. |
5547 | static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, |
5548 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
5549 | if (Constant *C = foldConstant(Opcode: Instruction::FNeg, Op, Q)) |
5550 | return C; |
5551 | |
5552 | Value *X; |
5553 | // fneg (fneg X) ==> X |
5554 | if (match(V: Op, P: m_FNeg(X: m_Value(V&: X)))) |
5555 | return X; |
5556 | |
5557 | return nullptr; |
5558 | } |
5559 | |
5560 | Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF, |
5561 | const SimplifyQuery &Q) { |
5562 | return ::simplifyFNegInst(Op, FMF, Q, MaxRecurse: RecursionLimit); |
5563 | } |
5564 | |
5565 | /// Try to propagate existing NaN values when possible. If not, replace the |
5566 | /// constant or elements in the constant with a canonical NaN. |
5567 | static Constant *propagateNaN(Constant *In) { |
5568 | Type *Ty = In->getType(); |
5569 | if (auto *VecTy = dyn_cast<FixedVectorType>(Val: Ty)) { |
5570 | unsigned NumElts = VecTy->getNumElements(); |
5571 | SmallVector<Constant *, 32> NewC(NumElts); |
5572 | for (unsigned i = 0; i != NumElts; ++i) { |
5573 | Constant *EltC = In->getAggregateElement(Elt: i); |
5574 | // Poison elements propagate. NaN propagates except signaling is quieted. |
5575 | // Replace unknown or undef elements with canonical NaN. |
5576 | if (EltC && isa<PoisonValue>(Val: EltC)) |
5577 | NewC[i] = EltC; |
5578 | else if (EltC && EltC->isNaN()) |
5579 | NewC[i] = ConstantFP::get( |
5580 | Ty: EltC->getType(), V: cast<ConstantFP>(Val: EltC)->getValue().makeQuiet()); |
5581 | else |
5582 | NewC[i] = ConstantFP::getNaN(Ty: VecTy->getElementType()); |
5583 | } |
5584 | return ConstantVector::get(V: NewC); |
5585 | } |
5586 | |
5587 | // If it is not a fixed vector, but not a simple NaN either, return a |
5588 | // canonical NaN. |
5589 | if (!In->isNaN()) |
5590 | return ConstantFP::getNaN(Ty); |
5591 | |
  // If we know this is a NaN and it's a scalable vector, we must have a splat
  // on our hands. Grab that before splatting a QNaN constant.
5594 | if (isa<ScalableVectorType>(Val: Ty)) { |
5595 | auto *Splat = In->getSplatValue(); |
    assert(Splat && Splat->isNaN() &&
           "Found a scalable-vector NaN but not a splat");
5598 | In = Splat; |
5599 | } |
5600 | |
5601 | // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but |
5602 | // preserve the sign/payload. |
5603 | return ConstantFP::get(Ty, V: cast<ConstantFP>(Val: In)->getValue().makeQuiet()); |
5604 | } |
5605 | |
5606 | /// Perform folds that are common to any floating-point operation. This implies |
5607 | /// transforms based on poison/undef/NaN because the operation itself makes no |
5608 | /// difference to the result. |
5609 | static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF, |
5610 | const SimplifyQuery &Q, |
5611 | fp::ExceptionBehavior ExBehavior, |
5612 | RoundingMode Rounding) { |
5613 | // Poison is independent of anything else. It always propagates from an |
5614 | // operand to a math result. |
5615 | if (any_of(Range&: Ops, P: [](Value *V) { return match(V, P: m_Poison()); })) |
5616 | return PoisonValue::get(T: Ops[0]->getType()); |
5617 | |
5618 | for (Value *V : Ops) { |
5619 | bool IsNan = match(V, P: m_NaN()); |
5620 | bool IsInf = match(V, P: m_Inf()); |
5621 | bool IsUndef = Q.isUndefValue(V); |
5622 | |
    // If this operation has 'nnan' or 'ninf' and at least one disallowed
    // operand (an undef operand can be chosen to be NaN/Inf), then the result
    // of this operation is poison.
5626 | if (FMF.noNaNs() && (IsNan || IsUndef)) |
5627 | return PoisonValue::get(T: V->getType()); |
5628 | if (FMF.noInfs() && (IsInf || IsUndef)) |
5629 | return PoisonValue::get(T: V->getType()); |
5630 | |
5631 | if (isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) { |
5632 | // Undef does not propagate because undef means that all bits can take on |
5633 | // any value. If this is undef * NaN for example, then the result values |
5634 | // (at least the exponent bits) are limited. Assume the undef is a |
5635 | // canonical NaN and propagate that. |
5636 | if (IsUndef) |
5637 | return ConstantFP::getNaN(Ty: V->getType()); |
5638 | if (IsNan) |
5639 | return propagateNaN(In: cast<Constant>(Val: V)); |
5640 | } else if (ExBehavior != fp::ebStrict) { |
5641 | if (IsNan) |
5642 | return propagateNaN(In: cast<Constant>(Val: V)); |
5643 | } |
5644 | } |
5645 | return nullptr; |
5646 | } |
5647 | |
5648 | /// Given operands for an FAdd, see if we can fold the result. If not, this |
5649 | /// returns null. |
5650 | static Value * |
5651 | simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5652 | const SimplifyQuery &Q, unsigned MaxRecurse, |
5653 | fp::ExceptionBehavior ExBehavior = fp::ebIgnore, |
5654 | RoundingMode Rounding = RoundingMode::NearestTiesToEven) { |
5655 | if (isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5656 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::FAdd, Op0, Op1, Q)) |
5657 | return C; |
5658 | |
5659 | if (Constant *C = simplifyFPOp(Ops: {Op0, Op1}, FMF, Q, ExBehavior, Rounding)) |
5660 | return C; |
5661 | |
5662 | // fadd X, -0 ==> X |
5663 | // With strict/constrained FP, we have these possible edge cases that do |
5664 | // not simplify to Op0: |
5665 | // fadd SNaN, -0.0 --> QNaN |
5666 | // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative) |
5667 | if (canIgnoreSNaN(EB: ExBehavior, FMF) && |
5668 | (!canRoundingModeBe(RM: Rounding, QRM: RoundingMode::TowardNegative) || |
5669 | FMF.noSignedZeros())) |
5670 | if (match(V: Op1, P: m_NegZeroFP())) |
5671 | return Op0; |
5672 | |
5673 | // fadd X, 0 ==> X, when we know X is not -0 |
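  // (This is not valid for X == -0.0 without nsz: -0.0 + 0.0 is +0.0 under
  // the default rounding mode, which would change the sign of the result.)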
5674 | if (canIgnoreSNaN(EB: ExBehavior, FMF)) |
5675 | if (match(V: Op1, P: m_PosZeroFP()) && |
5676 | (FMF.noSignedZeros() || cannotBeNegativeZero(V: Op0, /*Depth=*/0, SQ: Q))) |
5677 | return Op0; |
5678 | |
5679 | if (!isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5680 | return nullptr; |
5681 | |
5682 | if (FMF.noNaNs()) { |
5683 | // With nnan: X + {+/-}Inf --> {+/-}Inf |
5684 | if (match(V: Op1, P: m_Inf())) |
5685 | return Op1; |
5686 | |
5687 | // With nnan: -X + X --> 0.0 (and commuted variant) |
5688 | // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN. |
5689 | // Negative zeros are allowed because we always end up with positive zero: |
5690 | // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 |
5691 | // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 |
5692 | // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0 |
5693 | // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0 |
5694 | if (match(V: Op0, P: m_FSub(L: m_AnyZeroFP(), R: m_Specific(V: Op1))) || |
5695 | match(V: Op1, P: m_FSub(L: m_AnyZeroFP(), R: m_Specific(V: Op0)))) |
5696 | return ConstantFP::getZero(Ty: Op0->getType()); |
5697 | |
5698 | if (match(V: Op0, P: m_FNeg(X: m_Specific(V: Op1))) || |
5699 | match(V: Op1, P: m_FNeg(X: m_Specific(V: Op0)))) |
5700 | return ConstantFP::getZero(Ty: Op0->getType()); |
5701 | } |
5702 | |
5703 | // (X - Y) + Y --> X |
5704 | // Y + (X - Y) --> X |
5705 | Value *X; |
5706 | if (FMF.noSignedZeros() && FMF.allowReassoc() && |
5707 | (match(V: Op0, P: m_FSub(L: m_Value(V&: X), R: m_Specific(V: Op1))) || |
5708 | match(V: Op1, P: m_FSub(L: m_Value(V&: X), R: m_Specific(V: Op0))))) |
5709 | return X; |
5710 | |
5711 | return nullptr; |
5712 | } |
5713 | |
5714 | /// Given operands for an FSub, see if we can fold the result. If not, this |
5715 | /// returns null. |
5716 | static Value * |
5717 | simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5718 | const SimplifyQuery &Q, unsigned MaxRecurse, |
5719 | fp::ExceptionBehavior ExBehavior = fp::ebIgnore, |
5720 | RoundingMode Rounding = RoundingMode::NearestTiesToEven) { |
5721 | if (isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5722 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::FSub, Op0, Op1, Q)) |
5723 | return C; |
5724 | |
5725 | if (Constant *C = simplifyFPOp(Ops: {Op0, Op1}, FMF, Q, ExBehavior, Rounding)) |
5726 | return C; |
5727 | |
5728 | // fsub X, +0 ==> X |
5729 | if (canIgnoreSNaN(EB: ExBehavior, FMF) && |
5730 | (!canRoundingModeBe(RM: Rounding, QRM: RoundingMode::TowardNegative) || |
5731 | FMF.noSignedZeros())) |
5732 | if (match(V: Op1, P: m_PosZeroFP())) |
5733 | return Op0; |
5734 | |
5735 | // fsub X, -0 ==> X, when we know X is not -0 |
5736 | if (canIgnoreSNaN(EB: ExBehavior, FMF)) |
5737 | if (match(V: Op1, P: m_NegZeroFP()) && |
5738 | (FMF.noSignedZeros() || cannotBeNegativeZero(V: Op0, /*Depth=*/0, SQ: Q))) |
5739 | return Op0; |
5740 | |
5741 | // fsub -0.0, (fsub -0.0, X) ==> X |
5742 | // fsub -0.0, (fneg X) ==> X |
5743 | Value *X; |
5744 | if (canIgnoreSNaN(EB: ExBehavior, FMF)) |
5745 | if (match(V: Op0, P: m_NegZeroFP()) && match(V: Op1, P: m_FNeg(X: m_Value(V&: X)))) |
5746 | return X; |
5747 | |
5748 | // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored. |
5749 | // fsub 0.0, (fneg X) ==> X if signed zeros are ignored. |
5750 | if (canIgnoreSNaN(EB: ExBehavior, FMF)) |
5751 | if (FMF.noSignedZeros() && match(V: Op0, P: m_AnyZeroFP()) && |
5752 | (match(V: Op1, P: m_FSub(L: m_AnyZeroFP(), R: m_Value(V&: X))) || |
5753 | match(V: Op1, P: m_FNeg(X: m_Value(V&: X))))) |
5754 | return X; |
5755 | |
5756 | if (!isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5757 | return nullptr; |
5758 | |
5759 | if (FMF.noNaNs()) { |
5760 | // fsub nnan x, x ==> 0.0 |
5761 | if (Op0 == Op1) |
5762 | return Constant::getNullValue(Ty: Op0->getType()); |
5763 | |
5764 | // With nnan: {+/-}Inf - X --> {+/-}Inf |
5765 | if (match(V: Op0, P: m_Inf())) |
5766 | return Op0; |
5767 | |
5768 | // With nnan: X - {+/-}Inf --> {-/+}Inf |
5769 | if (match(V: Op1, P: m_Inf())) |
5770 | return foldConstant(Opcode: Instruction::FNeg, Op&: Op1, Q); |
5771 | } |
5772 | |
5773 | // Y - (Y - X) --> X |
5774 | // (X + Y) - Y --> X |
5775 | if (FMF.noSignedZeros() && FMF.allowReassoc() && |
5776 | (match(V: Op1, P: m_FSub(L: m_Specific(V: Op0), R: m_Value(V&: X))) || |
5777 | match(V: Op0, P: m_c_FAdd(L: m_Specific(V: Op1), R: m_Value(V&: X))))) |
5778 | return X; |
5779 | |
5780 | return nullptr; |
5781 | } |
5782 | |
5783 | static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, |
5784 | const SimplifyQuery &Q, unsigned MaxRecurse, |
5785 | fp::ExceptionBehavior ExBehavior, |
5786 | RoundingMode Rounding) { |
5787 | if (Constant *C = simplifyFPOp(Ops: {Op0, Op1}, FMF, Q, ExBehavior, Rounding)) |
5788 | return C; |
5789 | |
5790 | if (!isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5791 | return nullptr; |
5792 | |
5793 | // Canonicalize special constants as operand 1. |
5794 | if (match(V: Op0, P: m_FPOne()) || match(V: Op0, P: m_AnyZeroFP())) |
5795 | std::swap(a&: Op0, b&: Op1); |
5796 | |
5797 | // X * 1.0 --> X |
5798 | if (match(V: Op1, P: m_FPOne())) |
5799 | return Op0; |
5800 | |
5801 | if (match(V: Op1, P: m_AnyZeroFP())) { |
5802 | // X * 0.0 --> 0.0 (with nnan and nsz) |
5803 | if (FMF.noNaNs() && FMF.noSignedZeros()) |
5804 | return ConstantFP::getZero(Ty: Op0->getType()); |
5805 | |
5806 | KnownFPClass Known = |
5807 | computeKnownFPClass(V: Op0, FMF, InterestedClasses: fcInf | fcNan, /*Depth=*/0, SQ: Q); |
5808 | if (Known.isKnownNever(Mask: fcInf | fcNan)) { |
5809 | // +normal number * (-)0.0 --> (-)0.0 |
5810 | if (Known.SignBit == false) |
5811 | return Op1; |
5812 | // -normal number * (-)0.0 --> -(-)0.0 |
5813 | if (Known.SignBit == true) |
5814 | return foldConstant(Opcode: Instruction::FNeg, Op&: Op1, Q); |
5815 | } |
5816 | } |
5817 | |
5818 | // sqrt(X) * sqrt(X) --> X, if we can: |
5819 | // 1. Remove the intermediate rounding (reassociate). |
5820 | // 2. Ignore non-zero negative numbers because sqrt would produce NAN. |
5821 | // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0. |
5822 | Value *X; |
5823 | if (Op0 == Op1 && match(V: Op0, P: m_Sqrt(Op0: m_Value(V&: X))) && FMF.allowReassoc() && |
5824 | FMF.noNaNs() && FMF.noSignedZeros()) |
5825 | return X; |
5826 | |
5827 | return nullptr; |
5828 | } |
5829 | |
5830 | /// Given the operands for an FMul, see if we can fold the result |
5831 | static Value * |
5832 | simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5833 | const SimplifyQuery &Q, unsigned MaxRecurse, |
5834 | fp::ExceptionBehavior ExBehavior = fp::ebIgnore, |
5835 | RoundingMode Rounding = RoundingMode::NearestTiesToEven) { |
5836 | if (isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5837 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::FMul, Op0, Op1, Q)) |
5838 | return C; |
5839 | |
5840 | // Now apply simplifications that do not require rounding. |
5841 | return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding); |
5842 | } |
5843 | |
5844 | Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5845 | const SimplifyQuery &Q, |
5846 | fp::ExceptionBehavior ExBehavior, |
5847 | RoundingMode Rounding) { |
5848 | return ::simplifyFAddInst(Op0, Op1, FMF, Q, MaxRecurse: RecursionLimit, ExBehavior, |
5849 | Rounding); |
5850 | } |
5851 | |
5852 | Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5853 | const SimplifyQuery &Q, |
5854 | fp::ExceptionBehavior ExBehavior, |
5855 | RoundingMode Rounding) { |
5856 | return ::simplifyFSubInst(Op0, Op1, FMF, Q, MaxRecurse: RecursionLimit, ExBehavior, |
5857 | Rounding); |
5858 | } |
5859 | |
5860 | Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5861 | const SimplifyQuery &Q, |
5862 | fp::ExceptionBehavior ExBehavior, |
5863 | RoundingMode Rounding) { |
5864 | return ::simplifyFMulInst(Op0, Op1, FMF, Q, MaxRecurse: RecursionLimit, ExBehavior, |
5865 | Rounding); |
5866 | } |
5867 | |
5868 | Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, |
5869 | const SimplifyQuery &Q, |
5870 | fp::ExceptionBehavior ExBehavior, |
5871 | RoundingMode Rounding) { |
5872 | return ::simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse: RecursionLimit, ExBehavior, |
5873 | Rounding); |
5874 | } |
5875 | |
5876 | static Value * |
5877 | simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5878 | const SimplifyQuery &Q, unsigned, |
5879 | fp::ExceptionBehavior ExBehavior = fp::ebIgnore, |
5880 | RoundingMode Rounding = RoundingMode::NearestTiesToEven) { |
5881 | if (isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5882 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::FDiv, Op0, Op1, Q)) |
5883 | return C; |
5884 | |
5885 | if (Constant *C = simplifyFPOp(Ops: {Op0, Op1}, FMF, Q, ExBehavior, Rounding)) |
5886 | return C; |
5887 | |
5888 | if (!isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5889 | return nullptr; |
5890 | |
5891 | // X / 1.0 -> X |
5892 | if (match(V: Op1, P: m_FPOne())) |
5893 | return Op0; |
5894 | |
5895 | // 0 / X -> 0 |
5896 | // Requires that NaNs are off (X could be zero) and signed zeroes are |
5897 | // ignored (X could be positive or negative, so the output sign is unknown). |
5898 | if (FMF.noNaNs() && FMF.noSignedZeros() && match(V: Op0, P: m_AnyZeroFP())) |
5899 | return ConstantFP::getZero(Ty: Op0->getType()); |
5900 | |
5901 | if (FMF.noNaNs()) { |
5902 | // X / X -> 1.0 is legal when NaNs are ignored. |
5903 | // We can ignore infinities because INF/INF is NaN. |
5904 | if (Op0 == Op1) |
5905 | return ConstantFP::get(Ty: Op0->getType(), V: 1.0); |
5906 | |
5907 | // (X * Y) / Y --> X if we can reassociate to the above form. |
5908 | Value *X; |
5909 | if (FMF.allowReassoc() && match(V: Op0, P: m_c_FMul(L: m_Value(V&: X), R: m_Specific(V: Op1)))) |
5910 | return X; |
5911 | |
5912 | // -X / X -> -1.0 and |
5913 | // X / -X -> -1.0 are legal when NaNs are ignored. |
5914 | // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored. |
5915 | if (match(V: Op0, P: m_FNegNSZ(X: m_Specific(V: Op1))) || |
5916 | match(V: Op1, P: m_FNegNSZ(X: m_Specific(V: Op0)))) |
5917 | return ConstantFP::get(Ty: Op0->getType(), V: -1.0); |
5918 | |
5919 | // nnan ninf X / [-]0.0 -> poison |
5920 | if (FMF.noInfs() && match(V: Op1, P: m_AnyZeroFP())) |
5921 | return PoisonValue::get(T: Op1->getType()); |
5922 | } |
5923 | |
5924 | return nullptr; |
5925 | } |
5926 | |
5927 | Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5928 | const SimplifyQuery &Q, |
5929 | fp::ExceptionBehavior ExBehavior, |
5930 | RoundingMode Rounding) { |
5931 | return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior, |
5932 | Rounding); |
5933 | } |
5934 | |
5935 | static Value * |
5936 | simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5937 | const SimplifyQuery &Q, unsigned, |
5938 | fp::ExceptionBehavior ExBehavior = fp::ebIgnore, |
5939 | RoundingMode Rounding = RoundingMode::NearestTiesToEven) { |
5940 | if (isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5941 | if (Constant *C = foldOrCommuteConstant(Opcode: Instruction::FRem, Op0, Op1, Q)) |
5942 | return C; |
5943 | |
5944 | if (Constant *C = simplifyFPOp(Ops: {Op0, Op1}, FMF, Q, ExBehavior, Rounding)) |
5945 | return C; |
5946 | |
5947 | if (!isDefaultFPEnvironment(EB: ExBehavior, RM: Rounding)) |
5948 | return nullptr; |
5949 | |
5950 | // Unlike fdiv, the result of frem always matches the sign of the dividend. |
5951 | // The constant match may include undef elements in a vector, so return a full |
5952 | // zero constant as the result. |
5953 | if (FMF.noNaNs()) { |
5954 | // +0 % X -> 0 |
5955 | if (match(V: Op0, P: m_PosZeroFP())) |
5956 | return ConstantFP::getZero(Ty: Op0->getType()); |
5957 | // -0 % X -> -0 |
5958 | if (match(V: Op0, P: m_NegZeroFP())) |
5959 | return ConstantFP::getNegativeZero(Ty: Op0->getType()); |
5960 | } |
5961 | |
5962 | return nullptr; |
5963 | } |
5964 | |
5965 | Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, |
5966 | const SimplifyQuery &Q, |
5967 | fp::ExceptionBehavior ExBehavior, |
5968 | RoundingMode Rounding) { |
5969 | return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior, |
5970 | Rounding); |
5971 | } |
5972 | |
5973 | //=== Helper functions for higher up the class hierarchy. |
5974 | |
5975 | /// Given the operand for a UnaryOperator, see if we can fold the result. |
5976 | /// If not, this returns null. |
5977 | static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q, |
5978 | unsigned MaxRecurse) { |
5979 | switch (Opcode) { |
5980 | case Instruction::FNeg: |
5981 | return simplifyFNegInst(Op, FMF: FastMathFlags(), Q, MaxRecurse); |
5982 | default: |
5983 | llvm_unreachable("Unexpected opcode" ); |
5984 | } |
5985 | } |
5986 | |
5987 | /// Given the operand for a UnaryOperator, see if we can fold the result. |
5988 | /// If not, this returns null. |
5989 | /// Try to use FastMathFlags when folding the result. |
5990 | static Value *simplifyFPUnOp(unsigned Opcode, Value *Op, |
5991 | const FastMathFlags &FMF, const SimplifyQuery &Q, |
5992 | unsigned MaxRecurse) { |
5993 | switch (Opcode) { |
5994 | case Instruction::FNeg: |
5995 | return simplifyFNegInst(Op, FMF, Q, MaxRecurse); |
5996 | default: |
5997 | return simplifyUnOp(Opcode, Op, Q, MaxRecurse); |
5998 | } |
5999 | } |
6000 | |
6001 | Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) { |
6002 | return ::simplifyUnOp(Opcode, Op, Q, MaxRecurse: RecursionLimit); |
6003 | } |
6004 | |
6005 | Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF, |
6006 | const SimplifyQuery &Q) { |
6007 | return ::simplifyFPUnOp(Opcode, Op, FMF, Q, MaxRecurse: RecursionLimit); |
6008 | } |
6009 | |
6010 | /// Given operands for a BinaryOperator, see if we can fold the result. |
6011 | /// If not, this returns null. |
6012 | static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, |
6013 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
6014 | switch (Opcode) { |
6015 | case Instruction::Add: |
6016 | return simplifyAddInst(Op0: LHS, Op1: RHS, /* IsNSW */ false, /* IsNUW */ false, Q, |
6017 | MaxRecurse); |
6018 | case Instruction::Sub: |
6019 | return simplifySubInst(Op0: LHS, Op1: RHS, /* IsNSW */ false, /* IsNUW */ false, Q, |
6020 | MaxRecurse); |
6021 | case Instruction::Mul: |
6022 | return simplifyMulInst(Op0: LHS, Op1: RHS, /* IsNSW */ false, /* IsNUW */ false, Q, |
6023 | MaxRecurse); |
6024 | case Instruction::SDiv: |
6025 | return simplifySDivInst(Op0: LHS, Op1: RHS, /* IsExact */ false, Q, MaxRecurse); |
6026 | case Instruction::UDiv: |
6027 | return simplifyUDivInst(Op0: LHS, Op1: RHS, /* IsExact */ false, Q, MaxRecurse); |
6028 | case Instruction::SRem: |
6029 | return simplifySRemInst(Op0: LHS, Op1: RHS, Q, MaxRecurse); |
6030 | case Instruction::URem: |
6031 | return simplifyURemInst(Op0: LHS, Op1: RHS, Q, MaxRecurse); |
6032 | case Instruction::Shl: |
6033 | return simplifyShlInst(Op0: LHS, Op1: RHS, /* IsNSW */ false, /* IsNUW */ false, Q, |
6034 | MaxRecurse); |
6035 | case Instruction::LShr: |
6036 | return simplifyLShrInst(Op0: LHS, Op1: RHS, /* IsExact */ false, Q, MaxRecurse); |
6037 | case Instruction::AShr: |
6038 | return simplifyAShrInst(Op0: LHS, Op1: RHS, /* IsExact */ false, Q, MaxRecurse); |
6039 | case Instruction::And: |
6040 | return simplifyAndInst(Op0: LHS, Op1: RHS, Q, MaxRecurse); |
6041 | case Instruction::Or: |
6042 | return simplifyOrInst(Op0: LHS, Op1: RHS, Q, MaxRecurse); |
6043 | case Instruction::Xor: |
6044 | return simplifyXorInst(Op0: LHS, Op1: RHS, Q, MaxRecurse); |
6045 | case Instruction::FAdd: |
6046 | return simplifyFAddInst(Op0: LHS, Op1: RHS, FMF: FastMathFlags(), Q, MaxRecurse); |
6047 | case Instruction::FSub: |
6048 | return simplifyFSubInst(Op0: LHS, Op1: RHS, FMF: FastMathFlags(), Q, MaxRecurse); |
6049 | case Instruction::FMul: |
6050 | return simplifyFMulInst(Op0: LHS, Op1: RHS, FMF: FastMathFlags(), Q, MaxRecurse); |
6051 | case Instruction::FDiv: |
6052 | return simplifyFDivInst(Op0: LHS, Op1: RHS, FMF: FastMathFlags(), Q, MaxRecurse); |
6053 | case Instruction::FRem: |
6054 | return simplifyFRemInst(Op0: LHS, Op1: RHS, FMF: FastMathFlags(), Q, MaxRecurse); |
6055 | default: |
6056 | llvm_unreachable("Unexpected opcode" ); |
6057 | } |
6058 | } |
6059 | |
6060 | /// Given operands for a BinaryOperator, see if we can fold the result. |
6061 | /// If not, this returns null. |
6062 | /// Try to use FastMathFlags when folding the result. |
6063 | static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, |
6064 | const FastMathFlags &FMF, const SimplifyQuery &Q, |
6065 | unsigned MaxRecurse) { |
6066 | switch (Opcode) { |
6067 | case Instruction::FAdd: |
6068 | return simplifyFAddInst(Op0: LHS, Op1: RHS, FMF, Q, MaxRecurse); |
6069 | case Instruction::FSub: |
6070 | return simplifyFSubInst(Op0: LHS, Op1: RHS, FMF, Q, MaxRecurse); |
6071 | case Instruction::FMul: |
6072 | return simplifyFMulInst(Op0: LHS, Op1: RHS, FMF, Q, MaxRecurse); |
6073 | case Instruction::FDiv: |
6074 | return simplifyFDivInst(Op0: LHS, Op1: RHS, FMF, Q, MaxRecurse); |
6075 | default: |
6076 | return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); |
6077 | } |
6078 | } |
6079 | |
6080 | Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, |
6081 | const SimplifyQuery &Q) { |
6082 | return ::simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse: RecursionLimit); |
6083 | } |
6084 | |
6085 | Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, |
6086 | FastMathFlags FMF, const SimplifyQuery &Q) { |
6087 | return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, MaxRecurse: RecursionLimit); |
6088 | } |
6089 | |
6090 | /// Given operands for a CmpInst, see if we can fold the result. |
6091 | static Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
6092 | const SimplifyQuery &Q, unsigned MaxRecurse) { |
6093 | if (CmpInst::isIntPredicate(P: (CmpInst::Predicate)Predicate)) |
6094 | return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); |
6095 | return simplifyFCmpInst(Predicate, LHS, RHS, FMF: FastMathFlags(), Q, MaxRecurse); |
6096 | } |
6097 | |
6098 | Value *llvm::simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, |
6099 | const SimplifyQuery &Q) { |
6100 | return ::simplifyCmpInst(Predicate, LHS, RHS, Q, MaxRecurse: RecursionLimit); |
6101 | } |
6102 | |
6103 | static bool isIdempotent(Intrinsic::ID ID) { |
6104 | switch (ID) { |
6105 | default: |
6106 | return false; |
6107 | |
6108 | // Unary idempotent: f(f(x)) = f(x) |
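  // e.g. fabs (fabs X) --> fabs X, floor (floor X) --> floor X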
6109 | case Intrinsic::fabs: |
6110 | case Intrinsic::floor: |
6111 | case Intrinsic::ceil: |
6112 | case Intrinsic::trunc: |
6113 | case Intrinsic::rint: |
6114 | case Intrinsic::nearbyint: |
6115 | case Intrinsic::round: |
6116 | case Intrinsic::roundeven: |
6117 | case Intrinsic::canonicalize: |
6118 | case Intrinsic::arithmetic_fence: |
6119 | return true; |
6120 | } |
6121 | } |
6122 | |
6123 | /// Return true if the intrinsic rounds a floating-point value to an integral |
6124 | /// floating-point value (not an integer type). |
6125 | static bool removesFPFraction(Intrinsic::ID ID) { |
6126 | switch (ID) { |
6127 | default: |
6128 | return false; |
6129 | |
6130 | case Intrinsic::floor: |
6131 | case Intrinsic::ceil: |
6132 | case Intrinsic::trunc: |
6133 | case Intrinsic::rint: |
6134 | case Intrinsic::nearbyint: |
6135 | case Intrinsic::round: |
6136 | case Intrinsic::roundeven: |
6137 | return true; |
6138 | } |
6139 | } |
6140 | |
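/// Try to fold a load of a relative pointer (the pattern used by
/// llvm.load.relative): if the 32-bit value at Ptr+Offset is known to be
/// the constant "(sub (ptrtoint Target), (ptrtoint Ptr))", possibly behind a
/// trunc, then the whole relative load resolves to Target.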
6141 | static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset, |
6142 | const DataLayout &DL) { |
6143 | GlobalValue *PtrSym; |
6144 | APInt PtrOffset; |
6145 | if (!IsConstantOffsetFromGlobal(C: Ptr, GV&: PtrSym, Offset&: PtrOffset, DL)) |
6146 | return nullptr; |
6147 | |
6148 | Type *Int32Ty = Type::getInt32Ty(C&: Ptr->getContext()); |
6149 | |
6150 | auto *OffsetConstInt = dyn_cast<ConstantInt>(Val: Offset); |
6151 | if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64) |
6152 | return nullptr; |
6153 | |
6154 | APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc( |
6155 | width: DL.getIndexTypeSizeInBits(Ty: Ptr->getType())); |
6156 | if (OffsetInt.srem(RHS: 4) != 0) |
6157 | return nullptr; |
6158 | |
6159 | Constant *Loaded = |
6160 | ConstantFoldLoadFromConstPtr(C: Ptr, Ty: Int32Ty, Offset: std::move(OffsetInt), DL); |
6161 | if (!Loaded) |
6162 | return nullptr; |
6163 | |
6164 | auto *LoadedCE = dyn_cast<ConstantExpr>(Val: Loaded); |
6165 | if (!LoadedCE) |
6166 | return nullptr; |
6167 | |
6168 | if (LoadedCE->getOpcode() == Instruction::Trunc) { |
6169 | LoadedCE = dyn_cast<ConstantExpr>(Val: LoadedCE->getOperand(i_nocapture: 0)); |
6170 | if (!LoadedCE) |
6171 | return nullptr; |
6172 | } |
6173 | |
6174 | if (LoadedCE->getOpcode() != Instruction::Sub) |
6175 | return nullptr; |
6176 | |
6177 | auto *LoadedLHS = dyn_cast<ConstantExpr>(Val: LoadedCE->getOperand(i_nocapture: 0)); |
6178 | if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) |
6179 | return nullptr; |
6180 | auto *LoadedLHSPtr = LoadedLHS->getOperand(i_nocapture: 0); |
6181 | |
6182 | Constant *LoadedRHS = LoadedCE->getOperand(i_nocapture: 1); |
6183 | GlobalValue *LoadedRHSSym; |
6184 | APInt LoadedRHSOffset; |
6185 | if (!IsConstantOffsetFromGlobal(C: LoadedRHS, GV&: LoadedRHSSym, Offset&: LoadedRHSOffset, |
6186 | DL) || |
6187 | PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) |
6188 | return nullptr; |
6189 | |
6190 | return LoadedLHSPtr; |
6191 | } |
6192 | |
6193 | // TODO: Need to pass in FastMathFlags |
6194 | static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, |
6195 | bool IsStrict) { |
6196 | // ldexp(poison, x) -> poison |
6197 | // ldexp(x, poison) -> poison |
6198 | if (isa<PoisonValue>(Val: Op0) || isa<PoisonValue>(Val: Op1)) |
6199 | return Op0; |
6200 | |
6201 | // ldexp(undef, x) -> nan |
6202 | if (Q.isUndefValue(V: Op0)) |
6203 | return ConstantFP::getNaN(Ty: Op0->getType()); |
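  // (An undef first operand may be chosen to be NaN, and ldexp of NaN is
  // NaN, so a canonical NaN is a valid result.)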
6204 | |
6205 | if (!IsStrict) { |
6206 | // TODO: Could insert a canonicalize for strict |
6207 | |
6208 | // ldexp(x, undef) -> x |
6209 | if (Q.isUndefValue(V: Op1)) |
6210 | return Op0; |
6211 | } |
6212 | |
6213 | const APFloat *C = nullptr; |
6214 | match(V: Op0, P: PatternMatch::m_APFloat(Res&: C)); |
6215 | |
6216 | // These cases should be safe, even with strictfp. |
6217 | // ldexp(0.0, x) -> 0.0 |
6218 | // ldexp(-0.0, x) -> -0.0 |
6219 | // ldexp(inf, x) -> inf |
6220 | // ldexp(-inf, x) -> -inf |
6221 | if (C && (C->isZero() || C->isInfinity())) |
6222 | return Op0; |
6223 | |
  // These folds drop canonicalization; we could do them if we knew how to
  // ignore denormal flushes and target handling of NaN payload bits.
6226 | if (IsStrict) |
6227 | return nullptr; |
6228 | |
6229 | // TODO: Could quiet this with strictfp if the exception mode isn't strict. |
6230 | if (C && C->isNaN()) |
6231 | return ConstantFP::get(Ty: Op0->getType(), V: C->makeQuiet()); |
6232 | |
6233 | // ldexp(x, 0) -> x |
6234 | |
6235 | // TODO: Could fold this if we know the exception mode isn't |
6236 | // strict, we know the denormal mode and other target modes. |
6237 | if (match(V: Op1, P: PatternMatch::m_ZeroInt())) |
6238 | return Op0; |
6239 | |
6240 | return nullptr; |
6241 | } |
6242 | |
6243 | static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0, |
6244 | const SimplifyQuery &Q, |
6245 | const CallBase *Call) { |
6246 | // Idempotent functions return the same result when called repeatedly. |
6247 | Intrinsic::ID IID = F->getIntrinsicID(); |
6248 | if (isIdempotent(ID: IID)) |
6249 | if (auto *II = dyn_cast<IntrinsicInst>(Val: Op0)) |
6250 | if (II->getIntrinsicID() == IID) |
6251 | return II; |
6252 | |
6253 | if (removesFPFraction(ID: IID)) { |
6254 | // Converting from int or calling a rounding function always results in a |
6255 | // finite integral number or infinity. For those inputs, rounding functions |
6256 | // always return the same value, so the (2nd) rounding is eliminated. Ex: |
6257 | // floor (sitofp x) -> sitofp x |
6258 | // round (ceil x) -> ceil x |
6259 | auto *II = dyn_cast<IntrinsicInst>(Val: Op0); |
6260 | if ((II && removesFPFraction(ID: II->getIntrinsicID())) || |
6261 | match(V: Op0, P: m_SIToFP(Op: m_Value())) || match(V: Op0, P: m_UIToFP(Op: m_Value()))) |
6262 | return Op0; |
6263 | } |
6264 | |
6265 | Value *X; |
6266 | switch (IID) { |
6267 | case Intrinsic::fabs: |
6268 | if (computeKnownFPSignBit(V: Op0, /*Depth=*/0, SQ: Q) == false) |
6269 | return Op0; |
6270 | break; |
6271 | case Intrinsic::bswap: |
6272 | // bswap(bswap(x)) -> x |
6273 | if (match(V: Op0, P: m_BSwap(Op0: m_Value(V&: X)))) |
6274 | return X; |
6275 | break; |
6276 | case Intrinsic::bitreverse: |
6277 | // bitreverse(bitreverse(x)) -> x |
6278 | if (match(V: Op0, P: m_BitReverse(Op0: m_Value(V&: X)))) |
6279 | return X; |
6280 | break; |
6281 | case Intrinsic::ctpop: { |
    // ctpop(X) -> 1 iff X is a non-zero power of 2.
6283 | if (isKnownToBeAPowerOfTwo(V: Op0, DL: Q.DL, /*OrZero*/ false, Depth: 0, AC: Q.AC, CxtI: Q.CxtI, |
6284 | DT: Q.DT)) |
6285 | return ConstantInt::get(Ty: Op0->getType(), V: 1); |
6286 | // If everything but the lowest bit is zero, that bit is the pop-count. Ex: |
6287 | // ctpop(and X, 1) --> and X, 1 |
6288 | unsigned BitWidth = Op0->getType()->getScalarSizeInBits(); |
6289 | if (MaskedValueIsZero(V: Op0, Mask: APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - 1), |
6290 | DL: Q)) |
6291 | return Op0; |
6292 | break; |
6293 | } |
6294 | case Intrinsic::exp: |
6295 | // exp(log(x)) -> x |
6296 | if (Call->hasAllowReassoc() && |
6297 | match(V: Op0, P: m_Intrinsic<Intrinsic::log>(Op0: m_Value(V&: X)))) |
6298 | return X; |
6299 | break; |
6300 | case Intrinsic::exp2: |
6301 | // exp2(log2(x)) -> x |
6302 | if (Call->hasAllowReassoc() && |
6303 | match(V: Op0, P: m_Intrinsic<Intrinsic::log2>(Op0: m_Value(V&: X)))) |
6304 | return X; |
6305 | break; |
6306 | case Intrinsic::exp10: |
6307 | // exp10(log10(x)) -> x |
6308 | if (Call->hasAllowReassoc() && |
6309 | match(V: Op0, P: m_Intrinsic<Intrinsic::log10>(Op0: m_Value(V&: X)))) |
6310 | return X; |
6311 | break; |
6312 | case Intrinsic::log: |
6313 | // log(exp(x)) -> x |
6314 | if (Call->hasAllowReassoc() && |
6315 | match(V: Op0, P: m_Intrinsic<Intrinsic::exp>(Op0: m_Value(V&: X)))) |
6316 | return X; |
6317 | break; |
6318 | case Intrinsic::log2: |
6319 | // log2(exp2(x)) -> x |
6320 | if (Call->hasAllowReassoc() && |
6321 | (match(V: Op0, P: m_Intrinsic<Intrinsic::exp2>(Op0: m_Value(V&: X))) || |
6322 | match(V: Op0, |
6323 | P: m_Intrinsic<Intrinsic::pow>(Op0: m_SpecificFP(V: 2.0), Op1: m_Value(V&: X))))) |
6324 | return X; |
6325 | break; |
6326 | case Intrinsic::log10: |
6327 | // log10(pow(10.0, x)) -> x |
6328 | // log10(exp10(x)) -> x |
6329 | if (Call->hasAllowReassoc() && |
6330 | (match(V: Op0, P: m_Intrinsic<Intrinsic::exp10>(Op0: m_Value(V&: X))) || |
6331 | match(V: Op0, |
6332 | P: m_Intrinsic<Intrinsic::pow>(Op0: m_SpecificFP(V: 10.0), Op1: m_Value(V&: X))))) |
6333 | return X; |
6334 | break; |
6335 | case Intrinsic::vector_reverse: |
6336 | // vector.reverse(vector.reverse(x)) -> x |
6337 | if (match(V: Op0, P: m_VecReverse(Op0: m_Value(V&: X)))) |
6338 | return X; |
6339 | // vector.reverse(splat(X)) -> splat(X) |
6340 | if (isSplatValue(V: Op0)) |
6341 | return Op0; |
6342 | break; |
6343 | case Intrinsic::frexp: { |
6344 | // Frexp is idempotent with the added complication of the struct return. |
6345 | if (match(V: Op0, P: m_ExtractValue<0>(V: m_Value(V&: X)))) { |
6346 | if (match(V: X, P: m_Intrinsic<Intrinsic::frexp>(Op0: m_Value()))) |
6347 | return X; |
6348 | } |
6349 | |
6350 | break; |
6351 | } |
6352 | default: |
6353 | break; |
6354 | } |
6355 | |
6356 | return nullptr; |
6357 | } |
6358 | |
6359 | /// Given a min/max intrinsic, see if it can be removed based on having an |
6360 | /// operand that is another min/max intrinsic with shared operand(s). The caller |
6361 | /// is expected to swap the operand arguments to handle commutation. |
6362 | static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) { |
6363 | Value *X, *Y; |
6364 | if (!match(V: Op0, P: m_MaxOrMin(L: m_Value(V&: X), R: m_Value(V&: Y)))) |
6365 | return nullptr; |
6366 | |
6367 | auto *MM0 = dyn_cast<IntrinsicInst>(Val: Op0); |
6368 | if (!MM0) |
6369 | return nullptr; |
6370 | Intrinsic::ID IID0 = MM0->getIntrinsicID(); |
6371 | |
6372 | if (Op1 == X || Op1 == Y || |
6373 | match(V: Op1, P: m_c_MaxOrMin(L: m_Specific(V: X), R: m_Specific(V: Y)))) { |
6374 | // max (max X, Y), X --> max X, Y |
6375 | if (IID0 == IID) |
6376 | return MM0; |
6377 | // max (min X, Y), X --> X |
6378 | if (IID0 == getInverseMinMaxIntrinsic(MinMaxID: IID)) |
6379 | return Op1; |
6380 | } |
6381 | return nullptr; |
6382 | } |
6383 | |
6384 | /// Given a min/max intrinsic, see if it can be removed based on having an |
6385 | /// operand that is another min/max intrinsic with shared operand(s). The caller |
6386 | /// is expected to swap the operand arguments to handle commutation. |
6387 | static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, |
6388 | Value *Op1) { |
6389 | assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum || |
6390 | IID == Intrinsic::maximum || IID == Intrinsic::minimum) && |
6391 | "Unsupported intrinsic" ); |
6392 | |
6393 | auto *M0 = dyn_cast<IntrinsicInst>(Val: Op0); |
  // If Op0 is not the same intrinsic as IID, do not process.
  // This differs from the integer min/max handling: we do not process cases
  // like max(min(X,Y), min(X,Y)) => min(X,Y) here, but GVN can handle them.
6397 | if (!M0 || M0->getIntrinsicID() != IID) |
6398 | return nullptr; |
6399 | Value *X0 = M0->getOperand(i_nocapture: 0); |
6400 | Value *Y0 = M0->getOperand(i_nocapture: 1); |
6401 | // Simple case, m(m(X,Y), X) => m(X, Y) |
6402 | // m(m(X,Y), Y) => m(X, Y) |
6403 | // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN. |
6404 | // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN. |
6405 | // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y. |
6406 | // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X. |
6407 | if (X0 == Op1 || Y0 == Op1) |
6408 | return M0; |
6409 | |
6410 | auto *M1 = dyn_cast<IntrinsicInst>(Val: Op1); |
6411 | if (!M1) |
6412 | return nullptr; |
6413 | Value *X1 = M1->getOperand(i_nocapture: 0); |
6414 | Value *Y1 = M1->getOperand(i_nocapture: 1); |
6415 | Intrinsic::ID IID1 = M1->getIntrinsicID(); |
  // We have the case m(m(X,Y), m'(X,Y)), taking into account that m' is
  // commutative. If m' is m or the inversion of m, then
  // m(m(X,Y), m'(X,Y)) == m(X,Y).
6418 | // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN. |
6419 | // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN. |
6420 | // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y. |
6421 | // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X. |
6422 | if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1)) |
6423 | if (IID1 == IID || getInverseMinMaxIntrinsic(MinMaxID: IID1) == IID) |
6424 | return M0; |
6425 | |
6426 | return nullptr; |
6427 | } |
6428 | |
6429 | Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, |
6430 | Value *Op0, Value *Op1, |
6431 | const SimplifyQuery &Q, |
6432 | const CallBase *Call) { |
6433 | unsigned BitWidth = ReturnType->getScalarSizeInBits(); |
6434 | switch (IID) { |
6435 | case Intrinsic::abs: |
  // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
  // It is always ok to pick the earlier abs. We'll just lose nsw if it's
  // only on the outer abs.
6439 | if (match(V: Op0, P: m_Intrinsic<Intrinsic::abs>(Op0: m_Value(), Op1: m_Value()))) |
6440 | return Op0; |
6441 | break; |
6442 | |
6443 | case Intrinsic::cttz: { |
6444 | Value *X; |
6445 | if (match(V: Op0, P: m_Shl(L: m_One(), R: m_Value(V&: X)))) |
6446 | return X; |
6447 | break; |
6448 | } |
6449 | case Intrinsic::ctlz: { |
6450 | Value *X; |
    if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
      return X;
    if (match(Op0, m_AShr(m_Negative(), m_Value())))
      return Constant::getNullValue(ReturnType);
6455 | break; |
6456 | } |
6457 | case Intrinsic::ptrmask: { |
    if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
      return PoisonValue::get(Op0->getType());
6460 | |
    // NOTE: We can't apply these simplifications based on the value of Op1
    // because we need to preserve provenance.
    if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
      return Constant::getNullValue(Op0->getType());
6465 | |
6466 | assert(Op1->getType()->getScalarSizeInBits() == |
6467 | Q.DL.getIndexTypeSizeInBits(Op0->getType()) && |
6468 | "Invalid mask width" ); |
6469 | // If index-width (mask size) is less than pointer-size then mask is |
6470 | // 1-extended. |
    if (match(Op1, m_PtrToInt(m_Specific(Op0))))
6472 | return Op0; |
6473 | |
6474 | // NOTE: We may have attributes associated with the return value of the |
6475 | // llvm.ptrmask intrinsic that will be lost when we just return the |
6476 | // operand. We should try to preserve them. |
    if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6478 | return Op0; |
6479 | |
6480 | Constant *C; |
    if (match(Op1, m_ImmConstant(C))) {
      KnownBits PtrKnown = computeKnownBits(Op0, /*Depth=*/0, Q);
      // See if we are only masking off bits we know are already zero due to
      // alignment.
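      // e.g. ptrmask(ptr %p, -8) --> %p when %p is known 8-byte aligned,
      // since the low three bits are already zero.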
      APInt IrrelevantPtrBits =
          PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
      C = ConstantFoldBinaryOpOperands(
          Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
          Q.DL);
6490 | if (C != nullptr && C->isAllOnesValue()) |
6491 | return Op0; |
6492 | } |
6493 | break; |
6494 | } |
6495 | case Intrinsic::smax: |
6496 | case Intrinsic::smin: |
6497 | case Intrinsic::umax: |
6498 | case Intrinsic::umin: { |
6499 | // If the arguments are the same, this is a no-op. |
6500 | if (Op0 == Op1) |
6501 | return Op0; |
6502 | |
6503 | // Canonicalize immediate constant operand as Op1. |
    if (match(Op0, m_ImmConstant()))
      std::swap(Op0, Op1);
6506 | |
6507 | // Assume undef is the limit value. |
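    // e.g. smax(X, undef) --> SIGNED_MAX, since undef may take that value.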
    if (Q.isUndefValue(Op1))
      return ConstantInt::get(
          ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6511 | |
6512 | const APInt *C; |
    if (match(Op1, m_APIntAllowPoison(C))) {
6514 | // Clamp to limit value. For example: |
6515 | // umax(i8 %x, i8 255) --> 255 |
      if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
        return ConstantInt::get(ReturnType, *C);
6518 | |
6519 | // If the constant op is the opposite of the limit value, the other must |
6520 | // be larger/smaller or equal. For example: |
6521 | // umin(i8 %x, i8 255) --> %x |
      if (*C == MinMaxIntrinsic::getSaturationPoint(
                    getInverseMinMaxIntrinsic(IID), BitWidth))
6524 | return Op0; |
6525 | |
6526 | // Remove nested call if constant operands allow it. Example: |
6527 | // max (max X, 7), 5 -> max X, 7 |
      auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
      if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
        // TODO: loosen undef/splat restrictions for vector constants.
        Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
        const APInt *InnerC;
        if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
            ICmpInst::compare(*InnerC, *C,
                              ICmpInst::getNonStrictPredicate(
                                  MinMaxIntrinsic::getPredicate(IID))))
6537 | return Op0; |
6538 | } |
6539 | } |
6540 | |
    if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
      return V;
    if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6544 | return V; |
6545 | |
    ICmpInst::Predicate Pred =
        ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
    if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
      return Op0;
    if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6551 | return Op1; |
6552 | |
6553 | break; |
6554 | } |
6555 | case Intrinsic::scmp: |
6556 | case Intrinsic::ucmp: { |
    // Fold to a constant if the relationship between operands can be
    // established with certainty.
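    // e.g. ucmp(X, X) --> 0, and scmp(X, Y) --> 1 when X > Y is provable.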
    if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
      return Constant::getNullValue(ReturnType);

    ICmpInst::Predicate PredGT =
        IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
      return ConstantInt::get(ReturnType, 1);

    ICmpInst::Predicate PredLT =
        IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
      return ConstantInt::getSigned(ReturnType, -1);
6571 | |
6572 | break; |
6573 | } |
6574 | case Intrinsic::usub_with_overflow: |
6575 | case Intrinsic::ssub_with_overflow: |
6576 | // X - X -> { 0, false } |
6577 | // X - undef -> { 0, false } |
6578 | // undef - X -> { 0, false } |
    if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
6581 | break; |
6582 | case Intrinsic::uadd_with_overflow: |
6583 | case Intrinsic::sadd_with_overflow: |
6584 | // X + undef -> { -1, false } |
6585 | // undef + x -> { -1, false } |
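    // Choosing undef == ~X gives X + ~X == -1 with no overflow, so
    // { -1, false } is a valid result.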
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
      return ConstantStruct::get(
          cast<StructType>(ReturnType),
          {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
           Constant::getNullValue(ReturnType->getStructElementType(1))});
6591 | } |
6592 | break; |
6593 | case Intrinsic::umul_with_overflow: |
6594 | case Intrinsic::smul_with_overflow: |
6595 | // 0 * X -> { 0, false } |
6596 | // X * 0 -> { 0, false } |
    if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);
6599 | // undef * X -> { 0, false } |
6600 | // X * undef -> { 0, false } |
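    // Choosing undef == 0 gives a zero product with no overflow.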
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
6603 | break; |
6604 | case Intrinsic::uadd_sat: |
6605 | // sat(MAX + X) -> MAX |
6606 | // sat(X + MAX) -> MAX |
    if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
      return Constant::getAllOnesValue(ReturnType);
6609 | [[fallthrough]]; |
6610 | case Intrinsic::sadd_sat: |
6611 | // sat(X + undef) -> -1 |
6612 | // sat(undef + X) -> -1 |
6613 | // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1). |
6614 | // For signed: Assume undef is ~X, in which case X + ~X = -1. |
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getAllOnesValue(ReturnType);
6617 | |
6618 | // X + 0 -> X |
    if (match(Op1, m_Zero()))
6620 | return Op0; |
6621 | // 0 + X -> X |
    if (match(Op0, m_Zero()))
6623 | return Op1; |
6624 | break; |
6625 | case Intrinsic::usub_sat: |
6626 | // sat(0 - X) -> 0, sat(X - MAX) -> 0 |
    if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
      return Constant::getNullValue(ReturnType);
6629 | [[fallthrough]]; |
6630 | case Intrinsic::ssub_sat: |
6631 | // X - X -> 0, X - undef -> 0, undef - X -> 0 |
    if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
6634 | // X - 0 -> X |
    if (match(Op1, m_Zero()))
6636 | return Op0; |
6637 | break; |
6638 | case Intrinsic::load_relative: |
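    // With both operands constant, try to fold the load at offset Op1 from
    // the constant pointer Op0.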
    if (auto *C0 = dyn_cast<Constant>(Op0))
      if (auto *C1 = dyn_cast<Constant>(Op1))
        return simplifyRelativeLoad(C0, C1, Q.DL);
6642 | break; |
6643 | case Intrinsic::powi: |
    if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6645 | // powi(x, 0) -> 1.0 |
6646 | if (Power->isZero()) |
        return ConstantFP::get(Op0->getType(), 1.0);
6648 | // powi(x, 1) -> x |
6649 | if (Power->isOne()) |
6650 | return Op0; |
6651 | } |
6652 | break; |
6653 | case Intrinsic::ldexp: |
    return simplifyLdexp(Op0, Op1, Q, /*IsStrict=*/false);
6655 | case Intrinsic::copysign: |
6656 | // copysign X, X --> X |
6657 | if (Op0 == Op1) |
6658 | return Op0; |
6659 | // copysign -X, X --> X |
6660 | // copysign X, -X --> -X |
    if (match(Op0, m_FNeg(m_Specific(Op1))) ||
        match(Op1, m_FNeg(m_Specific(Op0))))
      return Op1;
6664 | break; |
6665 | case Intrinsic::is_fpclass: { |
    if (isa<PoisonValue>(Op0))
      return PoisonValue::get(ReturnType);

    uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
    // If all tests are made, it doesn't matter what the value is.
    if ((Mask & fcAllFlags) == fcAllFlags)
      return ConstantInt::get(ReturnType, true);
    if ((Mask & fcAllFlags) == 0)
      return ConstantInt::get(ReturnType, false);
    if (Q.isUndefValue(Op0))
      return UndefValue::get(ReturnType);
6677 | break; |
6678 | } |
6679 | case Intrinsic::maxnum: |
6680 | case Intrinsic::minnum: |
6681 | case Intrinsic::maximum: |
6682 | case Intrinsic::minimum: { |
6683 | // If the arguments are the same, this is a no-op. |
6684 | if (Op0 == Op1) |
6685 | return Op0; |
6686 | |
6687 | // Canonicalize constant operand as Op1. |
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);
6690 | |
6691 | // If an argument is undef, return the other argument. |
    if (Q.isUndefValue(Op1))
6693 | return Op0; |
6694 | |
6695 | bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; |
6696 | bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum; |
6697 | |
6698 | // minnum(X, nan) -> X |
6699 | // maxnum(X, nan) -> X |
6700 | // minimum(X, nan) -> nan |
6701 | // maximum(X, nan) -> nan |
    if (match(Op1, m_NaN()))
      return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6704 | |
6705 | // In the following folds, inf can be replaced with the largest finite |
6706 | // float, if the ninf flag is set. |
6707 | const APFloat *C; |
    if (match(Op1, m_APFloat(C)) &&
6709 | (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) { |
6710 | // minnum(X, -inf) -> -inf |
6711 | // maxnum(X, +inf) -> +inf |
6712 | // minimum(X, -inf) -> -inf if nnan |
6713 | // maximum(X, +inf) -> +inf if nnan |
6714 | if (C->isNegative() == IsMin && |
6715 | (!PropagateNaN || (Call && Call->hasNoNaNs()))) |
        return ConstantFP::get(ReturnType, *C);
6717 | |
6718 | // minnum(X, +inf) -> X if nnan |
6719 | // maxnum(X, -inf) -> X if nnan |
6720 | // minimum(X, +inf) -> X |
6721 | // maximum(X, -inf) -> X |
6722 | if (C->isNegative() != IsMin && |
6723 | (PropagateNaN || (Call && Call->hasNoNaNs()))) |
6724 | return Op0; |
6725 | } |
6726 | |
6727 | // Min/max of the same operation with common operand: |
    // m(m(X, Y), X) --> m(X, Y) (4 commuted variants)
6729 | if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1)) |
6730 | return V; |
    if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
6732 | return V; |
6733 | |
6734 | break; |
6735 | } |
6736 | case Intrinsic::vector_extract: { |
6737 | // (extract_vector (insert_vector _, X, 0), 0) -> X |
    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
    Value *X = nullptr;
    if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
                                                         m_Zero())) &&
6742 | IdxN == 0 && X->getType() == ReturnType) |
6743 | return X; |
6744 | |
6745 | break; |
6746 | } |
6747 | default: |
6748 | break; |
6749 | } |
6750 | |
6751 | return nullptr; |
6752 | } |
6753 | |
6754 | static Value *simplifyIntrinsic(CallBase *Call, Value *Callee, |
6755 | ArrayRef<Value *> Args, |
6756 | const SimplifyQuery &Q) { |
6757 | // Operand bundles should not be in Args. |
6758 | assert(Call->arg_size() == Args.size()); |
6759 | unsigned NumOperands = Args.size(); |
  Function *F = cast<Function>(Callee);
6761 | Intrinsic::ID IID = F->getIntrinsicID(); |
6762 | |
6763 | // Most of the intrinsics with no operands have some kind of side effect. |
6764 | // Don't simplify. |
6765 | if (!NumOperands) { |
6766 | switch (IID) { |
6767 | case Intrinsic::vscale: { |
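      // If the function's vscale_range attribute pins vscale to a single
      // value, fold the call to that constant.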
6768 | Type *RetTy = F->getReturnType(); |
      ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
      if (const APInt *C = CR.getSingleElement())
        return ConstantInt::get(RetTy, C->getZExtValue());
6772 | return nullptr; |
6773 | } |
6774 | default: |
6775 | return nullptr; |
6776 | } |
6777 | } |
6778 | |
6779 | if (NumOperands == 1) |
    return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
6781 | |
6782 | if (NumOperands == 2) |
    return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
                                   Call);
6785 | |
6786 | // Handle intrinsics with 3 or more arguments. |
6787 | switch (IID) { |
6788 | case Intrinsic::masked_load: |
6789 | case Intrinsic::masked_gather: { |
6790 | Value *MaskArg = Args[2]; |
6791 | Value *PassthruArg = Args[3]; |
6792 | // If the mask is all zeros or undef, the "passthru" argument is the result. |
    if (maskIsAllZeroOrUndef(MaskArg))
6794 | return PassthruArg; |
6795 | return nullptr; |
6796 | } |
6797 | case Intrinsic::fshl: |
6798 | case Intrinsic::fshr: { |
6799 | Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2]; |
6800 | |
6801 | // If both operands are undef, the result is undef. |
    if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
      return UndefValue::get(F->getReturnType());

    // If shift amount is undef, assume it is zero.
    if (Q.isUndefValue(ShAmtArg))
6807 | return Args[IID == Intrinsic::fshl ? 0 : 1]; |
6808 | |
6809 | const APInt *ShAmtC; |
    if (match(ShAmtArg, m_APInt(ShAmtC))) {
6811 | // If there's effectively no shift, return the 1st arg or 2nd arg. |
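      // e.g. fshl(X, Y, 0) --> X; fshr(X, Y, 0) --> Y.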
6812 | APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth()); |
      if (ShAmtC->urem(BitWidth).isZero())
6814 | return Args[IID == Intrinsic::fshl ? 0 : 1]; |
6815 | } |
6816 | |
6817 | // Rotating zero by anything is zero. |
    if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
      return ConstantInt::getNullValue(F->getReturnType());
6820 | |
6821 | // Rotating -1 by anything is -1. |
    if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
      return ConstantInt::getAllOnesValue(F->getReturnType());
6824 | |
6825 | return nullptr; |
6826 | } |
6827 | case Intrinsic::experimental_constrained_fma: { |
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
                                *FPI->getRoundingMode()))
6831 | return V; |
6832 | return nullptr; |
6833 | } |
6834 | case Intrinsic::fma: |
6835 | case Intrinsic::fmuladd: { |
    if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
                                RoundingMode::NearestTiesToEven))
6838 | return V; |
6839 | return nullptr; |
6840 | } |
6841 | case Intrinsic::smul_fix: |
6842 | case Intrinsic::smul_fix_sat: { |
6843 | Value *Op0 = Args[0]; |
6844 | Value *Op1 = Args[1]; |
6845 | Value *Op2 = Args[2]; |
6846 | Type *ReturnType = F->getReturnType(); |
6847 | |
6848 | // Canonicalize constant operand as Op1 (ConstantFolding handles the case |
6849 | // when both Op0 and Op1 are constant so we do not care about that special |
6850 | // case here). |
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);
6853 | |
6854 | // X * 0 -> 0 |
    if (match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);
6857 | |
6858 | // X * undef -> 0 |
    if (Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
6861 | |
6862 | // X * (1 << Scale) -> X |
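    // e.g. with Scale == 16, the fixed-point encoding of 1.0 is (1 << 16),
    // and multiplying by it is the identity.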
    APInt ScaledOne =
        APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
                            cast<ConstantInt>(Op2)->getZExtValue());
    if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6867 | return Op0; |
6868 | |
6869 | return nullptr; |
6870 | } |
6871 | case Intrinsic::vector_insert: { |
6872 | Value *Vec = Args[0]; |
6873 | Value *SubVec = Args[1]; |
6874 | Value *Idx = Args[2]; |
6875 | Type *ReturnType = F->getReturnType(); |
6876 | |
6877 | // (insert_vector Y, (extract_vector X, 0), 0) -> X |
6878 | // where: Y is X, or Y is undef |
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6880 | Value *X = nullptr; |
    if (match(SubVec,
              m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6884 | X->getType() == ReturnType) |
6885 | return X; |
6886 | |
6887 | return nullptr; |
6888 | } |
6889 | case Intrinsic::experimental_constrained_fadd: { |
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
6894 | } |
6895 | case Intrinsic::experimental_constrained_fsub: { |
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
6900 | } |
6901 | case Intrinsic::experimental_constrained_fmul: { |
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
6906 | } |
6907 | case Intrinsic::experimental_constrained_fdiv: { |
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
6912 | } |
6913 | case Intrinsic::experimental_constrained_frem: { |
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
                            *FPI->getExceptionBehavior(),
                            *FPI->getRoundingMode());
6918 | } |
6919 | case Intrinsic::experimental_constrained_ldexp: |
    return simplifyLdexp(Args[0], Args[1], Q, /*IsStrict=*/true);
6921 | case Intrinsic::experimental_gc_relocate: { |
    GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
6923 | Value *DerivedPtr = GCR.getDerivedPtr(); |
6924 | Value *BasePtr = GCR.getBasePtr(); |
6925 | |
6926 | // Undef is undef, even after relocation. |
    if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
      return UndefValue::get(GCR.getType());
6929 | } |
6930 | |
    if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
6932 | // For now, the assumption is that the relocation of null will be null |
6933 | // for most any collector. If this ever changes, a corresponding hook |
6934 | // should be added to GCStrategy and this code should check it first. |
      if (isa<ConstantPointerNull>(DerivedPtr)) {
6936 | // Use null-pointer of gc_relocate's type to replace it. |
        return ConstantPointerNull::get(PT);
6938 | } |
6939 | } |
6940 | return nullptr; |
6941 | } |
6942 | default: |
6943 | return nullptr; |
6944 | } |
6945 | } |
6946 | |
6947 | static Value *tryConstantFoldCall(CallBase *Call, Value *Callee, |
6948 | ArrayRef<Value *> Args, |
6949 | const SimplifyQuery &Q) { |
  auto *F = dyn_cast<Function>(Callee);
6951 | if (!F || !canConstantFoldCallTo(Call, F)) |
6952 | return nullptr; |
6953 | |
6954 | SmallVector<Constant *, 4> ConstantArgs; |
  ConstantArgs.reserve(Args.size());
  for (Value *Arg : Args) {
    Constant *C = dyn_cast<Constant>(Arg);
    if (!C) {
      if (isa<MetadataAsValue>(Arg))
        continue;
      return nullptr;
    }
    ConstantArgs.push_back(C);
6964 | } |
6965 | |
  return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6967 | } |
6968 | |
6969 | Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args, |
6970 | const SimplifyQuery &Q) { |
6971 | // Args should not contain operand bundle operands. |
6972 | assert(Call->arg_size() == Args.size()); |
6973 | |
6974 | // musttail calls can only be simplified if they are also DCEd. |
6975 | // As we can't guarantee this here, don't simplify them. |
6976 | if (Call->isMustTailCall()) |
6977 | return nullptr; |
6978 | |
6979 | // call undef -> poison |
6980 | // call null -> poison |
  if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
    return PoisonValue::get(Call->getType());
6983 | |
6984 | if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q)) |
6985 | return V; |
6986 | |
  auto *F = dyn_cast<Function>(Callee);
6988 | if (F && F->isIntrinsic()) |
6989 | if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q)) |
6990 | return Ret; |
6991 | |
6992 | return nullptr; |
6993 | } |
6994 | |
6995 | Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) { |
6996 | assert(isa<ConstrainedFPIntrinsic>(Call)); |
6997 | SmallVector<Value *, 4> Args(Call->args()); |
  if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
6999 | return V; |
  if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
7001 | return Ret; |
7002 | return nullptr; |
7003 | } |
7004 | |
7005 | /// Given operands for a Freeze, see if we can fold the result. |
7006 | static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) { |
7007 | // Use a utility function defined in ValueTracking. |
  if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
7009 | return Op0; |
7010 | // We have room for improvement. |
7011 | return nullptr; |
7012 | } |
7013 | |
7014 | Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) { |
7015 | return ::simplifyFreezeInst(Op0, Q); |
7016 | } |
7017 | |
7018 | Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp, |
7019 | const SimplifyQuery &Q) { |
7020 | if (LI->isVolatile()) |
7021 | return nullptr; |
7022 | |
  if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
    return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
7025 | |
7026 | // We can only fold the load if it is from a constant global with definitive |
7027 | // initializer. Skip expensive logic if this is not the case. |
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
7029 | if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) |
7030 | return nullptr; |
7031 | |
  // If the GlobalVariable's initializer is uniform, then return the constant
  // regardless of its offset.
  if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
                                                     LI->getType(), Q.DL))
7036 | return C; |
7037 | |
7038 | // Try to convert operand into a constant by stripping offsets while looking |
7039 | // through invariant.group intrinsics. |
  APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
  PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
      Q.DL, Offset, /* AllowNonInbounds */ true,
      /* AllowInvariantGroup */ true);
  if (PtrOp == GV) {
    // Index size may have changed due to address space casts.
    Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
    return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
                                        Q.DL);
7049 | } |
7050 | |
7051 | return nullptr; |
7052 | } |
7053 | |
/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
7057 | static Value *simplifyInstructionWithOperands(Instruction *I, |
7058 | ArrayRef<Value *> NewOps, |
7059 | const SimplifyQuery &SQ, |
7060 | unsigned MaxRecurse) { |
7061 | assert(I->getFunction() && "instruction should be inserted in a function" ); |
7062 | assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) && |
7063 | "context instruction should be in the same function" ); |
7064 | |
7065 | const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I); |
7066 | |
7067 | switch (I->getOpcode()) { |
7068 | default: |
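    // With all-constant operands, delegate to the constant folder.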
    if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
      SmallVector<Constant *, 8> NewConstOps(NewOps.size());
      transform(NewOps, NewConstOps.begin(),
                [](Value *V) { return cast<Constant>(V); });
      return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
7074 | } |
7075 | return nullptr; |
7076 | case Instruction::FNeg: |
    return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
  case Instruction::FAdd:
    return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Add:
    return simplifyAddInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::FSub:
    return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Sub:
    return simplifySubInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::FMul:
    return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Mul:
    return simplifyMulInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::SDiv:
    return simplifySDivInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::UDiv:
    return simplifyUDivInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::FDiv:
    return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::SRem:
    return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::URem:
    return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::FRem:
    return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
                            MaxRecurse);
  case Instruction::Shl:
    return simplifyShlInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
  case Instruction::LShr:
    return simplifyLShrInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::AShr:
    return simplifyAShrInst(NewOps[0], NewOps[1],
                            Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
                            MaxRecurse);
  case Instruction::And:
    return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::Or:
    return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::Xor:
    return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::ICmp:
    return simplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
                            NewOps[1], Q, MaxRecurse);
  case Instruction::FCmp:
    return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
                            NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
  case Instruction::Select:
    return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
  case Instruction::GetElementPtr: {
    auto *GEPI = cast<GetElementPtrInst>(I);
    return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
                           ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
                           MaxRecurse);
  }
  case Instruction::InsertValue: {
    InsertValueInst *IV = cast<InsertValueInst>(I);
    return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
                                   MaxRecurse);
  }
  case Instruction::InsertElement:
    return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(I);
    return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
                                    MaxRecurse);
  }
  case Instruction::ExtractElement:
    return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
  case Instruction::ShuffleVector: {
    auto *SVI = cast<ShuffleVectorInst>(I);
    return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
                                     SVI->getShuffleMask(), SVI->getType(), Q,
                                     MaxRecurse);
  }
  case Instruction::PHI:
    return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
  case Instruction::Call:
    return simplifyCall(
        cast<CallInst>(I), NewOps.back(),
        NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()),
        Q);
  case Instruction::Freeze:
    return llvm::simplifyFreezeInst(NewOps[0], Q);
#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
#include "llvm/IR/Instruction.def"
#undef HANDLE_CAST_INST
    return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
                            MaxRecurse);
  case Instruction::Alloca:
    // No simplifications for Alloca and it can't be constant folded.
    return nullptr;
  case Instruction::Load:
    return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7188 | } |
7189 | } |
7190 | |
7191 | Value *llvm::simplifyInstructionWithOperands(Instruction *I, |
7192 | ArrayRef<Value *> NewOps, |
7193 | const SimplifyQuery &SQ) { |
7194 | assert(NewOps.size() == I->getNumOperands() && |
7195 | "Number of operands should match the instruction!" ); |
  return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7197 | } |
7198 | |
7199 | Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) { |
7200 | SmallVector<Value *, 8> Ops(I->operands()); |
  Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);

  // If called on unreachable code, the instruction may simplify to itself.
  // Make life easier for users by detecting that case here, and returning a
  // safe value instead.
  return Result == I ? PoisonValue::get(I->getType()) : Result;
7207 | } |
7208 | |
7209 | /// Implementation of recursive simplification through an instruction's |
7210 | /// uses. |
7211 | /// |
7212 | /// This is the common implementation of the recursive simplification routines. |
7213 | /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to |
7214 | /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of |
7215 | /// instructions to process and attempt to simplify it using |
7216 | /// InstructionSimplify. Recursively visited users which could not be |
/// simplified themselves are added to the optional UnsimplifiedUsers set for
7218 | /// further processing by the caller. |
7219 | /// |
7220 | /// This routine returns 'true' only when *it* simplifies something. The passed |
7221 | /// in simplified value does not count toward this. |
7222 | static bool replaceAndRecursivelySimplifyImpl( |
7223 | Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, |
7224 | const DominatorTree *DT, AssumptionCache *AC, |
7225 | SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) { |
7226 | bool Simplified = false; |
7227 | SmallSetVector<Instruction *, 8> Worklist; |
7228 | const DataLayout &DL = I->getDataLayout(); |
7229 | |
7230 | // If we have an explicit value to collapse to, do that round of the |
7231 | // simplification loop by hand initially. |
7232 | if (SimpleV) { |
7233 | for (User *U : I->users()) |
7234 | if (U != I) |
        Worklist.insert(cast<Instruction>(U));
7236 | |
7237 | // Replace the instruction with its simplified value. |
    I->replaceAllUsesWith(SimpleV);
7239 | |
7240 | if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects()) |
7241 | I->eraseFromParent(); |
7242 | } else { |
    Worklist.insert(I);
7244 | } |
7245 | |
  // Note that we must test the size on each iteration; the worklist can grow.
7247 | for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) { |
7248 | I = Worklist[Idx]; |
7249 | |
7250 | // See if this instruction simplifies. |
    SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7252 | if (!SimpleV) { |
7253 | if (UnsimplifiedUsers) |
        UnsimplifiedUsers->insert(I);
7255 | continue; |
7256 | } |
7257 | |
7258 | Simplified = true; |
7259 | |
7260 | // Stash away all the uses of the old instruction so we can check them for |
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the replacement value on the recursive step in most cases.
7263 | for (User *U : I->users()) |
      Worklist.insert(cast<Instruction>(U));
7265 | |
7266 | // Replace the instruction with its simplified value. |
    I->replaceAllUsesWith(SimpleV);
7268 | |
7269 | if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects()) |
7270 | I->eraseFromParent(); |
7271 | } |
7272 | return Simplified; |
7273 | } |
7274 | |
7275 | bool llvm::replaceAndRecursivelySimplify( |
7276 | Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, |
7277 | const DominatorTree *DT, AssumptionCache *AC, |
7278 | SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) { |
7279 | assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!" ); |
7280 | assert(SimpleV && "Must provide a simplified value." ); |
7281 | return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC, |
7282 | UnsimplifiedUsers); |
7283 | } |
7284 | |
7285 | namespace llvm { |
7286 | const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) { |
7287 | auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>(); |
7288 | auto *DT = DTWP ? &DTWP->getDomTree() : nullptr; |
7289 | auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); |
7290 | auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr; |
7291 | auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>(); |
7292 | auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr; |
7293 | return {F.getDataLayout(), TLI, DT, AC}; |
7294 | } |
7295 | |
7296 | const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR, |
7297 | const DataLayout &DL) { |
7298 | return {DL, &AR.TLI, &AR.DT, &AR.AC}; |
7299 | } |
7300 | |
7301 | template <class T, class... TArgs> |
7302 | const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM, |
7303 | Function &F) { |
7304 | auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F); |
7305 | auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F); |
7306 | auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F); |
7307 | return {F.getDataLayout(), TLI, DT, AC}; |
7308 | } |
7309 | template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &, |
7310 | Function &); |
7311 | |
7312 | bool SimplifyQuery::isUndefValue(Value *V) const { |
7313 | if (!CanUseUndef) |
7314 | return false; |
7315 | |
  return match(V, m_Undef());
7317 | } |
7318 | |
7319 | } // namespace llvm |
7320 | |
7321 | void InstSimplifyFolder::anchor() {} |
7322 | |