1 | //===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file implements the visit functions for add, fadd, sub, and fsub. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "InstCombineInternal.h" |
14 | #include "llvm/ADT/APFloat.h" |
15 | #include "llvm/ADT/APInt.h" |
16 | #include "llvm/ADT/STLExtras.h" |
17 | #include "llvm/ADT/SmallVector.h" |
18 | #include "llvm/Analysis/InstructionSimplify.h" |
19 | #include "llvm/Analysis/ValueTracking.h" |
20 | #include "llvm/IR/Constant.h" |
21 | #include "llvm/IR/Constants.h" |
22 | #include "llvm/IR/InstrTypes.h" |
23 | #include "llvm/IR/Instruction.h" |
24 | #include "llvm/IR/Instructions.h" |
25 | #include "llvm/IR/Operator.h" |
26 | #include "llvm/IR/PatternMatch.h" |
27 | #include "llvm/IR/Type.h" |
28 | #include "llvm/IR/Value.h" |
29 | #include "llvm/Support/AlignOf.h" |
30 | #include "llvm/Support/Casting.h" |
31 | #include "llvm/Support/KnownBits.h" |
32 | #include "llvm/Transforms/InstCombine/InstCombiner.h" |
33 | #include <cassert> |
34 | #include <utility> |
35 | |
36 | using namespace llvm; |
37 | using namespace PatternMatch; |
38 | |
39 | #define DEBUG_TYPE "instcombine" |
40 | |
41 | namespace { |
42 | |
/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient, which is especially true for
/// the constructor. As of this writing, the cost of the default constructor
/// is merely a 4-byte zero store (assuming the compiler is able to perform
/// write-merging).
48 | /// |
49 | class FAddendCoef { |
50 | public: |
// The constructor has to initialize an APFloat, which is unnecessary for
// most addends, whose coefficient is either 1 or -1. So the constructor
// is expensive. In order to avoid that cost, we should reuse instances
// whenever possible. The pre-created instances FAddCombine::Add[0-5]
// embody this idea.
56 | FAddendCoef() = default; |
57 | ~FAddendCoef(); |
58 | |
// If possible, don't define operator+/operator- etc., because these
// operators inevitably call FAddendCoef's constructor, which is not cheap.
61 | void operator=(const FAddendCoef &A); |
62 | void operator+=(const FAddendCoef &A); |
63 | void operator*=(const FAddendCoef &S); |
64 | |
65 | void set(short C) { |
66 | assert(!insaneIntVal(C) && "Insane coefficient" ); |
67 | IsFp = false; IntVal = C; |
68 | } |
69 | |
70 | void set(const APFloat& C); |
71 | |
72 | void negate(); |
73 | |
74 | bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); } |
75 | Value *getValue(Type *) const; |
76 | |
77 | bool isOne() const { return isInt() && IntVal == 1; } |
78 | bool isTwo() const { return isInt() && IntVal == 2; } |
79 | bool isMinusOne() const { return isInt() && IntVal == -1; } |
80 | bool isMinusTwo() const { return isInt() && IntVal == -2; } |
81 | |
82 | private: |
83 | bool insaneIntVal(int V) { return V > 4 || V < -4; } |
84 | |
85 | APFloat *getFpValPtr() { return reinterpret_cast<APFloat *>(&FpValBuf); } |
86 | |
87 | const APFloat *getFpValPtr() const { |
88 | return reinterpret_cast<const APFloat *>(&FpValBuf); |
89 | } |
90 | |
91 | const APFloat &getFpVal() const { |
assert(IsFp && BufHasFpVal && "Incorrect state");
93 | return *getFpValPtr(); |
94 | } |
95 | |
96 | APFloat &getFpVal() { |
assert(IsFp && BufHasFpVal && "Incorrect state");
98 | return *getFpValPtr(); |
99 | } |
100 | |
101 | bool isInt() const { return !IsFp; } |
102 | |
// If the coefficient is represented by an integer, promote it to a
// floating-point value.
105 | void convertToFpType(const fltSemantics &Sem); |
106 | |
107 | // Construct an APFloat from a signed integer. |
// TODO: We should get rid of this function when APFloat can be constructed
// from a *SIGNED* integer.
110 | APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val); |
111 | |
112 | bool IsFp = false; |
113 | |
114 | // True iff FpValBuf contains an instance of APFloat. |
115 | bool BufHasFpVal = false; |
116 | |
// The integer coefficient of an individual addend is either 1 or -1,
// and we try to simplify at most 4 addends from at most two neighboring
// instructions. So the range of <IntVal> falls in [-4, 4]. APInt would be
// overkill for this purpose.
121 | short IntVal = 0; |
122 | |
123 | AlignedCharArrayUnion<APFloat> FpValBuf; |
124 | }; |
125 | |
/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
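/// For example, the expression "2.5 * X" is the addend <2.5, X>, and the
/// constant 3.0 on its own is the addend <3.0, 0>.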
129 | class FAddend { |
130 | public: |
131 | FAddend() = default; |
132 | |
133 | void operator+=(const FAddend &T) { |
134 | assert((Val == T.Val) && "Symbolic-values disagree" ); |
135 | Coeff += T.Coeff; |
136 | } |
137 | |
138 | Value *getSymVal() const { return Val; } |
139 | const FAddendCoef &getCoef() const { return Coeff; } |
140 | |
141 | bool isConstant() const { return Val == nullptr; } |
142 | bool isZero() const { return Coeff.isZero(); } |
143 | |
144 | void set(short Coefficient, Value *V) { |
145 | Coeff.set(Coefficient); |
146 | Val = V; |
147 | } |
148 | void set(const APFloat &Coefficient, Value *V) { |
149 | Coeff.set(Coefficient); |
150 | Val = V; |
151 | } |
152 | void set(const ConstantFP *Coefficient, Value *V) { |
153 | Coeff.set(Coefficient->getValueAPF()); |
154 | Val = V; |
155 | } |
156 | |
157 | void negate() { Coeff.negate(); } |
158 | |
159 | /// Drill down the U-D chain one step to find the definition of V, and |
160 | /// try to break the definition into one or two addends. |
161 | static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1); |
162 | |
/// Similar to FAddend::drillDownOneStep() except that the value being
/// split is the addend itself.
165 | unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const; |
166 | |
167 | private: |
168 | void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; } |
169 | |
170 | // This addend has the value of "Coeff * Val". |
171 | Value *Val = nullptr; |
172 | FAddendCoef Coeff; |
173 | }; |
174 | |
/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with at most two of its neighboring instructions.
177 | /// |
178 | class FAddCombine { |
179 | public: |
180 | FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {} |
181 | |
182 | Value *simplify(Instruction *FAdd); |
183 | |
184 | private: |
185 | using AddendVect = SmallVector<const FAddend *, 4>; |
186 | |
187 | Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota); |
188 | |
189 | /// Convert given addend to a Value |
190 | Value *createAddendVal(const FAddend &A, bool& NeedNeg); |
191 | |
192 | /// Return the number of instructions needed to emit the N-ary addition. |
193 | unsigned calcInstrNumber(const AddendVect& Vect); |
194 | |
195 | Value *createFSub(Value *Opnd0, Value *Opnd1); |
196 | Value *createFAdd(Value *Opnd0, Value *Opnd1); |
197 | Value *createFMul(Value *Opnd0, Value *Opnd1); |
198 | Value *createFNeg(Value *V); |
199 | Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota); |
200 | void createInstPostProc(Instruction *NewInst, bool NoNumber = false); |
201 | |
// Debugging stuff is clustered here.
203 | #ifndef NDEBUG |
204 | unsigned CreateInstrNum; |
205 | void initCreateInstNum() { CreateInstrNum = 0; } |
206 | void incCreateInstNum() { CreateInstrNum++; } |
207 | #else |
208 | void initCreateInstNum() {} |
209 | void incCreateInstNum() {} |
210 | #endif |
211 | |
212 | InstCombiner::BuilderTy &Builder; |
213 | Instruction *Instr = nullptr; |
214 | }; |
215 | |
216 | } // end anonymous namespace |
217 | |
218 | //===----------------------------------------------------------------------===// |
219 | // |
220 | // Implementation of |
221 | // {FAddendCoef, FAddend, FAddition, FAddCombine}. |
222 | // |
223 | //===----------------------------------------------------------------------===// |
224 | FAddendCoef::~FAddendCoef() { |
225 | if (BufHasFpVal) |
226 | getFpValPtr()->~APFloat(); |
227 | } |
228 | |
229 | void FAddendCoef::set(const APFloat& C) { |
230 | APFloat *P = getFpValPtr(); |
231 | |
232 | if (isInt()) { |
// As the buffer contains a meaningless byte stream until an APFloat is
// constructed in it, we cannot call APFloat::operator=().
235 | new(P) APFloat(C); |
236 | } else |
237 | *P = C; |
238 | |
239 | IsFp = BufHasFpVal = true; |
240 | } |
241 | |
242 | void FAddendCoef::convertToFpType(const fltSemantics &Sem) { |
243 | if (!isInt()) |
244 | return; |
245 | |
246 | APFloat *P = getFpValPtr(); |
247 | if (IntVal > 0) |
248 | new(P) APFloat(Sem, IntVal); |
249 | else { |
250 | new(P) APFloat(Sem, 0 - IntVal); |
251 | P->changeSign(); |
252 | } |
253 | IsFp = BufHasFpVal = true; |
254 | } |
255 | |
256 | APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) { |
257 | if (Val >= 0) |
258 | return APFloat(Sem, Val); |
259 | |
260 | APFloat T(Sem, 0 - Val); |
261 | T.changeSign(); |
262 | |
263 | return T; |
264 | } |
265 | |
266 | void FAddendCoef::operator=(const FAddendCoef &That) { |
267 | if (That.isInt()) |
268 | set(That.IntVal); |
269 | else |
270 | set(That.getFpVal()); |
271 | } |
272 | |
273 | void FAddendCoef::operator+=(const FAddendCoef &That) { |
274 | RoundingMode RndMode = RoundingMode::NearestTiesToEven; |
275 | if (isInt() == That.isInt()) { |
276 | if (isInt()) |
277 | IntVal += That.IntVal; |
278 | else |
279 | getFpVal().add(RHS: That.getFpVal(), RM: RndMode); |
280 | return; |
281 | } |
282 | |
283 | if (isInt()) { |
284 | const APFloat &T = That.getFpVal(); |
285 | convertToFpType(Sem: T.getSemantics()); |
286 | getFpVal().add(RHS: T, RM: RndMode); |
287 | return; |
288 | } |
289 | |
290 | APFloat &T = getFpVal(); |
291 | T.add(RHS: createAPFloatFromInt(Sem: T.getSemantics(), Val: That.IntVal), RM: RndMode); |
292 | } |
293 | |
294 | void FAddendCoef::operator*=(const FAddendCoef &That) { |
295 | if (That.isOne()) |
296 | return; |
297 | |
298 | if (That.isMinusOne()) { |
299 | negate(); |
300 | return; |
301 | } |
302 | |
303 | if (isInt() && That.isInt()) { |
304 | int Res = IntVal * (int)That.IntVal; |
305 | assert(!insaneIntVal(Res) && "Insane int value" ); |
306 | IntVal = Res; |
307 | return; |
308 | } |
309 | |
310 | const fltSemantics &Semantic = |
311 | isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics(); |
312 | |
313 | if (isInt()) |
314 | convertToFpType(Sem: Semantic); |
315 | APFloat &F0 = getFpVal(); |
316 | |
317 | if (That.isInt()) |
318 | F0.multiply(RHS: createAPFloatFromInt(Sem: Semantic, Val: That.IntVal), |
319 | RM: APFloat::rmNearestTiesToEven); |
320 | else |
321 | F0.multiply(RHS: That.getFpVal(), RM: APFloat::rmNearestTiesToEven); |
322 | } |
323 | |
324 | void FAddendCoef::negate() { |
325 | if (isInt()) |
326 | IntVal = 0 - IntVal; |
327 | else |
328 | getFpVal().changeSign(); |
329 | } |
330 | |
331 | Value *FAddendCoef::getValue(Type *Ty) const { |
332 | return isInt() ? |
333 | ConstantFP::get(Ty, V: float(IntVal)) : |
334 | ConstantFP::get(Context&: Ty->getContext(), V: getFpVal()); |
335 | } |
336 | |
// The definition of <Val>     Addends
// =========================================
//  A + B                      <1, A>, <1, B>
//  A - B                      <1, A>, <-1, B>
//  0 - B                      <-1, B>
//  C * A                      <C, A>
//  A + C                      <1, A>, <C, NULL>
//  0 +/- 0                    <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
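//
// For example (illustrative): "%t = fsub float %x, %y" yields the two
// addends <1, %x> and <-1, %y>, while "%t = fmul float %x, 2.5" yields the
// single addend <2.5, %x>.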
347 | unsigned FAddend::drillValueDownOneStep |
348 | (Value *Val, FAddend &Addend0, FAddend &Addend1) { |
349 | Instruction *I = nullptr; |
350 | if (!Val || !(I = dyn_cast<Instruction>(Val))) |
351 | return 0; |
352 | |
353 | unsigned Opcode = I->getOpcode(); |
354 | |
355 | if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) { |
356 | ConstantFP *C0, *C1; |
357 | Value *Opnd0 = I->getOperand(i: 0); |
358 | Value *Opnd1 = I->getOperand(i: 1); |
359 | if ((C0 = dyn_cast<ConstantFP>(Val: Opnd0)) && C0->isZero()) |
360 | Opnd0 = nullptr; |
361 | |
362 | if ((C1 = dyn_cast<ConstantFP>(Val: Opnd1)) && C1->isZero()) |
363 | Opnd1 = nullptr; |
364 | |
365 | if (Opnd0) { |
366 | if (!C0) |
367 | Addend0.set(Coefficient: 1, V: Opnd0); |
368 | else |
369 | Addend0.set(Coefficient: C0, V: nullptr); |
370 | } |
371 | |
372 | if (Opnd1) { |
373 | FAddend &Addend = Opnd0 ? Addend1 : Addend0; |
374 | if (!C1) |
375 | Addend.set(Coefficient: 1, V: Opnd1); |
376 | else |
377 | Addend.set(Coefficient: C1, V: nullptr); |
378 | if (Opcode == Instruction::FSub) |
379 | Addend.negate(); |
380 | } |
381 | |
382 | if (Opnd0 || Opnd1) |
383 | return Opnd0 && Opnd1 ? 2 : 1; |
384 | |
385 | // Both operands are zero. Weird! |
386 | Addend0.set(Coefficient: APFloat(C0->getValueAPF().getSemantics()), V: nullptr); |
387 | return 1; |
388 | } |
389 | |
390 | if (I->getOpcode() == Instruction::FMul) { |
391 | Value *V0 = I->getOperand(i: 0); |
392 | Value *V1 = I->getOperand(i: 1); |
393 | if (ConstantFP *C = dyn_cast<ConstantFP>(Val: V0)) { |
394 | Addend0.set(Coefficient: C, V: V1); |
395 | return 1; |
396 | } |
397 | |
398 | if (ConstantFP *C = dyn_cast<ConstantFP>(Val: V1)) { |
399 | Addend0.set(Coefficient: C, V: V0); |
400 | return 1; |
401 | } |
402 | } |
403 | |
404 | return 0; |
405 | } |
406 | |
407 | // Try to break *this* addend into two addends. e.g. Suppose this addend is |
408 | // <2.3, V>, and V = X + Y, by calling this function, we obtain two addends, |
409 | // i.e. <2.3, X> and <2.3, Y>. |
410 | unsigned FAddend::drillAddendDownOneStep |
411 | (FAddend &Addend0, FAddend &Addend1) const { |
412 | if (isConstant()) |
413 | return 0; |
414 | |
415 | unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1); |
416 | if (!BreakNum || Coeff.isOne()) |
417 | return BreakNum; |
418 | |
419 | Addend0.Scale(ScaleAmt: Coeff); |
420 | |
421 | if (BreakNum == 2) |
422 | Addend1.Scale(ScaleAmt: Coeff); |
423 | |
424 | return BreakNum; |
425 | } |
426 | |
427 | Value *FAddCombine::simplify(Instruction *I) { |
428 | assert(I->hasAllowReassoc() && I->hasNoSignedZeros() && |
429 | "Expected 'reassoc'+'nsz' instruction" ); |
430 | |
// Currently we are not able to handle vector types.
432 | if (I->getType()->isVectorTy()) |
433 | return nullptr; |
434 | |
435 | assert((I->getOpcode() == Instruction::FAdd || |
436 | I->getOpcode() == Instruction::FSub) && "Expect add/sub" ); |
437 | |
438 | // Save the instruction before calling other member-functions. |
439 | Instr = I; |
440 | |
441 | FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1; |
442 | |
443 | unsigned OpndNum = FAddend::drillValueDownOneStep(Val: I, Addend0&: Opnd0, Addend1&: Opnd1); |
444 | |
445 | // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1. |
446 | unsigned Opnd0_ExpNum = 0; |
447 | unsigned Opnd1_ExpNum = 0; |
448 | |
449 | if (!Opnd0.isConstant()) |
450 | Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Addend0&: Opnd0_0, Addend1&: Opnd0_1); |
451 | |
452 | // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1. |
453 | if (OpndNum == 2 && !Opnd1.isConstant()) |
454 | Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Addend0&: Opnd1_0, Addend1&: Opnd1_1); |
455 | |
456 | // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1 |
457 | if (Opnd0_ExpNum && Opnd1_ExpNum) { |
458 | AddendVect AllOpnds; |
459 | AllOpnds.push_back(Elt: &Opnd0_0); |
460 | AllOpnds.push_back(Elt: &Opnd1_0); |
461 | if (Opnd0_ExpNum == 2) |
462 | AllOpnds.push_back(Elt: &Opnd0_1); |
463 | if (Opnd1_ExpNum == 2) |
464 | AllOpnds.push_back(Elt: &Opnd1_1); |
465 | |
466 | // Compute instruction quota. We should save at least one instruction. |
467 | unsigned InstQuota = 0; |
468 | |
469 | Value *V0 = I->getOperand(i: 0); |
470 | Value *V1 = I->getOperand(i: 1); |
471 | InstQuota = ((!isa<Constant>(Val: V0) && V0->hasOneUse()) && |
472 | (!isa<Constant>(Val: V1) && V1->hasOneUse())) ? 2 : 1; |
473 | |
474 | if (Value *R = simplifyFAdd(V&: AllOpnds, InstrQuota: InstQuota)) |
475 | return R; |
476 | } |
477 | |
478 | if (OpndNum != 2) { |
// The input instruction is: "I = 0.0 +/- V". If "V" could have been
// split into two addends, say "V = X - Y", the instruction would have
// been optimized into "I = Y - X" in the previous steps.
482 | // |
483 | const FAddendCoef &CE = Opnd0.getCoef(); |
484 | return CE.isOne() ? Opnd0.getSymVal() : nullptr; |
485 | } |
486 | |
487 | // step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1] |
488 | if (Opnd1_ExpNum) { |
489 | AddendVect AllOpnds; |
490 | AllOpnds.push_back(Elt: &Opnd0); |
491 | AllOpnds.push_back(Elt: &Opnd1_0); |
492 | if (Opnd1_ExpNum == 2) |
493 | AllOpnds.push_back(Elt: &Opnd1_1); |
494 | |
495 | if (Value *R = simplifyFAdd(V&: AllOpnds, InstrQuota: 1)) |
496 | return R; |
497 | } |
498 | |
499 | // step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1] |
500 | if (Opnd0_ExpNum) { |
501 | AddendVect AllOpnds; |
502 | AllOpnds.push_back(Elt: &Opnd1); |
503 | AllOpnds.push_back(Elt: &Opnd0_0); |
504 | if (Opnd0_ExpNum == 2) |
505 | AllOpnds.push_back(Elt: &Opnd0_1); |
506 | |
507 | if (Value *R = simplifyFAdd(V&: AllOpnds, InstrQuota: 1)) |
508 | return R; |
509 | } |
510 | |
511 | return nullptr; |
512 | } |
513 | |
514 | Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) { |
515 | unsigned AddendNum = Addends.size(); |
516 | assert(AddendNum <= 4 && "Too many addends" ); |
517 | |
// For saving intermediate results.
519 | unsigned NextTmpIdx = 0; |
520 | FAddend TmpResult[3]; |
521 | |
// Simplified addends are placed in <SimpVect>.
523 | AddendVect SimpVect; |
524 | |
525 | // The outer loop works on one symbolic-value at a time. Suppose the input |
526 | // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ... |
527 | // The symbolic-values will be processed in this order: x, y, z. |
528 | for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) { |
529 | |
530 | const FAddend *ThisAddend = Addends[SymIdx]; |
531 | if (!ThisAddend) { |
532 | // This addend was processed before. |
533 | continue; |
534 | } |
535 | |
536 | Value *Val = ThisAddend->getSymVal(); |
537 | |
// If the resulting expression has a constant addend, it is desirable for
// that constant to reside at the top of the resulting expression tree.
// Placing constants close to the super-expression(s) will potentially
// reveal some optimization opportunities in the super-expression(s). We
// intentionally do not implement this logic here and instead rely on the
// later call to SimplifyAssociativeOrCommutative.
544 | |
545 | unsigned StartIdx = SimpVect.size(); |
546 | SimpVect.push_back(Elt: ThisAddend); |
547 | |
// The inner loop collects addends sharing the same symbolic value; these
// addends will later be folded into a single addend. Following the above
// example, if the symbolic value "y" is being processed, the inner loop
// will collect the two addends "<b1,y>" and "<b2,y>". These two addends
// will later be folded into "<b1+b2, y>".
553 | for (unsigned SameSymIdx = SymIdx + 1; |
554 | SameSymIdx < AddendNum; SameSymIdx++) { |
555 | const FAddend *T = Addends[SameSymIdx]; |
556 | if (T && T->getSymVal() == Val) { |
// Set to null so that the next iteration of the outer loop will not
// process this addend again.
559 | Addends[SameSymIdx] = nullptr; |
560 | SimpVect.push_back(Elt: T); |
561 | } |
562 | } |
563 | |
// If multiple addends share the same symbolic value, fold them together.
565 | if (StartIdx + 1 != SimpVect.size()) { |
566 | FAddend &R = TmpResult[NextTmpIdx ++]; |
567 | R = *SimpVect[StartIdx]; |
568 | for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++) |
569 | R += *SimpVect[Idx]; |
570 | |
571 | // Pop all addends being folded and push the resulting folded addend. |
572 | SimpVect.resize(N: StartIdx); |
573 | if (!R.isZero()) { |
574 | SimpVect.push_back(Elt: &R); |
575 | } |
576 | } |
577 | } |
578 | |
579 | assert((NextTmpIdx <= std::size(TmpResult) + 1) && "out-of-bound access" ); |
580 | |
581 | Value *Result; |
582 | if (!SimpVect.empty()) |
583 | Result = createNaryFAdd(Opnds: SimpVect, InstrQuota); |
584 | else { |
585 | // The addition is folded to 0.0. |
586 | Result = ConstantFP::get(Ty: Instr->getType(), V: 0.0); |
587 | } |
588 | |
589 | return Result; |
590 | } |
591 | |
592 | Value *FAddCombine::createNaryFAdd |
593 | (const AddendVect &Opnds, unsigned InstrQuota) { |
594 | assert(!Opnds.empty() && "Expect at least one addend" ); |
595 | |
596 | // Step 1: Check if the # of instructions needed exceeds the quota. |
597 | |
598 | unsigned InstrNeeded = calcInstrNumber(Vect: Opnds); |
599 | if (InstrNeeded > InstrQuota) |
600 | return nullptr; |
601 | |
602 | initCreateInstNum(); |
603 | |
604 | // step 2: Emit the N-ary addition. |
605 | // Note that at most three instructions are involved in Fadd-InstCombine: the |
606 | // addition in question, and at most two neighboring instructions. |
607 | // The resulting optimized addition should have at least one less instruction |
608 | // than the original addition expression tree. This implies that the resulting |
609 | // N-ary addition has at most two instructions, and we don't need to worry |
610 | // about tree-height when constructing the N-ary addition. |
611 | |
612 | Value *LastVal = nullptr; |
613 | bool LastValNeedNeg = false; |
614 | |
// Iterate the addends, creating fadd/fsub from two adjacent addends.
616 | for (const FAddend *Opnd : Opnds) { |
617 | bool NeedNeg; |
618 | Value *V = createAddendVal(A: *Opnd, NeedNeg); |
619 | if (!LastVal) { |
620 | LastVal = V; |
621 | LastValNeedNeg = NeedNeg; |
622 | continue; |
623 | } |
624 | |
625 | if (LastValNeedNeg == NeedNeg) { |
626 | LastVal = createFAdd(Opnd0: LastVal, Opnd1: V); |
627 | continue; |
628 | } |
629 | |
630 | if (LastValNeedNeg) |
631 | LastVal = createFSub(Opnd0: V, Opnd1: LastVal); |
632 | else |
633 | LastVal = createFSub(Opnd0: LastVal, Opnd1: V); |
634 | |
635 | LastValNeedNeg = false; |
636 | } |
637 | |
638 | if (LastValNeedNeg) { |
639 | LastVal = createFNeg(V: LastVal); |
640 | } |
641 | |
642 | #ifndef NDEBUG |
643 | assert(CreateInstrNum == InstrNeeded && |
644 | "Inconsistent in instruction numbers" ); |
645 | #endif |
646 | |
647 | return LastVal; |
648 | } |
649 | |
650 | Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) { |
651 | Value *V = Builder.CreateFSub(L: Opnd0, R: Opnd1); |
652 | if (Instruction *I = dyn_cast<Instruction>(Val: V)) |
653 | createInstPostProc(NewInst: I); |
654 | return V; |
655 | } |
656 | |
657 | Value *FAddCombine::createFNeg(Value *V) { |
658 | Value *NewV = Builder.CreateFNeg(V); |
659 | if (Instruction *I = dyn_cast<Instruction>(Val: NewV)) |
660 | createInstPostProc(NewInst: I, NoNumber: true); // fneg's don't receive instruction numbers. |
661 | return NewV; |
662 | } |
663 | |
664 | Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) { |
665 | Value *V = Builder.CreateFAdd(L: Opnd0, R: Opnd1); |
666 | if (Instruction *I = dyn_cast<Instruction>(Val: V)) |
667 | createInstPostProc(NewInst: I); |
668 | return V; |
669 | } |
670 | |
671 | Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) { |
672 | Value *V = Builder.CreateFMul(L: Opnd0, R: Opnd1); |
673 | if (Instruction *I = dyn_cast<Instruction>(Val: V)) |
674 | createInstPostProc(NewInst: I); |
675 | return V; |
676 | } |
677 | |
678 | void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) { |
679 | NewInstr->setDebugLoc(Instr->getDebugLoc()); |
680 | |
// Keep track of the number of instructions created.
682 | if (!NoNumber) |
683 | incCreateInstNum(); |
684 | |
685 | // Propagate fast-math flags |
686 | NewInstr->setFastMathFlags(Instr->getFastMathFlags()); |
687 | } |
688 | |
// Return the number of instructions needed to emit the N-ary addition.
690 | // NOTE: Keep this function in sync with createAddendVal(). |
691 | unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) { |
692 | unsigned OpndNum = Opnds.size(); |
693 | unsigned InstrNeeded = OpndNum - 1; |
694 | |
695 | // Adjust the number of instructions needed to emit the N-ary add. |
696 | for (const FAddend *Opnd : Opnds) { |
697 | if (Opnd->isConstant()) |
698 | continue; |
699 | |
700 | // The constant check above is really for a few special constant |
701 | // coefficients. |
702 | if (isa<UndefValue>(Val: Opnd->getSymVal())) |
703 | continue; |
704 | |
705 | const FAddendCoef &CE = Opnd->getCoef(); |
706 | // Let the addend be "c * x". If "c == +/-1", the value of the addend |
707 | // is immediately available; otherwise, it needs exactly one instruction |
708 | // to evaluate the value. |
709 | if (!CE.isMinusOne() && !CE.isOne()) |
710 | InstrNeeded++; |
711 | } |
712 | return InstrNeeded; |
713 | } |
714 | |
// Input Addend       Value             NeedNeg(output)
// ================================================================
//  Constant C         C                 false
//  <+/-1, V>          V                 coefficient is -1
//  <2/-2, V>          "fadd V, V"       coefficient is -2
//  <C, V>             "fmul V, C"       false
721 | // |
722 | // NOTE: Keep this function in sync with FAddCombine::calcInstrNumber. |
723 | Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) { |
724 | const FAddendCoef &Coeff = Opnd.getCoef(); |
725 | |
726 | if (Opnd.isConstant()) { |
727 | NeedNeg = false; |
728 | return Coeff.getValue(Ty: Instr->getType()); |
729 | } |
730 | |
731 | Value *OpndVal = Opnd.getSymVal(); |
732 | |
733 | if (Coeff.isMinusOne() || Coeff.isOne()) { |
734 | NeedNeg = Coeff.isMinusOne(); |
735 | return OpndVal; |
736 | } |
737 | |
738 | if (Coeff.isTwo() || Coeff.isMinusTwo()) { |
739 | NeedNeg = Coeff.isMinusTwo(); |
740 | return createFAdd(Opnd0: OpndVal, Opnd1: OpndVal); |
741 | } |
742 | |
743 | NeedNeg = false; |
744 | return createFMul(Opnd0: OpndVal, Opnd1: Coeff.getValue(Ty: Instr->getType())); |
745 | } |
746 | |
// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
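//
// For example (illustrative), with C == 4 in the first pattern:
//   add (xor (or Z, -5), 4), 1 == sub 0, (and Z, 4), since -5 == ~4.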
752 | static Value *checkForNegativeOperand(BinaryOperator &I, |
753 | InstCombiner::BuilderTy &Builder) { |
754 | Value *LHS = I.getOperand(i_nocapture: 0), *RHS = I.getOperand(i_nocapture: 1); |
755 | |
// This function creates 2 instructions to replace ADD, so we need at least
// one of LHS or RHS to have one use to ensure a benefit from the transform.
758 | if (!LHS->hasOneUse() && !RHS->hasOneUse()) |
759 | return nullptr; |
760 | |
761 | Value *X = nullptr, *Y = nullptr, *Z = nullptr; |
762 | const APInt *C1 = nullptr, *C2 = nullptr; |
763 | |
// if ONE is on the other side, swap
765 | if (match(V: RHS, P: m_Add(L: m_Value(V&: X), R: m_One()))) |
766 | std::swap(a&: LHS, b&: RHS); |
767 | |
768 | if (match(V: LHS, P: m_Add(L: m_Value(V&: X), R: m_One()))) { |
// if XOR is on the other side, swap
770 | if (match(V: RHS, P: m_Xor(L: m_Value(V&: Y), R: m_APInt(Res&: C1)))) |
771 | std::swap(a&: X, b&: RHS); |
772 | |
773 | if (match(V: X, P: m_Xor(L: m_Value(V&: Y), R: m_APInt(Res&: C1)))) { |
774 | // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1)) |
775 | // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1)) |
776 | if (match(V: Y, P: m_Or(L: m_Value(V&: Z), R: m_APInt(Res&: C2))) && (*C2 == ~(*C1))) { |
777 | Value *NewAnd = Builder.CreateAnd(LHS: Z, RHS: *C1); |
778 | return Builder.CreateSub(LHS: RHS, RHS: NewAnd, Name: "sub" ); |
779 | } else if (match(V: Y, P: m_And(L: m_Value(V&: Z), R: m_APInt(Res&: C2))) && (*C1 == *C2)) { |
780 | // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1)) |
781 | // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1)) |
782 | Value *NewOr = Builder.CreateOr(LHS: Z, RHS: ~(*C1)); |
783 | return Builder.CreateSub(LHS: RHS, RHS: NewOr, Name: "sub" ); |
784 | } |
785 | } |
786 | } |
787 | |
788 | // Restore LHS and RHS |
789 | LHS = I.getOperand(i_nocapture: 0); |
790 | RHS = I.getOperand(i_nocapture: 1); |
791 | |
// if XOR is on the other side, swap
793 | if (match(V: RHS, P: m_Xor(L: m_Value(V&: Y), R: m_APInt(Res&: C1)))) |
794 | std::swap(a&: LHS, b&: RHS); |
795 | |
// C1 is ODD (so C2 == C1 - 1 is even)
797 | // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2)) |
798 | // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2)) |
799 | if (match(V: LHS, P: m_Xor(L: m_Value(V&: Y), R: m_APInt(Res&: C1)))) |
800 | if (C1->countr_zero() == 0) |
801 | if (match(V: Y, P: m_And(L: m_Value(V&: Z), R: m_APInt(Res&: C2))) && *C1 == (*C2 + 1)) { |
802 | Value *NewOr = Builder.CreateOr(LHS: Z, RHS: ~(*C2)); |
803 | return Builder.CreateSub(LHS: RHS, RHS: NewOr, Name: "sub" ); |
804 | } |
805 | return nullptr; |
806 | } |
807 | |
808 | /// Wrapping flags may allow combining constants separated by an extend. |
809 | static Instruction *foldNoWrapAdd(BinaryOperator &Add, |
810 | InstCombiner::BuilderTy &Builder) { |
811 | Value *Op0 = Add.getOperand(i_nocapture: 0), *Op1 = Add.getOperand(i_nocapture: 1); |
812 | Type *Ty = Add.getType(); |
813 | Constant *Op1C; |
814 | if (!match(V: Op1, P: m_Constant(C&: Op1C))) |
815 | return nullptr; |
816 | |
817 | // Try this match first because it results in an add in the narrow type. |
818 | // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1))) |
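// For example (illustrative):
//   (zext i8 (%x +nuw 20) to i32) + -5 --> zext i8 (%x +nuw 15) to i32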
819 | Value *X; |
820 | const APInt *C1, *C2; |
821 | if (match(V: Op1, P: m_APInt(Res&: C1)) && |
822 | match(V: Op0, P: m_ZExt(Op: m_NUWAddLike(L: m_Value(V&: X), R: m_APInt(Res&: C2)))) && |
823 | C1->isNegative() && C1->sge(RHS: -C2->sext(width: C1->getBitWidth()))) { |
824 | APInt NewC = *C2 + C1->trunc(width: C2->getBitWidth()); |
825 | // If the smaller add will fold to zero, we don't need to check one use. |
826 | if (NewC.isZero()) |
827 | return new ZExtInst(X, Ty); |
828 | // Otherwise only do this if the existing zero extend will be removed. |
829 | if (Op0->hasOneUse()) |
830 | return new ZExtInst( |
831 | Builder.CreateNUWAdd(LHS: X, RHS: ConstantInt::get(Ty: X->getType(), V: NewC)), Ty); |
832 | } |
833 | |
834 | // More general combining of constants in the wide type. |
835 | // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C) |
836 | // or (zext nneg (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C) |
837 | Constant *NarrowC; |
838 | if (match(V: Op0, P: m_OneUse(SubPattern: m_SExtLike( |
839 | Op: m_NSWAddLike(L: m_Value(V&: X), R: m_Constant(C&: NarrowC)))))) { |
840 | Value *WideC = Builder.CreateSExt(V: NarrowC, DestTy: Ty); |
841 | Value *NewC = Builder.CreateAdd(LHS: WideC, RHS: Op1C); |
842 | Value *WideX = Builder.CreateSExt(V: X, DestTy: Ty); |
843 | return BinaryOperator::CreateAdd(V1: WideX, V2: NewC); |
844 | } |
845 | // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C) |
846 | if (match(V: Op0, |
847 | P: m_OneUse(SubPattern: m_ZExt(Op: m_NUWAddLike(L: m_Value(V&: X), R: m_Constant(C&: NarrowC)))))) { |
848 | Value *WideC = Builder.CreateZExt(V: NarrowC, DestTy: Ty); |
849 | Value *NewC = Builder.CreateAdd(LHS: WideC, RHS: Op1C); |
850 | Value *WideX = Builder.CreateZExt(V: X, DestTy: Ty); |
851 | return BinaryOperator::CreateAdd(V1: WideX, V2: NewC); |
852 | } |
853 | return nullptr; |
854 | } |
855 | |
856 | Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) { |
857 | Value *Op0 = Add.getOperand(i_nocapture: 0), *Op1 = Add.getOperand(i_nocapture: 1); |
858 | Type *Ty = Add.getType(); |
859 | Constant *Op1C; |
860 | if (!match(V: Op1, P: m_ImmConstant(C&: Op1C))) |
861 | return nullptr; |
862 | |
863 | if (Instruction *NV = foldBinOpIntoSelectOrPhi(I&: Add)) |
864 | return NV; |
865 | |
866 | Value *X; |
867 | Constant *Op00C; |
868 | |
869 | // add (sub C1, X), C2 --> sub (add C1, C2), X |
870 | if (match(V: Op0, P: m_Sub(L: m_Constant(C&: Op00C), R: m_Value(V&: X)))) |
871 | return BinaryOperator::CreateSub(V1: ConstantExpr::getAdd(C1: Op00C, C2: Op1C), V2: X); |
872 | |
873 | Value *Y; |
874 | |
875 | // add (sub X, Y), -1 --> add (not Y), X |
876 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y)))) && |
877 | match(V: Op1, P: m_AllOnes())) |
878 | return BinaryOperator::CreateAdd(V1: Builder.CreateNot(V: Y), V2: X); |
879 | |
880 | // zext(bool) + C -> bool ? C + 1 : C |
881 | if (match(V: Op0, P: m_ZExt(Op: m_Value(V&: X))) && |
882 | X->getType()->getScalarSizeInBits() == 1) |
883 | return SelectInst::Create(C: X, S1: InstCombiner::AddOne(C: Op1C), S2: Op1); |
884 | // sext(bool) + C -> bool ? C - 1 : C |
885 | if (match(V: Op0, P: m_SExt(Op: m_Value(V&: X))) && |
886 | X->getType()->getScalarSizeInBits() == 1) |
887 | return SelectInst::Create(C: X, S1: InstCombiner::SubOne(C: Op1C), S2: Op1); |
888 | |
889 | // ~X + C --> (C-1) - X |
890 | if (match(V: Op0, P: m_Not(V: m_Value(V&: X)))) { |
// ~X + C has NSW and (C-1) won't overflow => (C-1)-X can have NSW
892 | auto *COne = ConstantInt::get(Ty: Op1C->getType(), V: 1); |
893 | bool WillNotSOV = willNotOverflowSignedSub(LHS: Op1C, RHS: COne, CxtI: Add); |
894 | BinaryOperator *Res = |
895 | BinaryOperator::CreateSub(V1: ConstantExpr::getSub(C1: Op1C, C2: COne), V2: X); |
896 | Res->setHasNoSignedWrap(Add.hasNoSignedWrap() && WillNotSOV); |
897 | return Res; |
898 | } |
899 | |
900 | // (iN X s>> (N - 1)) + 1 --> zext (X > -1) |
901 | const APInt *C; |
902 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
903 | if (match(V: Op0, P: m_OneUse(SubPattern: m_AShr(L: m_Value(V&: X), |
904 | R: m_SpecificIntAllowPoison(V: BitWidth - 1)))) && |
905 | match(V: Op1, P: m_One())) |
906 | return new ZExtInst(Builder.CreateIsNotNeg(Arg: X, Name: "isnotneg" ), Ty); |
907 | |
908 | if (!match(V: Op1, P: m_APInt(Res&: C))) |
909 | return nullptr; |
910 | |
911 | // (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add` |
912 | Constant *Op01C; |
913 | if (match(V: Op0, P: m_DisjointOr(L: m_Value(V&: X), R: m_ImmConstant(C&: Op01C)))) { |
914 | BinaryOperator *NewAdd = |
915 | BinaryOperator::CreateAdd(V1: X, V2: ConstantExpr::getAdd(C1: Op01C, C2: Op1C)); |
916 | NewAdd->setHasNoSignedWrap(Add.hasNoSignedWrap() && |
917 | willNotOverflowSignedAdd(LHS: Op01C, RHS: Op1C, CxtI: Add)); |
918 | NewAdd->setHasNoUnsignedWrap(Add.hasNoUnsignedWrap()); |
919 | return NewAdd; |
920 | } |
921 | |
922 | // (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C) |
923 | const APInt *C2; |
924 | if (match(V: Op0, P: m_Or(L: m_Value(), R: m_APInt(Res&: C2))) && *C2 == -*C) |
925 | return BinaryOperator::CreateXor(V1: Op0, V2: ConstantInt::get(Ty: Add.getType(), V: *C2)); |
926 | |
927 | if (C->isSignMask()) { |
928 | // If wrapping is not allowed, then the addition must set the sign bit: |
929 | // X + (signmask) --> X | signmask |
930 | if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap()) |
931 | return BinaryOperator::CreateOr(V1: Op0, V2: Op1); |
932 | |
933 | // If wrapping is allowed, then the addition flips the sign bit of LHS: |
934 | // X + (signmask) --> X ^ signmask |
935 | return BinaryOperator::CreateXor(V1: Op0, V2: Op1); |
936 | } |
937 | |
938 | // Is this add the last step in a convoluted sext? |
939 | // add(zext(xor i16 X, -32768), -32768) --> sext X |
940 | if (match(V: Op0, P: m_ZExt(Op: m_Xor(L: m_Value(V&: X), R: m_APInt(Res&: C2)))) && |
941 | C2->isMinSignedValue() && C2->sext(width: Ty->getScalarSizeInBits()) == *C) |
942 | return CastInst::Create(Instruction::SExt, S: X, Ty); |
943 | |
944 | if (match(V: Op0, P: m_Xor(L: m_Value(V&: X), R: m_APInt(Res&: C2)))) { |
945 | // (X ^ signmask) + C --> (X + (signmask ^ C)) |
946 | if (C2->isSignMask()) |
947 | return BinaryOperator::CreateAdd(V1: X, V2: ConstantInt::get(Ty, V: *C2 ^ *C)); |
948 | |
949 | // If X has no high-bits set above an xor mask: |
950 | // add (xor X, LowMaskC), C --> sub (LowMaskC + C), X |
951 | if (C2->isMask()) { |
952 | KnownBits LHSKnown = computeKnownBits(V: X, CxtI: &Add); |
953 | if ((*C2 | LHSKnown.Zero).isAllOnes()) |
954 | return BinaryOperator::CreateSub(V1: ConstantInt::get(Ty, V: *C2 + *C), V2: X); |
955 | } |
956 | |
957 | // Look for a math+logic pattern that corresponds to sext-in-register of a |
958 | // value with cleared high bits. Convert that into a pair of shifts: |
959 | // add (xor X, 0x80), 0xF..F80 --> (X << ShAmtC) >>s ShAmtC |
960 | // add (xor X, 0xF..F80), 0x80 --> (X << ShAmtC) >>s ShAmtC |
961 | if (Op0->hasOneUse() && *C2 == -(*C)) { |
962 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
963 | unsigned ShAmt = 0; |
964 | if (C->isPowerOf2()) |
965 | ShAmt = BitWidth - C->logBase2() - 1; |
966 | else if (C2->isPowerOf2()) |
967 | ShAmt = BitWidth - C2->logBase2() - 1; |
968 | if (ShAmt && |
969 | MaskedValueIsZero(V: X, Mask: APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: ShAmt), CxtI: &Add)) { |
970 | Constant *ShAmtC = ConstantInt::get(Ty, V: ShAmt); |
971 | Value *NewShl = Builder.CreateShl(LHS: X, RHS: ShAmtC, Name: "sext" ); |
972 | return BinaryOperator::CreateAShr(V1: NewShl, V2: ShAmtC); |
973 | } |
974 | } |
975 | } |
976 | |
977 | if (C->isOne() && Op0->hasOneUse()) { |
978 | // add (sext i1 X), 1 --> zext (not X) |
979 | // TODO: The smallest IR representation is (select X, 0, 1), and that would |
980 | // not require the one-use check. But we need to remove a transform in |
981 | // visitSelect and make sure that IR value tracking for select is equal or |
982 | // better than for these ops. |
983 | if (match(V: Op0, P: m_SExt(Op: m_Value(V&: X))) && |
984 | X->getType()->getScalarSizeInBits() == 1) |
985 | return new ZExtInst(Builder.CreateNot(V: X), Ty); |
986 | |
987 | // Shifts and add used to flip and mask off the low bit: |
988 | // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1 |
989 | const APInt *C3; |
990 | if (match(V: Op0, P: m_AShr(L: m_Shl(L: m_Value(V&: X), R: m_APInt(Res&: C2)), R: m_APInt(Res&: C3))) && |
991 | C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) { |
992 | Value *NotX = Builder.CreateNot(V: X); |
993 | return BinaryOperator::CreateAnd(V1: NotX, V2: ConstantInt::get(Ty, V: 1)); |
994 | } |
995 | } |
996 | |
997 | // umax(X, C) + -C --> usub.sat(X, C) |
998 | if (match(V: Op0, P: m_OneUse(SubPattern: m_UMax(L: m_Value(V&: X), R: m_SpecificInt(V: -*C))))) |
999 | return replaceInstUsesWith( |
1000 | I&: Add, V: Builder.CreateBinaryIntrinsic( |
1001 | ID: Intrinsic::usub_sat, LHS: X, RHS: ConstantInt::get(Ty: Add.getType(), V: -*C))); |
1002 | |
1003 | // Fold (add (zext (add X, -1)), 1) -> (zext X) if X is non-zero. |
1004 | // TODO: There's a general form for any constant on the outer add. |
1005 | if (C->isOne()) { |
1006 | if (match(V: Op0, P: m_ZExt(Op: m_Add(L: m_Value(V&: X), R: m_AllOnes())))) { |
1007 | const SimplifyQuery Q = SQ.getWithInstruction(I: &Add); |
1008 | if (llvm::isKnownNonZero(V: X, Q)) |
1009 | return new ZExtInst(X, Ty); |
1010 | } |
1011 | } |
1012 | |
1013 | return nullptr; |
1014 | } |
1015 | |
// Match variations of a^2 + 2*a*b + b^2
//
// To reuse the code between the FP and Int versions, the instruction OpCodes
// and constant types have been turned into template parameters.
//
// Mul2Rhs: The constant to perform the multiplicative equivalent of X*2 with;
// should be `m_SpecificFP(2.0)` for FP and `m_SpecificInt(1)` for Int
// (we're matching `X<<1` instead of `X*2` for Int).
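//
// For example (illustrative), the Int version matches
//   (a * a) + (((a << 1) + b) * b)
// with A == a and B == b, and foldSquareSumInt then rewrites it as
//   (a + b) * (a + b).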
1024 | template <bool FP, typename Mul2Rhs> |
1025 | static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A, |
1026 | Value *&B) { |
1027 | constexpr unsigned MulOp = FP ? Instruction::FMul : Instruction::Mul; |
1028 | constexpr unsigned AddOp = FP ? Instruction::FAdd : Instruction::Add; |
1029 | constexpr unsigned Mul2Op = FP ? Instruction::FMul : Instruction::Shl; |
1030 | |
1031 | // (a * a) + (((a * 2) + b) * b) |
1032 | if (match(&I, m_c_BinOp( |
1033 | AddOp, m_OneUse(SubPattern: m_BinOp(Opcode: MulOp, L: m_Value(V&: A), R: m_Deferred(V: A))), |
1034 | m_OneUse(m_c_BinOp( |
1035 | MulOp, |
1036 | m_c_BinOp(AddOp, m_BinOp(Mul2Op, m_Deferred(V: A), M2Rhs), |
1037 | m_Value(V&: B)), |
1038 | m_Deferred(V: B)))))) |
1039 | return true; |
1040 | |
1041 | // ((a * b) * 2) or ((a * 2) * b) |
1042 | // + |
1043 | // (a * a + b * b) or (b * b + a * a) |
1044 | return match( |
1045 | &I, m_c_BinOp( |
1046 | AddOp, |
1047 | m_CombineOr( |
1048 | m_OneUse(m_BinOp( |
1049 | Mul2Op, m_BinOp(Opcode: MulOp, L: m_Value(V&: A), R: m_Value(V&: B)), M2Rhs)), |
1050 | m_OneUse(m_c_BinOp(MulOp, m_BinOp(Mul2Op, m_Value(V&: A), M2Rhs), |
1051 | m_Value(V&: B)))), |
1052 | m_OneUse( |
1053 | SubPattern: m_c_BinOp(Opcode: AddOp, L: m_BinOp(Opcode: MulOp, L: m_Deferred(V: A), R: m_Deferred(V: A)), |
1054 | R: m_BinOp(Opcode: MulOp, L: m_Deferred(V: B), R: m_Deferred(V: B)))))); |
1055 | } |
1056 | |
1057 | // Fold integer variations of a^2 + 2*a*b + b^2 -> (a + b)^2 |
1058 | Instruction *InstCombinerImpl::foldSquareSumInt(BinaryOperator &I) { |
1059 | Value *A, *B; |
1060 | if (matchesSquareSum</*FP*/ false>(I, M2Rhs: m_SpecificInt(V: 1), A, B)) { |
1061 | Value *AB = Builder.CreateAdd(LHS: A, RHS: B); |
1062 | return BinaryOperator::CreateMul(V1: AB, V2: AB); |
1063 | } |
1064 | return nullptr; |
1065 | } |
1066 | |
1067 | // Fold floating point variations of a^2 + 2*a*b + b^2 -> (a + b)^2 |
1068 | // Requires `nsz` and `reassoc`. |
1069 | Instruction *InstCombinerImpl::foldSquareSumFP(BinaryOperator &I) { |
1070 | assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && "Assumption mismatch" ); |
1071 | Value *A, *B; |
1072 | if (matchesSquareSum</*FP*/ true>(I, M2Rhs: m_SpecificFP(V: 2.0), A, B)) { |
1073 | Value *AB = Builder.CreateFAddFMF(L: A, R: B, FMFSource: &I); |
1074 | return BinaryOperator::CreateFMulFMF(V1: AB, V2: AB, FMFSource: &I); |
1075 | } |
1076 | return nullptr; |
1077 | } |
1078 | |
1079 | // Matches multiplication expression Op * C where C is a constant. Returns the |
1080 | // constant value in C and the other operand in Op. Returns true if such a |
1081 | // match is found. |
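// For example (illustrative): "shl i32 %x, 3" matches with Op == %x and
// C == 8.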
1082 | static bool MatchMul(Value *E, Value *&Op, APInt &C) { |
1083 | const APInt *AI; |
1084 | if (match(V: E, P: m_Mul(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1085 | C = *AI; |
1086 | return true; |
1087 | } |
1088 | if (match(V: E, P: m_Shl(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1089 | C = APInt(AI->getBitWidth(), 1); |
1090 | C <<= *AI; |
1091 | return true; |
1092 | } |
1093 | return false; |
1094 | } |
1095 | |
1096 | // Matches remainder expression Op % C where C is a constant. Returns the |
1097 | // constant value in C and the other operand in Op. Returns the signedness of |
1098 | // the remainder operation in IsSigned. Returns true if such a match is |
1099 | // found. |
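// For example (illustrative): "and i32 %x, 7" matches as an unsigned
// remainder with Op == %x and C == 8.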
1100 | static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) { |
1101 | const APInt *AI; |
1102 | IsSigned = false; |
1103 | if (match(V: E, P: m_SRem(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1104 | IsSigned = true; |
1105 | C = *AI; |
1106 | return true; |
1107 | } |
1108 | if (match(V: E, P: m_URem(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1109 | C = *AI; |
1110 | return true; |
1111 | } |
1112 | if (match(V: E, P: m_And(L: m_Value(V&: Op), R: m_APInt(Res&: AI))) && (*AI + 1).isPowerOf2()) { |
1113 | C = *AI + 1; |
1114 | return true; |
1115 | } |
1116 | return false; |
1117 | } |
1118 | |
1119 | // Matches division expression Op / C with the given signedness as indicated |
1120 | // by IsSigned, where C is a constant. Returns the constant value in C and the |
1121 | // other operand in Op. Returns true if such a match is found. |
1122 | static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) { |
1123 | const APInt *AI; |
1124 | if (IsSigned && match(V: E, P: m_SDiv(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1125 | C = *AI; |
1126 | return true; |
1127 | } |
1128 | if (!IsSigned) { |
1129 | if (match(V: E, P: m_UDiv(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1130 | C = *AI; |
1131 | return true; |
1132 | } |
1133 | if (match(V: E, P: m_LShr(L: m_Value(V&: Op), R: m_APInt(Res&: AI)))) { |
1134 | C = APInt(AI->getBitWidth(), 1); |
1135 | C <<= *AI; |
1136 | return true; |
1137 | } |
1138 | } |
1139 | return false; |
1140 | } |
1141 | |
1142 | // Returns whether C0 * C1 with the given signedness overflows. |
1143 | static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) { |
1144 | bool overflow; |
1145 | if (IsSigned) |
1146 | (void)C0.smul_ov(RHS: C1, Overflow&: overflow); |
1147 | else |
1148 | (void)C0.umul_ov(RHS: C1, Overflow&: overflow); |
1149 | return overflow; |
1150 | } |
1151 | |
1152 | // Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1) |
1153 | // does not overflow. |
1154 | // Simplifies (X / C0) * C1 + (X % C0) * C2 to |
1155 | // (X / C0) * (C1 - C2 * C0) + X * C2 |
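// For example (illustrative): X u% 3 + ((X u/ 3) u% 5) * 3 simplifies to
// X u% 15, since 3 * 5 does not overflow.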
1156 | Value *InstCombinerImpl::SimplifyAddWithRemainder(BinaryOperator &I) { |
1157 | Value *LHS = I.getOperand(i_nocapture: 0), *RHS = I.getOperand(i_nocapture: 1); |
1158 | Value *X, *MulOpV; |
1159 | APInt C0, MulOpC; |
1160 | bool IsSigned; |
1161 | // Match I = X % C0 + MulOpV * C0 |
1162 | if (((MatchRem(E: LHS, Op&: X, C&: C0, IsSigned) && MatchMul(E: RHS, Op&: MulOpV, C&: MulOpC)) || |
1163 | (MatchRem(E: RHS, Op&: X, C&: C0, IsSigned) && MatchMul(E: LHS, Op&: MulOpV, C&: MulOpC))) && |
1164 | C0 == MulOpC) { |
1165 | Value *RemOpV; |
1166 | APInt C1; |
1167 | bool Rem2IsSigned; |
1168 | // Match MulOpC = RemOpV % C1 |
1169 | if (MatchRem(E: MulOpV, Op&: RemOpV, C&: C1, IsSigned&: Rem2IsSigned) && |
1170 | IsSigned == Rem2IsSigned) { |
1171 | Value *DivOpV; |
1172 | APInt DivOpC; |
1173 | // Match RemOpV = X / C0 |
1174 | if (MatchDiv(E: RemOpV, Op&: DivOpV, C&: DivOpC, IsSigned) && X == DivOpV && |
1175 | C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) { |
1176 | Value *NewDivisor = ConstantInt::get(Ty: X->getType(), V: C0 * C1); |
1177 | return IsSigned ? Builder.CreateSRem(LHS: X, RHS: NewDivisor, Name: "srem" ) |
1178 | : Builder.CreateURem(LHS: X, RHS: NewDivisor, Name: "urem" ); |
1179 | } |
1180 | } |
1181 | } |
1182 | |
1183 | // Match I = (X / C0) * C1 + (X % C0) * C2 |
1184 | Value *Div, *Rem; |
1185 | APInt C1, C2; |
1186 | if (!LHS->hasOneUse() || !MatchMul(E: LHS, Op&: Div, C&: C1)) |
1187 | Div = LHS, C1 = APInt(I.getType()->getScalarSizeInBits(), 1); |
1188 | if (!RHS->hasOneUse() || !MatchMul(E: RHS, Op&: Rem, C&: C2)) |
1189 | Rem = RHS, C2 = APInt(I.getType()->getScalarSizeInBits(), 1); |
1190 | if (match(V: Div, P: m_IRem(L: m_Value(), R: m_Value()))) { |
1191 | std::swap(a&: Div, b&: Rem); |
1192 | std::swap(a&: C1, b&: C2); |
1193 | } |
1194 | Value *DivOpV; |
1195 | APInt DivOpC; |
1196 | if (MatchRem(E: Rem, Op&: X, C&: C0, IsSigned) && |
1197 | MatchDiv(E: Div, Op&: DivOpV, C&: DivOpC, IsSigned) && X == DivOpV && C0 == DivOpC) { |
1198 | APInt NewC = C1 - C2 * C0; |
1199 | if (!NewC.isZero() && !Rem->hasOneUse()) |
1200 | return nullptr; |
1201 | if (!isGuaranteedNotToBeUndef(V: X, AC: &AC, CtxI: &I, DT: &DT)) |
1202 | return nullptr; |
1203 | Value *MulXC2 = Builder.CreateMul(LHS: X, RHS: ConstantInt::get(Ty: X->getType(), V: C2)); |
1204 | if (NewC.isZero()) |
1205 | return MulXC2; |
1206 | return Builder.CreateAdd( |
1207 | LHS: Builder.CreateMul(LHS: Div, RHS: ConstantInt::get(Ty: X->getType(), V: NewC)), RHS: MulXC2); |
1208 | } |
1209 | |
1210 | return nullptr; |
1211 | } |
1212 | |
1213 | /// Fold |
1214 | /// (1 << NBits) - 1 |
1215 | /// Into: |
1216 | /// ~(-(1 << NBits)) |
1217 | /// Because a 'not' is better for bit-tracking analysis and other transforms |
/// than an 'add'. The new shl is always nsw, and is nuw if the old `add` was.
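/// For example (illustrative), with NBits == 4:
///   (1 << 4) - 1 == 15 == ~(-16) == ~(-(1 << 4))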
1219 | static Instruction *canonicalizeLowbitMask(BinaryOperator &I, |
1220 | InstCombiner::BuilderTy &Builder) { |
1221 | Value *NBits; |
1222 | if (!match(V: &I, P: m_Add(L: m_OneUse(SubPattern: m_Shl(L: m_One(), R: m_Value(V&: NBits))), R: m_AllOnes()))) |
1223 | return nullptr; |
1224 | |
1225 | Constant *MinusOne = Constant::getAllOnesValue(Ty: NBits->getType()); |
1226 | Value *NotMask = Builder.CreateShl(LHS: MinusOne, RHS: NBits, Name: "notmask" ); |
1227 | // Be wary of constant folding. |
1228 | if (auto *BOp = dyn_cast<BinaryOperator>(Val: NotMask)) { |
1229 | // Always NSW. But NUW propagates from `add`. |
1230 | BOp->setHasNoSignedWrap(); |
1231 | BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); |
1232 | } |
1233 | |
1234 | return BinaryOperator::CreateNot(Op: NotMask, Name: I.getName()); |
1235 | } |
1236 | |
1237 | static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) { |
1238 | assert(I.getOpcode() == Instruction::Add && "Expecting add instruction" ); |
1239 | Type *Ty = I.getType(); |
1240 | auto getUAddSat = [&]() { |
1241 | return Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: Intrinsic::uadd_sat, |
1242 | Tys: Ty); |
1243 | }; |
1244 | |
1245 | // add (umin X, ~Y), Y --> uaddsat X, Y |
1246 | Value *X, *Y; |
1247 | if (match(V: &I, P: m_c_Add(L: m_c_UMin(L: m_Value(V&: X), R: m_Not(V: m_Value(V&: Y))), |
1248 | R: m_Deferred(V: Y)))) |
1249 | return CallInst::Create(Func: getUAddSat(), Args: { X, Y }); |
1250 | |
1251 | // add (umin X, ~C), C --> uaddsat X, C |
1252 | const APInt *C, *NotC; |
1253 | if (match(V: &I, P: m_Add(L: m_UMin(L: m_Value(V&: X), R: m_APInt(Res&: NotC)), R: m_APInt(Res&: C))) && |
1254 | *C == ~*NotC) |
1255 | return CallInst::Create(Func: getUAddSat(), Args: { X, ConstantInt::get(Ty, V: *C) }); |
1256 | |
1257 | return nullptr; |
1258 | } |
1259 | |
1260 | // Transform: |
1261 | // (add A, (shl (neg B), Y)) |
1262 | // -> (sub A, (shl B, Y)) |
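// For example (illustrative):
//   (add A, (shl (sub 0, B), 3)) -> (sub A, (shl B, 3))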
1263 | static Instruction *combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder, |
1264 | const BinaryOperator &I) { |
1265 | Value *A, *B, *Cnt; |
1266 | if (match(V: &I, |
1267 | P: m_c_Add(L: m_OneUse(SubPattern: m_Shl(L: m_OneUse(SubPattern: m_Neg(V: m_Value(V&: B))), R: m_Value(V&: Cnt))), |
1268 | R: m_Value(V&: A)))) { |
1269 | Value *NewShl = Builder.CreateShl(LHS: B, RHS: Cnt); |
1270 | return BinaryOperator::CreateSub(V1: A, V2: NewShl); |
1271 | } |
1272 | return nullptr; |
1273 | } |
1274 | |
1275 | /// Try to reduce signed division by power-of-2 to an arithmetic shift right. |
1276 | static Instruction *foldAddToAshr(BinaryOperator &Add) { |
1277 | // Division must be by power-of-2, but not the minimum signed value. |
1278 | Value *X; |
1279 | const APInt *DivC; |
1280 | if (!match(V: Add.getOperand(i_nocapture: 0), P: m_SDiv(L: m_Value(V&: X), R: m_Power2(V&: DivC))) || |
1281 | DivC->isNegative()) |
1282 | return nullptr; |
1283 | |
1284 | // Rounding is done by adding -1 if the dividend (X) is negative and has any |
1285 | // low bits set. It recognizes two canonical patterns: |
// 1. For an 'ugt' cmp with the signed minimum value (SMIN), the
//    pattern is: sext (icmp ugt (X & (SMIN | (DivC - 1))), SMIN).
// 2. For an 'eq' cmp, the pattern is: sext (icmp eq (X & (SMIN + 1)), SMIN + 1).
1289 | // Note that, by the time we end up here, if possible, ugt has been |
1290 | // canonicalized into eq. |
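// For example (illustrative), for i8 and DivC == 4 (pattern 1):
//   (X s/ 4) + sext (icmp ugt (X & 0x83), 0x80) --> X >>s 2
// where 0x83 == SMIN | (DivC - 1) and 0x80 == SMIN.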
1291 | const APInt *MaskC, *MaskCCmp; |
1292 | CmpPredicate Pred; |
1293 | if (!match(V: Add.getOperand(i_nocapture: 1), |
1294 | P: m_SExt(Op: m_ICmp(Pred, L: m_And(L: m_Specific(V: X), R: m_APInt(Res&: MaskC)), |
1295 | R: m_APInt(Res&: MaskCCmp))))) |
1296 | return nullptr; |
1297 | |
1298 | if ((Pred != ICmpInst::ICMP_UGT || !MaskCCmp->isSignMask()) && |
1299 | (Pred != ICmpInst::ICMP_EQ || *MaskCCmp != *MaskC)) |
1300 | return nullptr; |
1301 | |
1302 | APInt SMin = APInt::getSignedMinValue(numBits: Add.getType()->getScalarSizeInBits()); |
1303 | bool IsMaskValid = Pred == ICmpInst::ICMP_UGT |
1304 | ? (*MaskC == (SMin | (*DivC - 1))) |
1305 | : (*DivC == 2 && *MaskC == SMin + 1); |
1306 | if (!IsMaskValid) |
1307 | return nullptr; |
1308 | |
// (X / DivC) + sext ((X & (SMin | (DivC - 1))) >u SMin) --> X >>s log2(DivC)
1310 | return BinaryOperator::CreateAShr( |
1311 | V1: X, V2: ConstantInt::get(Ty: Add.getType(), V: DivC->exactLogBase2())); |
1312 | } |
1313 | |
1314 | Instruction *InstCombinerImpl::foldAddLikeCommutative(Value *LHS, Value *RHS, |
1315 | bool NSW, bool NUW) { |
1316 | Value *A, *B, *C; |
1317 | if (match(V: LHS, P: m_Sub(L: m_Value(V&: A), R: m_Value(V&: B))) && |
1318 | match(V: RHS, P: m_Sub(L: m_Value(V&: C), R: m_Specific(V: A)))) { |
1319 | Instruction *R = BinaryOperator::CreateSub(V1: C, V2: B); |
1320 | bool NSWOut = NSW && match(V: LHS, P: m_NSWSub(L: m_Value(), R: m_Value())) && |
1321 | match(V: RHS, P: m_NSWSub(L: m_Value(), R: m_Value())); |
1322 | |
1323 | bool NUWOut = match(V: LHS, P: m_NUWSub(L: m_Value(), R: m_Value())) && |
1324 | match(V: RHS, P: m_NUWSub(L: m_Value(), R: m_Value())); |
1325 | R->setHasNoSignedWrap(NSWOut); |
1326 | R->setHasNoUnsignedWrap(NUWOut); |
1327 | return R; |
1328 | } |
1329 | |
1330 | // ((X s/ C1) << C2) + X => X s% -C1 where -C1 is 1 << C2 |
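// For example (illustrative): ((X s/ -8) << 3) + X => X s% 8,
// with C1 == -8, C2 == 3, and -C1 == 1 << 3.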
1331 | const APInt *C1, *C2; |
1332 | if (match(V: LHS, P: m_Shl(L: m_SDiv(L: m_Specific(V: RHS), R: m_APInt(Res&: C1)), R: m_APInt(Res&: C2)))) { |
1333 | APInt One(C2->getBitWidth(), 1); |
1334 | APInt MinusC1 = -(*C1); |
1335 | if (MinusC1 == (One << *C2)) { |
1336 | Constant *NewRHS = ConstantInt::get(Ty: RHS->getType(), V: MinusC1); |
1337 | return BinaryOperator::CreateSRem(V1: RHS, V2: NewRHS); |
1338 | } |
1339 | } |
1340 | |
1341 | return nullptr; |
1342 | } |
1343 | |
Instruction *InstCombinerImpl::
    canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
        BinaryOperator &I) {
1347 | assert((I.getOpcode() == Instruction::Add || |
1348 | I.getOpcode() == Instruction::Or || |
1349 | I.getOpcode() == Instruction::Sub) && |
1350 | "Expecting add/or/sub instruction" ); |
1351 | |
1352 | // We have a subtraction/addition between a (potentially truncated) *logical* |
1353 | // right-shift of X and a "select". |
1354 | Value *X, *Select; |
Instruction *LowBitsToSkip, *Extract;
1356 | if (!match(V: &I, P: m_c_BinOp(L: m_TruncOrSelf(Op: m_CombineAnd( |
1357 | L: m_LShr(L: m_Value(V&: X), R: m_Instruction(I&: LowBitsToSkip)), |
1358 | R: m_Instruction(I&: Extract))), |
1359 | R: m_Value(V&: Select)))) |
1360 | return nullptr; |
1361 | |
1362 | // `add`/`or` is commutative; but for `sub`, "select" *must* be on RHS. |
1363 | if (I.getOpcode() == Instruction::Sub && I.getOperand(i_nocapture: 1) != Select) |
1364 | return nullptr; |
1365 | |
1366 | Type *XTy = X->getType(); |
1367 | bool HadTrunc = I.getType() != XTy; |
1368 | |
1369 | // If there was a truncation of extracted value, then we'll need to produce |
1370 | // one extra instruction, so we need to ensure one instruction will go away. |
1371 | if (HadTrunc && !match(V: &I, P: m_c_BinOp(L: m_OneUse(SubPattern: m_Value()), R: m_Value()))) |
1372 | return nullptr; |
1373 | |
1374 | // Extraction should extract high NBits bits, with shift amount calculated as: |
1375 | // low bits to skip = shift bitwidth - high bits to extract |
// The shift amount itself may be extended, and we need to look past zero-ext
// when matching NBits; that will matter for matching later.
1378 | Value *NBits; |
1379 | if (!match(V: LowBitsToSkip, |
1380 | P: m_ZExtOrSelf(Op: m_Sub(L: m_SpecificInt(V: XTy->getScalarSizeInBits()), |
1381 | R: m_ZExtOrSelf(Op: m_Value(V&: NBits)))))) |
1382 | return nullptr; |
1383 | |
1384 | // Sign-extending value can be zero-extended if we `sub`tract it, |
1385 | // or sign-extended otherwise. |
1386 | auto SkipExtInMagic = [&I](Value *&V) { |
1387 | if (I.getOpcode() == Instruction::Sub) |
1388 | match(V, P: m_ZExtOrSelf(Op: m_Value(V))); |
1389 | else |
1390 | match(V, P: m_SExtOrSelf(Op: m_Value(V))); |
1391 | }; |
1392 | |
1393 | // Now, finally validate the sign-extending magic. |
1394 | // `select` itself may be appropriately extended, look past that. |
1395 | SkipExtInMagic(Select); |
1396 | |
1397 | CmpPredicate Pred; |
1398 | const APInt *Thr; |
1399 | Value *SignExtendingValue, *Zero; |
1400 | bool ShouldSignext; |
1401 | // It must be a select between two values we will later establish to be a |
1402 | // sign-extending value and a zero constant. The condition guarding the |
1403 | // sign-extension must be based on a sign bit of the same X we had in `lshr`. |
1404 | if (!match(V: Select, P: m_Select(C: m_ICmp(Pred, L: m_Specific(V: X), R: m_APInt(Res&: Thr)), |
1405 | L: m_Value(V&: SignExtendingValue), R: m_Value(V&: Zero))) || |
1406 | !isSignBitCheck(Pred, RHS: *Thr, TrueIfSigned&: ShouldSignext)) |
1407 | return nullptr; |
1408 | |
1409 | // icmp-select pair is commutative. |
1410 | if (!ShouldSignext) |
1411 | std::swap(a&: SignExtendingValue, b&: Zero); |
1412 | |
1413 | // If we should not perform sign-extension then we must add/or/subtract zero. |
1414 | if (!match(V: Zero, P: m_Zero())) |
1415 | return nullptr; |
1416 | // Otherwise, it should be some constant, left-shifted by the same NBits we |
1417 | // had in `lshr`. Said left-shift can also be appropriately extended. |
1418 | // Again, we must look past zero-ext when looking for NBits. |
1419 | SkipExtInMagic(SignExtendingValue); |
1420 | Constant *SignExtendingValueBaseConstant; |
1421 | if (!match(V: SignExtendingValue, |
1422 | P: m_Shl(L: m_Constant(C&: SignExtendingValueBaseConstant), |
1423 | R: m_ZExtOrSelf(Op: m_Specific(V: NBits))))) |
1424 | return nullptr; |
1425 | // If we `sub`, then the constant should be one, else it should be all-ones. |
1426 | if (I.getOpcode() == Instruction::Sub |
1427 | ? !match(V: SignExtendingValueBaseConstant, P: m_One()) |
1428 | : !match(V: SignExtendingValueBaseConstant, P: m_AllOnes())) |
1429 | return nullptr; |
1430 | |
1431 | auto *NewAShr = BinaryOperator::CreateAShr(V1: X, V2: LowBitsToSkip, |
1432 | Name: Extract->getName() + ".sext" ); |
1433 | NewAShr->copyIRFlags(V: Extract); // Preserve `exact`-ness. |
1434 | if (!HadTrunc) |
1435 | return NewAShr; |
1436 | |
1437 | Builder.Insert(I: NewAShr); |
1438 | return TruncInst::CreateTruncOrBitCast(S: NewAShr, Ty: I.getType()); |
1439 | } |
1440 | |
1441 | /// This is a specialization of a more general transform from |
1442 | /// foldUsingDistributiveLaws. If that code can be made to work optimally |
1443 | /// for multi-use cases or propagating nsw/nuw, then we would not need this. |
1444 | static Instruction *factorizeMathWithShlOps(BinaryOperator &I, |
1445 | InstCombiner::BuilderTy &Builder) { |
1446 | // TODO: Also handle mul by doubling the shift amount? |
1447 | assert((I.getOpcode() == Instruction::Add || |
1448 | I.getOpcode() == Instruction::Sub) && |
1449 | "Expected add/sub" ); |
1450 | auto *Op0 = dyn_cast<BinaryOperator>(Val: I.getOperand(i_nocapture: 0)); |
1451 | auto *Op1 = dyn_cast<BinaryOperator>(Val: I.getOperand(i_nocapture: 1)); |
1452 | if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse())) |
1453 | return nullptr; |
1454 | |
1455 | Value *X, *Y, *ShAmt; |
1456 | if (!match(V: Op0, P: m_Shl(L: m_Value(V&: X), R: m_Value(V&: ShAmt))) || |
1457 | !match(V: Op1, P: m_Shl(L: m_Value(V&: Y), R: m_Specific(V: ShAmt)))) |
1458 | return nullptr; |
1459 | |
1460 | // No-wrap propagates only when all ops have no-wrap. |
1461 | bool HasNSW = I.hasNoSignedWrap() && Op0->hasNoSignedWrap() && |
1462 | Op1->hasNoSignedWrap(); |
1463 | bool HasNUW = I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() && |
1464 | Op1->hasNoUnsignedWrap(); |
1465 | |
1466 | // add/sub (X << ShAmt), (Y << ShAmt) --> (add/sub X, Y) << ShAmt |
1467 | Value *NewMath = Builder.CreateBinOp(Opc: I.getOpcode(), LHS: X, RHS: Y); |
1468 | if (auto *NewI = dyn_cast<BinaryOperator>(Val: NewMath)) { |
1469 | NewI->setHasNoSignedWrap(HasNSW); |
1470 | NewI->setHasNoUnsignedWrap(HasNUW); |
1471 | } |
1472 | auto *NewShl = BinaryOperator::CreateShl(V1: NewMath, V2: ShAmt); |
1473 | NewShl->setHasNoSignedWrap(HasNSW); |
1474 | NewShl->setHasNoUnsignedWrap(HasNUW); |
1475 | return NewShl; |
1476 | } |
1477 | |
1478 | /// Reduce a sequence of masked half-width multiplies to a single multiply. |
1479 | /// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
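     | /// e.g. for i8 with HalfBits = 4 and X = 0x12, Y = 0x34: CrossSum is
     | /// 0x2 * 0x3 + 0x4 * 0x1 == 0xA, and (0xA << 4) + 0x2 * 0x4 == 0xA8,
     | /// which is the low 8 bits of 0x12 * 0x34.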
1480 | static Instruction *foldBoxMultiply(BinaryOperator &I) { |
1481 | unsigned BitWidth = I.getType()->getScalarSizeInBits(); |
1482 | // Skip the odd bitwidth types. |
1483 | if ((BitWidth & 0x1)) |
1484 | return nullptr; |
1485 | |
1486 | unsigned HalfBits = BitWidth >> 1; |
1487 | APInt HalfMask = APInt::getMaxValue(numBits: HalfBits); |
1488 | |
1489 | // ResLo = (CrossSum << HalfBits) + (YLo * XLo) |
1490 | Value *XLo, *YLo; |
1491 | Value *CrossSum; |
1492 | // Require one-use on the multiply to avoid increasing the number of |
1493 | // multiplications. |
1494 | if (!match(V: &I, P: m_c_Add(L: m_Shl(L: m_Value(V&: CrossSum), R: m_SpecificInt(V: HalfBits)), |
1495 | R: m_OneUse(SubPattern: m_Mul(L: m_Value(V&: YLo), R: m_Value(V&: XLo)))))) |
1496 | return nullptr; |
1497 | |
1498 | // XLo = X & HalfMask |
1499 | // YLo = Y & HalfMask |
1500 | // TODO: Refactor with SimplifyDemandedBits or KnownBits known leading zeros |
1501 | // to enhance robustness |
1502 | Value *X, *Y; |
1503 | if (!match(V: XLo, P: m_And(L: m_Value(V&: X), R: m_SpecificInt(V: HalfMask))) || |
1504 | !match(V: YLo, P: m_And(L: m_Value(V&: Y), R: m_SpecificInt(V: HalfMask)))) |
1505 | return nullptr; |
1506 | |
1507 |   // CrossSum = (X' * (Y >> HalfBits)) + (Y' * (X >> HalfBits))
1508 | // X' can be either X or XLo in the pattern (and the same for Y') |
1509 | if (match(V: CrossSum, |
1510 | P: m_c_Add(L: m_c_Mul(L: m_LShr(L: m_Specific(V: Y), R: m_SpecificInt(V: HalfBits)), |
1511 | R: m_CombineOr(L: m_Specific(V: X), R: m_Specific(V: XLo))), |
1512 | R: m_c_Mul(L: m_LShr(L: m_Specific(V: X), R: m_SpecificInt(V: HalfBits)), |
1513 | R: m_CombineOr(L: m_Specific(V: Y), R: m_Specific(V: YLo)))))) |
1514 | return BinaryOperator::CreateMul(V1: X, V2: Y); |
1515 | |
1516 | return nullptr; |
1517 | } |
1518 | |
1519 | Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) { |
1520 | if (Value *V = simplifyAddInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1), |
1521 | IsNSW: I.hasNoSignedWrap(), IsNUW: I.hasNoUnsignedWrap(), |
1522 | Q: SQ.getWithInstruction(I: &I))) |
1523 | return replaceInstUsesWith(I, V); |
1524 | |
1525 | if (SimplifyAssociativeOrCommutative(I)) |
1526 | return &I; |
1527 | |
1528 | if (Instruction *X = foldVectorBinop(Inst&: I)) |
1529 | return X; |
1530 | |
1531 | if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I)) |
1532 | return Phi; |
1533 | |
1534 | // (A*B)+(A*C) -> A*(B+C) etc |
1535 | if (Value *V = foldUsingDistributiveLaws(I)) |
1536 | return replaceInstUsesWith(I, V); |
1537 | |
1538 | if (Instruction *R = foldBoxMultiply(I)) |
1539 | return R; |
1540 | |
1541 | if (Instruction *R = factorizeMathWithShlOps(I, Builder)) |
1542 | return R; |
1543 | |
1544 | if (Instruction *X = foldAddWithConstant(Add&: I)) |
1545 | return X; |
1546 | |
1547 | if (Instruction *X = foldNoWrapAdd(Add&: I, Builder)) |
1548 | return X; |
1549 | |
1550 | if (Instruction *R = foldBinOpShiftWithShift(I)) |
1551 | return R; |
1552 | |
1553 | if (Instruction *R = combineAddSubWithShlAddSub(Builder, I)) |
1554 | return R; |
1555 | |
1556 | Value *LHS = I.getOperand(i_nocapture: 0), *RHS = I.getOperand(i_nocapture: 1); |
1557 | if (Instruction *R = foldAddLikeCommutative(LHS, RHS, NSW: I.hasNoSignedWrap(), |
1558 | NUW: I.hasNoUnsignedWrap())) |
1559 | return R; |
1560 | if (Instruction *R = foldAddLikeCommutative(LHS: RHS, RHS: LHS, NSW: I.hasNoSignedWrap(), |
1561 | NUW: I.hasNoUnsignedWrap())) |
1562 | return R; |
1563 | Type *Ty = I.getType(); |
1564 | if (Ty->isIntOrIntVectorTy(BitWidth: 1)) |
1565 | return BinaryOperator::CreateXor(V1: LHS, V2: RHS); |
1566 | |
1567 | // X + X --> X << 1 |
1568 | if (LHS == RHS) { |
1569 | auto *Shl = BinaryOperator::CreateShl(V1: LHS, V2: ConstantInt::get(Ty, V: 1)); |
1570 | Shl->setHasNoSignedWrap(I.hasNoSignedWrap()); |
1571 | Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); |
1572 | return Shl; |
1573 | } |
1574 | |
1575 | Value *A, *B; |
1576 | if (match(V: LHS, P: m_Neg(V: m_Value(V&: A)))) { |
1577 | // -A + -B --> -(A + B) |
1578 | if (match(V: RHS, P: m_Neg(V: m_Value(V&: B)))) |
1579 | return BinaryOperator::CreateNeg(Op: Builder.CreateAdd(LHS: A, RHS: B)); |
1580 | |
1581 | // -A + B --> B - A |
1582 | auto *Sub = BinaryOperator::CreateSub(V1: RHS, V2: A); |
1583 | auto *OB0 = cast<OverflowingBinaryOperator>(Val: LHS); |
1584 | Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OB0->hasNoSignedWrap()); |
1585 | |
1586 | return Sub; |
1587 | } |
1588 | |
1589 | // A + -B --> A - B |
1590 | if (match(V: RHS, P: m_Neg(V: m_Value(V&: B)))) { |
1591 | auto *Sub = BinaryOperator::CreateSub(V1: LHS, V2: B); |
1592 | auto *OBO = cast<OverflowingBinaryOperator>(Val: RHS); |
1593 | Sub->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO->hasNoSignedWrap()); |
1594 | return Sub; |
1595 | } |
1596 | |
1597 | if (Value *V = checkForNegativeOperand(I, Builder)) |
1598 | return replaceInstUsesWith(I, V); |
1599 | |
1600 | // (A + 1) + ~B --> A - B |
1601 | // ~B + (A + 1) --> A - B |
1602 | // (~B + A) + 1 --> A - B |
1603 | // (A + ~B) + 1 --> A - B |
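     |   // These all follow from ~B == -B - 1, so the explicit +1 cancels the -1.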
1604 | if (match(V: &I, P: m_c_BinOp(L: m_Add(L: m_Value(V&: A), R: m_One()), R: m_Not(V: m_Value(V&: B)))) || |
1605 | match(V: &I, P: m_BinOp(L: m_c_Add(L: m_Not(V: m_Value(V&: B)), R: m_Value(V&: A)), R: m_One()))) |
1606 | return BinaryOperator::CreateSub(V1: A, V2: B); |
1607 | |
1608 | // (A + RHS) + RHS --> A + (RHS << 1) |
1609 | if (match(V: LHS, P: m_OneUse(SubPattern: m_c_Add(L: m_Value(V&: A), R: m_Specific(V: RHS))))) |
1610 | return BinaryOperator::CreateAdd(V1: A, V2: Builder.CreateShl(LHS: RHS, RHS: 1, Name: "reass.add" )); |
1611 | |
1612 | // LHS + (A + LHS) --> A + (LHS << 1) |
1613 | if (match(V: RHS, P: m_OneUse(SubPattern: m_c_Add(L: m_Value(V&: A), R: m_Specific(V: LHS))))) |
1614 | return BinaryOperator::CreateAdd(V1: A, V2: Builder.CreateShl(LHS, RHS: 1, Name: "reass.add" )); |
1615 | |
1616 | { |
1617 | // (A + C1) + (C2 - B) --> (A - B) + (C1 + C2) |
1618 | Constant *C1, *C2; |
1619 | if (match(V: &I, P: m_c_Add(L: m_Add(L: m_Value(V&: A), R: m_ImmConstant(C&: C1)), |
1620 | R: m_Sub(L: m_ImmConstant(C&: C2), R: m_Value(V&: B)))) && |
1621 | (LHS->hasOneUse() || RHS->hasOneUse())) { |
1622 | Value *Sub = Builder.CreateSub(LHS: A, RHS: B); |
1623 | return BinaryOperator::CreateAdd(V1: Sub, V2: ConstantExpr::getAdd(C1, C2)); |
1624 | } |
1625 | |
1626 | // Canonicalize a constant sub operand as an add operand for better folding: |
1627 | // (C1 - A) + B --> (B - A) + C1 |
1628 | if (match(V: &I, P: m_c_Add(L: m_OneUse(SubPattern: m_Sub(L: m_ImmConstant(C&: C1), R: m_Value(V&: A))), |
1629 | R: m_Value(V&: B)))) { |
1630 | Value *Sub = Builder.CreateSub(LHS: B, RHS: A, Name: "reass.sub" ); |
1631 | return BinaryOperator::CreateAdd(V1: Sub, V2: C1); |
1632 | } |
1633 | } |
1634 | |
1635 |   // X % C0 + ((X / C0) % C1) * C0 => X % (C0 * C1)
1636 | if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V); |
1637 | |
1638 | const APInt *C1; |
1639 | // (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit |
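     |   // e.g. in i8 with C1 == 0x40: if bits 7 and 6 of A agree, then either
     |   // A & 0x40 is 0, or adding 0x40 carries out of the sign bits; both
     |   // cases leave exactly A & 0x3F.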
1640 | if (match(V: &I, P: m_c_Add(L: m_And(L: m_Value(V&: A), R: m_APInt(Res&: C1)), R: m_Deferred(V: A))) && |
1641 | C1->isPowerOf2() && (ComputeNumSignBits(Op: A) > C1->countl_zero())) { |
1642 | Constant *NewMask = ConstantInt::get(Ty: RHS->getType(), V: *C1 - 1); |
1643 | return BinaryOperator::CreateAnd(V1: A, V2: NewMask); |
1644 | } |
1645 | |
1646 | // ZExt (B - A) + ZExt(A) --> ZExt(B) |
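     |   // The inner sub is NUW, so A u<= B and the sum cannot wrap in the wider
     |   // type; the result is exactly ZExt(B).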
1647 | if ((match(V: RHS, P: m_ZExt(Op: m_Value(V&: A))) && |
1648 | match(V: LHS, P: m_ZExt(Op: m_NUWSub(L: m_Value(V&: B), R: m_Specific(V: A))))) || |
1649 | (match(V: LHS, P: m_ZExt(Op: m_Value(V&: A))) && |
1650 | match(V: RHS, P: m_ZExt(Op: m_NUWSub(L: m_Value(V&: B), R: m_Specific(V: A)))))) |
1651 | return new ZExtInst(B, LHS->getType()); |
1652 | |
1653 | // zext(A) + sext(A) --> 0 if A is i1 |
1654 | if (match(V: &I, P: m_c_BinOp(L: m_ZExt(Op: m_Value(V&: A)), R: m_SExt(Op: m_Deferred(V: A)))) && |
1655 | A->getType()->isIntOrIntVectorTy(BitWidth: 1)) |
1656 | return replaceInstUsesWith(I, V: Constant::getNullValue(Ty: I.getType())); |
1657 | |
1658 | // sext(A < B) + zext(A > B) => ucmp/scmp(A, B) |
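     |   // sext(A < B) is -1 when A < B and zext(A > B) is 1 when A > B, so the
     |   // sum is the -1/0/1 three-way comparison returned by ucmp/scmp.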
1659 | CmpPredicate LTPred, GTPred; |
1660 | if (match(V: &I, |
1661 | P: m_c_Add(L: m_SExt(Op: m_c_ICmp(Pred&: LTPred, L: m_Value(V&: A), R: m_Value(V&: B))), |
1662 | R: m_ZExt(Op: m_c_ICmp(Pred&: GTPred, L: m_Deferred(V: A), R: m_Deferred(V: B))))) && |
1663 | A->getType()->isIntOrIntVectorTy()) { |
1664 | if (ICmpInst::isGT(P: LTPred)) { |
1665 | std::swap(a&: LTPred, b&: GTPred); |
1666 | std::swap(a&: A, b&: B); |
1667 | } |
1668 | |
1669 | if (ICmpInst::isLT(P: LTPred) && ICmpInst::isGT(P: GTPred) && |
1670 | ICmpInst::isSigned(predicate: LTPred) == ICmpInst::isSigned(predicate: GTPred)) |
1671 | return replaceInstUsesWith( |
1672 | I, V: Builder.CreateIntrinsic( |
1673 | RetTy: Ty, |
1674 | ID: ICmpInst::isSigned(predicate: LTPred) ? Intrinsic::scmp : Intrinsic::ucmp, |
1675 | Args: {A, B})); |
1676 | } |
1677 | |
1678 | // A+B --> A|B iff A and B have no bits set in common. |
1679 | WithCache<const Value *> LHSCache(LHS), RHSCache(RHS); |
1680 | if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ: SQ.getWithInstruction(I: &I))) |
1681 | return BinaryOperator::CreateDisjointOr(V1: LHS, V2: RHS); |
1682 | |
1683 | if (Instruction *Ext = narrowMathIfNoOverflow(I)) |
1684 | return Ext; |
1685 | |
1686 | // (add (xor A, B) (and A, B)) --> (or A, B) |
1687 | // (add (and A, B) (xor A, B)) --> (or A, B) |
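     |   // (A ^ B) and (A & B) have no set bits in common, so their sum equals
     |   // their 'or', which is A | B.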
1688 | if (match(V: &I, P: m_c_BinOp(L: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B)), |
1689 | R: m_c_And(L: m_Deferred(V: A), R: m_Deferred(V: B))))) |
1690 | return BinaryOperator::CreateOr(V1: A, V2: B); |
1691 | |
1692 | // (add (or A, B) (and A, B)) --> (add A, B) |
1693 | // (add (and A, B) (or A, B)) --> (add A, B) |
1694 | if (match(V: &I, P: m_c_BinOp(L: m_Or(L: m_Value(V&: A), R: m_Value(V&: B)), |
1695 | R: m_c_And(L: m_Deferred(V: A), R: m_Deferred(V: B))))) { |
1696 | // Replacing operands in-place to preserve nuw/nsw flags. |
1697 | replaceOperand(I, OpNum: 0, V: A); |
1698 | replaceOperand(I, OpNum: 1, V: B); |
1699 | return &I; |
1700 | } |
1701 | |
1702 | // (add A (or A, -A)) --> (and (add A, -1) A) |
1703 | // (add A (or -A, A)) --> (and (add A, -1) A) |
1704 | // (add (or A, -A) A) --> (and (add A, -1) A) |
1705 | // (add (or -A, A) A) --> (and (add A, -1) A) |
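     |   // Both sides clear the lowest set bit of A; e.g. in i4 with A = 0b0110:
     |   // A + (A | -A) == 0b0110 + 0b1110 == 0b0100 == (A + -1) & A.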
1706 | if (match(V: &I, P: m_c_BinOp(L: m_Value(V&: A), R: m_OneUse(SubPattern: m_c_Or(L: m_Neg(V: m_Deferred(V: A)), |
1707 | R: m_Deferred(V: A)))))) { |
1708 | Value *Add = |
1709 | Builder.CreateAdd(LHS: A, RHS: Constant::getAllOnesValue(Ty: A->getType()), Name: "" , |
1710 | HasNUW: I.hasNoUnsignedWrap(), HasNSW: I.hasNoSignedWrap()); |
1711 | return BinaryOperator::CreateAnd(V1: Add, V2: A); |
1712 | } |
1713 | |
1714 | // Canonicalize ((A & -A) - 1) --> ((A - 1) & ~A) |
1715 | // Forms all commutable operations, and simplifies ctpop -> cttz folds. |
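     |   // Both sides produce the mask of bits strictly below the lowest set bit
     |   // of A; e.g. A = 0b01100: (A & -A) - 1 == 0b00011 == (A - 1) & ~A.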
1716 | if (match(V: &I, |
1717 | P: m_Add(L: m_OneUse(SubPattern: m_c_And(L: m_Value(V&: A), R: m_OneUse(SubPattern: m_Neg(V: m_Deferred(V: A))))), |
1718 | R: m_AllOnes()))) { |
1719 | Constant *AllOnes = ConstantInt::getAllOnesValue(Ty: RHS->getType()); |
1720 | Value *Dec = Builder.CreateAdd(LHS: A, RHS: AllOnes); |
1721 | Value *Not = Builder.CreateXor(LHS: A, RHS: AllOnes); |
1722 | return BinaryOperator::CreateAnd(V1: Dec, V2: Not); |
1723 | } |
1724 | |
1725 | // Disguised reassociation/factorization: |
1726 | // ~(A * C1) + A |
1727 | // ((A * -C1) - 1) + A |
1728 | // ((A * -C1) + A) - 1 |
1729 | // (A * (1 - C1)) - 1 |
1730 | if (match(V: &I, |
1731 | P: m_c_Add(L: m_OneUse(SubPattern: m_Not(V: m_OneUse(SubPattern: m_Mul(L: m_Value(V&: A), R: m_APInt(Res&: C1))))), |
1732 | R: m_Deferred(V: A)))) { |
1733 | Type *Ty = I.getType(); |
1734 | Constant *NewMulC = ConstantInt::get(Ty, V: 1 - *C1); |
1735 | Value *NewMul = Builder.CreateMul(LHS: A, RHS: NewMulC); |
1736 | return BinaryOperator::CreateAdd(V1: NewMul, V2: ConstantInt::getAllOnesValue(Ty)); |
1737 | } |
1738 | |
1739 | // (A * -2**C) + B --> B - (A << C) |
1740 | const APInt *NegPow2C; |
1741 | if (match(V: &I, P: m_c_Add(L: m_OneUse(SubPattern: m_Mul(L: m_Value(V&: A), R: m_NegatedPower2(V&: NegPow2C))), |
1742 | R: m_Value(V&: B)))) { |
1743 | Constant *ShiftAmtC = ConstantInt::get(Ty, V: NegPow2C->countr_zero()); |
1744 | Value *Shl = Builder.CreateShl(LHS: A, RHS: ShiftAmtC); |
1745 | return BinaryOperator::CreateSub(V1: B, V2: Shl); |
1746 | } |
1747 | |
1748 | // Canonicalize signum variant that ends in add: |
1749 | // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0)) |
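     |   // Both forms compute signum(A): the ashr is -1 for negative A and 0
     |   // otherwise, and the zext term makes the result 1 for positive A.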
1750 | uint64_t BitWidth = Ty->getScalarSizeInBits(); |
1751 | if (match(V: LHS, P: m_AShr(L: m_Value(V&: A), R: m_SpecificIntAllowPoison(V: BitWidth - 1))) && |
1752 | match(V: RHS, P: m_OneUse(SubPattern: m_ZExt(Op: m_OneUse(SubPattern: m_SpecificICmp( |
1753 | MatchPred: CmpInst::ICMP_SGT, L: m_Specific(V: A), R: m_ZeroInt())))))) { |
1754 | Value *NotZero = Builder.CreateIsNotNull(Arg: A, Name: "isnotnull" ); |
1755 | Value *Zext = Builder.CreateZExt(V: NotZero, DestTy: Ty, Name: "isnotnull.zext" ); |
1756 | return BinaryOperator::CreateOr(V1: LHS, V2: Zext); |
1757 | } |
1758 | |
1759 | { |
1760 | Value *Cond, *Ext; |
1761 | Constant *C; |
1762 | // (add X, (sext/zext (icmp eq X, C))) |
1763 | // -> (select (icmp eq X, C), (add C, (sext/zext 1)), X) |
1764 | auto CondMatcher = m_CombineAnd( |
1765 | L: m_Value(V&: Cond), |
1766 | R: m_SpecificICmp(MatchPred: ICmpInst::ICMP_EQ, L: m_Deferred(V: A), R: m_ImmConstant(C))); |
1767 | |
1768 | if (match(V: &I, |
1769 | P: m_c_Add(L: m_Value(V&: A), |
1770 | R: m_CombineAnd(L: m_Value(V&: Ext), R: m_ZExtOrSExt(Op: CondMatcher)))) && |
1771 | Ext->hasOneUse()) { |
1772 | Value *Add = isa<ZExtInst>(Val: Ext) ? InstCombiner::AddOne(C) |
1773 | : InstCombiner::SubOne(C); |
1774 | return replaceInstUsesWith(I, V: Builder.CreateSelect(C: Cond, True: Add, False: A)); |
1775 | } |
1776 | } |
1777 | |
1778 | // (add (add A, 1), (sext (icmp ne A, 0))) => call umax(A, 1) |
1779 | if (match(V: LHS, P: m_Add(L: m_Value(V&: A), R: m_One())) && |
1780 | match(V: RHS, P: m_OneUse(SubPattern: m_SExt(Op: m_OneUse(SubPattern: m_SpecificICmp( |
1781 | MatchPred: ICmpInst::ICMP_NE, L: m_Specific(V: A), R: m_ZeroInt())))))) { |
1782 | Value *OneConst = ConstantInt::get(Ty: A->getType(), V: 1); |
1783 | Value *UMax = Builder.CreateBinaryIntrinsic(ID: Intrinsic::umax, LHS: A, RHS: OneConst); |
1784 | return replaceInstUsesWith(I, V: UMax); |
1785 | } |
1786 | |
1787 | if (Instruction *Ashr = foldAddToAshr(Add&: I)) |
1788 | return Ashr; |
1789 | |
1790 | // Ceiling division by power-of-2: |
1791 | // (X >> log2(N)) + zext(X & (N-1) != 0) --> (X + (N-1)) >> log2(N) |
1792 | // This is valid when adding (N-1) to X doesn't overflow. |
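     |   // e.g. N = 8, X = 13: (13 >> 3) + zext((13 & 7) != 0) == 1 + 1 == 2, and
     |   // (13 + 7) >> 3 == 2 == ceil(13 / 8).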
1793 | { |
1794 | Value *X; |
1795 | const APInt *ShiftAmt, *Mask; |
1796 | CmpPredicate Pred; |
1797 | |
1798 | // Match: (X >> C) + zext((X & Mask) != 0) |
1799 | // or: zext((X & Mask) != 0) + (X >> C) |
1800 | if (match(V: &I, P: m_c_Add(L: m_OneUse(SubPattern: m_LShr(L: m_Value(V&: X), R: m_APInt(Res&: ShiftAmt))), |
1801 | R: m_ZExt(Op: m_SpecificICmp( |
1802 | MatchPred: ICmpInst::ICMP_NE, |
1803 | L: m_And(L: m_Deferred(V: X), R: m_LowBitMask(V&: Mask)), |
1804 | R: m_ZeroInt())))) && |
1805 | Mask->popcount() == *ShiftAmt) { |
1806 | |
1807 | // Check if X + Mask doesn't overflow |
1808 | Constant *MaskC = ConstantInt::get(Ty: X->getType(), V: *Mask); |
1809 | if (willNotOverflowUnsignedAdd(LHS: X, RHS: MaskC, CxtI: I)) { |
1810 | // (X + Mask) >> ShiftAmt |
1811 | Value *Add = Builder.CreateNUWAdd(LHS: X, RHS: MaskC); |
1812 | return BinaryOperator::CreateLShr( |
1813 | V1: Add, V2: ConstantInt::get(Ty: X->getType(), V: *ShiftAmt)); |
1814 | } |
1815 | } |
1816 | } |
1817 | |
1818 | // (~X) + (~Y) --> -2 - (X + Y) |
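     |   // Follows from ~V == -V - 1: (~X) + (~Y) == -X - 1 - Y - 1 == -2 - (X + Y).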
1819 | { |
1820 |     // To actually save instructions, we must be able to consume both LHS and
1821 |     // RHS, i.e. each of them must contain a `not` that inverting removes.
1822 | bool ConsumesLHS, ConsumesRHS; |
1823 | if (isFreeToInvert(V: LHS, WillInvertAllUses: LHS->hasOneUse(), DoesConsume&: ConsumesLHS) && ConsumesLHS && |
1824 | isFreeToInvert(V: RHS, WillInvertAllUses: RHS->hasOneUse(), DoesConsume&: ConsumesRHS) && ConsumesRHS) { |
1825 | Value *NotLHS = getFreelyInverted(V: LHS, WillInvertAllUses: LHS->hasOneUse(), Builder: &Builder); |
1826 | Value *NotRHS = getFreelyInverted(V: RHS, WillInvertAllUses: RHS->hasOneUse(), Builder: &Builder); |
1827 | assert(NotLHS != nullptr && NotRHS != nullptr && |
1828 | "isFreeToInvert desynced with getFreelyInverted" ); |
1829 | Value *LHSPlusRHS = Builder.CreateAdd(LHS: NotLHS, RHS: NotRHS); |
1830 | return BinaryOperator::CreateSub( |
1831 | V1: ConstantInt::getSigned(Ty: RHS->getType(), V: -2), V2: LHSPlusRHS); |
1832 | } |
1833 | } |
1834 | |
1835 | if (Instruction *R = tryFoldInstWithCtpopWithNot(I: &I)) |
1836 | return R; |
1837 | |
1838 | // TODO(jingyue): Consider willNotOverflowSignedAdd and |
1839 | // willNotOverflowUnsignedAdd to reduce the number of invocations of |
1840 | // computeKnownBits. |
1841 | bool Changed = false; |
1842 | if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS: LHSCache, RHS: RHSCache, CxtI: I)) { |
1843 | Changed = true; |
1844 | I.setHasNoSignedWrap(true); |
1845 | } |
1846 | if (!I.hasNoUnsignedWrap() && |
1847 | willNotOverflowUnsignedAdd(LHS: LHSCache, RHS: RHSCache, CxtI: I)) { |
1848 | Changed = true; |
1849 | I.setHasNoUnsignedWrap(true); |
1850 | } |
1851 | |
1852 | if (Instruction *V = canonicalizeLowbitMask(I, Builder)) |
1853 | return V; |
1854 | |
1855 | if (Instruction *V = |
1856 | canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I)) |
1857 | return V; |
1858 | |
1859 | if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I)) |
1860 | return SatAdd; |
1861 | |
1862 | // usub.sat(A, B) + B => umax(A, B) |
1863 | if (match(V: &I, P: m_c_BinOp( |
1864 | L: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::usub_sat>(Op0: m_Value(V&: A), Op1: m_Value(V&: B))), |
1865 | R: m_Deferred(V: B)))) { |
1866 | return replaceInstUsesWith(I, |
1867 | V: Builder.CreateIntrinsic(ID: Intrinsic::umax, Types: {I.getType()}, Args: {A, B})); |
1868 | } |
1869 | |
1870 | // ctpop(A) + ctpop(B) => ctpop(A | B) if A and B have no bits set in common. |
1871 | if (match(V: LHS, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: A)))) && |
1872 | match(V: RHS, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: B)))) && |
1873 | haveNoCommonBitsSet(LHSCache: A, RHSCache: B, SQ: SQ.getWithInstruction(I: &I))) |
1874 | return replaceInstUsesWith( |
1875 | I, V: Builder.CreateIntrinsic(ID: Intrinsic::ctpop, Types: {I.getType()}, |
1876 | Args: {Builder.CreateOr(LHS: A, RHS: B)})); |
1877 | |
1878 | // Fold the log2_ceil idiom: |
1879 | // zext(ctpop(A) >u/!= 1) + (ctlz(A, true) ^ (BW - 1)) |
1880 | // --> |
1881 | // BW - ctlz(A - 1, false) |
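     |   // e.g. in i8 with A = 5: ctpop(5) == 2 >u 1 and ctlz(5, true) ^ 7 == 2,
     |   // so the LHS is 1 + 2 == 3; the RHS is 8 - ctlz(4, false) == 8 - 5 == 3
     |   // == ceil(log2(5)).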
1882 | const APInt *XorC; |
1883 | CmpPredicate Pred; |
1884 | if (match(V: &I, |
1885 | P: m_c_Add( |
1886 | L: m_ZExt(Op: m_ICmp(Pred, L: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: A)), |
1887 | R: m_One())), |
1888 | R: m_OneUse(SubPattern: m_ZExtOrSelf(Op: m_OneUse(SubPattern: m_Xor( |
1889 | L: m_OneUse(SubPattern: m_TruncOrSelf(Op: m_OneUse( |
1890 | SubPattern: m_Intrinsic<Intrinsic::ctlz>(Op0: m_Deferred(V: A), Op1: m_One())))), |
1891 | R: m_APInt(Res&: XorC))))))) && |
1892 | (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_NE) && |
1893 | *XorC == A->getType()->getScalarSizeInBits() - 1) { |
1894 | Value *Sub = Builder.CreateAdd(LHS: A, RHS: Constant::getAllOnesValue(Ty: A->getType())); |
1895 | Value *Ctlz = Builder.CreateIntrinsic(ID: Intrinsic::ctlz, Types: {A->getType()}, |
1896 | Args: {Sub, Builder.getFalse()}); |
1897 | Value *Ret = Builder.CreateSub( |
1898 | LHS: ConstantInt::get(Ty: A->getType(), V: A->getType()->getScalarSizeInBits()), |
1899 | RHS: Ctlz, Name: "" , /*HasNUW=*/true, /*HasNSW=*/true); |
1900 | return replaceInstUsesWith(I, V: Builder.CreateZExtOrTrunc(V: Ret, DestTy: I.getType())); |
1901 | } |
1902 | |
1903 | if (Instruction *Res = foldSquareSumInt(I)) |
1904 | return Res; |
1905 | |
1906 | if (Instruction *Res = foldBinOpOfDisplacedShifts(I)) |
1907 | return Res; |
1908 | |
1909 | if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I)) |
1910 | return Res; |
1911 | |
1912 | // Re-enqueue users of the induction variable of add recurrence if we infer |
1913 | // new nuw/nsw flags. |
1914 | if (Changed) { |
1915 | PHINode *PHI; |
1916 | Value *Start, *Step; |
1917 | if (matchSimpleRecurrence(I: &I, P&: PHI, Start, Step)) |
1918 | Worklist.pushUsersToWorkList(I&: *PHI); |
1919 | } |
1920 | |
1921 | return Changed ? &I : nullptr; |
1922 | } |
1923 | |
1924 | /// Eliminate an op from a linear interpolation (lerp) pattern. |
1925 | static Instruction *factorizeLerp(BinaryOperator &I, |
1926 | InstCombiner::BuilderTy &Builder) { |
1927 | Value *X, *Y, *Z; |
1928 | if (!match(V: &I, P: m_c_FAdd(L: m_OneUse(SubPattern: m_c_FMul(L: m_Value(V&: Y), |
1929 | R: m_OneUse(SubPattern: m_FSub(L: m_FPOne(), |
1930 | R: m_Value(V&: Z))))), |
1931 | R: m_OneUse(SubPattern: m_c_FMul(L: m_Value(V&: X), R: m_Deferred(V: Z)))))) |
1932 | return nullptr; |
1933 | |
1934 | // (Y * (1.0 - Z)) + (X * Z) --> Y + Z * (X - Y) [8 commuted variants] |
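     |   // Algebraically: Y * (1.0 - Z) + X * Z == Y + Z * (X - Y); this relies on
     |   // the reassoc/nsz fast-math flags that the caller has already checked.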
1935 | Value *XY = Builder.CreateFSubFMF(L: X, R: Y, FMFSource: &I); |
1936 | Value *MulZ = Builder.CreateFMulFMF(L: Z, R: XY, FMFSource: &I); |
1937 | return BinaryOperator::CreateFAddFMF(V1: Y, V2: MulZ, FMFSource: &I); |
1938 | } |
1939 | |
1940 | /// Factor a common operand out of fadd/fsub of fmul/fdiv. |
1941 | static Instruction *factorizeFAddFSub(BinaryOperator &I, |
1942 | InstCombiner::BuilderTy &Builder) { |
1943 | assert((I.getOpcode() == Instruction::FAdd || |
1944 | I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub" ); |
1945 | assert(I.hasAllowReassoc() && I.hasNoSignedZeros() && |
1946 | "FP factorization requires FMF" ); |
1947 | |
1948 | if (Instruction *Lerp = factorizeLerp(I, Builder)) |
1949 | return Lerp; |
1950 | |
1951 | Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1); |
1952 | if (!Op0->hasOneUse() || !Op1->hasOneUse()) |
1953 | return nullptr; |
1954 | |
1955 | Value *X, *Y, *Z; |
1956 | bool IsFMul; |
1957 | if ((match(V: Op0, P: m_FMul(L: m_Value(V&: X), R: m_Value(V&: Z))) && |
1958 | match(V: Op1, P: m_c_FMul(L: m_Value(V&: Y), R: m_Specific(V: Z)))) || |
1959 | (match(V: Op0, P: m_FMul(L: m_Value(V&: Z), R: m_Value(V&: X))) && |
1960 | match(V: Op1, P: m_c_FMul(L: m_Value(V&: Y), R: m_Specific(V: Z))))) |
1961 | IsFMul = true; |
1962 | else if (match(V: Op0, P: m_FDiv(L: m_Value(V&: X), R: m_Value(V&: Z))) && |
1963 | match(V: Op1, P: m_FDiv(L: m_Value(V&: Y), R: m_Specific(V: Z)))) |
1964 | IsFMul = false; |
1965 | else |
1966 | return nullptr; |
1967 | |
1968 | // (X * Z) + (Y * Z) --> (X + Y) * Z |
1969 | // (X * Z) - (Y * Z) --> (X - Y) * Z |
1970 | // (X / Z) + (Y / Z) --> (X + Y) / Z |
1971 | // (X / Z) - (Y / Z) --> (X - Y) / Z |
1972 | bool IsFAdd = I.getOpcode() == Instruction::FAdd; |
1973 | Value *XY = IsFAdd ? Builder.CreateFAddFMF(L: X, R: Y, FMFSource: &I) |
1974 | : Builder.CreateFSubFMF(L: X, R: Y, FMFSource: &I); |
1975 | |
1976 | // Bail out if we just created a denormal constant. |
1977 | // TODO: This is copied from a previous implementation. Is it necessary? |
1978 | const APFloat *C; |
1979 | if (match(V: XY, P: m_APFloat(Res&: C)) && !C->isNormal()) |
1980 | return nullptr; |
1981 | |
1982 | return IsFMul ? BinaryOperator::CreateFMulFMF(V1: XY, V2: Z, FMFSource: &I) |
1983 | : BinaryOperator::CreateFDivFMF(V1: XY, V2: Z, FMFSource: &I); |
1984 | } |
1985 | |
1986 | Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) { |
1987 | if (Value *V = simplifyFAddInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1), |
1988 | FMF: I.getFastMathFlags(), |
1989 | Q: SQ.getWithInstruction(I: &I))) |
1990 | return replaceInstUsesWith(I, V); |
1991 | |
1992 | if (SimplifyAssociativeOrCommutative(I)) |
1993 | return &I; |
1994 | |
1995 | if (Instruction *X = foldVectorBinop(Inst&: I)) |
1996 | return X; |
1997 | |
1998 | if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I)) |
1999 | return Phi; |
2000 | |
2001 | if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I)) |
2002 | return FoldedFAdd; |
2003 | |
2004 | // (-X) + Y --> Y - X |
2005 | Value *X, *Y; |
2006 | if (match(V: &I, P: m_c_FAdd(L: m_FNeg(X: m_Value(V&: X)), R: m_Value(V&: Y)))) |
2007 | return BinaryOperator::CreateFSubFMF(V1: Y, V2: X, FMFSource: &I); |
2008 | |
2009 | // Similar to above, but look through fmul/fdiv for the negated term. |
2010 | // (-X * Y) + Z --> Z - (X * Y) [4 commuted variants] |
2011 | Value *Z; |
2012 | if (match(V: &I, P: m_c_FAdd(L: m_OneUse(SubPattern: m_c_FMul(L: m_FNeg(X: m_Value(V&: X)), R: m_Value(V&: Y))), |
2013 | R: m_Value(V&: Z)))) { |
2014 | Value *XY = Builder.CreateFMulFMF(L: X, R: Y, FMFSource: &I); |
2015 | return BinaryOperator::CreateFSubFMF(V1: Z, V2: XY, FMFSource: &I); |
2016 | } |
2017 | // (-X / Y) + Z --> Z - (X / Y) [2 commuted variants] |
2018 | // (X / -Y) + Z --> Z - (X / Y) [2 commuted variants] |
2019 | if (match(V: &I, P: m_c_FAdd(L: m_OneUse(SubPattern: m_FDiv(L: m_FNeg(X: m_Value(V&: X)), R: m_Value(V&: Y))), |
2020 | R: m_Value(V&: Z))) || |
2021 | match(V: &I, P: m_c_FAdd(L: m_OneUse(SubPattern: m_FDiv(L: m_Value(V&: X), R: m_FNeg(X: m_Value(V&: Y)))), |
2022 | R: m_Value(V&: Z)))) { |
2023 | Value *XY = Builder.CreateFDivFMF(L: X, R: Y, FMFSource: &I); |
2024 | return BinaryOperator::CreateFSubFMF(V1: Z, V2: XY, FMFSource: &I); |
2025 | } |
2026 | |
2027 | // Check for (fadd double (sitofp x), y), see if we can merge this into an |
2028 | // integer add followed by a promotion. |
2029 | if (Instruction *R = foldFBinOpOfIntCasts(I)) |
2030 | return R; |
2031 | |
2032 | Value *LHS = I.getOperand(i_nocapture: 0), *RHS = I.getOperand(i_nocapture: 1); |
2033 |   // Handle special cases for FAdd with selects feeding the operation.
2034 | if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS)) |
2035 | return replaceInstUsesWith(I, V); |
2036 | |
2037 | if (I.hasAllowReassoc() && I.hasNoSignedZeros()) { |
2038 | if (Instruction *F = factorizeFAddFSub(I, Builder)) |
2039 | return F; |
2040 | |
2041 | if (Instruction *F = foldSquareSumFP(I)) |
2042 | return F; |
2043 | |
2044 | // Try to fold fadd into start value of reduction intrinsic. |
2045 | if (match(V: &I, P: m_c_FAdd(L: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::vector_reduce_fadd>( |
2046 | Op0: m_AnyZeroFP(), Op1: m_Value(V&: X))), |
2047 | R: m_Value(V&: Y)))) { |
2048 | // fadd (rdx 0.0, X), Y --> rdx Y, X |
2049 | return replaceInstUsesWith( |
2050 | I, V: Builder.CreateIntrinsic(ID: Intrinsic::vector_reduce_fadd, |
2051 | Types: {X->getType()}, Args: {Y, X}, FMFSource: &I)); |
2052 | } |
2053 | const APFloat *StartC, *C; |
2054 | if (match(V: LHS, P: m_OneUse(SubPattern: m_Intrinsic<Intrinsic::vector_reduce_fadd>( |
2055 | Op0: m_APFloat(Res&: StartC), Op1: m_Value(V&: X)))) && |
2056 | match(V: RHS, P: m_APFloat(Res&: C))) { |
2057 | // fadd (rdx StartC, X), C --> rdx (C + StartC), X |
2058 | Constant *NewStartC = ConstantFP::get(Ty: I.getType(), V: *C + *StartC); |
2059 | return replaceInstUsesWith( |
2060 | I, V: Builder.CreateIntrinsic(ID: Intrinsic::vector_reduce_fadd, |
2061 | Types: {X->getType()}, Args: {NewStartC, X}, FMFSource: &I)); |
2062 | } |
2063 | |
2064 | // (X * MulC) + X --> X * (MulC + 1.0) |
2065 | Constant *MulC; |
2066 | if (match(V: &I, P: m_c_FAdd(L: m_FMul(L: m_Value(V&: X), R: m_ImmConstant(C&: MulC)), |
2067 | R: m_Deferred(V: X)))) { |
2068 | if (Constant *NewMulC = ConstantFoldBinaryOpOperands( |
2069 | Opcode: Instruction::FAdd, LHS: MulC, RHS: ConstantFP::get(Ty: I.getType(), V: 1.0), DL)) |
2070 | return BinaryOperator::CreateFMulFMF(V1: X, V2: NewMulC, FMFSource: &I); |
2071 | } |
2072 | |
2073 | // (-X - Y) + (X + Z) --> Z - Y |
2074 | if (match(V: &I, P: m_c_FAdd(L: m_FSub(L: m_FNeg(X: m_Value(V&: X)), R: m_Value(V&: Y)), |
2075 | R: m_c_FAdd(L: m_Deferred(V: X), R: m_Value(V&: Z))))) |
2076 | return BinaryOperator::CreateFSubFMF(V1: Z, V2: Y, FMFSource: &I); |
2077 | |
2078 | if (Value *V = FAddCombine(Builder).simplify(I: &I)) |
2079 | return replaceInstUsesWith(I, V); |
2080 | } |
2081 | |
2082 |   // minimum(X, Y) + maximum(X, Y) => X + Y.
2083 | if (match(V: &I, |
2084 | P: m_c_FAdd(L: m_Intrinsic<Intrinsic::maximum>(Op0: m_Value(V&: X), Op1: m_Value(V&: Y)), |
2085 | R: m_c_Intrinsic<Intrinsic::minimum>(Op0: m_Deferred(V: X), |
2086 | Op1: m_Deferred(V: Y))))) { |
2087 | BinaryOperator *Result = BinaryOperator::CreateFAddFMF(V1: X, V2: Y, FMFSource: &I); |
2088 |     // We cannot preserve ninf if the nnan flag is not set.
2089 |     // If X is NaN and Y is Inf, the original program computed NaN + NaN, but
2090 |     // the optimized version computes NaN + Inf, which is poison under ninf.
2091 | if (!Result->hasNoNaNs()) |
2092 | Result->setHasNoInfs(false); |
2093 | return Result; |
2094 | } |
2095 | |
2096 | return nullptr; |
2097 | } |
2098 | |
2099 | CommonPointerBase CommonPointerBase::compute(Value *LHS, Value *RHS) { |
2100 | CommonPointerBase Base; |
2101 | |
2102 | if (LHS->getType() != RHS->getType()) |
2103 | return Base; |
2104 | |
2105 | // Collect all base pointers of LHS. |
2106 | SmallPtrSet<Value *, 16> Ptrs; |
2107 | Value *Ptr = LHS; |
2108 | while (true) { |
2109 | Ptrs.insert(Ptr); |
2110 | if (auto *GEP = dyn_cast<GEPOperator>(Val: Ptr)) |
2111 | Ptr = GEP->getPointerOperand(); |
2112 | else |
2113 | break; |
2114 | } |
2115 | |
2116 | // Find common base and collect RHS GEPs. |
2117 | while (true) { |
2118 | if (Ptrs.contains(Ptr: RHS)) { |
2119 | Base.Ptr = RHS; |
2120 | break; |
2121 | } |
2122 | |
2123 | if (auto *GEP = dyn_cast<GEPOperator>(Val: RHS)) { |
2124 | Base.RHSGEPs.push_back(Elt: GEP); |
2125 | Base.RHSNW &= GEP->getNoWrapFlags(); |
2126 | RHS = GEP->getPointerOperand(); |
2127 | } else { |
2128 | // No common base. |
2129 | return Base; |
2130 | } |
2131 | } |
2132 | |
2133 | // Collect LHS GEPs. |
2134 | while (true) { |
2135 | if (LHS == Base.Ptr) |
2136 | break; |
2137 | |
2138 | auto *GEP = cast<GEPOperator>(Val: LHS); |
2139 | Base.LHSGEPs.push_back(Elt: GEP); |
2140 | Base.LHSNW &= GEP->getNoWrapFlags(); |
2141 | LHS = GEP->getPointerOperand(); |
2142 | } |
2143 | |
2144 | return Base; |
2145 | } |
2146 | |
2147 | /// Optimize the difference of two pointers into the same array into a size. Consider:
2148 | /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer |
2149 | /// operands to the ptrtoint instructions for the LHS/RHS of the subtract. |
2150 | Value *InstCombinerImpl::OptimizePointerDifference(Value *LHS, Value *RHS, |
2151 | Type *Ty, bool IsNUW) { |
2152 | CommonPointerBase Base = CommonPointerBase::compute(LHS, RHS); |
2153 | if (!Base.Ptr) |
2154 | return nullptr; |
2155 | |
2156 | // To avoid duplicating the offset arithmetic, rewrite the GEP to use the |
2157 | // computed offset. |
2158 | // TODO: We should probably do this even if there is only one GEP. |
2159 | bool RewriteGEPs = !Base.LHSGEPs.empty() && !Base.RHSGEPs.empty(); |
2160 | |
2161 | Type *IdxTy = DL.getIndexType(PtrTy: LHS->getType()); |
2162 | Value *Result = EmitGEPOffsets(GEPs: Base.LHSGEPs, NW: Base.LHSNW, IdxTy, RewriteGEPs); |
2163 | Value *Offset2 = EmitGEPOffsets(GEPs: Base.RHSGEPs, NW: Base.RHSNW, IdxTy, RewriteGEPs); |
2164 | |
2165 | // If this is a single inbounds GEP and the original sub was nuw, |
2166 | // then the final multiplication is also nuw. |
2167 | if (auto *I = dyn_cast<Instruction>(Val: Result)) |
2168 | if (IsNUW && match(V: Offset2, P: m_Zero()) && Base.LHSNW.isInBounds() && |
2169 | I->getOpcode() == Instruction::Mul) |
2170 | I->setHasNoUnsignedWrap(); |
2171 | |
2172 | // If we have a 2nd GEP of the same base pointer, subtract the offsets. |
2173 | // If both GEPs are inbounds, then the subtract does not have signed overflow. |
2174 | // If both GEPs are nuw and the original sub is nuw, the new sub is also nuw. |
2175 | if (!match(V: Offset2, P: m_Zero())) { |
2176 | Result = |
2177 | Builder.CreateSub(LHS: Result, RHS: Offset2, Name: "gepdiff" , |
2178 | HasNUW: IsNUW && Base.LHSNW.hasNoUnsignedWrap() && |
2179 | Base.RHSNW.hasNoUnsignedWrap(), |
2180 | HasNSW: Base.LHSNW.isInBounds() && Base.RHSNW.isInBounds()); |
2181 | } |
2182 | |
2183 | return Builder.CreateIntCast(V: Result, DestTy: Ty, isSigned: true); |
2184 | } |
2185 | |
2186 | static Instruction *foldSubOfMinMax(BinaryOperator &I, |
2187 | InstCombiner::BuilderTy &Builder) { |
2188 | Value *Op0 = I.getOperand(i_nocapture: 0); |
2189 | Value *Op1 = I.getOperand(i_nocapture: 1); |
2190 | Type *Ty = I.getType(); |
2191 | auto *MinMax = dyn_cast<MinMaxIntrinsic>(Val: Op1); |
2192 | if (!MinMax) |
2193 | return nullptr; |
2194 | |
2195 | // sub(add(X,Y), s/umin(X,Y)) --> s/umax(X,Y) |
2196 | // sub(add(X,Y), s/umax(X,Y)) --> s/umin(X,Y) |
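     |   // This relies on the identity X + Y == min(X, Y) + max(X, Y), which holds
     |   // for both the signed and unsigned variants.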
2197 | Value *X = MinMax->getLHS(); |
2198 | Value *Y = MinMax->getRHS(); |
2199 | if (match(V: Op0, P: m_c_Add(L: m_Specific(V: X), R: m_Specific(V: Y))) && |
2200 | (Op0->hasOneUse() || Op1->hasOneUse())) { |
2201 | Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMaxID: MinMax->getIntrinsicID()); |
2202 | Function *F = Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: InvID, Tys: Ty); |
2203 | return CallInst::Create(Func: F, Args: {X, Y}); |
2204 | } |
2205 | |
2206 | // sub(add(X,Y),umin(Y,Z)) --> add(X,usub.sat(Y,Z)) |
2207 | // sub(add(X,Z),umin(Y,Z)) --> add(X,usub.sat(Z,Y)) |
2208 | Value *Z; |
2209 | if (match(V: Op1, P: m_OneUse(SubPattern: m_UMin(L: m_Value(V&: Y), R: m_Value(V&: Z))))) { |
2210 | if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Add(L: m_Specific(V: Y), R: m_Value(V&: X))))) { |
2211 | Value *USub = Builder.CreateIntrinsic(ID: Intrinsic::usub_sat, Types: Ty, Args: {Y, Z}); |
2212 | return BinaryOperator::CreateAdd(V1: X, V2: USub); |
2213 | } |
2214 | if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Add(L: m_Specific(V: Z), R: m_Value(V&: X))))) { |
2215 | Value *USub = Builder.CreateIntrinsic(ID: Intrinsic::usub_sat, Types: Ty, Args: {Z, Y}); |
2216 | return BinaryOperator::CreateAdd(V1: X, V2: USub); |
2217 | } |
2218 | } |
2219 | |
2220 | // sub Op0, smin((sub nsw Op0, Z), 0) --> smax Op0, Z |
2221 | // sub Op0, smax((sub nsw Op0, Z), 0) --> smin Op0, Z |
2222 | if (MinMax->isSigned() && match(V: Y, P: m_ZeroInt()) && |
2223 | match(V: X, P: m_NSWSub(L: m_Specific(V: Op0), R: m_Value(V&: Z)))) { |
2224 | Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMaxID: MinMax->getIntrinsicID()); |
2225 | Function *F = Intrinsic::getOrInsertDeclaration(M: I.getModule(), id: InvID, Tys: Ty); |
2226 | return CallInst::Create(Func: F, Args: {Op0, Z}); |
2227 | } |
2228 | |
2229 | return nullptr; |
2230 | } |
2231 | |
2232 | Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) { |
2233 | if (Value *V = simplifySubInst(LHS: I.getOperand(i_nocapture: 0), RHS: I.getOperand(i_nocapture: 1), |
2234 | IsNSW: I.hasNoSignedWrap(), IsNUW: I.hasNoUnsignedWrap(), |
2235 | Q: SQ.getWithInstruction(I: &I))) |
2236 | return replaceInstUsesWith(I, V); |
2237 | |
2238 | if (Instruction *X = foldVectorBinop(Inst&: I)) |
2239 | return X; |
2240 | |
2241 | if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I)) |
2242 | return Phi; |
2243 | |
2244 | Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1); |
2245 | |
2246 | // If this is a 'B = x-(-A)', change to B = x+A. |
2247 | // We deal with this without involving Negator to preserve NSW flag. |
2248 | if (Value *V = dyn_castNegVal(V: Op1)) { |
2249 | BinaryOperator *Res = BinaryOperator::CreateAdd(V1: Op0, V2: V); |
2250 | |
2251 | if (const auto *BO = dyn_cast<BinaryOperator>(Val: Op1)) { |
2252 | assert(BO->getOpcode() == Instruction::Sub && |
2253 | "Expected a subtraction operator!" ); |
2254 | if (BO->hasNoSignedWrap() && I.hasNoSignedWrap()) |
2255 | Res->setHasNoSignedWrap(true); |
2256 | } else { |
2257 | if (cast<Constant>(Val: Op1)->isNotMinSignedValue() && I.hasNoSignedWrap()) |
2258 | Res->setHasNoSignedWrap(true); |
2259 | } |
2260 | |
2261 | return Res; |
2262 | } |
2263 | |
2264 | // Try this before Negator to preserve NSW flag. |
2265 | if (Instruction *R = factorizeMathWithShlOps(I, Builder)) |
2266 | return R; |
2267 | |
2268 | Constant *C; |
2269 | if (match(V: Op0, P: m_ImmConstant(C))) { |
2270 | Value *X; |
2271 | Constant *C2; |
2272 | |
2273 | // C-(X+C2) --> (C-C2)-X |
2274 | if (match(V: Op1, P: m_Add(L: m_Value(V&: X), R: m_ImmConstant(C&: C2)))) { |
2275 |       // If both C-(X+C2) and (X+C2) have NSW/NUW, then (C-C2)-X can keep
2276 |       // NSW/NUW; for NSW we additionally need C-C2 not to signed-overflow.
2277 | bool WillNotSOV = willNotOverflowSignedSub(LHS: C, RHS: C2, CxtI: I); |
2278 | BinaryOperator *Res = |
2279 | BinaryOperator::CreateSub(V1: ConstantExpr::getSub(C1: C, C2), V2: X); |
2280 | auto *OBO1 = cast<OverflowingBinaryOperator>(Val: Op1); |
2281 | Res->setHasNoSignedWrap(I.hasNoSignedWrap() && OBO1->hasNoSignedWrap() && |
2282 | WillNotSOV); |
2283 | Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() && |
2284 | OBO1->hasNoUnsignedWrap()); |
2285 | return Res; |
2286 | } |
2287 | } |
2288 | |
2289 | auto TryToNarrowDeduceFlags = [this, &I, &Op0, &Op1]() -> Instruction * { |
2290 | if (Instruction *Ext = narrowMathIfNoOverflow(I)) |
2291 | return Ext; |
2292 | |
2293 | bool Changed = false; |
2294 | if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(LHS: Op0, RHS: Op1, CxtI: I)) { |
2295 | Changed = true; |
2296 | I.setHasNoSignedWrap(true); |
2297 | } |
2298 | if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(LHS: Op0, RHS: Op1, CxtI: I)) { |
2299 | Changed = true; |
2300 | I.setHasNoUnsignedWrap(true); |
2301 | } |
2302 | |
2303 | return Changed ? &I : nullptr; |
2304 | }; |
2305 | |
2306 | // First, let's try to interpret `sub a, b` as `add a, (sub 0, b)`, |
2307 | // and let's try to sink `(sub 0, b)` into `b` itself. But only if this isn't |
2308 | // a pure negation used by a select that looks like abs/nabs. |
2309 | bool IsNegation = match(V: Op0, P: m_ZeroInt()); |
2310 | if (!IsNegation || none_of(Range: I.users(), P: [&I, Op1](const User *U) { |
2311 | const Instruction *UI = dyn_cast<Instruction>(Val: U); |
2312 | if (!UI) |
2313 | return false; |
2314 | return match(V: UI, P: m_c_Select(L: m_Specific(V: Op1), R: m_Specific(V: &I))); |
2315 | })) { |
2316 | if (Value *NegOp1 = Negator::Negate(LHSIsZero: IsNegation, /* IsNSW */ IsNegation && |
2317 | I.hasNoSignedWrap(), |
2318 | Root: Op1, IC&: *this)) |
2319 | return BinaryOperator::CreateAdd(V1: NegOp1, V2: Op0); |
2320 | } |
2321 | if (IsNegation) |
2322 | return TryToNarrowDeduceFlags(); // Should have been handled in Negator! |
2323 | |
2324 | // (A*B)-(A*C) -> A*(B-C) etc |
2325 | if (Value *V = foldUsingDistributiveLaws(I)) |
2326 | return replaceInstUsesWith(I, V); |
2327 | |
2328 | if (I.getType()->isIntOrIntVectorTy(BitWidth: 1)) |
2329 | return BinaryOperator::CreateXor(V1: Op0, V2: Op1); |
2330 | |
2331 | // Replace (-1 - A) with (~A). |
2332 | if (match(V: Op0, P: m_AllOnes())) |
2333 | return BinaryOperator::CreateNot(Op: Op1); |
2334 | |
2335 | // (X + -1) - Y --> ~Y + X |
2336 | Value *X, *Y; |
2337 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Add(L: m_Value(V&: X), R: m_AllOnes())))) |
2338 | return BinaryOperator::CreateAdd(V1: Builder.CreateNot(V: Op1), V2: X); |
2339 | |
2340 | // if (C1 & C2) == C2 then (X & C1) - (X & C2) -> X & (C1 ^ C2) |
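     |   // When C2's bits are a subset of C1's, (X & C2) is a bitwise subset of
     |   // (X & C1), so the subtraction never borrows and simply drops those bits.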
2341 | Constant *C1, *C2; |
2342 | if (match(V: Op0, P: m_And(L: m_Value(V&: X), R: m_ImmConstant(C&: C1))) && |
2343 | match(V: Op1, P: m_And(L: m_Specific(V: X), R: m_ImmConstant(C&: C2)))) { |
2344 | Value *AndC = ConstantFoldBinaryInstruction(Opcode: Instruction::And, V1: C1, V2: C2); |
2345 | if (C2->isElementWiseEqual(Y: AndC)) |
2346 | return BinaryOperator::CreateAnd( |
2347 | V1: X, V2: ConstantFoldBinaryInstruction(Opcode: Instruction::Xor, V1: C1, V2: C2)); |
2348 | } |
2349 | |
2350 | // Reassociate sub/add sequences to create more add instructions and |
2351 | // reduce dependency chains: |
2352 | // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1) |
2353 | Value *Z; |
2354 | if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Add(L: m_OneUse(SubPattern: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y))), |
2355 | R: m_Value(V&: Z))))) { |
2356 | Value *XZ = Builder.CreateAdd(LHS: X, RHS: Z); |
2357 | Value *YW = Builder.CreateAdd(LHS: Y, RHS: Op1); |
2358 | return BinaryOperator::CreateSub(V1: XZ, V2: YW); |
2359 | } |
2360 | |
2361 | // ((X - Y) - Op1) --> X - (Y + Op1) |
2362 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Sub(L: m_Value(V&: X), R: m_Value(V&: Y))))) { |
2363 | OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Val: Op0); |
2364 | bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap(); |
2365 | bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap(); |
2366 | Value *Add = Builder.CreateAdd(LHS: Y, RHS: Op1, Name: "" , /*HasNUW=*/HasNUW, |
2367 | /*HasNSW=*/HasNSW); |
2368 | BinaryOperator *Sub = BinaryOperator::CreateSub(V1: X, V2: Add); |
2369 | Sub->setHasNoUnsignedWrap(HasNUW); |
2370 | Sub->setHasNoSignedWrap(HasNSW); |
2371 | return Sub; |
2372 | } |
2373 | |
2374 | { |
2375 | // (X + Z) - (Y + Z) --> (X - Y) |
2376 | // This is done in other passes, but we want to be able to consume this |
2377 | // pattern in InstCombine so we can generate it without creating infinite |
2378 | // loops. |
2379 | if (match(V: Op0, P: m_Add(L: m_Value(V&: X), R: m_Value(V&: Z))) && |
2380 | match(V: Op1, P: m_c_Add(L: m_Value(V&: Y), R: m_Specific(V: Z)))) |
2381 | return BinaryOperator::CreateSub(V1: X, V2: Y); |
2382 | |
2383 | // (X + C0) - (Y + C1) --> (X - Y) + (C0 - C1) |
2384 | Constant *CX, *CY; |
2385 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Add(L: m_Value(V&: X), R: m_ImmConstant(C&: CX)))) && |
2386 | match(V: Op1, P: m_OneUse(SubPattern: m_Add(L: m_Value(V&: Y), R: m_ImmConstant(C&: CY))))) { |
2387 | Value *OpsSub = Builder.CreateSub(LHS: X, RHS: Y); |
2388 | Constant *ConstsSub = ConstantExpr::getSub(C1: CX, C2: CY); |
2389 | return BinaryOperator::CreateAdd(V1: OpsSub, V2: ConstsSub); |
2390 | } |
2391 | } |
2392 | |
2393 | { |
2394 | Value *W, *Z; |
2395 | if (match(V: Op0, P: m_AddLike(L: m_Value(V&: W), R: m_Value(V&: X))) && |
2396 | match(V: Op1, P: m_AddLike(L: m_Value(V&: Y), R: m_Value(V&: Z)))) { |
2397 | Instruction *R = nullptr; |
2398 | if (W == Y) |
2399 | R = BinaryOperator::CreateSub(V1: X, V2: Z); |
2400 | else if (W == Z) |
2401 | R = BinaryOperator::CreateSub(V1: X, V2: Y); |
2402 | else if (X == Y) |
2403 | R = BinaryOperator::CreateSub(V1: W, V2: Z); |
2404 | else if (X == Z) |
2405 | R = BinaryOperator::CreateSub(V1: W, V2: Y); |
2406 | if (R) { |
2407 | bool NSW = I.hasNoSignedWrap() && |
2408 | match(V: Op0, P: m_NSWAddLike(L: m_Value(), R: m_Value())) && |
2409 | match(V: Op1, P: m_NSWAddLike(L: m_Value(), R: m_Value())); |
2410 | |
2411 | bool NUW = I.hasNoUnsignedWrap() && |
2412 | match(V: Op1, P: m_NUWAddLike(L: m_Value(), R: m_Value())); |
2413 | R->setHasNoSignedWrap(NSW); |
2414 | R->setHasNoUnsignedWrap(NUW); |
2415 | return R; |
2416 | } |
2417 | } |
2418 | } |
2419 | |
2420 | // (~X) - (~Y) --> Y - X |
2421 | { |
2422 | // Need to ensure we can consume at least one of the `not` instructions, |
2423 | // otherwise this can inf loop. |
2424 | bool ConsumesOp0, ConsumesOp1; |
2425 | if (isFreeToInvert(V: Op0, WillInvertAllUses: Op0->hasOneUse(), DoesConsume&: ConsumesOp0) && |
2426 | isFreeToInvert(V: Op1, WillInvertAllUses: Op1->hasOneUse(), DoesConsume&: ConsumesOp1) && |
2427 | (ConsumesOp0 || ConsumesOp1)) { |
2428 | Value *NotOp0 = getFreelyInverted(V: Op0, WillInvertAllUses: Op0->hasOneUse(), Builder: &Builder); |
2429 | Value *NotOp1 = getFreelyInverted(V: Op1, WillInvertAllUses: Op1->hasOneUse(), Builder: &Builder); |
2430 | assert(NotOp0 != nullptr && NotOp1 != nullptr && |
2431 | "isFreeToInvert desynced with getFreelyInverted" ); |
2432 | return BinaryOperator::CreateSub(V1: NotOp1, V2: NotOp0); |
2433 | } |
2434 | } |
2435 | |
2436 | auto m_AddRdx = [](Value *&Vec) { |
2437 | return m_OneUse(SubPattern: m_Intrinsic<Intrinsic::vector_reduce_add>(Op0: m_Value(V&: Vec))); |
2438 | }; |
2439 | Value *V0, *V1; |
2440 | if (match(V: Op0, P: m_AddRdx(V0)) && match(V: Op1, P: m_AddRdx(V1)) && |
2441 | V0->getType() == V1->getType()) { |
2442 | // Difference of sums is sum of differences: |
2443 | // add_rdx(V0) - add_rdx(V1) --> add_rdx(V0 - V1) |
2444 | Value *Sub = Builder.CreateSub(LHS: V0, RHS: V1); |
2445 | Value *Rdx = Builder.CreateIntrinsic(ID: Intrinsic::vector_reduce_add, |
2446 | Types: {Sub->getType()}, Args: {Sub}); |
2447 | return replaceInstUsesWith(I, V: Rdx); |
2448 | } |
2449 | |
2450 | if (Constant *C = dyn_cast<Constant>(Val: Op0)) { |
2451 | Value *X; |
2452 | if (match(V: Op1, P: m_ZExt(Op: m_Value(V&: X))) && X->getType()->isIntOrIntVectorTy(BitWidth: 1)) |
2453 | // C - (zext bool) --> bool ? C - 1 : C |
2454 | return SelectInst::Create(C: X, S1: InstCombiner::SubOne(C), S2: C); |
2455 | if (match(V: Op1, P: m_SExt(Op: m_Value(V&: X))) && X->getType()->isIntOrIntVectorTy(BitWidth: 1)) |
2456 | // C - (sext bool) --> bool ? C + 1 : C |
2457 | return SelectInst::Create(C: X, S1: InstCombiner::AddOne(C), S2: C); |
2458 | |
2459 | // C - ~X == X + (1+C) |
2460 | if (match(V: Op1, P: m_Not(V: m_Value(V&: X)))) |
2461 | return BinaryOperator::CreateAdd(V1: X, V2: InstCombiner::AddOne(C)); |
2462 | |
2463 | // Try to fold constant sub into select arguments. |
2464 | if (SelectInst *SI = dyn_cast<SelectInst>(Val: Op1)) |
2465 | if (Instruction *R = FoldOpIntoSelect(Op&: I, SI)) |
2466 | return R; |
2467 | |
2468 | // Try to fold constant sub into PHI values. |
2469 | if (PHINode *PN = dyn_cast<PHINode>(Val: Op1)) |
2470 | if (Instruction *R = foldOpIntoPhi(I, PN)) |
2471 | return R; |
2472 | |
2473 | Constant *C2; |
2474 | |
2475 | // C-(C2-X) --> X+(C-C2) |
2476 | if (match(V: Op1, P: m_Sub(L: m_ImmConstant(C&: C2), R: m_Value(V&: X)))) |
2477 | return BinaryOperator::CreateAdd(V1: X, V2: ConstantExpr::getSub(C1: C, C2)); |
2478 | } |
2479 | |
2480 | const APInt *Op0C; |
2481 | if (match(V: Op0, P: m_APInt(Res&: Op0C))) { |
2482 | if (Op0C->isMask()) { |
2483 | // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known |
2484 | // zero. We don't use information from dominating conditions so this |
2485 | // transform is easier to reverse if necessary. |
2486 | KnownBits RHSKnown = llvm::computeKnownBits( |
2487 | V: Op1, Q: SQ.getWithInstruction(I: &I).getWithoutDomCondCache()); |
2488 | if ((*Op0C | RHSKnown.Zero).isAllOnes()) |
2489 | return BinaryOperator::CreateXor(V1: Op1, V2: Op0); |
2490 | } |
2491 | |
2492 | // C - ((C3 -nuw X) & C2) --> (C - (C2 & C3)) + (X & C2) when: |
2493 | // (C3 - ((C2 & C3) - 1)) is pow2 |
2494 | // ((C2 + C3) & ((C2 & C3) - 1)) == ((C2 & C3) - 1) |
2495 | // C2 is negative pow2 || sub nuw |
2496 | const APInt *C2, *C3; |
2497 | BinaryOperator *InnerSub; |
2498 | if (match(V: Op1, P: m_OneUse(SubPattern: m_And(L: m_BinOp(I&: InnerSub), R: m_APInt(Res&: C2)))) && |
2499 | match(V: InnerSub, P: m_Sub(L: m_APInt(Res&: C3), R: m_Value(V&: X))) && |
2500 | (InnerSub->hasNoUnsignedWrap() || C2->isNegatedPowerOf2())) { |
2501 | APInt C2AndC3 = *C2 & *C3; |
2502 | APInt C2AndC3Minus1 = C2AndC3 - 1; |
2503 | APInt C2AddC3 = *C2 + *C3; |
2504 | if ((*C3 - C2AndC3Minus1).isPowerOf2() && |
2505 | C2AndC3Minus1.isSubsetOf(RHS: C2AddC3)) { |
2506 | Value *And = Builder.CreateAnd(LHS: X, RHS: ConstantInt::get(Ty: I.getType(), V: *C2)); |
2507 | return BinaryOperator::CreateAdd( |
2508 | V1: And, V2: ConstantInt::get(Ty: I.getType(), V: *Op0C - C2AndC3)); |
2509 | } |
2510 | } |
2511 | } |
2512 | |
2513 | { |
2514 | Value *Y; |
2515 | // X-(X+Y) == -Y X-(Y+X) == -Y |
2516 | if (match(V: Op1, P: m_c_Add(L: m_Specific(V: Op0), R: m_Value(V&: Y)))) |
2517 | return BinaryOperator::CreateNeg(Op: Y); |
2518 | |
2519 | // (X-Y)-X == -Y |
2520 | if (match(V: Op0, P: m_Sub(L: m_Specific(V: Op1), R: m_Value(V&: Y)))) |
2521 | return BinaryOperator::CreateNeg(Op: Y); |
2522 | } |
2523 | |
2524 | // (sub (or A, B) (and A, B)) --> (xor A, B) |
2525 | { |
2526 | Value *A, *B; |
2527 | if (match(V: Op1, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2528 | match(V: Op0, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2529 | return BinaryOperator::CreateXor(V1: A, V2: B); |
2530 | } |
2531 | |
2532 | // (sub (add A, B) (or A, B)) --> (and A, B) |
2533 | { |
2534 | Value *A, *B; |
2535 | if (match(V: Op0, P: m_Add(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2536 | match(V: Op1, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2537 | return BinaryOperator::CreateAnd(V1: A, V2: B); |
2538 | } |
2539 | |
2540 | // (sub (add A, B) (and A, B)) --> (or A, B) |
2541 | { |
2542 | Value *A, *B; |
2543 | if (match(V: Op0, P: m_Add(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2544 | match(V: Op1, P: m_c_And(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2545 | return BinaryOperator::CreateOr(V1: A, V2: B); |
2546 | } |
2547 | |
2548 | // (sub (and A, B) (or A, B)) --> neg (xor A, B) |
2549 | { |
2550 | Value *A, *B; |
2551 | if (match(V: Op0, P: m_And(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2552 | match(V: Op1, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))) && |
2553 | (Op0->hasOneUse() || Op1->hasOneUse())) |
2554 | return BinaryOperator::CreateNeg(Op: Builder.CreateXor(LHS: A, RHS: B)); |
2555 | } |
2556 | |
2557 | // (sub (or A, B), (xor A, B)) --> (and A, B) |
2558 | { |
2559 | Value *A, *B; |
2560 | if (match(V: Op1, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2561 | match(V: Op0, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B)))) |
2562 | return BinaryOperator::CreateAnd(V1: A, V2: B); |
2563 | } |
2564 | |
2565 | // (sub (xor A, B) (or A, B)) --> neg (and A, B) |
2566 | { |
2567 | Value *A, *B; |
2568 | if (match(V: Op0, P: m_Xor(L: m_Value(V&: A), R: m_Value(V&: B))) && |
2569 | match(V: Op1, P: m_c_Or(L: m_Specific(V: A), R: m_Specific(V: B))) && |
2570 | (Op0->hasOneUse() || Op1->hasOneUse())) |
2571 | return BinaryOperator::CreateNeg(Op: Builder.CreateAnd(LHS: A, RHS: B)); |
2572 | } |
2573 | |
2574 | { |
2575 | Value *Y; |
2576 | // ((X | Y) - X) --> (~X & Y) |
2577 | if (match(V: Op0, P: m_OneUse(SubPattern: m_c_Or(L: m_Value(V&: Y), R: m_Specific(V: Op1))))) |
2578 | return BinaryOperator::CreateAnd( |
2579 | V1: Y, V2: Builder.CreateNot(V: Op1, Name: Op1->getName() + ".not" )); |
2580 | } |
2581 | |
2582 | { |
2583 | // (sub (and Op1, (neg X)), Op1) --> neg (and Op1, (add X, -1)) |
2584 | Value *X; |
2585 | if (match(V: Op0, P: m_OneUse(SubPattern: m_c_And(L: m_Specific(V: Op1), |
2586 | R: m_OneUse(SubPattern: m_Neg(V: m_Value(V&: X))))))) { |
2587 | return BinaryOperator::CreateNeg(Op: Builder.CreateAnd( |
2588 | LHS: Op1, RHS: Builder.CreateAdd(LHS: X, RHS: Constant::getAllOnesValue(Ty: I.getType())))); |
2589 | } |
2590 | } |
2591 | |
2592 | { |
2593 | // (sub (and Op1, C), Op1) --> neg (and Op1, ~C) |
2594 | Constant *C; |
2595 | if (match(V: Op0, P: m_OneUse(SubPattern: m_And(L: m_Specific(V: Op1), R: m_Constant(C))))) { |
2596 | return BinaryOperator::CreateNeg( |
2597 | Op: Builder.CreateAnd(LHS: Op1, RHS: Builder.CreateNot(V: C))); |
2598 | } |
2599 | } |
2600 | |
2601 | { |
2602 | // (sub (xor X, (sext C)), (sext C)) => (select C, (neg X), X) |
2603 | // (sub (sext C), (xor X, (sext C))) => (select C, X, (neg X)) |
2604 | Value *C, *X; |
2605 | auto m_SubXorCmp = [&C, &X](Value *LHS, Value *RHS) { |
2606 | return match(V: LHS, P: m_OneUse(SubPattern: m_c_Xor(L: m_Value(V&: X), R: m_Specific(V: RHS)))) && |
2607 | match(V: RHS, P: m_SExt(Op: m_Value(V&: C))) && |
2608 | (C->getType()->getScalarSizeInBits() == 1); |
2609 | }; |
2610 | if (m_SubXorCmp(Op0, Op1)) |
2611 | return SelectInst::Create(C, S1: Builder.CreateNeg(V: X), S2: X); |
2612 | if (m_SubXorCmp(Op1, Op0)) |
2613 | return SelectInst::Create(C, S1: X, S2: Builder.CreateNeg(V: X)); |
2614 | } |
2615 | |
2616 | if (Instruction *R = tryFoldInstWithCtpopWithNot(I: &I)) |
2617 | return R; |
2618 | |
2619 | if (Instruction *R = foldSubOfMinMax(I, Builder)) |
2620 | return R; |
2621 | |
2622 | { |
2623 | // If we have a subtraction between some value and a select between |
2624 | // said value and something else, sink subtraction into select hands, i.e.: |
2625 | // sub (select %Cond, %TrueVal, %FalseVal), %Op1 |
2626 | // -> |
2627 | // select %Cond, (sub %TrueVal, %Op1), (sub %FalseVal, %Op1) |
2628 | // or |
2629 | // sub %Op0, (select %Cond, %TrueVal, %FalseVal) |
2630 | // -> |
2631 | // select %Cond, (sub %Op0, %TrueVal), (sub %Op0, %FalseVal) |
2632 | // This will result in select between new subtraction and 0. |
2633 | auto SinkSubIntoSelect = |
2634 | [Ty = I.getType()](Value *Select, Value *OtherHandOfSub, |
2635 | auto SubBuilder) -> Instruction * { |
2636 | Value *Cond, *TrueVal, *FalseVal; |
      if (!match(Select, m_OneUse(m_Select(m_Value(Cond), m_Value(TrueVal),
                                           m_Value(FalseVal)))))
2639 | return nullptr; |
2640 | if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal) |
2641 | return nullptr; |
2642 | // While it is really tempting to just create two subtractions and let |
2643 | // InstCombine fold one of those to 0, it isn't possible to do so |
2644 | // because of worklist visitation order. So ugly it is. |
2645 | bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal; |
2646 | Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal); |
2647 | Constant *Zero = Constant::getNullValue(Ty); |
2648 | SelectInst *NewSel = |
          SelectInst::Create(Cond, OtherHandOfSubIsTrueVal ? Zero : NewSub,
                             OtherHandOfSubIsTrueVal ? NewSub : Zero);
      // Preserve prof metadata if any.
      NewSel->copyMetadata(cast<Instruction>(*Select));
2653 | return NewSel; |
2654 | }; |
2655 | if (Instruction *NewSel = SinkSubIntoSelect( |
2656 | /*Select=*/Op0, /*OtherHandOfSub=*/Op1, |
2657 | [Builder = &Builder, Op1](Value *OtherHandOfSelect) { |
              return Builder->CreateSub(OtherHandOfSelect,
                                        /*OtherHandOfSub=*/Op1);
2660 | })) |
2661 | return NewSel; |
2662 | if (Instruction *NewSel = SinkSubIntoSelect( |
2663 | /*Select=*/Op1, /*OtherHandOfSub=*/Op0, |
2664 | [Builder = &Builder, Op0](Value *OtherHandOfSelect) { |
              return Builder->CreateSub(/*OtherHandOfSub=*/Op0,
                                        OtherHandOfSelect);
2667 | })) |
2668 | return NewSel; |
2669 | } |
2670 | |
2671 | // (X - (X & Y)) --> (X & ~Y) |
  if (match(Op1, m_c_And(m_Specific(Op0), m_Value(Y))) &&
      (Op1->hasOneUse() || isa<Constant>(Y)))
    return BinaryOperator::CreateAnd(
        Op0, Builder.CreateNot(Y, Y->getName() + ".not"));
2676 | |
2677 | // ~X - Min/Max(~X, Y) -> ~Min/Max(X, ~Y) - X |
2678 | // ~X - Min/Max(Y, ~X) -> ~Min/Max(X, ~Y) - X |
2679 | // Min/Max(~X, Y) - ~X -> X - ~Min/Max(X, ~Y) |
2680 | // Min/Max(Y, ~X) - ~X -> X - ~Min/Max(X, ~Y) |
2681 | // As long as Y is freely invertible, this will be neutral or a win. |
2682 | // Note: We don't generate the inverse max/min, just create the 'not' of |
2683 | // it and let other folds do the rest. |
  if (match(Op0, m_Not(m_Value(X))) &&
      match(Op1, m_c_MaxOrMin(m_Specific(Op0), m_Value(Y))) &&
      !Op0->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Op1);
    return BinaryOperator::CreateSub(Not, X);
  }
  if (match(Op1, m_Not(m_Value(X))) &&
      match(Op0, m_c_MaxOrMin(m_Specific(Op1), m_Value(Y))) &&
      !Op1->hasNUsesOrMore(3) && isFreeToInvert(Y, Y->hasOneUse())) {
    Value *Not = Builder.CreateNot(Op0);
    return BinaryOperator::CreateSub(X, Not);
2695 | } |
2696 | |
  // Optimize differences of pointers into the same array into a size. Consider:
2698 | // &A[10] - &A[0]: we should compile this to "10". |
2699 | Value *LHSOp, *RHSOp; |
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                               I.hasNoUnsignedWrap()))
      return replaceInstUsesWith(I, Res);
2705 | |
2706 | // trunc(p)-trunc(q) -> trunc(p-q) |
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType(),
                                               /* IsNUW */ false))
      return replaceInstUsesWith(I, Res);
2712 | |
  if (match(Op0, m_ZExt(m_PtrToIntSameSize(DL, m_Value(LHSOp)))) &&
      match(Op1, m_ZExtOrSelf(m_PtrToInt(m_Value(RHSOp))))) {
    if (auto *GEP = dyn_cast<GEPOperator>(LHSOp)) {
      if (GEP->getPointerOperand() == RHSOp) {
        if (GEP->hasNoUnsignedWrap() || GEP->hasNoUnsignedSignedWrap()) {
          Value *Offset = EmitGEPOffset(GEP);
          Value *Res = GEP->hasNoUnsignedWrap()
                           ? Builder.CreateZExt(
                                 Offset, I.getType(), "",
                                 /*IsNonNeg=*/GEP->hasNoUnsignedSignedWrap())
                           : Builder.CreateSExt(Offset, I.getType());
          return replaceInstUsesWith(I, Res);
2725 | } |
2726 | } |
2727 | } |
2728 | } |
2729 | |
2730 | // Canonicalize a shifty way to code absolute value to the common pattern. |
2731 | // There are 2 potential commuted variants. |
2732 | // We're relying on the fact that we only do this transform when the shift has |
2733 | // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase |
2734 | // instructions). |
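  // E.g. for i32 A = -5: B = ashr(-5, 31) = -1, (A ^ B) = ~(-5) = 4, and
  // 4 - (-1) = 5 = |A|; for non-negative A, B = 0 and the result is A itself.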
2735 | Value *A; |
2736 | const APInt *ShAmt; |
2737 | Type *Ty = I.getType(); |
2738 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == BitWidth - 1 &&
      match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
2742 | // B = ashr i32 A, 31 ; smear the sign bit |
2743 | // sub (xor A, B), B ; flip bits if negative and subtract -1 (add 1) |
2744 | // --> (A < 0) ? -A : A |
    Value *IsNeg = Builder.CreateIsNeg(A);
    // Copy the nsw flags from the sub to the negate.
    Value *NegA = I.hasNoUnsignedWrap()
                      ? Constant::getNullValue(A->getType())
                      : Builder.CreateNeg(A, "", I.hasNoSignedWrap());
    return SelectInst::Create(IsNeg, NegA, A);
2751 | } |
2752 | |
2753 | // If we are subtracting a low-bit masked subset of some value from an add |
2754 | // of that same value with no low bits changed, that is clearing some low bits |
2755 | // of the sum: |
2756 | // sub (X + AddC), (X & AndC) --> and (X + AddC), ~AndC |
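  // E.g. with AddC = 4 and AndC = 3: adding 4 cannot change the low two bits
  // of X, so (X + 4) - (X & 3) simply clears those bits: (X + 4) & ~3.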
2757 | const APInt *AddC, *AndC; |
  if (match(Op0, m_Add(m_Value(X), m_APInt(AddC))) &&
      match(Op1, m_And(m_Specific(X), m_APInt(AndC)))) {
    unsigned Cttz = AddC->countr_zero();
    APInt HighMask(APInt::getHighBitsSet(BitWidth, BitWidth - Cttz));
    if ((HighMask & *AndC).isZero())
      return BinaryOperator::CreateAnd(Op0, ConstantInt::get(Ty, ~(*AndC)));
2764 | } |
2765 | |
2766 | if (Instruction *V = |
2767 | canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I)) |
2768 | return V; |
2769 | |
2770 | // X - usub.sat(X, Y) => umin(X, Y) |
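  // If X u>= Y then usub.sat(X, Y) == X - Y and the result is Y; otherwise
  // usub.sat(X, Y) == 0 and the result is X. Either way this is umin(X, Y).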
  if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::usub_sat>(m_Specific(Op0),
                                                           m_Value(Y)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::umin, {I.getType()}, {Op0, Y}));
2775 | |
2776 | // umax(X, Op1) - Op1 --> usub.sat(X, Op1) |
2777 | // TODO: The one-use restriction is not strictly necessary, but it may |
2778 | // require improving other pattern matching and/or codegen. |
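  // If X u> Op1 the result is X - Op1, otherwise it is 0, which is exactly
  // usub.sat(X, Op1).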
  if (match(Op0, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op1)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op1}));
2782 | |
2783 | // Op0 - umin(X, Op0) --> usub.sat(Op0, X) |
  if (match(Op1, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op0)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op0, X}));
2787 | |
2788 | // Op0 - umax(X, Op0) --> 0 - usub.sat(X, Op0) |
  if (match(Op1, m_OneUse(m_c_UMax(m_Value(X), m_Specific(Op0))))) {
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {X, Op0});
    return BinaryOperator::CreateNeg(USub);
2792 | } |
2793 | |
2794 | // umin(X, Op1) - Op1 --> 0 - usub.sat(Op1, X) |
  if (match(Op0, m_OneUse(m_c_UMin(m_Value(X), m_Specific(Op1))))) {
    Value *USub = Builder.CreateIntrinsic(Intrinsic::usub_sat, {Ty}, {Op1, X});
    return BinaryOperator::CreateNeg(USub);
2798 | } |
2799 | |
2800 | // C - ctpop(X) => ctpop(~X) if C is bitwidth |
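  // This holds because ctpop(X) + ctpop(~X) == BitWidth for any X.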
  if (match(Op0, m_SpecificInt(BitWidth)) &&
      match(Op1, m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(X)))))
    return replaceInstUsesWith(
        I, Builder.CreateIntrinsic(Intrinsic::ctpop, {I.getType()},
                                   {Builder.CreateNot(X)}));
2806 | |
2807 | // Reduce multiplies for difference-of-squares by factoring: |
2808 | // (X * X) - (Y * Y) --> (X + Y) * (X - Y) |
  if (match(Op0, m_OneUse(m_Mul(m_Value(X), m_Deferred(X)))) &&
      match(Op1, m_OneUse(m_Mul(m_Value(Y), m_Deferred(Y))))) {
    auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
    bool PropagateNSW = I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
                        OBO1->hasNoSignedWrap() && BitWidth > 2;
    bool PropagateNUW = I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
                        OBO1->hasNoUnsignedWrap() && BitWidth > 1;
    Value *Add = Builder.CreateAdd(X, Y, "add", PropagateNUW, PropagateNSW);
    Value *Sub = Builder.CreateSub(X, Y, "sub", PropagateNUW, PropagateNSW);
    Value *Mul = Builder.CreateMul(Add, Sub, "", PropagateNUW, PropagateNSW);
    return replaceInstUsesWith(I, Mul);
2821 | } |
2822 | |
2823 | // max(X,Y) nsw/nuw - min(X,Y) --> abs(X nsw - Y) |
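  // In exact arithmetic, smax(X, Y) - smin(X, Y) == |X - Y|.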
  if (match(Op0, m_OneUse(m_c_SMax(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_OneUse(m_c_SMin(m_Specific(X), m_Specific(Y))))) {
    if (I.hasNoUnsignedWrap() || I.hasNoSignedWrap()) {
      Value *Sub =
          Builder.CreateSub(X, Y, "sub", /*HasNUW=*/false, /*HasNSW=*/true);
      Value *Call =
          Builder.CreateBinaryIntrinsic(Intrinsic::abs, Sub, Builder.getTrue());
      return replaceInstUsesWith(I, Call);
2832 | } |
2833 | } |
2834 | |
2835 | if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I)) |
2836 | return Res; |
2837 | |
  // (sub (sext (add nsw X, Y)), (sext X)) --> (sext Y)
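  // Because the add is nsw, sext(X + Y) == sext(X) + sext(Y) in the wider
  // type, so the difference is exactly sext(Y).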
  if (match(Op1, m_SExtLike(m_Value(X))) &&
      match(Op0, m_SExtLike(m_c_NSWAdd(m_Specific(X), m_Value(Y))))) {
    Value *SExtY = Builder.CreateSExt(Y, I.getType());
    return replaceInstUsesWith(I, SExtY);
2843 | } |
2844 | |
  // (sub[ nsw] (sext (add nsw X, Y)), (sext (add nsw X, Z))) -->
  //   (sub[ nsw] (sext Y), (sext Z))
2847 | { |
2848 | Value *Z, *Add0, *Add1; |
    if (match(Op0, m_SExtLike(m_Value(Add0))) &&
        match(Op1, m_SExtLike(m_Value(Add1))) &&
        ((match(Add0, m_NSWAdd(m_Value(X), m_Value(Y))) &&
          match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))) ||
         (match(Add0, m_NSWAdd(m_Value(Y), m_Value(X))) &&
          match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))))) {
2855 | unsigned NumOfNewInstrs = 0; |
2856 | // Non-constant Y, Z require new SExt. |
      NumOfNewInstrs += !isa<Constant>(Y) ? 1 : 0;
      NumOfNewInstrs += !isa<Constant>(Z) ? 1 : 0;
2859 | // Check if we can trade some of the old instructions for the new ones. |
2860 | unsigned NumOfDeadInstrs = 0; |
2861 | if (Op0->hasOneUse()) { |
        // If Op0 (the sext) has multiple uses, we keep it and the add that it
        // uses; otherwise we can remove the sext and probably the add as well
        // (depending on how many uses the add has).
2865 | ++NumOfDeadInstrs; |
2866 | NumOfDeadInstrs += Add0->hasOneUse() ? 1 : 0; |
2867 | } |
2868 | if (Op1->hasOneUse()) { |
2869 | ++NumOfDeadInstrs; |
2870 | NumOfDeadInstrs += Add1->hasOneUse() ? 1 : 0; |
2871 | } |
2872 | if (NumOfDeadInstrs >= NumOfNewInstrs) { |
        Value *SExtY = Builder.CreateSExt(Y, I.getType());
        Value *SExtZ = Builder.CreateSExt(Z, I.getType());
        Value *Sub = Builder.CreateSub(SExtY, SExtZ, "",
                                       /*HasNUW=*/false,
                                       /*HasNSW=*/I.hasNoSignedWrap());
        return replaceInstUsesWith(I, Sub);
2879 | } |
2880 | } |
2881 | } |
2882 | |
2883 | return TryToNarrowDeduceFlags(); |
2884 | } |
2885 | |
2886 | /// This eliminates floating-point negation in either 'fneg(X)' or |
2887 | /// 'fsub(-0.0, X)' form by combining into a constant operand. |
2888 | static Instruction *foldFNegIntoConstant(Instruction &I, const DataLayout &DL) { |
  // This is limited to one use because fneg is assumed better for
  // reassociation and cheaper in codegen than fmul/fdiv.
2891 | // TODO: Should the m_OneUse restriction be removed? |
2892 | Instruction *FNegOp; |
  if (!match(&I, m_FNeg(m_OneUse(m_Instruction(FNegOp)))))
2894 | return nullptr; |
2895 | |
2896 | Value *X; |
2897 | Constant *C; |
2898 | |
2899 | // Fold negation into constant operand. |
2900 | // -(X * C) --> X * (-C) |
  if (match(FNegOp, m_FMul(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
      FastMathFlags FNegF = I.getFastMathFlags();
      FastMathFlags OpF = FNegOp->getFastMathFlags();
      FastMathFlags FMF = FastMathFlags::unionValue(FNegF, OpF) |
                          FastMathFlags::intersectRewrite(FNegF, OpF);
      FMF.setNoInfs(FNegF.noInfs() && OpF.noInfs());
      return BinaryOperator::CreateFMulFMF(X, NegC, FMF);
2909 | } |
2910 | // -(X / C) --> X / (-C) |
  if (match(FNegOp, m_FDiv(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFDivFMF(X, NegC, &I);
  // -(C / X) --> (-C) / X
  if (match(FNegOp, m_FDiv(m_Constant(C), m_Value(X))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL)) {
      Instruction *FDiv = BinaryOperator::CreateFDivFMF(NegC, X, &I);
2918 | |
2919 | // Intersect 'nsz' and 'ninf' because those special value exceptions may |
2920 | // not apply to the fdiv. Everything else propagates from the fneg. |
2921 | // TODO: We could propagate nsz/ninf from fdiv alone? |
2922 | FastMathFlags FMF = I.getFastMathFlags(); |
2923 | FastMathFlags OpFMF = FNegOp->getFastMathFlags(); |
2924 | FDiv->setHasNoSignedZeros(FMF.noSignedZeros() && OpFMF.noSignedZeros()); |
2925 | FDiv->setHasNoInfs(FMF.noInfs() && OpFMF.noInfs()); |
2926 | return FDiv; |
2927 | } |
2928 | // With NSZ [ counter-example with -0.0: -(-0.0 + 0.0) != 0.0 + -0.0 ]: |
2929 | // -(X + C) --> -X + -C --> -C - X |
  if (I.hasNoSignedZeros() && match(FNegOp, m_FAdd(m_Value(X), m_Constant(C))))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFSubFMF(NegC, X, &I);
2933 | |
2934 | return nullptr; |
2935 | } |
2936 | |
2937 | Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp, |
2938 | Instruction &FMFSource) { |
2939 | Value *X, *Y; |
  if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) {
    // Push into RHS which is more likely to simplify (const or another fneg).
    // FIXME: It would be better to invert the transform.
    return cast<Instruction>(Builder.CreateFMulFMF(
        X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource));
2945 | } |
2946 | |
  if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) {
    return cast<Instruction>(Builder.CreateFDivFMF(
        Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource));
2950 | } |
2951 | |
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(FNegOp)) {
2953 | // Make sure to preserve flags and metadata on the call. |
2954 | if (II->getIntrinsicID() == Intrinsic::ldexp) { |
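      // fneg (ldexp X, N) == ldexp (fneg X), N: scaling by 2^N (a positive
      // value) never changes the sign, so the negation can move onto X.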
2955 | FastMathFlags FMF = FMFSource.getFastMathFlags() | II->getFastMathFlags(); |
2956 | CallInst *New = |
          Builder.CreateCall(II->getCalledFunction(),
                             {Builder.CreateFNegFMF(II->getArgOperand(0), FMF),
                              II->getArgOperand(1)});
2960 | New->setFastMathFlags(FMF); |
      New->copyMetadata(*II);
2962 | return New; |
2963 | } |
2964 | } |
2965 | |
2966 | return nullptr; |
2967 | } |
2968 | |
2969 | Instruction *InstCombinerImpl::visitFNeg(UnaryOperator &I) { |
  Value *Op = I.getOperand(0);
2971 | |
  if (Value *V = simplifyFNegInst(Op, I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
2974 | return replaceInstUsesWith(I, V); |
2975 | |
2976 | if (Instruction *X = foldFNegIntoConstant(I, DL)) |
2977 | return X; |
2978 | |
2979 | Value *X, *Y; |
2980 | |
2981 | // If we can ignore the sign of zeros: -(X - Y) --> (Y - X) |
2982 | if (I.hasNoSignedZeros() && |
      match(Op, m_OneUse(m_FSub(m_Value(X), m_Value(Y)))))
    return BinaryOperator::CreateFSubFMF(Y, X, &I);
2985 | |
2986 | Value *OneUse; |
  if (!match(Op, m_OneUse(m_Value(OneUse))))
2988 | return nullptr; |
2989 | |
  if (Instruction *R = hoistFNegAboveFMulFDiv(OneUse, I))
    return replaceInstUsesWith(I, R);
2992 | |
2993 | // Try to eliminate fneg if at least 1 arm of the select is negated. |
2994 | Value *Cond; |
  if (match(OneUse, m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))) {
2996 | // Unlike most transforms, this one is not safe to propagate nsz unless |
2997 | // it is present on the original select. We union the flags from the select |
2998 | // and fneg and then remove nsz if needed. |
2999 | auto propagateSelectFMF = [&](SelectInst *S, bool CommonOperand) { |
      S->copyFastMathFlags(&I);
      if (auto *OldSel = dyn_cast<SelectInst>(Op)) {
3002 | FastMathFlags FMF = I.getFastMathFlags() | OldSel->getFastMathFlags(); |
3003 | S->setFastMathFlags(FMF); |
3004 | if (!OldSel->hasNoSignedZeros() && !CommonOperand && |
            !isGuaranteedNotToBeUndefOrPoison(OldSel->getCondition()))
3006 | S->setHasNoSignedZeros(false); |
3007 | } |
3008 | }; |
3009 | // -(Cond ? -P : Y) --> Cond ? P : -Y |
3010 | Value *P; |
    if (match(X, m_FNeg(m_Value(P)))) {
      Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, P, NegY);
3014 | propagateSelectFMF(NewSel, P == Y); |
3015 | return NewSel; |
3016 | } |
3017 | // -(Cond ? X : -P) --> Cond ? -X : P |
    if (match(Y, m_FNeg(m_Value(P)))) {
      Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, NegX, P);
3021 | propagateSelectFMF(NewSel, P == X); |
3022 | return NewSel; |
3023 | } |
3024 | |
3025 | // -(Cond ? X : C) --> Cond ? -X : -C |
3026 | // -(Cond ? C : Y) --> Cond ? -C : -Y |
    if (match(X, m_ImmConstant()) || match(Y, m_ImmConstant())) {
      Value *NegX = Builder.CreateFNegFMF(X, &I, X->getName() + ".neg");
      Value *NegY = Builder.CreateFNegFMF(Y, &I, Y->getName() + ".neg");
      SelectInst *NewSel = SelectInst::Create(Cond, NegX, NegY);
3031 | propagateSelectFMF(NewSel, /*CommonOperand=*/true); |
3032 | return NewSel; |
3033 | } |
3034 | } |
3035 | |
3036 | // fneg (copysign x, y) -> copysign x, (fneg y) |
  if (match(OneUse, m_CopySign(m_Value(X), m_Value(Y)))) {
3038 | // The source copysign has an additional value input, so we can't propagate |
3039 | // flags the copysign doesn't also have. |
3040 | FastMathFlags FMF = I.getFastMathFlags(); |
    FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();
    Value *NegY = Builder.CreateFNegFMF(Y, FMF);
    Value *NewCopySign = Builder.CreateCopySign(X, NegY, FMF);
    return replaceInstUsesWith(I, NewCopySign);
3045 | } |
3046 | |
3047 | // fneg (shuffle x, Mask) --> shuffle (fneg x), Mask |
3048 | ArrayRef<int> Mask; |
  if (match(OneUse, m_Shuffle(m_Value(X), m_Poison(), m_Mask(Mask))))
    return new ShuffleVectorInst(Builder.CreateFNegFMF(X, &I), Mask);
3051 | |
3052 | // fneg (reverse x) --> reverse (fneg x) |
  if (match(OneUse, m_VecReverse(m_Value(X)))) {
    Value *Reverse = Builder.CreateVectorReverse(Builder.CreateFNegFMF(X, &I));
    return replaceInstUsesWith(I, Reverse);
3056 | } |
3057 | |
3058 | return nullptr; |
3059 | } |
3060 | |
3061 | Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) { |
  if (Value *V = simplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  getSimplifyQuery().getWithInstruction(&I)))
3065 | return replaceInstUsesWith(I, V); |
3066 | |
  if (Instruction *X = foldVectorBinop(I))
3068 | return X; |
3069 | |
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
3071 | return Phi; |
3072 | |
3073 | // Subtraction from -0.0 is the canonical form of fneg. |
3074 | // fsub -0.0, X ==> fneg X |
3075 | // fsub nsz 0.0, X ==> fneg nsz X |
3076 | // |
3077 | // FIXME This matcher does not respect FTZ or DAZ yet: |
3078 | // fsub -0.0, Denorm ==> +-0 |
3079 | // fneg Denorm ==> -Denorm |
3080 | Value *Op; |
  if (match(&I, m_FNeg(m_Value(Op))))
    return UnaryOperator::CreateFNegFMF(Op, &I);
3083 | |
3084 | if (Instruction *X = foldFNegIntoConstant(I, DL)) |
3085 | return X; |
3086 | |
3087 | if (Instruction *R = foldFBinOpOfIntCasts(I)) |
3088 | return R; |
3089 | |
3090 | Value *X, *Y; |
3091 | Constant *C; |
3092 | |
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3094 | // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X) |
3095 | // Canonicalize to fadd to make analysis easier. |
3096 | // This can also help codegen because fadd is commutative. |
3097 | // Note that if this fsub was really an fneg, the fadd with -0.0 will get |
3098 | // killed later. We still limit that particular transform with 'hasOneUse' |
3099 | // because an fneg is assumed better/cheaper than a generic fsub. |
3100 | if (I.hasNoSignedZeros() || |
      cannotBeNegativeZero(Op0, getSimplifyQuery().getWithInstruction(&I))) {
    if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
      return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
3105 | } |
3106 | } |
3107 | |
3108 | // (-X) - Op1 --> -(X + Op1) |
  if (I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
      match(Op0, m_OneUse(m_FNeg(m_Value(X))))) {
    Value *FAdd = Builder.CreateFAddFMF(X, Op1, &I);
    return UnaryOperator::CreateFNegFMF(FAdd, &I);
3113 | } |
3114 | |
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
3118 | return NV; |
3119 | |
3120 | // X - C --> X + (-C) |
3121 | // But don't transform constant expressions because there's an inverse fold |
3122 | // for X + (-Y) --> X - Y. |
  if (match(Op1, m_ImmConstant(C)))
    if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
      return BinaryOperator::CreateFAddFMF(Op0, NegC, &I);
3126 | |
3127 | // X - (-Y) --> X + Y |
  if (match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFAddFMF(Op0, Y, &I);
3130 | |
3131 | // Similar to above, but look through a cast of the negated value: |
3132 | // X - (fptrunc(-Y)) --> X + fptrunc(Y) |
3133 | Type *Ty = I.getType(); |
  if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);
3136 | |
3137 | // X - (fpext(-Y)) --> X + fpext(Y) |
  if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);
3140 | |
3141 | // Similar to above, but look through fmul/fdiv of the negated value: |
3142 | // Op0 - (-X * Y) --> Op0 + (X * Y) |
3143 | // Op0 - (Y * -X) --> Op0 + (X * Y) |
  if (match(Op1, m_OneUse(m_c_FMul(m_FNeg(m_Value(X)), m_Value(Y))))) {
    Value *FMul = Builder.CreateFMulFMF(X, Y, &I);
    return BinaryOperator::CreateFAddFMF(Op0, FMul, &I);
3147 | } |
3148 | // Op0 - (-X / Y) --> Op0 + (X / Y) |
3149 | // Op0 - (X / -Y) --> Op0 + (X / Y) |
  if (match(Op1, m_OneUse(m_FDiv(m_FNeg(m_Value(X)), m_Value(Y)))) ||
      match(Op1, m_OneUse(m_FDiv(m_Value(X), m_FNeg(m_Value(Y)))))) {
    Value *FDiv = Builder.CreateFDivFMF(X, Y, &I);
    return BinaryOperator::CreateFAddFMF(Op0, FDiv, &I);
3154 | } |
3155 | |
3156 | // Handle special cases for FSub with selects feeding the operation |
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);
3159 | |
3160 | if (I.hasAllowReassoc() && I.hasNoSignedZeros()) { |
3161 | // (Y - X) - Y --> -X |
    if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);
3164 | |
3165 | // Y - (X + Y) --> -X |
3166 | // Y - (Y + X) --> -X |
    if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
      return UnaryOperator::CreateFNegFMF(X, &I);
3169 | |
3170 | // (X * C) - X --> X * (C - 1.0) |
    if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
      if (Constant *CSubOne = ConstantFoldBinaryOpOperands(
              Instruction::FSub, C, ConstantFP::get(Ty, 1.0), DL))
        return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
3175 | } |
3176 | // X - (X * C) --> X * (1.0 - C) |
    if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
      if (Constant *OneSubC = ConstantFoldBinaryOpOperands(
              Instruction::FSub, ConstantFP::get(Ty, 1.0), C, DL))
        return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
3181 | } |
3182 | |
3183 | // Reassociate fsub/fadd sequences to create more fadd instructions and |
3184 | // reduce dependency chains: |
3185 | // ((X - Y) + Z) - Op1 --> (X + Z) - (Y + Op1) |
3186 | Value *Z; |
    if (match(Op0, m_OneUse(m_c_FAdd(m_OneUse(m_FSub(m_Value(X), m_Value(Y))),
                                     m_Value(Z))))) {
      Value *XZ = Builder.CreateFAddFMF(X, Z, &I);
      Value *YW = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(XZ, YW, &I);
3192 | } |
3193 | |
3194 | auto m_FaddRdx = [](Value *&Sum, Value *&Vec) { |
      return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(m_Value(Sum),
                                                                 m_Value(Vec)));
3197 | }; |
3198 | Value *A0, *A1, *V0, *V1; |
    if (match(Op0, m_FaddRdx(A0, V0)) && match(Op1, m_FaddRdx(A1, V1)) &&
3200 | V0->getType() == V1->getType()) { |
3201 | // Difference of sums is sum of differences: |
3202 | // add_rdx(A0, V0) - add_rdx(A1, V1) --> add_rdx(A0, V0 - V1) - A1 |
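      // With reassociation, add_rdx(S, V) == S + sum(V), so the difference is
      // (A0 + sum(V0)) - (A1 + sum(V1)) == (A0 + sum(V0 - V1)) - A1.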
      Value *Sub = Builder.CreateFSubFMF(V0, V1, &I);
      Value *Rdx = Builder.CreateIntrinsic(Intrinsic::vector_reduce_fadd,
                                           {Sub->getType()}, {A0, Sub}, &I);
      return BinaryOperator::CreateFSubFMF(Rdx, A1, &I);
3207 | } |
3208 | |
3209 | if (Instruction *F = factorizeFAddFSub(I, Builder)) |
3210 | return F; |
3211 | |
3212 | // TODO: This performs reassociative folds for FP ops. Some fraction of the |
3213 | // functionality has been subsumed by simple pattern matching here and in |
3214 | // InstSimplify. We should let a dedicated reassociation pass handle more |
3215 | // complex pattern matching and remove this from InstCombine. |
    if (Value *V = FAddCombine(Builder).simplify(&I))
3217 | return replaceInstUsesWith(I, V); |
3218 | |
3219 | // (X - Y) - Op1 --> X - (Y + Op1) |
    if (match(Op0, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *FAdd = Builder.CreateFAddFMF(Y, Op1, &I);
      return BinaryOperator::CreateFSubFMF(X, FAdd, &I);
3223 | } |
3224 | } |
3225 | |
3226 | return nullptr; |
3227 | } |
3228 | |