//===- InstCombineShifts.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitShl, visitLShr, and visitAShr functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1,
                                        Value *ShAmt1) {
  // We have two shift amounts from two different shifts. The types of those
  // shift amounts may not match. If that's the case, let's bail out now.
  if (ShAmt0->getType() != ShAmt1->getType())
    return false;

  // As input, we have the following pattern:
  //   Sh0 (Sh1 X, Q), K
  // We want to rewrite that as:
  //   Sh x, (Q+K)  iff (Q+K) u< bitwidth(x)
  // While we know that originally (Q+K) would not overflow
  // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
  // shift amounts, so it may now overflow in a smaller bitwidth.
  // To ensure that does not happen, we need to ensure that the total maximal
  // shift amount is still representable in that smaller bit width.
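  // E.g. if Sh0 and Sh1 operate on i256 but the shift amounts are i8, the
  // maximal possible total shift amount is 255 + 255 = 510, which does not
  // fit in i8, so the reassociation must be rejected.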
  unsigned MaximalPossibleTotalShiftAmount =
      (Sh0->getType()->getScalarSizeInBits() - 1) +
      (Sh1->getType()->getScalarSizeInBits() - 1);
  APInt MaximalRepresentableShiftAmount =
      APInt::getAllOnes(ShAmt0->getType()->getScalarSizeInBits());
  return MaximalRepresentableShiftAmount.uge(MaximalPossibleTotalShiftAmount);
}

// Given pattern:
//   (x shiftopcode Q) shiftopcode K
// we should rewrite it as
//   x shiftopcode (Q+K)  iff (Q+K) u< bitwidth(x)
//
// This is valid for any shift, but the two shifts must be identical, and we
// must be careful in case we have (zext(Q)+zext(K)) and look past extensions:
// (Q+K) must not overflow or else (Q+K) u< bitwidth(x) is bogus.
//
// AnalyzeForSignBitExtraction indicates that we will only analyze whether this
// pattern has any 2 right-shifts that sum to 1 less than original bit width.
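// E.g. with i32 %x:
//   (lshr (lshr %x, 3), 5) --> (lshr %x, 8), since 3+5 u< 32.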
Value *InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts(
    BinaryOperator *Sh0, const SimplifyQuery &SQ,
    bool AnalyzeForSignBitExtraction) {
  // Look for a shift of some instruction, ignore zext of shift amount if any.
  Instruction *Sh0Op0;
  Value *ShAmt0;
  if (!match(Sh0,
             m_Shift(m_Instruction(Sh0Op0), m_ZExtOrSelf(m_Value(ShAmt0)))))
    return nullptr;

  // If there is a truncation between the two shifts, we must make note of it
  // and look through it. The truncation imposes additional constraints on the
  // transform.
  Instruction *Sh1;
  Value *Trunc = nullptr;
  match(Sh0Op0,
        m_CombineOr(m_CombineAnd(m_Trunc(m_Instruction(Sh1)), m_Value(Trunc)),
                    m_Instruction(Sh1)));

  // Inner shift: (x shiftopcode ShAmt1)
  // Like with other shift, ignore zext of shift amount if any.
  Value *X, *ShAmt1;
  if (!match(Sh1, m_Shift(m_Value(X), m_ZExtOrSelf(m_Value(ShAmt1)))))
    return nullptr;

  // Verify that it would be safe to try to add those two shift amounts.
  if (!canTryToConstantAddTwoShiftAmounts(Sh0, ShAmt0, Sh1, ShAmt1))
    return nullptr;

  // We are only looking for signbit extraction if we have two right shifts.
  bool HadTwoRightShifts = match(Sh0, m_Shr(m_Value(), m_Value())) &&
                           match(Sh1, m_Shr(m_Value(), m_Value()));
  // ... and if it's not two right-shifts, we know the answer already.
  if (AnalyzeForSignBitExtraction && !HadTwoRightShifts)
    return nullptr;

  // The shift opcodes must be identical, unless we are just checking whether
  // this pattern can be interpreted as a sign-bit-extraction.
  Instruction::BinaryOps ShiftOpcode = Sh0->getOpcode();
  bool IdenticalShOpcodes = Sh0->getOpcode() == Sh1->getOpcode();
  if (!IdenticalShOpcodes && !AnalyzeForSignBitExtraction)
    return nullptr;

  // If we saw truncation, we'll need to produce an extra instruction,
  // and for that one of the operands of the shift must be one-use,
  // unless of course we don't actually plan to produce any instructions here.
  if (Trunc && !AnalyzeForSignBitExtraction &&
      !match(Sh0, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Can we fold (ShAmt0+ShAmt1) ?
  auto *NewShAmt = dyn_cast_or_null<Constant>(
      simplifyAddInst(ShAmt0, ShAmt1, /*IsNSW=*/false, /*IsNUW=*/false,
                      SQ.getWithInstruction(Sh0)));
  if (!NewShAmt)
    return nullptr; // Did not simplify.
  unsigned NewShAmtBitWidth = NewShAmt->getType()->getScalarSizeInBits();
  unsigned XBitWidth = X->getType()->getScalarSizeInBits();
  // Is the new shift amount smaller than the bit width of inner/new shift?
  if (!match(NewShAmt, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
                                          APInt(NewShAmtBitWidth, XBitWidth))))
    return nullptr; // FIXME: could perform constant-folding.

  // If there was a truncation, and we have a right-shift, we can only fold if
  // we are left with the original sign bit. Likewise, if we were just checking
  // that this is a signbit extraction, this is the place to check it.
  // FIXME: zero shift amount is also legal here, but we can't *easily* check
  // more than one predicate so it's not really worth it.
  if (HadTwoRightShifts && (Trunc || AnalyzeForSignBitExtraction)) {
    // If it's not a sign bit extraction, then we're done.
    if (!match(NewShAmt,
               m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                                  APInt(NewShAmtBitWidth, XBitWidth - 1))))
      return nullptr;
    // If it is, and that was the question, return the base value.
    if (AnalyzeForSignBitExtraction)
      return X;
  }

  assert(IdenticalShOpcodes && "Should not get here with different shifts.");

  if (NewShAmt->getType() != X->getType()) {
    NewShAmt = ConstantFoldCastOperand(Instruction::ZExt, NewShAmt,
                                       X->getType(), SQ.DL);
    if (!NewShAmt)
      return nullptr;
  }

  // All good, we can do this fold.
  BinaryOperator *NewShift = BinaryOperator::Create(ShiftOpcode, X, NewShAmt);

  // The flags can only be propagated if there wasn't a trunc.
  if (!Trunc) {
    // If the pattern did not involve trunc, and both of the original shifts
    // had the same flag set, preserve the flag.
    if (ShiftOpcode == Instruction::BinaryOps::Shl) {
      NewShift->setHasNoUnsignedWrap(Sh0->hasNoUnsignedWrap() &&
                                     Sh1->hasNoUnsignedWrap());
      NewShift->setHasNoSignedWrap(Sh0->hasNoSignedWrap() &&
                                   Sh1->hasNoSignedWrap());
    } else {
      NewShift->setIsExact(Sh0->isExact() && Sh1->isExact());
    }
  }

  Instruction *Ret = NewShift;
  if (Trunc) {
    Builder.Insert(NewShift);
    Ret = CastInst::Create(Instruction::Trunc, NewShift, Sh0->getType());
  }

  return Ret;
}

// If we have some pattern that leaves only some low bits set, and then performs
// left-shift of those bits, if none of the bits that are left after the final
// shift are modified by the mask, we can omit the mask.
//
// There are many variants to this pattern:
//   a)  (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
//   b)  (x & (~(-1 << MaskShAmt))) << ShiftShAmt
//   c)  (x & (-1 l>> MaskShAmt)) << ShiftShAmt
//   d)  (x & ((-1 << MaskShAmt) l>> MaskShAmt)) << ShiftShAmt
//   e)  ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
//   f)  ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
// All these patterns can be simplified to just:
//   x << ShiftShAmt
// iff:
//   a,b)     (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
//   c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
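// E.g. variant (a) with i32 %x, MaskShAmt=8, ShiftShAmt=24:
//   (%x & 255) << 24 --> %x << 24
// because 8+24 u>= 32: the mask only clears bits that the shift discards
// anyway.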
static Instruction *
dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
                                     const SimplifyQuery &Q,
                                     InstCombiner::BuilderTy &Builder) {
  assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl &&
         "The input must be 'shl'!");

  Value *Masked, *ShiftShAmt;
  match(OuterShift,
        m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));

  // *If* there is a truncation between an outer shift and a possibly-mask,
  // then said truncation *must* be one-use, else we can't perform the fold.
  Value *Trunc;
  if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
      !Trunc->hasOneUse())
    return nullptr;

  Type *NarrowestTy = OuterShift->getType();
  Type *WidestTy = Masked->getType();
  bool HadTrunc = WidestTy != NarrowestTy;

  // The mask must be computed in a type twice as wide to ensure
  // that no bits are lost if the sum-of-shifts is wider than the base type.
  Type *ExtendedTy = WidestTy->getExtendedType();

  Value *MaskShAmt;

  // ((1 << MaskShAmt) - 1)
  auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());
  // (~(-1 << MaskShAmt))
  auto MaskB = m_Not(m_Shl(m_AllOnes(), m_Value(MaskShAmt)));
  // (-1 l>> MaskShAmt)
  auto MaskC = m_LShr(m_AllOnes(), m_Value(MaskShAmt));
  // ((-1 << MaskShAmt) l>> MaskShAmt)
  auto MaskD =
      m_LShr(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_Deferred(MaskShAmt));

  Value *X;
  Constant *NewMask;

  if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
    // Peek through an optional zext of the shift amount.
    match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));

    // Verify that it would be safe to try to add those two shift amounts.
    if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
                                            MaskShAmt))
      return nullptr;

    // Can we simplify (MaskShAmt+ShiftShAmt) ?
    auto *SumOfShAmts = dyn_cast_or_null<Constant>(simplifyAddInst(
        MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
    if (!SumOfShAmts)
      return nullptr; // Did not simplify.
    // In this pattern SumOfShAmts correlates with the number of low bits
    // that shall remain in the root value (OuterShift).

    // An extend of an undef value becomes zero because the high bits are never
    // completely unknown. Replace the `undef` shift amounts with final
    // shift bitwidth to ensure that the value remains undef when creating the
    // subsequent shift op.
    SumOfShAmts = Constant::replaceUndefsWith(
        SumOfShAmts, ConstantInt::get(SumOfShAmts->getType()->getScalarType(),
                                      ExtendedTy->getScalarSizeInBits()));
    auto *ExtendedSumOfShAmts = ConstantFoldCastOperand(
        Instruction::ZExt, SumOfShAmts, ExtendedTy, Q.DL);
    if (!ExtendedSumOfShAmts)
      return nullptr;

    // And compute the mask as usual: ~(-1 << (SumOfShAmts))
    auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
    Constant *ExtendedInvertedMask = ConstantFoldBinaryOpOperands(
        Instruction::Shl, ExtendedAllOnes, ExtendedSumOfShAmts, Q.DL);
    if (!ExtendedInvertedMask)
      return nullptr;

    NewMask = ConstantExpr::getNot(ExtendedInvertedMask);
  } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
             match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
                                 m_Deferred(MaskShAmt)))) {
    // Peek through an optional zext of the shift amount.
    match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));

    // Verify that it would be safe to try to add those two shift amounts.
    if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
                                            MaskShAmt))
      return nullptr;

    // Can we simplify (ShiftShAmt-MaskShAmt) ?
    auto *ShAmtsDiff = dyn_cast_or_null<Constant>(simplifySubInst(
        ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
    if (!ShAmtsDiff)
      return nullptr; // Did not simplify.
    // In this pattern ShAmtsDiff correlates with the number of high bits that
    // shall be unset in the root value (OuterShift).

    // An extend of an undef value becomes zero because the high bits are never
    // completely unknown. Replace the `undef` shift amounts with negated
    // bitwidth of innermost shift to ensure that the value remains undef when
    // creating the subsequent shift op.
    unsigned WidestTyBitWidth = WidestTy->getScalarSizeInBits();
    ShAmtsDiff = Constant::replaceUndefsWith(
        ShAmtsDiff, ConstantInt::get(ShAmtsDiff->getType()->getScalarType(),
                                     -WidestTyBitWidth));
    auto *ExtendedNumHighBitsToClear = ConstantFoldCastOperand(
        Instruction::ZExt,
        ConstantExpr::getSub(ConstantInt::get(ShAmtsDiff->getType(),
                                              WidestTyBitWidth,
                                              /*IsSigned=*/false),
                             ShAmtsDiff),
        ExtendedTy, Q.DL);
    if (!ExtendedNumHighBitsToClear)
      return nullptr;

    // And compute the mask as usual: (-1 l>> (NumHighBitsToClear))
    auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
    NewMask = ConstantFoldBinaryOpOperands(Instruction::LShr, ExtendedAllOnes,
                                           ExtendedNumHighBitsToClear, Q.DL);
    if (!NewMask)
      return nullptr;
  } else
    return nullptr; // Don't know anything about this pattern.

  NewMask = ConstantExpr::getTrunc(NewMask, NarrowestTy);

  // Does this mask have any unset bits? If not then we can just not apply it.
  bool NeedMask = !match(NewMask, m_AllOnes());

  // If we need to apply a mask, there are several more restrictions we have.
  if (NeedMask) {
    // The old masking instruction must go away.
    if (!Masked->hasOneUse())
      return nullptr;
    // The original "masking" instruction must not have been `ashr`.
    if (match(Masked, m_AShr(m_Value(), m_Value())))
      return nullptr;
  }

  // If we need to apply truncation, let's do it first, since we can.
  // We have already ensured that the old truncation will go away.
  if (HadTrunc)
    X = Builder.CreateTrunc(X, NarrowestTy);

  // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits.
  // We didn't change the Type of this outermost shift, so we can just do it.
  auto *NewShift = BinaryOperator::Create(OuterShift->getOpcode(), X,
                                          OuterShift->getOperand(1));
  if (!NeedMask)
    return NewShift;

  Builder.Insert(NewShift);
  return BinaryOperator::Create(Instruction::And, NewShift, NewMask);
}

/// If we have a shift-by-constant of a bin op (bitwise logic op or add/sub w/
/// shl) that itself has a shift-by-constant operand with identical opcode, we
/// may be able to convert that into 2 independent shifts followed by the logic
/// op. This eliminates a use of an intermediate value (reduces dependency
/// chain).
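/// E.g. with i32 values:
///   shl (xor (shl X, 2), Y), 3 --> xor (shl X, 5), (shl Y, 3)
/// The intermediate (shl X, 2) disappears, so X and Y can be shifted
/// independently.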
static Instruction *foldShiftOfShiftedBinOp(BinaryOperator &I,
                                            InstCombiner::BuilderTy &Builder) {
  assert(I.isShift() && "Expected a shift as input");
  auto *BinInst = dyn_cast<BinaryOperator>(I.getOperand(0));
  if (!BinInst ||
      (!BinInst->isBitwiseLogicOp() &&
       BinInst->getOpcode() != Instruction::Add &&
       BinInst->getOpcode() != Instruction::Sub) ||
      !BinInst->hasOneUse())
    return nullptr;

  Constant *C0, *C1;
  if (!match(I.getOperand(1), m_Constant(C1)))
    return nullptr;

  Instruction::BinaryOps ShiftOpcode = I.getOpcode();
  // Transform for add/sub only works with shl.
  if ((BinInst->getOpcode() == Instruction::Add ||
       BinInst->getOpcode() == Instruction::Sub) &&
      ShiftOpcode != Instruction::Shl)
    return nullptr;

  Type *Ty = I.getType();

  // Find a matching shift by constant. The fold is not valid if the sum
  // of the shift values equals or exceeds bitwidth.
  Value *X, *Y;
  auto matchFirstShift = [&](Value *V, Value *W) {
    unsigned Size = Ty->getScalarSizeInBits();
    APInt Threshold(Size, Size);
    return match(V, m_BinOp(ShiftOpcode, m_Value(X), m_Constant(C0))) &&
           (V->hasOneUse() || match(W, m_ImmConstant())) &&
           match(ConstantExpr::getAdd(C0, C1),
                 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
  };

  // Logic ops and Add are commutative, so check each operand for a match. Sub
  // is not, so if we match operand(1) we cannot reorder and need to keep the
  // operands in their original positions.
  bool FirstShiftIsOp1 = false;
  if (matchFirstShift(BinInst->getOperand(0), BinInst->getOperand(1)))
    Y = BinInst->getOperand(1);
  else if (matchFirstShift(BinInst->getOperand(1), BinInst->getOperand(0))) {
    Y = BinInst->getOperand(0);
    FirstShiftIsOp1 = BinInst->getOpcode() == Instruction::Sub;
  } else
    return nullptr;

  // shift (binop (shift X, C0), Y), C1 -> binop (shift X, C0+C1), (shift Y, C1)
  Constant *ShiftSumC = ConstantExpr::getAdd(C0, C1);
  Value *NewShift1 = Builder.CreateBinOp(ShiftOpcode, X, ShiftSumC);
  Value *NewShift2 = Builder.CreateBinOp(ShiftOpcode, Y, C1);
  Value *Op1 = FirstShiftIsOp1 ? NewShift2 : NewShift1;
  Value *Op2 = FirstShiftIsOp1 ? NewShift1 : NewShift2;
  return BinaryOperator::Create(BinInst->getOpcode(), Op1, Op2);
}

Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  assert(Op0->getType() == Op1->getType());
  Type *Ty = I.getType();

  // If the shift amount is a one-use `sext`, we can demote it to `zext`.
  Value *Y;
  if (match(Op1, m_OneUse(m_SExt(m_Value(Y))))) {
    Value *NewExt = Builder.CreateZExt(Y, Ty, Op1->getName());
    return BinaryOperator::Create(I.getOpcode(), Op0, NewExt);
  }

  // See if we can fold away this shift.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Try to fold constant and into select arguments.
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (Constant *CUI = dyn_cast<Constant>(Op1))
    if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
      return Res;

  if (auto *NewShift = cast_or_null<Instruction>(
          reassociateShiftAmtsOfTwoSameDirectionShifts(&I, SQ)))
    return NewShift;

  // Pre-shift a constant shifted by a variable amount with constant offset:
  // C shift (A add nuw C1) --> (C shift C1) shift A
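  // E.g. 16 << (A add nuw 2) --> (16 << 2) << A = 64 << A. The 'nuw' on the
  // add guarantees the shift amount cannot wrap, so splitting it is sound.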
  Value *A;
  Constant *C, *C1;
  if (match(Op0, m_Constant(C)) &&
      match(Op1, m_NUWAddLike(m_Value(A), m_Constant(C1)))) {
    Value *NewC = Builder.CreateBinOp(I.getOpcode(), C, C1);
    BinaryOperator *NewShiftOp = BinaryOperator::Create(I.getOpcode(), NewC, A);
    if (I.getOpcode() == Instruction::Shl) {
      NewShiftOp->setHasNoSignedWrap(I.hasNoSignedWrap());
      NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    } else {
      NewShiftOp->setIsExact(I.isExact());
    }
    return NewShiftOp;
  }

  unsigned BitWidth = Ty->getScalarSizeInBits();

  const APInt *AC, *AddC;
  // Try to pre-shift a constant shifted by a variable amount added with a
  // negative number:
  // C << (X - AddC) --> (C >> AddC) << X
  // and
  // C >> (X - AddC) --> (C << AddC) >> X
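  // E.g. with i8: 12 << (X + (-2)) --> (12 >> 2) << X = 3 << X. This is only
  // sound when the pre-shift drops no set bits of C and the required
  // wrap/exact flags are present, which isSuitableForPreShift checks below.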
  if (match(Op0, m_APInt(AC)) && match(Op1, m_Add(m_Value(A), m_APInt(AddC))) &&
      AddC->isNegative() && (-*AddC).ult(BitWidth)) {
    assert(!AC->isZero() && "Expected simplify of shifted zero");
    unsigned PosOffset = (-*AddC).getZExtValue();

    auto isSuitableForPreShift = [PosOffset, &I, AC]() {
      switch (I.getOpcode()) {
      default:
        return false;
      case Instruction::Shl:
        return (I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) &&
               AC->eq(AC->lshr(PosOffset).shl(PosOffset));
      case Instruction::LShr:
        return I.isExact() && AC->eq(AC->shl(PosOffset).lshr(PosOffset));
      case Instruction::AShr:
        return I.isExact() && AC->eq(AC->shl(PosOffset).ashr(PosOffset));
      }
    };
    if (isSuitableForPreShift()) {
      Constant *NewC = ConstantInt::get(Ty, I.getOpcode() == Instruction::Shl
                                                ? AC->lshr(PosOffset)
                                                : AC->shl(PosOffset));
      BinaryOperator *NewShiftOp =
          BinaryOperator::Create(I.getOpcode(), NewC, A);
      if (I.getOpcode() == Instruction::Shl) {
        NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      } else {
        NewShiftOp->setIsExact();
      }
      return NewShiftOp;
    }
  }

  // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2,
  // because shifts by negative values (which could occur if A were negative)
  // are undefined.
  if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Constant(C))) &&
      match(C, m_Power2())) {
    // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
    // demand the sign bit (and many others) here??
    Constant *Mask = ConstantExpr::getSub(C, ConstantInt::get(Ty, 1));
    Value *Rem = Builder.CreateAnd(A, Mask, Op1->getName());
    return replaceOperand(I, 1, Rem);
  }

  if (Instruction *Logic = foldShiftOfShiftedBinOp(I, Builder))
    return Logic;

  if (match(Op1, m_Or(m_Value(), m_SpecificInt(BitWidth - 1))))
    return replaceOperand(I, 1, ConstantInt::get(Ty, BitWidth - 1));

  return nullptr;
}

/// Return true if we can simplify two logical (either left or right) shifts
/// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
                                    Instruction *InnerShift,
                                    InstCombinerImpl &IC, Instruction *CxtI) {
  assert(InnerShift->isLogicalShift() && "Unexpected instruction type");

  // We need constant scalar or constant splat shifts.
  const APInt *InnerShiftConst;
  if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
    return false;

  // Two logical shifts in the same direction:
  // shl (shl X, C1), C2 --> shl X, C1 + C2
  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
  if (IsInnerShl == IsOuterShl)
    return true;

  // Equal shift amounts in opposite directions become bitwise 'and':
  // lshr (shl X, C), C --> and X, C'
  // shl (lshr X, C), C --> and X, C'
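  // E.g. with i8: lshr (shl X, 3), 3 --> and X, 0x1F; the round trip through
  // the high bits clears exactly the top 3 bits of X.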
  if (*InnerShiftConst == OuterShAmt)
    return true;

  // If the 2nd shift is bigger than the 1st, we can fold:
  // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
  // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
  // but it isn't profitable unless we know the and'd out bits are already zero.
  // Also, check that the inner shift is valid (less than the type width) or
  // we'll crash trying to produce the bit mask for the 'and'.
  unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
  if (InnerShiftConst->ugt(OuterShAmt) && InnerShiftConst->ult(TypeWidth)) {
    unsigned InnerShAmt = InnerShiftConst->getZExtValue();
    unsigned MaskShift =
        IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
    APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
    if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
      return true;
  }

  return false;
}

/// See if we can compute the specified value, but shifted logically to the left
/// or right by some number of bits. This should return true if the expression
/// can be computed for the same cost as the current expression tree. This is
/// used to eliminate extraneous shifting from things like:
///      %C = shl i128 %A, 64
///      %D = shl i128 %B, 96
///      %E = or i128 %C, %D
///      %F = lshr i128 %E, 64
/// where the client will ask if E can be computed shifted right by 64 bits. If
/// this succeeds, getShiftedValue() will be called to produce the value.
static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
                               InstCombinerImpl &IC, Instruction *CxtI) {
  // We can always evaluate immediate constants.
  if (match(V, m_ImmConstant()))
    return true;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // We can't mutate something that has multiple uses: doing so would
  // require duplicating the instruction in general, which isn't profitable.
  if (!I->hasOneUse()) return false;

  switch (I->getOpcode()) {
  default: return false;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all be evaluated shifted arbitrarily.
    return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
           canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);

  case Instruction::Shl:
  case Instruction::LShr:
    return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);

  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    Value *TrueVal = SI->getTrueValue();
    Value *FalseVal = SI->getFalseValue();
    return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
           canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
        return false;
    return true;
  }
  case Instruction::Mul: {
    const APInt *MulConst;
    // We can fold (shr (mul X, -(1 << C)), C) -> (and (neg X), C')
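    // E.g. with i8: lshr (mul X, -16), 4 --> and (neg X), 15, because
    // X * -16 == (-X) << 4, and logically shifting that right by 4 leaves
    // the low 4 bits of -X.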
    return !IsLeftShift && match(I->getOperand(1), m_APInt(MulConst)) &&
           MulConst->isNegatedPowerOf2() && MulConst->countr_zero() == NumBits;
  }
  }
}

/// Fold OuterShift (InnerShift X, C1), C2.
/// See canEvaluateShiftedShift() for the constraints on these instructions.
static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
                               bool IsOuterShl,
                               InstCombiner::BuilderTy &Builder) {
  bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
  Type *ShType = InnerShift->getType();
  unsigned TypeWidth = ShType->getScalarSizeInBits();

  // We only accept shifts-by-a-constant in canEvaluateShifted().
  const APInt *C1;
  match(InnerShift->getOperand(1), m_APInt(C1));
  unsigned InnerShAmt = C1->getZExtValue();

  // Change the shift amount and clear the appropriate IR flags.
  auto NewInnerShift = [&](unsigned ShAmt) {
    InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
    if (IsInnerShl) {
      InnerShift->setHasNoUnsignedWrap(false);
      InnerShift->setHasNoSignedWrap(false);
    } else {
      InnerShift->setIsExact(false);
    }
    return InnerShift;
  };

  // Two logical shifts in the same direction:
  // shl (shl X, C1), C2 --> shl X, C1 + C2
  // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
  if (IsInnerShl == IsOuterShl) {
    // If this is an oversized composite shift, then unsigned shifts get 0.
    if (InnerShAmt + OuterShAmt >= TypeWidth)
      return Constant::getNullValue(ShType);

    return NewInnerShift(InnerShAmt + OuterShAmt);
  }

  // Equal shift amounts in opposite directions become bitwise 'and':
  // lshr (shl X, C), C --> and X, C'
  // shl (lshr X, C), C --> and X, C'
  if (InnerShAmt == OuterShAmt) {
    APInt Mask = IsInnerShl
                     ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
                     : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
    Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
                                   ConstantInt::get(ShType, Mask));
    if (auto *AndI = dyn_cast<Instruction>(And)) {
      AndI->moveBefore(InnerShift);
      AndI->takeName(InnerShift);
    }
    return And;
  }

  assert(InnerShAmt > OuterShAmt &&
         "Unexpected opposite direction logical shift pair");

  // In general, we would need an 'and' for this transform, but
  // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
  // lshr (shl X, C1), C2 --> shl X, C1 - C2
  // shl (lshr X, C1), C2 --> lshr X, C1 - C2
  return NewInnerShift(InnerShAmt - OuterShAmt);
}

/// When canEvaluateShifted() returns true for an expression, this function
/// inserts the new computation that produces the shifted value.
static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
                              InstCombinerImpl &IC, const DataLayout &DL) {
  // We can always evaluate constants shifted.
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (isLeftShift)
      return IC.Builder.CreateShl(C, NumBits);
    else
      return IC.Builder.CreateLShr(C, NumBits);
  }

  Instruction *I = cast<Instruction>(V);
  IC.addToWorklist(I);

  switch (I->getOpcode()) {
  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // Bitwise operators can all be evaluated shifted arbitrarily.
    I->setOperand(
        0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
    I->setOperand(
        1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
    return I;

  case Instruction::Shl:
  case Instruction::LShr:
    return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
                            IC.Builder);

  case Instruction::Select:
    I->setOperand(
        1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
    I->setOperand(
        2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
    return I;
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      PN->setIncomingValue(i, getShiftedValue(PN->getIncomingValue(i), NumBits,
                                              isLeftShift, IC, DL));
    return PN;
  }
  case Instruction::Mul: {
    assert(!isLeftShift && "Unexpected shift direction!");
    auto *Neg = BinaryOperator::CreateNeg(I->getOperand(0));
    IC.InsertNewInstWith(Neg, I->getIterator());
    unsigned TypeWidth = I->getType()->getScalarSizeInBits();
    APInt Mask = APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits);
    auto *And = BinaryOperator::CreateAnd(Neg,
                                          ConstantInt::get(I->getType(), Mask));
    And->takeName(I);
    return IC.InsertNewInstWith(And, I->getIterator());
  }
  }
}

// If this is a bitwise operator or add with a constant RHS, we might be able
// to pull it through a shift.
static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift,
                                         BinaryOperator *BO) {
  switch (BO->getOpcode()) {
  default:
    return false; // Do not perform transform!
  case Instruction::Add:
    return Shift.getOpcode() == Instruction::Shl;
  case Instruction::Or:
  case Instruction::And:
    return true;
  case Instruction::Xor:
    // Do not change a 'not' of logical shift because that would create a normal
    // 'xor'. The 'not' is likely better for analysis, SCEV, and codegen.
    return !(Shift.isLogicalShift() && match(BO, m_Not(m_Value())));
  }
}

Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *C1,
                                                   BinaryOperator &I) {
  // (C2 << X) << C1 --> (C2 << C1) << X
  // (C2 >> X) >> C1 --> (C2 >> C1) >> X
  Constant *C2;
  Value *X;
  bool IsLeftShift = I.getOpcode() == Instruction::Shl;
  if (match(Op0, m_BinOp(I.getOpcode(), m_ImmConstant(C2), m_Value(X)))) {
    Instruction *R = BinaryOperator::Create(
        I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), C2, C1), X);
    BinaryOperator *BO0 = cast<BinaryOperator>(Op0);
    if (IsLeftShift) {
      R->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() &&
                              BO0->hasNoUnsignedWrap());
      R->setHasNoSignedWrap(I.hasNoSignedWrap() && BO0->hasNoSignedWrap());
    } else
      R->setIsExact(I.isExact() && BO0->isExact());
    return R;
  }

  Type *Ty = I.getType();
  unsigned TypeBits = Ty->getScalarSizeInBits();

  // (X / +DivC) >> (Width - 1) --> ext (X <= -DivC)
  // (X / -DivC) >> (Width - 1) --> ext (X >= +DivC)
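  // E.g. for i32 and +DivC == 3: (X /s 3) a>> 31 is all-ones exactly when
  // the division result is negative, i.e. when X <= -3.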
  const APInt *DivC;
  if (!IsLeftShift && match(C1, m_SpecificIntAllowPoison(TypeBits - 1)) &&
      match(Op0, m_SDiv(m_Value(X), m_APInt(DivC))) && !DivC->isZero() &&
      !DivC->isMinSignedValue()) {
    Constant *NegDivC = ConstantInt::get(Ty, -(*DivC));
    ICmpInst::Predicate Pred =
        DivC->isNegative() ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_SLE;
    Value *Cmp = Builder.CreateICmp(Pred, X, NegDivC);
    auto ExtOpcode = (I.getOpcode() == Instruction::AShr) ? Instruction::SExt
                                                          : Instruction::ZExt;
    return CastInst::Create(ExtOpcode, Cmp, Ty);
  }

  const APInt *Op1C;
  if (!match(C1, m_APInt(Op1C)))
    return nullptr;

  assert(!Op1C->uge(TypeBits) &&
         "Shift over the type width should have been removed already");

  // See if we can propagate this shift into the input, this covers the trivial
  // cast of lshr(shl(x,c1),c2) as well as other more complex cases.
  if (I.getOpcode() != Instruction::AShr &&
      canEvaluateShifted(Op0, Op1C->getZExtValue(), IsLeftShift, *this, &I)) {
    LLVM_DEBUG(
        dbgs() << "ICE: GetShiftedValue propagating shift through expression"
                  " to eliminate shift:\n  IN: "
               << *Op0 << "\n  SH: " << I << "\n");

    return replaceInstUsesWith(
        I, getShiftedValue(Op0, Op1C->getZExtValue(), IsLeftShift, *this, DL));
  }

  if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I))
    return FoldedShift;

  if (!Op0->hasOneUse())
    return nullptr;

  if (auto *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
    // If the operand is a bitwise operator with a constant RHS, and the
    // shift is the only use, we can pull it out of the shift.
    const APInt *Op0C;
    if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
      if (canShiftBinOpWithConstantRHS(I, Op0BO)) {
        Value *NewRHS =
            Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(1), C1);

        Value *NewShift =
            Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), C1);
        NewShift->takeName(Op0BO);

        return BinaryOperator::Create(Op0BO->getOpcode(), NewShift, NewRHS);
      }
    }
  }

  // If we have a select that conditionally executes some binary operator,
  // see if we can pull the select and operator through the shift.
  //
  // For example, turning:
  //   shl (select C, (add X, C1), X), C2
  // Into:
  //   Y = shl X, C2
  //   select C, (add Y, C1 << C2), Y
  Value *Cond;
  BinaryOperator *TBO;
  Value *FalseVal;
  if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
                          m_Value(FalseVal)))) {
    const APInt *C;
    if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
        match(TBO->getOperand(1), m_APInt(C)) &&
        canShiftBinOpWithConstantRHS(I, TBO)) {
      Value *NewRHS =
          Builder.CreateBinOp(I.getOpcode(), TBO->getOperand(1), C1);

      Value *NewShift = Builder.CreateBinOp(I.getOpcode(), FalseVal, C1);
      Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift, NewRHS);
      return SelectInst::Create(Cond, NewOp, NewShift);
    }
  }

  BinaryOperator *FBO;
  Value *TrueVal;
  if (match(Op0, m_Select(m_Value(Cond), m_Value(TrueVal),
                          m_OneUse(m_BinOp(FBO))))) {
    const APInt *C;
    if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
        match(FBO->getOperand(1), m_APInt(C)) &&
        canShiftBinOpWithConstantRHS(I, FBO)) {
      Value *NewRHS =
          Builder.CreateBinOp(I.getOpcode(), FBO->getOperand(1), C1);

      Value *NewShift = Builder.CreateBinOp(I.getOpcode(), TrueVal, C1);
      Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift, NewRHS);
      return SelectInst::Create(Cond, NewShift, NewOp);
    }
  }

  return nullptr;
}

// Tries to perform
//    (lshr (add (zext X), (zext Y)), K)
//  ->  (icmp ult (add X, Y), X)
// where
// - The add's operands are zexts from a K-bit integer to a bigger type.
// - The add is only used by the shr, or by iK (or narrower) truncates.
// - The lshr type has more than 2 bits (other types are boolean math).
// - K > 1
// note that
// - The resulting add cannot have nuw/nsw, else on overflow we get a
//   poison value and the transform isn't legal anymore.
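// E.g. for K == 8 and an i32 lshr:
//   %a = add i32 (zext i8 %x), (zext i8 %y)
//   %r = lshr i32 %a, 8
// becomes
//   %s = add i8 %x, %y
//   %r = zext (icmp ult i8 %s, %x) to i32
// since the shifted-out high bit is exactly the carry of the narrow add.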
Instruction *InstCombinerImpl::foldLShrOverflowBit(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::LShr);

  Value *Add = I.getOperand(0);
  Value *ShiftAmt = I.getOperand(1);
  Type *Ty = I.getType();

  if (Ty->getScalarSizeInBits() < 3)
    return nullptr;

  const APInt *ShAmtAPInt = nullptr;
  Value *X = nullptr, *Y = nullptr;
  if (!match(ShiftAmt, m_APInt(ShAmtAPInt)) ||
      !match(Add,
             m_Add(m_OneUse(m_ZExt(m_Value(X))), m_OneUse(m_ZExt(m_Value(Y))))))
    return nullptr;

  const unsigned ShAmt = ShAmtAPInt->getZExtValue();
  if (ShAmt == 1)
    return nullptr;

  // X/Y are zexts from `ShAmt`-sized ints.
  if (X->getType()->getScalarSizeInBits() != ShAmt ||
      Y->getType()->getScalarSizeInBits() != ShAmt)
    return nullptr;

  // Make sure that `Add` is only used by `I` and `ShAmt`-truncates.
  if (!Add->hasOneUse()) {
    for (User *U : Add->users()) {
      if (U == &I)
        continue;

      TruncInst *Trunc = dyn_cast<TruncInst>(U);
      if (!Trunc || Trunc->getType()->getScalarSizeInBits() > ShAmt)
        return nullptr;
    }
  }

  // Insert at Add so that the newly created `NarrowAdd` will dominate its
  // users (i.e. `Add`'s users).
  Instruction *AddInst = cast<Instruction>(Add);
  Builder.SetInsertPoint(AddInst);

  Value *NarrowAdd = Builder.CreateAdd(X, Y, "add.narrowed");
  Value *Overflow =
      Builder.CreateICmpULT(NarrowAdd, X, "add.narrowed.overflow");

  // Replace the uses of the original add with a zext of the
  // NarrowAdd's result. Note that all users at this stage are known to
  // be ShAmt-sized truncs, or the lshr itself.
  if (!Add->hasOneUse()) {
    replaceInstUsesWith(*AddInst, Builder.CreateZExt(NarrowAdd, Ty));
    eraseInstFromFunction(*AddInst);
  }

  // Replace the LShr with a zext of the overflow check.
  return new ZExtInst(Overflow, Ty);
}

// Try to set nuw/nsw flags on shl or exact flag on lshr/ashr using knownbits.
static bool setShiftFlags(BinaryOperator &I, const SimplifyQuery &Q) {
  assert(I.isShift() && "Expected a shift as input");
  // We already have all the flags.
  if (I.getOpcode() == Instruction::Shl) {
    if (I.hasNoUnsignedWrap() && I.hasNoSignedWrap())
      return false;
  } else {
    if (I.isExact())
      return false;

    // shr (shl X, Y), Y
    if (match(I.getOperand(0), m_Shl(m_Value(), m_Specific(I.getOperand(1))))) {
      I.setIsExact();
      return true;
    }
  }

  // Compute what we know about shift count.
  KnownBits KnownCnt = computeKnownBits(I.getOperand(1), /* Depth */ 0, Q);
  unsigned BitWidth = KnownCnt.getBitWidth();
  // Since shift produces a poison value if RHS is equal to or larger than the
  // bit width, we can safely assume that RHS is less than the bit width.
  uint64_t MaxCnt = KnownCnt.getMaxValue().getLimitedValue(BitWidth - 1);

  KnownBits KnownAmt = computeKnownBits(I.getOperand(0), /* Depth */ 0, Q);
  bool Changed = false;

  if (I.getOpcode() == Instruction::Shl) {
    // If we have at least as many leading zeros as the maximum shift count,
    // we have nuw.
    if (!I.hasNoUnsignedWrap() && MaxCnt <= KnownAmt.countMinLeadingZeros()) {
      I.setHasNoUnsignedWrap();
      Changed = true;
    }
    // If we have more sign bits than the maximum shift count, we have nsw.
    if (!I.hasNoSignedWrap()) {
      if (MaxCnt < KnownAmt.countMinSignBits() ||
          MaxCnt < ComputeNumSignBits(I.getOperand(0), Q.DL, /*Depth*/ 0, Q.AC,
                                      Q.CxtI, Q.DT)) {
        I.setHasNoSignedWrap();
        Changed = true;
      }
    }
    return Changed;
  }

  // If we have at least as many trailing zeros as the maximum count then we
  // have exact.
  Changed = MaxCnt <= KnownAmt.countMinTrailingZeros();
  I.setIsExact(Changed);

  return Changed;
}

Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
  const SimplifyQuery Q = SQ.getWithInstruction(&I);

  if (Value *V = simplifyShlInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), Q))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *V = commonShiftTransforms(I))
    return V;

  if (Instruction *V = dropRedundantMaskingOfLeftShiftInput(&I, Q, Builder))
    return V;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();

  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    unsigned ShAmtC = C->getZExtValue();

    // shl (zext X), C --> zext (shl X, C)
    // This is only valid if X would have zeros shifted out.
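    // E.g. shl (zext i8 %x to i32), 2 --> zext (shl i8 %x, 2) to i32,
    // provided the top 2 bits of %x are known zero.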
1045 | Value *X; |
1046 | if (match(V: Op0, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))))) { |
1047 | unsigned SrcWidth = X->getType()->getScalarSizeInBits(); |
1048 | if (ShAmtC < SrcWidth && |
1049 | MaskedValueIsZero(V: X, Mask: APInt::getHighBitsSet(numBits: SrcWidth, hiBitsSet: ShAmtC), Depth: 0, CxtI: &I)) |
1050 | return new ZExtInst(Builder.CreateShl(LHS: X, RHS: ShAmtC), Ty); |
1051 | } |
1052 | |
1053 | // (X >> C) << C --> X & (-1 << C) |
1054 | if (match(V: Op0, P: m_Shr(L: m_Value(V&: X), R: m_Specific(V: Op1)))) { |
1055 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
1056 | return BinaryOperator::CreateAnd(V1: X, V2: ConstantInt::get(Ty, V: Mask)); |
1057 | } |
1058 | |
1059 | const APInt *C1; |
1060 | if (match(V: Op0, P: m_Exact(SubPattern: m_Shr(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) && |
1061 | C1->ult(RHS: BitWidth)) { |
1062 | unsigned ShrAmt = C1->getZExtValue(); |
1063 | if (ShrAmt < ShAmtC) { |
1064 | // If C1 < C: (X >>?,exact C1) << C --> X << (C - C1) |
1065 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShAmtC - ShrAmt); |
1066 | auto *NewShl = BinaryOperator::CreateShl(V1: X, V2: ShiftDiff); |
1067 | NewShl->setHasNoUnsignedWrap( |
1068 | I.hasNoUnsignedWrap() || |
1069 | (ShrAmt && |
1070 | cast<Instruction>(Val: Op0)->getOpcode() == Instruction::LShr && |
1071 | I.hasNoSignedWrap())); |
1072 | NewShl->setHasNoSignedWrap(I.hasNoSignedWrap()); |
1073 | return NewShl; |
1074 | } |
1075 | if (ShrAmt > ShAmtC) { |
1076 | // If C1 > C: (X >>?exact C1) << C --> X >>?exact (C1 - C) |
1077 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShrAmt - ShAmtC); |
1078 | auto *NewShr = BinaryOperator::Create( |
1079 | Op: cast<BinaryOperator>(Val: Op0)->getOpcode(), S1: X, S2: ShiftDiff); |
1080 | NewShr->setIsExact(true); |
1081 | return NewShr; |
1082 | } |
1083 | } |
1084 | |
1085 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Shr(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) && |
1086 | C1->ult(RHS: BitWidth)) { |
1087 | unsigned ShrAmt = C1->getZExtValue(); |
1088 | if (ShrAmt < ShAmtC) { |
1089 | // If C1 < C: (X >>? C1) << C --> (X << (C - C1)) & (-1 << C) |
1090 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShAmtC - ShrAmt); |
1091 | auto *NewShl = BinaryOperator::CreateShl(V1: X, V2: ShiftDiff); |
1092 | NewShl->setHasNoUnsignedWrap( |
1093 | I.hasNoUnsignedWrap() || |
1094 | (ShrAmt && |
1095 | cast<Instruction>(Val: Op0)->getOpcode() == Instruction::LShr && |
1096 | I.hasNoSignedWrap())); |
1097 | NewShl->setHasNoSignedWrap(I.hasNoSignedWrap()); |
1098 | Builder.Insert(I: NewShl); |
1099 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
1100 | return BinaryOperator::CreateAnd(V1: NewShl, V2: ConstantInt::get(Ty, V: Mask)); |
1101 | } |
1102 | if (ShrAmt > ShAmtC) { |
1103 | // If C1 > C: (X >>? C1) << C --> (X >>? (C1 - C)) & (-1 << C) |
1104 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShrAmt - ShAmtC); |
1105 | auto *OldShr = cast<BinaryOperator>(Val: Op0); |
1106 | auto *NewShr = |
1107 | BinaryOperator::Create(Op: OldShr->getOpcode(), S1: X, S2: ShiftDiff); |
1108 | NewShr->setIsExact(OldShr->isExact()); |
1109 | Builder.Insert(I: NewShr); |
1110 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
1111 | return BinaryOperator::CreateAnd(V1: NewShr, V2: ConstantInt::get(Ty, V: Mask)); |
1112 | } |
1113 | } |
1114 | |
1115 | // Similar to above, but look through an intermediate trunc instruction. |
1116 | BinaryOperator *Shr; |
1117 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Trunc(Op: m_OneUse(SubPattern: m_BinOp(I&: Shr))))) && |
1118 | match(V: Shr, P: m_Shr(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) { |
1119 | // The larger shift direction survives through the transform. |
1120 | unsigned ShrAmtC = C1->getZExtValue(); |
1121 | unsigned ShDiff = ShrAmtC > ShAmtC ? ShrAmtC - ShAmtC : ShAmtC - ShrAmtC; |
1122 | Constant *ShiftDiffC = ConstantInt::get(Ty: X->getType(), V: ShDiff); |
1123 | auto ShiftOpc = ShrAmtC > ShAmtC ? Shr->getOpcode() : Instruction::Shl; |
1124 | |
1125 | // If C1 > C: |
1126 | // (trunc (X >> C1)) << C --> (trunc (X >> (C1 - C))) && (-1 << C) |
1127 | // If C > C1: |
1128 | // (trunc (X >> C1)) << C --> (trunc (X << (C - C1))) && (-1 << C) |
1129 | Value *NewShift = Builder.CreateBinOp(Opc: ShiftOpc, LHS: X, RHS: ShiftDiffC, Name: "sh.diff" ); |
1130 | Value *Trunc = Builder.CreateTrunc(V: NewShift, DestTy: Ty, Name: "tr.sh.diff" ); |
1131 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
1132 | return BinaryOperator::CreateAnd(V1: Trunc, V2: ConstantInt::get(Ty, V: Mask)); |
1133 | } |
1134 | |
1135 | // If we have an opposite shift by the same amount, we may be able to |
1136 | // reorder binops and shifts to eliminate math/logic. |
1137 | auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) { |
1138 | switch (BinOpcode) { |
1139 | default: |
1140 | return false; |
1141 | case Instruction::Add: |
1142 | case Instruction::And: |
1143 | case Instruction::Or: |
1144 | case Instruction::Xor: |
1145 | case Instruction::Sub: |
1146 | // NOTE: Sub is not commutable and the tranforms below may not be valid |
1147 | // when the shift-right is operand 1 (RHS) of the sub. |
1148 | return true; |
1149 | } |
1150 | }; |
1151 | BinaryOperator *Op0BO; |
1152 | if (match(V: Op0, P: m_OneUse(SubPattern: m_BinOp(I&: Op0BO))) && |
1153 | isSuitableBinOpcode(Op0BO->getOpcode())) { |
1154 | // Commute so shift-right is on LHS of the binop. |
1155 | // (Y bop (X >> C)) << C -> ((X >> C) bop Y) << C |
1156 | // (Y bop ((X >> C) & CC)) << C -> (((X >> C) & CC) bop Y) << C |
1157 | Value *Shr = Op0BO->getOperand(i_nocapture: 0); |
1158 | Value *Y = Op0BO->getOperand(i_nocapture: 1); |
1159 | Value *X; |
1160 | const APInt *CC; |
1161 | if (Op0BO->isCommutative() && Y->hasOneUse() && |
1162 | (match(V: Y, P: m_Shr(L: m_Value(), R: m_Specific(V: Op1))) || |
1163 | match(V: Y, P: m_And(L: m_OneUse(SubPattern: m_Shr(L: m_Value(), R: m_Specific(V: Op1))), |
1164 | R: m_APInt(Res&: CC))))) |
1165 | std::swap(a&: Shr, b&: Y); |
1166 | |
1167 | // ((X >> C) bop Y) << C -> (X bop (Y << C)) & (~0 << C) |
1168 | if (match(V: Shr, P: m_OneUse(SubPattern: m_Shr(L: m_Value(V&: X), R: m_Specific(V: Op1))))) { |
1169 | // Y << C |
1170 | Value *YS = Builder.CreateShl(LHS: Y, RHS: Op1, Name: Op0BO->getName()); |
1171 | // (X bop (Y << C)) |
1172 | Value *B = |
1173 | Builder.CreateBinOp(Opc: Op0BO->getOpcode(), LHS: X, RHS: YS, Name: Shr->getName()); |
1174 | unsigned Op1Val = C->getLimitedValue(Limit: BitWidth); |
1175 | APInt Bits = APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - Op1Val); |
1176 | Constant *Mask = ConstantInt::get(Ty, V: Bits); |
1177 | return BinaryOperator::CreateAnd(V1: B, V2: Mask); |
1178 | } |

      // (((X >> C) & CC) bop Y) << C -> (X & (CC << C)) bop (Y << C)
      if (match(Shr,
                m_OneUse(m_And(m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))),
                               m_APInt(CC))))) {
        // Y << C
        Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
        // X & (CC << C)
        Value *M = Builder.CreateAnd(X, ConstantInt::get(Ty, CC->shl(*C)),
                                     X->getName() + ".mask");
        auto *NewOp = BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
        if (auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Op0);
            Disjoint && Disjoint->isDisjoint())
          cast<PossiblyDisjointInst>(NewOp)->setIsDisjoint(true);
        return NewOp;
      }
    }

    // (C1 - X) << C --> (C1 << C) - (X << C)
    if (match(Op0, m_OneUse(m_Sub(m_APInt(C1), m_Value(X))))) {
      Constant *NewLHS = ConstantInt::get(Ty, C1->shl(*C));
      Value *NewShift = Builder.CreateShl(X, Op1);
      return BinaryOperator::CreateSub(NewLHS, NewShift);
    }
  }

  if (setShiftFlags(I, Q))
    return &I;

  // Transform (x >> y) << y to x & (-1 << y)
  // Valid for any type of right-shift.
  Value *X;
  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    Value *Mask = Builder.CreateShl(AllOnes, Op1);
    return BinaryOperator::CreateAnd(Mask, X);
  }
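
  // Worked example (illustrative, not from the original source): the mask
  // (-1 << y) keeps exactly the bits that survive the round trip; e.g. for
  // i8 with y = 3, (x >> 3) << 3 == x & 0b11111000 for either right-shift.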

  // Transform (-1 >> y) << y to -1 << y
  if (match(Op0, m_LShr(m_AllOnes(), m_Specific(Op1)))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    return BinaryOperator::CreateShl(AllOnes, Op1);
  }

  Constant *C1;
  if (match(Op1, m_ImmConstant(C1))) {
    Constant *C2;
    Value *X;
    // (X * C2) << C1 --> X * (C2 << C1)
    if (match(Op0, m_Mul(m_Value(X), m_ImmConstant(C2))))
      return BinaryOperator::CreateMul(X, Builder.CreateShl(C2, C1));

    // shl (zext i1 X), C1 --> select (X, 1 << C1, 0)
    if (match(Op0, m_ZExt(m_Value(X))) &&
        X->getType()->isIntOrIntVectorTy(1)) {
      auto *NewC = Builder.CreateShl(ConstantInt::get(Ty, 1), C1);
      return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
    }
  }

  if (match(Op0, m_One())) {
    // (1 << (C - x)) -> ((1 << C) >> x) if C is bitwidth - 1
    if (match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X))))
      return BinaryOperator::CreateLShr(
          ConstantInt::get(Ty, APInt::getSignMask(BitWidth)), X);

    // Canonicalize "extract lowest set bit" using cttz to and-with-negate:
    // 1 << (cttz X) --> -X & X
    if (match(Op1,
              m_OneUse(m_Intrinsic<Intrinsic::cttz>(m_Value(X), m_Value())))) {
      Value *NegX = Builder.CreateNeg(X, "neg");
      return BinaryOperator::CreateAnd(NegX, X);
    }
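
    // Worked example (illustrative, not from the original source): for
    // X = 0b0010100, cttz(X) == 2 and 1 << 2 == 0b0000100; likewise
    // -X == ...1101100, so (-X & X) == 0b0000100, the lowest set bit.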
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
  if (Value *V = simplifyLShrInst(I.getOperand(0), I.getOperand(1),
                                  I.isExact(), SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = commonShiftTransforms(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  const APInt *C;
  unsigned BitWidth = Ty->getScalarSizeInBits();

  // (iN (~X) u>> (N - 1)) --> zext (X > -1)
  if (match(Op0, m_OneUse(m_Not(m_Value(X)))) &&
      match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)))
    return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);

  // ((X << nuw Z) sub nuw Y) >>u exact Z --> X sub nuw (Y >>u exact Z)
  Value *Y;
  if (I.isExact() &&
      match(Op0, m_OneUse(m_NUWSub(m_NUWShl(m_Value(X), m_Specific(Op1)),
                                   m_Value(Y))))) {
    Value *NewLshr = Builder.CreateLShr(Y, Op1, "", /*isExact=*/true);
    auto *NewSub = BinaryOperator::CreateNUWSub(X, NewLshr);
    NewSub->setHasNoSignedWrap(
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
    return NewSub;
  }

  // Fold (X + Y) / 2 --> (X & Y) iff (X u<= 1) && (Y u<= 1)
  if (match(Op0, m_Add(m_Value(X), m_Value(Y))) && match(Op1, m_One()) &&
      computeKnownBits(X, /*Depth=*/0, &I).countMaxActiveBits() <= 1 &&
      computeKnownBits(Y, /*Depth=*/0, &I).countMaxActiveBits() <= 1)
    return BinaryOperator::CreateAnd(X, Y);
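
  // Reasoning (not from the original source): when X and Y are both u<= 1,
  // X + Y is in {0, 1, 2}, and (X + Y) >>u 1 is 1 exactly when the sum is 2,
  // i.e. when both inputs are 1 -- which is X & Y.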

  // (sub nuw X, (Y << nuw Z)) >>u exact Z --> (X >>u exact Z) sub nuw Y
  if (I.isExact() &&
      match(Op0, m_OneUse(m_NUWSub(m_Value(X),
                                   m_NUWShl(m_Value(Y), m_Specific(Op1)))))) {
    Value *NewLshr = Builder.CreateLShr(X, Op1, "", /*isExact=*/true);
    auto *NewSub = BinaryOperator::CreateNUWSub(NewLshr, Y);
    NewSub->setHasNoSignedWrap(
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
    return NewSub;
  }

  auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
    switch (BinOpcode) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      // Sub is handled separately.
      return true;
    }
  };

  // If both the binop and the shift are nuw, then:
  // ((X << nuw Z) binop nuw Y) >>u Z --> X binop nuw (Y >>u Z)
  if (match(Op0, m_OneUse(m_c_BinOp(m_NUWShl(m_Value(X), m_Specific(Op1)),
                                    m_Value(Y))))) {
    BinaryOperator *Op0OB = cast<BinaryOperator>(Op0);
    if (isSuitableBinOpcode(Op0OB->getOpcode())) {
      if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op0);
          !OBO || OBO->hasNoUnsignedWrap()) {
        Value *NewLshr = Builder.CreateLShr(
            Y, Op1, "", I.isExact() && Op0OB->getOpcode() != Instruction::And);
        auto *NewBinOp = BinaryOperator::Create(Op0OB->getOpcode(), NewLshr, X);
        if (OBO) {
          NewBinOp->setHasNoUnsignedWrap(true);
          NewBinOp->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        } else if (auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Op0)) {
          cast<PossiblyDisjointInst>(NewBinOp)->setIsDisjoint(
              Disjoint->isDisjoint());
        }
        return NewBinOp;
      }
    }
  }

  if (match(Op1, m_APInt(C))) {
    unsigned ShAmtC = C->getZExtValue();
    auto *II = dyn_cast<IntrinsicInst>(Op0);
    if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmtC &&
        (II->getIntrinsicID() == Intrinsic::ctlz ||
         II->getIntrinsicID() == Intrinsic::cttz ||
         II->getIntrinsicID() == Intrinsic::ctpop)) {
      // ctlz.i32(x)>>5  --> zext(x == 0)
      // cttz.i32(x)>>5  --> zext(x == 0)
      // ctpop.i32(x)>>5 --> zext(x == -1)
      bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
      Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
      Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
      return new ZExtInst(Cmp, Ty);
    }
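
    // Reasoning (not from the original source): these intrinsics return a
    // value in [0, BitWidth], and the only value in that range with bit
    // log2(BitWidth) set is BitWidth itself. So the shifted result is 1 iff
    // the count is exactly BitWidth, i.e. x == 0 (ctlz/cttz) or x == -1
    // (ctpop).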

    const APInt *C1;
    if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
      if (C1->ult(ShAmtC)) {
        unsigned ShlAmtC = C1->getZExtValue();
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShlAmtC);
        if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C --> X >>u (C - C1)
          auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
          NewLShr->setIsExact(I.isExact());
          return NewLShr;
        }
        if (Op0->hasOneUse()) {
          // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
          Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
          APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
          return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
        }
      } else if (C1->ugt(ShAmtC)) {
        unsigned ShlAmtC = C1->getZExtValue();
        Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
        if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C --> X <<nuw/nsw (C1 - C)
          auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
          NewShl->setHasNoUnsignedWrap(true);
          NewShl->setHasNoSignedWrap(ShAmtC > 0);
          return NewShl;
        }
        if (Op0->hasOneUse()) {
          // (X << C1) >>u C --> (X << (C1 - C)) & (-1 >> C)
          Value *NewShl = Builder.CreateShl(X, ShiftDiff);
          APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
          return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
        }
      } else {
        assert(*C1 == ShAmtC);
        // (X << C) >>u C --> X & (-1 >>u C)
        APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
        return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
      }
    }

    // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
    // TODO: Consolidate with the more general transform that starts from shl
    // (the shifts are in the opposite order).
    if (match(Op0,
              m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
                               m_Value(Y))))) {
      Value *NewLshr = Builder.CreateLShr(Y, Op1);
      Value *NewAdd = Builder.CreateAdd(NewLshr, X);
      unsigned Op1Val = C->getLimitedValue(BitWidth);
      APInt Bits = APInt::getLowBitsSet(BitWidth, BitWidth - Op1Val);
      Constant *Mask = ConstantInt::get(Ty, Bits);
      return BinaryOperator::CreateAnd(NewAdd, Mask);
    }
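
    // Reasoning (not from the original source): (X << C) + Y equals
    // ((X + (Y >>u C)) << C) + (Y mod 2^C), so shifting right by C yields
    // X + (Y >>u C) truncated to the low BitWidth - C bits, hence the mask.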

    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
        (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
      assert(ShAmtC < X->getType()->getScalarSizeInBits() &&
             "Big shift not simplified to zero?");
      // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
      Value *NewLShr = Builder.CreateLShr(X, ShAmtC);
      return new ZExtInst(NewLShr, Ty);
    }

    if (match(Op0, m_SExt(m_Value(X)))) {
      unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
      // lshr (sext i1 X to iN), C --> select (X, -1 >> C, 0)
      if (SrcTyBitWidth == 1) {
        auto *NewC = ConstantInt::get(
            Ty, APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
        return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
      }

      if ((!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType())) &&
          Op0->hasOneUse()) {
        // Are we moving the sign bit to the low bit and widening with high
        // zeros? lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
        if (ShAmtC == BitWidth - 1) {
          Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
          return new ZExtInst(NewLShr, Ty);
        }

        // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
        if (ShAmtC == BitWidth - SrcTyBitWidth) {
          // The new shift amount can't be more than the narrow source type.
          unsigned NewShAmt = std::min(ShAmtC, SrcTyBitWidth - 1);
          Value *AShr = Builder.CreateAShr(X, NewShAmt);
          return new ZExtInst(AShr, Ty);
        }
      }
    }

    if (ShAmtC == BitWidth - 1) {
      // lshr i32 or(X,-X), 31 --> zext (X != 0)
      if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
        return new ZExtInst(Builder.CreateIsNotNull(X), Ty);

      // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
      if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
        return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);

      // Check if a number is negative and odd:
      // lshr i32 (srem X, 2), 31 --> and (X >> 31), X
      if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) {
        Value *Signbit = Builder.CreateLShr(X, ShAmtC);
        return BinaryOperator::CreateAnd(Signbit, X);
      }
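
      // Reasoning (not from the original source): srem X, 2 is -1, 0, or 1,
      // and is negative only when X is negative and odd. X >>u 31 is 0 or 1,
      // so ANDing it with X keeps just X's low bit when X is negative --
      // exactly the sign bit of the srem result moved down to bit 0.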
    }

    Instruction *TruncSrc;
    if (match(Op0, m_OneUse(m_Trunc(m_Instruction(TruncSrc)))) &&
        match(TruncSrc, m_LShr(m_Value(X), m_APInt(C1)))) {
      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
      unsigned AmtSum = ShAmtC + C1->getZExtValue();

      // If the combined shift fits in the source width:
      // (trunc (X >>u C1)) >>u C --> (trunc (X >>u (C1 + C))) & MaskC
      //
      // If the first shift covers the number of bits truncated, then the
      // mask instruction is eliminated (and so the use check is relaxed).
      if (AmtSum < SrcWidth &&
          (TruncSrc->hasOneUse() || C1->uge(SrcWidth - BitWidth))) {
        Value *SumShift = Builder.CreateLShr(X, AmtSum, "sum.shift");
        Value *Trunc = Builder.CreateTrunc(SumShift, Ty, I.getName());

        // If the first shift does not cover the number of bits truncated,
        // then we require a mask to get rid of high bits in the result.
        APInt MaskC = APInt::getAllOnes(BitWidth).lshr(ShAmtC);
        return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, MaskC));
      }
    }

    const APInt *MulC;
    if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
      if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
          MulC->logBase2() == ShAmtC) {
        // Look for a "splat" mul pattern - it replicates bits across each
        // half of a value, so a right shift simplifies back to just X:
        // lshr i[2N] (mul nuw X, (2^N)+1), N --> X
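        // Worked example (illustrative, not from the original source): i16,
        // N = 8: mul nuw X, 257 computes (X << 8) + X, i.e. the low byte of
        // X copied into both halves (nuw guarantees X fits in 8 bits), so
        // lshr by 8 recovers X.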
        if (ShAmtC * 2 == BitWidth)
          return replaceInstUsesWith(I, X);

        // lshr (mul nuw (X, 2^N + 1)), N -> add nuw (X, lshr(X, N))
        if (Op0->hasOneUse()) {
          auto *NewAdd = BinaryOperator::CreateNUWAdd(
              X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
                                    I.isExact()));
          NewAdd->setHasNoSignedWrap(
              cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
          return NewAdd;
        }
      }

      // The one-use check is not strictly necessary, but codegen may not be
      // able to invert the transform and perf may suffer with an extra mul
      // instruction.
      if (Op0->hasOneUse()) {
        APInt NewMulC = MulC->lshr(ShAmtC);
        // if c is divisible by (1 << ShAmtC):
        // lshr (mul nuw x, MulC), ShAmtC -> mul nuw nsw x, (MulC >> ShAmtC)
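        // For instance (illustrative, not from the original source), with
        // MulC = 12 and ShAmtC = 2: lshr (mul nuw x, 12), 2 becomes
        // mul nuw nsw x, 3, since x*12 >>u 2 == x*3 when the mul is nuw.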
        if (MulC->eq(NewMulC.shl(ShAmtC))) {
          auto *NewMul =
              BinaryOperator::CreateNUWMul(X, ConstantInt::get(Ty, NewMulC));
          assert(ShAmtC != 0 &&
                 "lshr X, 0 should be handled by simplifyLShrInst.");
          NewMul->setHasNoSignedWrap(true);
          return NewMul;
        }
      }
    }

    // lshr (mul nsw (X, 2^N + 1)), N -> add nsw (X, lshr(X, N))
    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC))))) {
      if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
          MulC->logBase2() == ShAmtC) {
        return BinaryOperator::CreateNSWAdd(
            X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
                                  I.isExact()));
      }
    }

    // Try to narrow bswap.
    // In the case where the shift amount equals the bitwidth difference, the
    // shift is eliminated.
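    // Worked example (illustrative, not from the original source): for
    // lshr (bswap (zext i16 X to i32)), 16, the 32-bit bswap moves X's two
    // bytes (reversed) into the high half, and the shift brings them back
    // down -- which is just bswap i16 X, zero-extended.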
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::bswap>(
                       m_OneUse(m_ZExt(m_Value(X))))))) {
      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
      unsigned WidthDiff = BitWidth - SrcWidth;
      if (SrcWidth % 16 == 0) {
        Value *NarrowSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
        if (ShAmtC >= WidthDiff) {
          // (bswap (zext X)) >> C --> zext (bswap X >> C')
          Value *NewShift = Builder.CreateLShr(NarrowSwap, ShAmtC - WidthDiff);
          return new ZExtInst(NewShift, Ty);
        } else {
          // (bswap (zext X)) >> C --> (zext (bswap X)) << C'
          Value *NewZExt = Builder.CreateZExt(NarrowSwap, Ty);
          Constant *ShiftDiff = ConstantInt::get(Ty, WidthDiff - ShAmtC);
          return BinaryOperator::CreateShl(NewZExt, ShiftDiff);
        }
      }
    }

    // Reduce add-carry of bools to logic:
    // ((zext BoolX) + (zext BoolY)) >> 1 --> zext (BoolX && BoolY)
    Value *BoolX, *BoolY;
    if (ShAmtC == 1 && match(Op0, m_Add(m_Value(X), m_Value(Y))) &&
        match(X, m_ZExt(m_Value(BoolX))) && match(Y, m_ZExt(m_Value(BoolY))) &&
        BoolX->getType()->isIntOrIntVectorTy(1) &&
        BoolY->getType()->isIntOrIntVectorTy(1) &&
        (X->hasOneUse() || Y->hasOneUse() || Op0->hasOneUse())) {
      Value *And = Builder.CreateAnd(BoolX, BoolY);
      return new ZExtInst(And, Ty);
    }
  }

  const SimplifyQuery Q = SQ.getWithInstruction(&I);
  if (setShiftFlags(I, Q))
    return &I;

  // Transform (x << y) >> y to x & (-1 >> y)
  if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    Value *Mask = Builder.CreateLShr(AllOnes, Op1);
    return BinaryOperator::CreateAnd(Mask, X);
  }

  // Transform (-1 << y) >> y to -1 >> y
  if (match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1)))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    return BinaryOperator::CreateLShr(AllOnes, Op1);
  }

  if (Instruction *Overflow = foldLShrOverflowBit(I))
    return Overflow;

  return nullptr;
}

Instruction *
InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract(
    BinaryOperator &OldAShr) {
  assert(OldAShr.getOpcode() == Instruction::AShr &&
         "Must be called with arithmetic right-shift instruction only.");

  // Check that constant C is a splat of the element-wise bitwidth of V.
  auto BitWidthSplat = [](Constant *C, Value *V) {
    return match(
        C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                              APInt(C->getType()->getScalarSizeInBits(),
                                    V->getType()->getScalarSizeInBits())));
  };

  // It should look like variable-length sign-extension on the outside:
  // (Val << (bitwidth(Val)-Nbits)) a>> (bitwidth(Val)-Nbits)
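  // For illustration (hypothetical IR, not from the original source), for
  // i32 with no intervening zext/trunc this matches code shaped like:
  //   %skip = sub i32 32, %nbits
  //   %hi   = lshr i32 %x, %skip   ; extract the high %nbits bits
  //   %shl  = shl i32 %hi, %skip
  //   %ext  = ashr i32 %shl, %skip ; sign-extend them back into place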
  Value *NBits;
  Instruction *MaybeTrunc;
  Constant *C1, *C2;
  if (!match(&OldAShr,
             m_AShr(m_Shl(m_Instruction(MaybeTrunc),
                          m_ZExtOrSelf(m_Sub(m_Constant(C1),
                                             m_ZExtOrSelf(m_Value(NBits))))),
                    m_ZExtOrSelf(m_Sub(m_Constant(C2),
                                       m_ZExtOrSelf(m_Deferred(NBits)))))) ||
      !BitWidthSplat(C1, &OldAShr) || !BitWidthSplat(C2, &OldAShr))
    return nullptr;

  // There may or may not be a truncation after outer two shifts.
  Instruction *HighBitExtract;
  match(MaybeTrunc, m_TruncOrSelf(m_Instruction(HighBitExtract)));
  bool HadTrunc = MaybeTrunc != HighBitExtract;

  // And finally, the innermost part of the pattern must be a right-shift.
  Value *X, *NumLowBitsToSkip;
  if (!match(HighBitExtract, m_Shr(m_Value(X), m_Value(NumLowBitsToSkip))))
    return nullptr;

  // Said right-shift must extract high NBits bits - C0 must be its bitwidth.
  Constant *C0;
  if (!match(NumLowBitsToSkip,
             m_ZExtOrSelf(
                 m_Sub(m_Constant(C0), m_ZExtOrSelf(m_Specific(NBits))))) ||
      !BitWidthSplat(C0, HighBitExtract))
    return nullptr;

  // Since the NBits is identical for all shifts, if the outermost and
  // innermost shifts are identical, then outermost shifts are redundant.
  // If we had truncation, do keep it though.
  if (HighBitExtract->getOpcode() == OldAShr.getOpcode())
    return replaceInstUsesWith(OldAShr, MaybeTrunc);

  // Else, if there was a truncation, then we need to ensure that one
  // instruction will go away.
  if (HadTrunc && !match(&OldAShr, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;

  // Finally, bypass two innermost shifts, and perform the outermost shift on
  // the operands of the innermost shift.
  Instruction *NewAShr =
      BinaryOperator::Create(OldAShr.getOpcode(), X, NumLowBitsToSkip);
  NewAShr->copyIRFlags(HighBitExtract); // We can preserve 'exact'-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, OldAShr.getType());
}

Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
  if (Value *V = simplifyAShrInst(I.getOperand(0), I.getOperand(1),
                                  I.isExact(), SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = commonShiftTransforms(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  const APInt *ShAmtAPInt;
  if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
    unsigned ShAmt = ShAmtAPInt->getZExtValue();

    // If the shift amount equals the difference in width of the destination
    // and source scalar types:
    // ashr (shl (zext X), C), C --> sext X
    Value *X;
    if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
        ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
      return new SExtInst(X, Ty);
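
    // Reasoning (not from the original source): with C == N - M, the shl
    // moves X's sign bit into the wide type's sign bit, and the ashr then
    // replicates it across the high bits while restoring X's value bits --
    // exactly what sext does.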

    // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
    // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
    const APInt *ShOp1;
    if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
        ShOp1->ult(BitWidth)) {
      unsigned ShlAmt = ShOp1->getZExtValue();
      if (ShlAmt < ShAmt) {
        // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
        auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
        NewAShr->setIsExact(I.isExact());
        return NewAShr;
      }
      if (ShlAmt > ShAmt) {
        // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
        auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
        NewShl->setHasNoSignedWrap(true);
        return NewShl;
      }
    }

    if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
        ShOp1->ult(BitWidth)) {
      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
      // Oversized arithmetic shifts replicate the sign bit.
      AmtSum = std::min(AmtSum, BitWidth - 1);
      // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
      return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
    }

    if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
        (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
      // ashr (sext X), C --> sext (ashr X, C')
      Type *SrcTy = X->getType();
      ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
      Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
      return new SExtInst(NewSh, Ty);
    }

    if (ShAmt == BitWidth - 1) {
      // ashr i32 or(X,-X), 31 --> sext (X != 0)
      if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
        return new SExtInst(Builder.CreateIsNotNull(X), Ty);

      // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
      Value *Y;
      if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
        return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
    }

    const APInt *MulC;
    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC)))) &&
        (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
         MulC->logBase2() == ShAmt &&
         (ShAmt < BitWidth - 1))) /* Minus 1 for the sign bit */ {

      // ashr (mul nsw (X, 2^N + 1)), N -> add nsw (X, ashr(X, N))
      auto *NewAdd = BinaryOperator::CreateNSWAdd(
          X,
          Builder.CreateAShr(X, ConstantInt::get(Ty, ShAmt), "", I.isExact()));
      NewAdd->setHasNoUnsignedWrap(
          cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap());
      return NewAdd;
    }
  }

  const SimplifyQuery Q = SQ.getWithInstruction(&I);
  if (setShiftFlags(I, Q))
    return &I;

  // Prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
  // as the pattern to splat the lowest bit.
  // FIXME: iff X is already masked, we don't need the one-use check.
  Value *X;
  if (match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)) &&
      match(Op0, m_OneUse(m_Shl(m_Value(X),
                                m_SpecificIntAllowPoison(BitWidth - 1))))) {
    Constant *Mask = ConstantInt::get(Ty, 1);
    // Retain the knowledge about the ignored lanes.
    Mask = Constant::mergeUndefsWith(
        Constant::mergeUndefsWith(Mask, cast<Constant>(Op1)),
        cast<Constant>(cast<Instruction>(Op0)->getOperand(1)));
    X = Builder.CreateAnd(X, Mask);
    return BinaryOperator::CreateNeg(X);
  }
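
  // Reasoning (not from the original source): x & 1 is 0 or 1, and negating
  // gives 0 or all-ones -- the same result as shifting bit 0 up to the sign
  // bit and arithmetic-shifting it back down across the whole width.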

  if (Instruction *R = foldVariableSignZeroExtensionOfVariableHighBitExtract(I))
    return R;

  // See if we can turn a signed shr into an unsigned shr.
  if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I)) {
    Instruction *Lshr = BinaryOperator::CreateLShr(Op0, Op1);
    Lshr->setIsExact(I.isExact());
    return Lshr;
  }

  // ashr (xor %x, -1), %y --> xor (ashr %x, %y), -1
  if (match(Op0, m_OneUse(m_Not(m_Value(X))))) {
    // Note that we must drop 'exact'-ness of the shift!
    // Note that we can't keep undef's in -1 vector constant!
    auto *NewAShr = Builder.CreateAShr(X, Op1, Op0->getName() + ".not");
    return BinaryOperator::CreateNot(NewAShr);
  }
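
  // Reasoning (not from the original source): ashr fills with copies of the
  // sign bit, and the sign bit of ~x is the complement of x's, so each bit
  // of (~x) >>s y is the complement of the corresponding bit of x >>s y.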

  return nullptr;
}