| 1 | //===- InstCombineShifts.cpp ----------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file implements the visitShl, visitLShr, and visitAShr functions. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #include "InstCombineInternal.h" |
| 14 | #include "llvm/Analysis/InstructionSimplify.h" |
| 15 | #include "llvm/IR/IntrinsicInst.h" |
| 16 | #include "llvm/IR/PatternMatch.h" |
| 17 | #include "llvm/Transforms/InstCombine/InstCombiner.h" |
| 18 | using namespace llvm; |
| 19 | using namespace PatternMatch; |
| 20 | |
| 21 | #define DEBUG_TYPE "instcombine" |
| 22 | |
| 23 | bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1, |
| 24 | Value *ShAmt1) { |
| 25 | // We have two shift amounts from two different shifts. The types of those |
// shift amounts may not match. If that's the case, let's bail out now.
| 27 | if (ShAmt0->getType() != ShAmt1->getType()) |
| 28 | return false; |
| 29 | |
| 30 | // As input, we have the following pattern: |
| 31 | // Sh0 (Sh1 X, Q), K |
| 32 | // We want to rewrite that as: |
| 33 | // Sh x, (Q+K) iff (Q+K) u< bitwidth(x) |
| 34 | // While we know that originally (Q+K) would not overflow |
// (because 2 * (N-1) u<= iN-1), we have looked past extensions of the
// shift amounts, so it may now overflow in the smaller bitwidth.
| 37 | // To ensure that does not happen, we need to ensure that the total maximal |
| 38 | // shift amount is still representable in that smaller bit width. |
| 39 | unsigned MaximalPossibleTotalShiftAmount = |
| 40 | (Sh0->getType()->getScalarSizeInBits() - 1) + |
| 41 | (Sh1->getType()->getScalarSizeInBits() - 1); |
| 42 | APInt MaximalRepresentableShiftAmount = |
| 43 | APInt::getAllOnes(numBits: ShAmt0->getType()->getScalarSizeInBits()); |
| 44 | return MaximalRepresentableShiftAmount.uge(RHS: MaximalPossibleTotalShiftAmount); |
| 45 | } |
| 46 | |
| 47 | // Given pattern: |
| 48 | // (x shiftopcode Q) shiftopcode K |
| 49 | // we should rewrite it as |
//   x shiftopcode (Q+K)  iff (Q+K) u< bitwidth(x)
//
// This is valid for any shift, but the two shifts must be identical, and we
// must be careful when we look past (zext(Q)+zext(K)) extensions:
// (Q+K) must not overflow, or else (Q+K) u< bitwidth(x) is bogus.
| 55 | // |
| 56 | // AnalyzeForSignBitExtraction indicates that we will only analyze whether this |
// pattern has any 2 right-shifts that sum to 1 less than the original bit
// width.
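//
// For example (an illustrative case, not drawn from the regression tests):
//   %t = lshr i32 %x, 5
//   %r = lshr i32 %t, 26
// reassociates to
//   %r = lshr i32 %x, 31
// and since 5 + 26 == 31 == bitwidth - 1, this is also a sign-bit extraction.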
| 58 | Value *InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts( |
| 59 | BinaryOperator *Sh0, const SimplifyQuery &SQ, |
    bool AnalyzeForSignBitExtraction) {
| 61 | // Look for a shift of some instruction, ignore zext of shift amount if any. |
| 62 | Instruction *Sh0Op0; |
| 63 | Value *ShAmt0; |
| 64 | if (!match(V: Sh0, |
| 65 | P: m_Shift(L: m_Instruction(I&: Sh0Op0), R: m_ZExtOrSelf(Op: m_Value(V&: ShAmt0))))) |
| 66 | return nullptr; |
| 67 | |
| 68 | // If there is a truncation between the two shifts, we must make note of it |
| 69 | // and look through it. The truncation imposes additional constraints on the |
| 70 | // transform. |
| 71 | Instruction *Sh1; |
| 72 | Value *Trunc = nullptr; |
| 73 | match(V: Sh0Op0, |
| 74 | P: m_CombineOr(L: m_CombineAnd(L: m_Trunc(Op: m_Instruction(I&: Sh1)), R: m_Value(V&: Trunc)), |
| 75 | R: m_Instruction(I&: Sh1))); |
| 76 | |
| 77 | // Inner shift: (x shiftopcode ShAmt1) |
  // As with the outer shift, ignore a zext of the shift amount, if any.
| 79 | Value *X, *ShAmt1; |
| 80 | if (!match(V: Sh1, P: m_Shift(L: m_Value(V&: X), R: m_ZExtOrSelf(Op: m_Value(V&: ShAmt1))))) |
| 81 | return nullptr; |
| 82 | |
| 83 | // Verify that it would be safe to try to add those two shift amounts. |
| 84 | if (!canTryToConstantAddTwoShiftAmounts(Sh0, ShAmt0, Sh1, ShAmt1)) |
| 85 | return nullptr; |
| 86 | |
| 87 | // We are only looking for signbit extraction if we have two right shifts. |
| 88 | bool HadTwoRightShifts = match(V: Sh0, P: m_Shr(L: m_Value(), R: m_Value())) && |
| 89 | match(V: Sh1, P: m_Shr(L: m_Value(), R: m_Value())); |
| 90 | // ... and if it's not two right-shifts, we know the answer already. |
| 91 | if (AnalyzeForSignBitExtraction && !HadTwoRightShifts) |
| 92 | return nullptr; |
| 93 | |
| 94 | // The shift opcodes must be identical, unless we are just checking whether |
| 95 | // this pattern can be interpreted as a sign-bit-extraction. |
| 96 | Instruction::BinaryOps ShiftOpcode = Sh0->getOpcode(); |
| 97 | bool IdenticalShOpcodes = Sh0->getOpcode() == Sh1->getOpcode(); |
| 98 | if (!IdenticalShOpcodes && !AnalyzeForSignBitExtraction) |
| 99 | return nullptr; |
| 100 | |
  // If we saw truncation, we'll need to produce an extra instruction,
| 102 | // and for that one of the operands of the shift must be one-use, |
| 103 | // unless of course we don't actually plan to produce any instructions here. |
| 104 | if (Trunc && !AnalyzeForSignBitExtraction && |
| 105 | !match(V: Sh0, P: m_c_BinOp(L: m_OneUse(SubPattern: m_Value()), R: m_Value()))) |
| 106 | return nullptr; |
| 107 | |
| 108 | // Can we fold (ShAmt0+ShAmt1) ? |
| 109 | auto *NewShAmt = dyn_cast_or_null<Constant>( |
| 110 | Val: simplifyAddInst(LHS: ShAmt0, RHS: ShAmt1, /*isNSW=*/IsNSW: false, /*isNUW=*/IsNUW: false, |
| 111 | Q: SQ.getWithInstruction(I: Sh0))); |
| 112 | if (!NewShAmt) |
| 113 | return nullptr; // Did not simplify. |
| 114 | unsigned NewShAmtBitWidth = NewShAmt->getType()->getScalarSizeInBits(); |
| 115 | unsigned XBitWidth = X->getType()->getScalarSizeInBits(); |
| 116 | // Is the new shift amount smaller than the bit width of inner/new shift? |
| 117 | if (!match(V: NewShAmt, P: m_SpecificInt_ICMP(Predicate: ICmpInst::Predicate::ICMP_ULT, |
| 118 | Threshold: APInt(NewShAmtBitWidth, XBitWidth)))) |
| 119 | return nullptr; // FIXME: could perform constant-folding. |
| 120 | |
| 121 | // If there was a truncation, and we have a right-shift, we can only fold if |
| 122 | // we are left with the original sign bit. Likewise, if we were just checking |
  // that this is a sign-bit extraction, this is the place to check it.
| 124 | // FIXME: zero shift amount is also legal here, but we can't *easily* check |
| 125 | // more than one predicate so it's not really worth it. |
| 126 | if (HadTwoRightShifts && (Trunc || AnalyzeForSignBitExtraction)) { |
| 127 | // If it's not a sign bit extraction, then we're done. |
| 128 | if (!match(V: NewShAmt, |
| 129 | P: m_SpecificInt_ICMP(Predicate: ICmpInst::Predicate::ICMP_EQ, |
| 130 | Threshold: APInt(NewShAmtBitWidth, XBitWidth - 1)))) |
| 131 | return nullptr; |
| 132 | // If it is, and that was the question, return the base value. |
| 133 | if (AnalyzeForSignBitExtraction) |
| 134 | return X; |
| 135 | } |
| 136 | |
  assert(IdenticalShOpcodes && "Should not get here with different shifts.");
| 138 | |
| 139 | if (NewShAmt->getType() != X->getType()) { |
| 140 | NewShAmt = ConstantFoldCastOperand(Opcode: Instruction::ZExt, C: NewShAmt, |
| 141 | DestTy: X->getType(), DL: SQ.DL); |
| 142 | if (!NewShAmt) |
| 143 | return nullptr; |
| 144 | } |
| 145 | |
| 146 | // All good, we can do this fold. |
| 147 | BinaryOperator *NewShift = BinaryOperator::Create(Op: ShiftOpcode, S1: X, S2: NewShAmt); |
| 148 | |
| 149 | // The flags can only be propagated if there wasn't a trunc. |
| 150 | if (!Trunc) { |
| 151 | // If the pattern did not involve trunc, and both of the original shifts |
| 152 | // had the same flag set, preserve the flag. |
| 153 | if (ShiftOpcode == Instruction::BinaryOps::Shl) { |
| 154 | NewShift->setHasNoUnsignedWrap(Sh0->hasNoUnsignedWrap() && |
| 155 | Sh1->hasNoUnsignedWrap()); |
| 156 | NewShift->setHasNoSignedWrap(Sh0->hasNoSignedWrap() && |
| 157 | Sh1->hasNoSignedWrap()); |
| 158 | } else { |
| 159 | NewShift->setIsExact(Sh0->isExact() && Sh1->isExact()); |
| 160 | } |
| 161 | } |
| 162 | |
| 163 | Instruction *Ret = NewShift; |
| 164 | if (Trunc) { |
| 165 | Builder.Insert(I: NewShift); |
| 166 | Ret = CastInst::Create(Instruction::Trunc, S: NewShift, Ty: Sh0->getType()); |
| 167 | } |
| 168 | |
| 169 | return Ret; |
| 170 | } |
| 171 | |
// If we have some pattern that leaves only some low bits set, and then
// performs a left-shift of those bits, and if none of the bits that remain
// after the final shift are modified by the mask, we can omit the mask.
| 175 | // |
| 176 | // There are many variants to this pattern: |
| 177 | // a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt |
| 178 | // b) (x & (~(-1 << MaskShAmt))) << ShiftShAmt |
| 179 | // c) (x & (-1 l>> MaskShAmt)) << ShiftShAmt |
| 180 | // d) (x & ((-1 << MaskShAmt) l>> MaskShAmt)) << ShiftShAmt |
| 181 | // e) ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt |
| 182 | // f) ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt |
| 183 | // All these patterns can be simplified to just: |
| 184 | // x << ShiftShAmt |
| 185 | // iff: |
| 186 | // a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x) |
| 187 | // c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt) |
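//
// For example (an illustrative instance of variant a, assuming i32):
//   %m = shl i32 1, %MaskShAmt
//   %mask = add i32 %m, -1            ; (1 << MaskShAmt) - 1
//   %masked = and i32 %x, %mask
//   %r = shl i32 %masked, %ShiftShAmt
// simplifies to just '%r = shl i32 %x, %ShiftShAmt' whenever
// (MaskShAmt+ShiftShAmt) u>= 32.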
| 188 | static Instruction * |
| 189 | dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift, |
| 190 | const SimplifyQuery &Q, |
| 191 | InstCombiner::BuilderTy &Builder) { |
| 192 | assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl && |
| 193 | "The input must be 'shl'!" ); |
| 194 | |
| 195 | Value *Masked, *ShiftShAmt; |
| 196 | match(V: OuterShift, |
| 197 | P: m_Shift(L: m_Value(V&: Masked), R: m_ZExtOrSelf(Op: m_Value(V&: ShiftShAmt)))); |
| 198 | |
  // *If* there is a truncation between the outer shift and a possible mask,
  // then said truncation *must* be one-use, else we can't perform the fold.
| 201 | Value *Trunc; |
| 202 | if (match(V: Masked, P: m_CombineAnd(L: m_Trunc(Op: m_Value(V&: Masked)), R: m_Value(V&: Trunc))) && |
| 203 | !Trunc->hasOneUse()) |
| 204 | return nullptr; |
| 205 | |
| 206 | Type *NarrowestTy = OuterShift->getType(); |
| 207 | Type *WidestTy = Masked->getType(); |
| 208 | bool HadTrunc = WidestTy != NarrowestTy; |
| 209 | |
| 210 | // The mask must be computed in a type twice as wide to ensure |
| 211 | // that no bits are lost if the sum-of-shifts is wider than the base type. |
| 212 | Type *ExtendedTy = WidestTy->getExtendedType(); |
| 213 | |
| 214 | Value *MaskShAmt; |
| 215 | |
| 216 | // ((1 << MaskShAmt) - 1) |
| 217 | auto MaskA = m_Add(L: m_Shl(L: m_One(), R: m_Value(V&: MaskShAmt)), R: m_AllOnes()); |
  // (~(-1 << MaskShAmt))
| 219 | auto MaskB = m_Not(V: m_Shl(L: m_AllOnes(), R: m_Value(V&: MaskShAmt))); |
| 220 | // (-1 l>> MaskShAmt) |
| 221 | auto MaskC = m_LShr(L: m_AllOnes(), R: m_Value(V&: MaskShAmt)); |
| 222 | // ((-1 << MaskShAmt) l>> MaskShAmt) |
| 223 | auto MaskD = |
| 224 | m_LShr(L: m_Shl(L: m_AllOnes(), R: m_Value(V&: MaskShAmt)), R: m_Deferred(V: MaskShAmt)); |
| 225 | |
| 226 | Value *X; |
| 227 | Constant *NewMask; |
| 228 | |
| 229 | if (match(V: Masked, P: m_c_And(L: m_CombineOr(L: MaskA, R: MaskB), R: m_Value(V&: X)))) { |
| 230 | // Peek through an optional zext of the shift amount. |
| 231 | match(V: MaskShAmt, P: m_ZExtOrSelf(Op: m_Value(V&: MaskShAmt))); |
| 232 | |
| 233 | // Verify that it would be safe to try to add those two shift amounts. |
| 234 | if (!canTryToConstantAddTwoShiftAmounts(Sh0: OuterShift, ShAmt0: ShiftShAmt, Sh1: Masked, |
| 235 | ShAmt1: MaskShAmt)) |
| 236 | return nullptr; |
| 237 | |
| 238 | // Can we simplify (MaskShAmt+ShiftShAmt) ? |
| 239 | auto *SumOfShAmts = dyn_cast_or_null<Constant>(Val: simplifyAddInst( |
| 240 | LHS: MaskShAmt, RHS: ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q)); |
| 241 | if (!SumOfShAmts) |
| 242 | return nullptr; // Did not simplify. |
| 243 | // In this pattern SumOfShAmts correlates with the number of low bits |
| 244 | // that shall remain in the root value (OuterShift). |
| 245 | |
| 246 | // An extend of an undef value becomes zero because the high bits are never |
| 247 | // completely unknown. Replace the `undef` shift amounts with final |
| 248 | // shift bitwidth to ensure that the value remains undef when creating the |
| 249 | // subsequent shift op. |
| 250 | SumOfShAmts = Constant::replaceUndefsWith( |
| 251 | C: SumOfShAmts, Replacement: ConstantInt::get(Ty: SumOfShAmts->getType()->getScalarType(), |
| 252 | V: ExtendedTy->getScalarSizeInBits())); |
| 253 | auto *ExtendedSumOfShAmts = ConstantFoldCastOperand( |
| 254 | Opcode: Instruction::ZExt, C: SumOfShAmts, DestTy: ExtendedTy, DL: Q.DL); |
| 255 | if (!ExtendedSumOfShAmts) |
| 256 | return nullptr; |
| 257 | |
| 258 | // And compute the mask as usual: ~(-1 << (SumOfShAmts)) |
| 259 | auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(Ty: ExtendedTy); |
| 260 | Constant *ExtendedInvertedMask = ConstantFoldBinaryOpOperands( |
| 261 | Opcode: Instruction::Shl, LHS: ExtendedAllOnes, RHS: ExtendedSumOfShAmts, DL: Q.DL); |
| 262 | if (!ExtendedInvertedMask) |
| 263 | return nullptr; |
| 264 | |
| 265 | NewMask = ConstantExpr::getNot(C: ExtendedInvertedMask); |
| 266 | } else if (match(V: Masked, P: m_c_And(L: m_CombineOr(L: MaskC, R: MaskD), R: m_Value(V&: X))) || |
| 267 | match(V: Masked, P: m_Shr(L: m_Shl(L: m_Value(V&: X), R: m_Value(V&: MaskShAmt)), |
| 268 | R: m_Deferred(V: MaskShAmt)))) { |
| 269 | // Peek through an optional zext of the shift amount. |
| 270 | match(V: MaskShAmt, P: m_ZExtOrSelf(Op: m_Value(V&: MaskShAmt))); |
| 271 | |
| 272 | // Verify that it would be safe to try to add those two shift amounts. |
| 273 | if (!canTryToConstantAddTwoShiftAmounts(Sh0: OuterShift, ShAmt0: ShiftShAmt, Sh1: Masked, |
| 274 | ShAmt1: MaskShAmt)) |
| 275 | return nullptr; |
| 276 | |
| 277 | // Can we simplify (ShiftShAmt-MaskShAmt) ? |
| 278 | auto *ShAmtsDiff = dyn_cast_or_null<Constant>(Val: simplifySubInst( |
| 279 | LHS: ShiftShAmt, RHS: MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q)); |
| 280 | if (!ShAmtsDiff) |
| 281 | return nullptr; // Did not simplify. |
| 282 | // In this pattern ShAmtsDiff correlates with the number of high bits that |
| 283 | // shall be unset in the root value (OuterShift). |
| 284 | |
| 285 | // An extend of an undef value becomes zero because the high bits are never |
| 286 | // completely unknown. Replace the `undef` shift amounts with negated |
| 287 | // bitwidth of innermost shift to ensure that the value remains undef when |
| 288 | // creating the subsequent shift op. |
| 289 | unsigned WidestTyBitWidth = WidestTy->getScalarSizeInBits(); |
| 290 | ShAmtsDiff = Constant::replaceUndefsWith( |
| 291 | C: ShAmtsDiff, Replacement: ConstantInt::get(Ty: ShAmtsDiff->getType()->getScalarType(), |
| 292 | V: -WidestTyBitWidth)); |
| 293 | auto *ExtendedNumHighBitsToClear = ConstantFoldCastOperand( |
| 294 | Opcode: Instruction::ZExt, |
| 295 | C: ConstantExpr::getSub(C1: ConstantInt::get(Ty: ShAmtsDiff->getType(), |
| 296 | V: WidestTyBitWidth, |
| 297 | /*isSigned=*/IsSigned: false), |
| 298 | C2: ShAmtsDiff), |
| 299 | DestTy: ExtendedTy, DL: Q.DL); |
| 300 | if (!ExtendedNumHighBitsToClear) |
| 301 | return nullptr; |
| 302 | |
| 303 | // And compute the mask as usual: (-1 l>> (NumHighBitsToClear)) |
| 304 | auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(Ty: ExtendedTy); |
| 305 | NewMask = ConstantFoldBinaryOpOperands(Opcode: Instruction::LShr, LHS: ExtendedAllOnes, |
| 306 | RHS: ExtendedNumHighBitsToClear, DL: Q.DL); |
| 307 | if (!NewMask) |
| 308 | return nullptr; |
| 309 | } else |
| 310 | return nullptr; // Don't know anything about this pattern. |
| 311 | |
| 312 | NewMask = ConstantExpr::getTrunc(C: NewMask, Ty: NarrowestTy); |
| 313 | |
  // Does this mask have any unset bits? If not, then we can just skip
  // applying it.
| 315 | bool NeedMask = !match(V: NewMask, P: m_AllOnes()); |
| 316 | |
| 317 | // If we need to apply a mask, there are several more restrictions we have. |
| 318 | if (NeedMask) { |
| 319 | // The old masking instruction must go away. |
| 320 | if (!Masked->hasOneUse()) |
| 321 | return nullptr; |
    // The original "masking" instruction must not have been `ashr`.
| 323 | if (match(V: Masked, P: m_AShr(L: m_Value(), R: m_Value()))) |
| 324 | return nullptr; |
| 325 | } |
| 326 | |
| 327 | // If we need to apply truncation, let's do it first, since we can. |
| 328 | // We have already ensured that the old truncation will go away. |
| 329 | if (HadTrunc) |
| 330 | X = Builder.CreateTrunc(V: X, DestTy: NarrowestTy); |
| 331 | |
| 332 | // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits. |
| 333 | // We didn't change the Type of this outermost shift, so we can just do it. |
| 334 | auto *NewShift = BinaryOperator::Create(Op: OuterShift->getOpcode(), S1: X, |
| 335 | S2: OuterShift->getOperand(i_nocapture: 1)); |
| 336 | if (!NeedMask) |
| 337 | return NewShift; |
| 338 | |
| 339 | Builder.Insert(I: NewShift); |
| 340 | return BinaryOperator::Create(Op: Instruction::And, S1: NewShift, S2: NewMask); |
| 341 | } |
| 342 | |
| 343 | /// If we have a shift-by-constant of a bin op (bitwise logic op or add/sub w/ |
| 344 | /// shl) that itself has a shift-by-constant operand with identical opcode, we |
| 345 | /// may be able to convert that into 2 independent shifts followed by the logic |
| 346 | /// op. This eliminates a use of an intermediate value (reduces dependency |
| 347 | /// chain). |
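///
/// For example (illustrative): since shl distributes over xor,
///   shl (xor (shl X, 2), Y), 3 --> xor (shl X, 5), (shl Y, 3)
/// provided the shift amounts sum to less than the bit width.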
| 348 | static Instruction *foldShiftOfShiftedBinOp(BinaryOperator &I, |
| 349 | InstCombiner::BuilderTy &Builder) { |
  assert(I.isShift() && "Expected a shift as input");
| 351 | auto *BinInst = dyn_cast<BinaryOperator>(Val: I.getOperand(i_nocapture: 0)); |
| 352 | if (!BinInst || |
| 353 | (!BinInst->isBitwiseLogicOp() && |
| 354 | BinInst->getOpcode() != Instruction::Add && |
| 355 | BinInst->getOpcode() != Instruction::Sub) || |
| 356 | !BinInst->hasOneUse()) |
| 357 | return nullptr; |
| 358 | |
| 359 | Constant *C0, *C1; |
| 360 | if (!match(V: I.getOperand(i_nocapture: 1), P: m_Constant(C&: C1))) |
| 361 | return nullptr; |
| 362 | |
| 363 | Instruction::BinaryOps ShiftOpcode = I.getOpcode(); |
| 364 | // Transform for add/sub only works with shl. |
| 365 | if ((BinInst->getOpcode() == Instruction::Add || |
| 366 | BinInst->getOpcode() == Instruction::Sub) && |
| 367 | ShiftOpcode != Instruction::Shl) |
| 368 | return nullptr; |
| 369 | |
| 370 | Type *Ty = I.getType(); |
| 371 | |
| 372 | // Find a matching shift by constant. The fold is not valid if the sum |
| 373 | // of the shift values equals or exceeds bitwidth. |
| 374 | Value *X, *Y; |
| 375 | auto matchFirstShift = [&](Value *V, Value *W) { |
| 376 | unsigned Size = Ty->getScalarSizeInBits(); |
| 377 | APInt Threshold(Size, Size); |
| 378 | return match(V, P: m_BinOp(Opcode: ShiftOpcode, L: m_Value(V&: X), R: m_Constant(C&: C0))) && |
| 379 | (V->hasOneUse() || match(V: W, P: m_ImmConstant())) && |
| 380 | match(V: ConstantExpr::getAdd(C1: C0, C2: C1), |
| 381 | P: m_SpecificInt_ICMP(Predicate: ICmpInst::ICMP_ULT, Threshold)); |
| 382 | }; |
| 383 | |
  // Logic ops and Add are commutative, so check each operand for a match. Sub
  // is not, so we cannot reorder if we match operand(1); we need to keep the
  // operands in their original positions.
| 387 | bool FirstShiftIsOp1 = false; |
| 388 | if (matchFirstShift(BinInst->getOperand(i_nocapture: 0), BinInst->getOperand(i_nocapture: 1))) |
| 389 | Y = BinInst->getOperand(i_nocapture: 1); |
| 390 | else if (matchFirstShift(BinInst->getOperand(i_nocapture: 1), BinInst->getOperand(i_nocapture: 0))) { |
| 391 | Y = BinInst->getOperand(i_nocapture: 0); |
| 392 | FirstShiftIsOp1 = BinInst->getOpcode() == Instruction::Sub; |
| 393 | } else |
| 394 | return nullptr; |
| 395 | |
| 396 | // shift (binop (shift X, C0), Y), C1 -> binop (shift X, C0+C1), (shift Y, C1) |
| 397 | Constant *ShiftSumC = ConstantExpr::getAdd(C1: C0, C2: C1); |
| 398 | Value *NewShift1 = Builder.CreateBinOp(Opc: ShiftOpcode, LHS: X, RHS: ShiftSumC); |
| 399 | Value *NewShift2 = Builder.CreateBinOp(Opc: ShiftOpcode, LHS: Y, RHS: C1); |
| 400 | Value *Op1 = FirstShiftIsOp1 ? NewShift2 : NewShift1; |
| 401 | Value *Op2 = FirstShiftIsOp1 ? NewShift1 : NewShift2; |
| 402 | return BinaryOperator::Create(Op: BinInst->getOpcode(), S1: Op1, S2: Op2); |
| 403 | } |
| 404 | |
| 405 | Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) { |
| 406 | if (Instruction *Phi = foldBinopWithPhiOperands(BO&: I)) |
| 407 | return Phi; |
| 408 | |
| 409 | Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1); |
| 410 | assert(Op0->getType() == Op1->getType()); |
| 411 | Type *Ty = I.getType(); |
| 412 | |
| 413 | // If the shift amount is a one-use `sext`, we can demote it to `zext`. |
| 414 | Value *Y; |
| 415 | if (match(V: Op1, P: m_OneUse(SubPattern: m_SExt(Op: m_Value(V&: Y))))) { |
| 416 | Value *NewExt = Builder.CreateZExt(V: Y, DestTy: Ty, Name: Op1->getName()); |
| 417 | return BinaryOperator::Create(Op: I.getOpcode(), S1: Op0, S2: NewExt); |
| 418 | } |
| 419 | |
| 420 | // See if we can fold away this shift. |
| 421 | if (SimplifyDemandedInstructionBits(Inst&: I)) |
| 422 | return &I; |
| 423 | |
| 424 | // Try to fold constant and into select arguments. |
| 425 | if (isa<Constant>(Val: Op0)) |
| 426 | if (SelectInst *SI = dyn_cast<SelectInst>(Val: Op1)) |
| 427 | if (Instruction *R = FoldOpIntoSelect(Op&: I, SI)) |
| 428 | return R; |
| 429 | |
| 430 | Constant *CUI; |
| 431 | if (match(V: Op1, P: m_ImmConstant(C&: CUI))) |
| 432 | if (Instruction *Res = FoldShiftByConstant(Op0, Op1: CUI, I)) |
| 433 | return Res; |
| 434 | |
| 435 | if (auto *NewShift = cast_or_null<Instruction>( |
| 436 | Val: reassociateShiftAmtsOfTwoSameDirectionShifts(Sh0: &I, SQ))) |
| 437 | return NewShift; |
| 438 | |
| 439 | // Pre-shift a constant shifted by a variable amount with constant offset: |
| 440 | // C shift (A add nuw C1) --> (C shift C1) shift A |
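  // For example (illustrative): 4 shl (A add nuw 2) --> 16 shl A; the nuw
  // guarantees the sum does not wrap, so the shift amount is at least 2.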
| 441 | Value *A; |
| 442 | Constant *C, *C1; |
| 443 | if (match(V: Op0, P: m_Constant(C)) && |
| 444 | match(V: Op1, P: m_NUWAddLike(L: m_Value(V&: A), R: m_Constant(C&: C1)))) { |
| 445 | Value *NewC = Builder.CreateBinOp(Opc: I.getOpcode(), LHS: C, RHS: C1); |
| 446 | BinaryOperator *NewShiftOp = BinaryOperator::Create(Op: I.getOpcode(), S1: NewC, S2: A); |
| 447 | if (I.getOpcode() == Instruction::Shl) { |
| 448 | NewShiftOp->setHasNoSignedWrap(I.hasNoSignedWrap()); |
| 449 | NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); |
| 450 | } else { |
| 451 | NewShiftOp->setIsExact(I.isExact()); |
| 452 | } |
| 453 | return NewShiftOp; |
| 454 | } |
| 455 | |
| 456 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
| 457 | |
| 458 | const APInt *AC, *AddC; |
  // Try to pre-shift a constant shifted by a variable amount plus a negative
  // constant:
| 461 | // C << (X - AddC) --> (C >> AddC) << X |
| 462 | // and |
| 463 | // C >> (X - AddC) --> (C << AddC) >> X |
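  // For example (illustrative, i32): '15 lshr exact (A + (-4))' becomes
  // '240 lshr exact A', since 15 round-trips through (15 << 4) >> 4 and the
  // shift is exact (see isSuitableForPreShift below).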
| 464 | if (match(V: Op0, P: m_APInt(Res&: AC)) && match(V: Op1, P: m_Add(L: m_Value(V&: A), R: m_APInt(Res&: AddC))) && |
| 465 | AddC->isNegative() && (-*AddC).ult(RHS: BitWidth)) { |
    assert(!AC->isZero() && "Expected simplify of shifted zero");
| 467 | unsigned PosOffset = (-*AddC).getZExtValue(); |
| 468 | |
| 469 | auto isSuitableForPreShift = [PosOffset, &I, AC]() { |
| 470 | switch (I.getOpcode()) { |
| 471 | default: |
| 472 | return false; |
| 473 | case Instruction::Shl: |
| 474 | return (I.hasNoSignedWrap() || I.hasNoUnsignedWrap()) && |
| 475 | AC->eq(RHS: AC->lshr(shiftAmt: PosOffset).shl(shiftAmt: PosOffset)); |
| 476 | case Instruction::LShr: |
| 477 | return I.isExact() && AC->eq(RHS: AC->shl(shiftAmt: PosOffset).lshr(shiftAmt: PosOffset)); |
| 478 | case Instruction::AShr: |
| 479 | return I.isExact() && AC->eq(RHS: AC->shl(shiftAmt: PosOffset).ashr(ShiftAmt: PosOffset)); |
| 480 | } |
| 481 | }; |
| 482 | if (isSuitableForPreShift()) { |
| 483 | Constant *NewC = ConstantInt::get(Ty, V: I.getOpcode() == Instruction::Shl |
| 484 | ? AC->lshr(shiftAmt: PosOffset) |
| 485 | : AC->shl(shiftAmt: PosOffset)); |
| 486 | BinaryOperator *NewShiftOp = |
| 487 | BinaryOperator::Create(Op: I.getOpcode(), S1: NewC, S2: A); |
| 488 | if (I.getOpcode() == Instruction::Shl) { |
| 489 | NewShiftOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); |
| 490 | } else { |
| 491 | NewShiftOp->setIsExact(); |
| 492 | } |
| 493 | return NewShiftOp; |
| 494 | } |
| 495 | } |
| 496 | |
  // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2,
  // because shifts by negative values (which could occur if A were negative)
  // are undefined.
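  // For example (illustrative): X shl (A srem 8) --> X shl (A and 7).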
| 500 | if (Op1->hasOneUse() && match(V: Op1, P: m_SRem(L: m_Value(V&: A), R: m_Constant(C))) && |
| 501 | match(V: C, P: m_Power2())) { |
| 502 | // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't |
| 503 | // demand the sign bit (and many others) here?? |
| 504 | Constant *Mask = ConstantExpr::getSub(C1: C, C2: ConstantInt::get(Ty, V: 1)); |
| 505 | Value *Rem = Builder.CreateAnd(LHS: A, RHS: Mask, Name: Op1->getName()); |
| 506 | return replaceOperand(I, OpNum: 1, V: Rem); |
| 507 | } |
| 508 | |
| 509 | if (Instruction *Logic = foldShiftOfShiftedBinOp(I, Builder)) |
| 510 | return Logic; |
| 511 | |
| 512 | if (match(V: Op1, P: m_Or(L: m_Value(), R: m_SpecificInt(V: BitWidth - 1)))) |
| 513 | return replaceOperand(I, OpNum: 1, V: ConstantInt::get(Ty, V: BitWidth - 1)); |
| 514 | |
| 515 | Instruction *CmpIntr; |
| 516 | if ((I.getOpcode() == Instruction::LShr || |
| 517 | I.getOpcode() == Instruction::AShr) && |
| 518 | match(V: Op0, P: m_OneUse(SubPattern: m_Instruction(I&: CmpIntr))) && |
| 519 | isa<CmpIntrinsic>(Val: CmpIntr) && |
| 520 | match(V: Op1, P: m_SpecificInt(V: Ty->getScalarSizeInBits() - 1))) { |
| 521 | Value *Cmp = |
| 522 | Builder.CreateICmp(P: cast<CmpIntrinsic>(Val: CmpIntr)->getLTPredicate(), |
| 523 | LHS: CmpIntr->getOperand(i: 0), RHS: CmpIntr->getOperand(i: 1)); |
| 524 | return CastInst::Create(I.getOpcode() == Instruction::LShr |
| 525 | ? Instruction::ZExt |
| 526 | : Instruction::SExt, |
| 527 | S: Cmp, Ty); |
| 528 | } |
| 529 | |
| 530 | return nullptr; |
| 531 | } |
| 532 | |
| 533 | /// Return true if we can simplify two logical (either left or right) shifts |
| 534 | /// that have constant shift amounts: OuterShift (InnerShift X, C1), C2. |
| 535 | static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl, |
| 536 | Instruction *InnerShift, |
| 537 | InstCombinerImpl &IC, Instruction *CxtI) { |
  assert(InnerShift->isLogicalShift() && "Unexpected instruction type");
| 539 | |
| 540 | // We need constant scalar or constant splat shifts. |
| 541 | const APInt *InnerShiftConst; |
| 542 | if (!match(V: InnerShift->getOperand(i: 1), P: m_APInt(Res&: InnerShiftConst))) |
| 543 | return false; |
| 544 | |
| 545 | // Two logical shifts in the same direction: |
| 546 | // shl (shl X, C1), C2 --> shl X, C1 + C2 |
| 547 | // lshr (lshr X, C1), C2 --> lshr X, C1 + C2 |
| 548 | bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl; |
| 549 | if (IsInnerShl == IsOuterShl) |
| 550 | return true; |
| 551 | |
| 552 | // Equal shift amounts in opposite directions become bitwise 'and': |
| 553 | // lshr (shl X, C), C --> and X, C' |
| 554 | // shl (lshr X, C), C --> and X, C' |
| 555 | if (*InnerShiftConst == OuterShAmt) |
| 556 | return true; |
| 557 | |
| 558 | // If the 2nd shift is bigger than the 1st, we can fold: |
| 559 | // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3 |
| 560 | // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3 |
| 561 | // but it isn't profitable unless we know the and'd out bits are already zero. |
| 562 | // Also, check that the inner shift is valid (less than the type width) or |
| 563 | // we'll crash trying to produce the bit mask for the 'and'. |
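  // For example (illustrative, i8): lshr (shl X, 5), 2 --> shl X, 3 is a win
  // only if bits 3 and 4 of X are known zero; otherwise an extra 'and' with
  // 0x38 would be needed to clear the bits the original shl discarded.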
| 564 | unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits(); |
| 565 | if (InnerShiftConst->ugt(RHS: OuterShAmt) && InnerShiftConst->ult(RHS: TypeWidth)) { |
| 566 | unsigned InnerShAmt = InnerShiftConst->getZExtValue(); |
| 567 | unsigned MaskShift = |
| 568 | IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt; |
| 569 | APInt Mask = APInt::getLowBitsSet(numBits: TypeWidth, loBitsSet: OuterShAmt) << MaskShift; |
| 570 | if (IC.MaskedValueIsZero(V: InnerShift->getOperand(i: 0), Mask, CxtI)) |
| 571 | return true; |
| 572 | } |
| 573 | |
| 574 | return false; |
| 575 | } |
| 576 | |
| 577 | /// See if we can compute the specified value, but shifted logically to the left |
| 578 | /// or right by some number of bits. This should return true if the expression |
| 579 | /// can be computed for the same cost as the current expression tree. This is |
| 580 | /// used to eliminate extraneous shifting from things like: |
| 581 | /// %C = shl i128 %A, 64 |
| 582 | /// %D = shl i128 %B, 96 |
| 583 | /// %E = or i128 %C, %D |
| 584 | /// %F = lshr i128 %E, 64 |
| 585 | /// where the client will ask if E can be computed shifted right by 64-bits. If |
| 586 | /// this succeeds, getShiftedValue() will be called to produce the value. |
| 587 | static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift, |
| 588 | InstCombinerImpl &IC, Instruction *CxtI) { |
| 589 | // We can always evaluate immediate constants. |
| 590 | if (match(V, P: m_ImmConstant())) |
| 591 | return true; |
| 592 | |
| 593 | Instruction *I = dyn_cast<Instruction>(Val: V); |
| 594 | if (!I) return false; |
| 595 | |
| 596 | // We can't mutate something that has multiple uses: doing so would |
| 597 | // require duplicating the instruction in general, which isn't profitable. |
| 598 | if (!I->hasOneUse()) return false; |
| 599 | |
| 600 | switch (I->getOpcode()) { |
| 601 | default: return false; |
| 602 | case Instruction::And: |
| 603 | case Instruction::Or: |
| 604 | case Instruction::Xor: |
    // Bitwise operators can all be arbitrarily evaluated shifted.
| 606 | return canEvaluateShifted(V: I->getOperand(i: 0), NumBits, IsLeftShift, IC, CxtI: I) && |
| 607 | canEvaluateShifted(V: I->getOperand(i: 1), NumBits, IsLeftShift, IC, CxtI: I); |
| 608 | |
| 609 | case Instruction::Shl: |
| 610 | case Instruction::LShr: |
| 611 | return canEvaluateShiftedShift(OuterShAmt: NumBits, IsOuterShl: IsLeftShift, InnerShift: I, IC, CxtI); |
| 612 | |
| 613 | case Instruction::Select: { |
| 614 | SelectInst *SI = cast<SelectInst>(Val: I); |
| 615 | Value *TrueVal = SI->getTrueValue(); |
| 616 | Value *FalseVal = SI->getFalseValue(); |
| 617 | return canEvaluateShifted(V: TrueVal, NumBits, IsLeftShift, IC, CxtI: SI) && |
| 618 | canEvaluateShifted(V: FalseVal, NumBits, IsLeftShift, IC, CxtI: SI); |
| 619 | } |
| 620 | case Instruction::PHI: { |
| 621 | // We can change a phi if we can change all operands. Note that we never |
| 622 | // get into trouble with cyclic PHIs here because we only consider |
| 623 | // instructions with a single use. |
| 624 | PHINode *PN = cast<PHINode>(Val: I); |
| 625 | for (Value *IncValue : PN->incoming_values()) |
| 626 | if (!canEvaluateShifted(V: IncValue, NumBits, IsLeftShift, IC, CxtI: PN)) |
| 627 | return false; |
| 628 | return true; |
| 629 | } |
| 630 | case Instruction::Mul: { |
| 631 | const APInt *MulConst; |
    // We can fold (shr (mul X, -(1 << C)), C) -> (and (neg X), C')
| 633 | return !IsLeftShift && match(V: I->getOperand(i: 1), P: m_APInt(Res&: MulConst)) && |
| 634 | MulConst->isNegatedPowerOf2() && MulConst->countr_zero() == NumBits; |
| 635 | } |
| 636 | } |
| 637 | } |
| 638 | |
| 639 | /// Fold OuterShift (InnerShift X, C1), C2. |
| 640 | /// See canEvaluateShiftedShift() for the constraints on these instructions. |
| 641 | static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt, |
| 642 | bool IsOuterShl, |
| 643 | InstCombiner::BuilderTy &Builder) { |
| 644 | bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl; |
| 645 | Type *ShType = InnerShift->getType(); |
| 646 | unsigned TypeWidth = ShType->getScalarSizeInBits(); |
| 647 | |
| 648 | // We only accept shifts-by-a-constant in canEvaluateShifted(). |
| 649 | const APInt *C1; |
| 650 | match(V: InnerShift->getOperand(i_nocapture: 1), P: m_APInt(Res&: C1)); |
| 651 | unsigned InnerShAmt = C1->getZExtValue(); |
| 652 | |
| 653 | // Change the shift amount and clear the appropriate IR flags. |
| 654 | auto NewInnerShift = [&](unsigned ShAmt) { |
| 655 | InnerShift->setOperand(i_nocapture: 1, Val_nocapture: ConstantInt::get(Ty: ShType, V: ShAmt)); |
| 656 | if (IsInnerShl) { |
| 657 | InnerShift->setHasNoUnsignedWrap(false); |
| 658 | InnerShift->setHasNoSignedWrap(false); |
| 659 | } else { |
| 660 | InnerShift->setIsExact(false); |
| 661 | } |
| 662 | return InnerShift; |
| 663 | }; |
| 664 | |
| 665 | // Two logical shifts in the same direction: |
| 666 | // shl (shl X, C1), C2 --> shl X, C1 + C2 |
| 667 | // lshr (lshr X, C1), C2 --> lshr X, C1 + C2 |
| 668 | if (IsInnerShl == IsOuterShl) { |
| 669 | // If this is an oversized composite shift, then unsigned shifts get 0. |
| 670 | if (InnerShAmt + OuterShAmt >= TypeWidth) |
| 671 | return Constant::getNullValue(Ty: ShType); |
| 672 | |
| 673 | return NewInnerShift(InnerShAmt + OuterShAmt); |
| 674 | } |
| 675 | |
| 676 | // Equal shift amounts in opposite directions become bitwise 'and': |
| 677 | // lshr (shl X, C), C --> and X, C' |
| 678 | // shl (lshr X, C), C --> and X, C' |
| 679 | if (InnerShAmt == OuterShAmt) { |
| 680 | APInt Mask = IsInnerShl |
| 681 | ? APInt::getLowBitsSet(numBits: TypeWidth, loBitsSet: TypeWidth - OuterShAmt) |
| 682 | : APInt::getHighBitsSet(numBits: TypeWidth, hiBitsSet: TypeWidth - OuterShAmt); |
| 683 | Value *And = Builder.CreateAnd(LHS: InnerShift->getOperand(i_nocapture: 0), |
| 684 | RHS: ConstantInt::get(Ty: ShType, V: Mask)); |
| 685 | if (auto *AndI = dyn_cast<Instruction>(Val: And)) { |
| 686 | AndI->moveBefore(InsertPos: InnerShift->getIterator()); |
| 687 | AndI->takeName(V: InnerShift); |
| 688 | } |
| 689 | return And; |
| 690 | } |
| 691 | |
| 692 | assert(InnerShAmt > OuterShAmt && |
| 693 | "Unexpected opposite direction logical shift pair" ); |
| 694 | |
| 695 | // In general, we would need an 'and' for this transform, but |
| 696 | // canEvaluateShiftedShift() guarantees that the masked-off bits are not used. |
| 697 | // lshr (shl X, C1), C2 --> shl X, C1 - C2 |
| 698 | // shl (lshr X, C1), C2 --> lshr X, C1 - C2 |
| 699 | return NewInnerShift(InnerShAmt - OuterShAmt); |
| 700 | } |
| 701 | |
| 702 | /// When canEvaluateShifted() returns true for an expression, this function |
| 703 | /// inserts the new computation that produces the shifted value. |
| 704 | static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift, |
| 705 | InstCombinerImpl &IC, const DataLayout &DL) { |
| 706 | // We can always evaluate constants shifted. |
| 707 | if (Constant *C = dyn_cast<Constant>(Val: V)) { |
| 708 | if (isLeftShift) |
| 709 | return IC.Builder.CreateShl(LHS: C, RHS: NumBits); |
| 710 | else |
| 711 | return IC.Builder.CreateLShr(LHS: C, RHS: NumBits); |
| 712 | } |
| 713 | |
| 714 | Instruction *I = cast<Instruction>(Val: V); |
| 715 | IC.addToWorklist(I); |
| 716 | |
| 717 | switch (I->getOpcode()) { |
  default: llvm_unreachable("Inconsistency with canEvaluateShifted");
| 719 | case Instruction::And: |
| 720 | case Instruction::Or: |
| 721 | case Instruction::Xor: |
    // Bitwise operators can all be arbitrarily evaluated shifted.
| 723 | I->setOperand( |
| 724 | i: 0, Val: getShiftedValue(V: I->getOperand(i: 0), NumBits, isLeftShift, IC, DL)); |
| 725 | I->setOperand( |
| 726 | i: 1, Val: getShiftedValue(V: I->getOperand(i: 1), NumBits, isLeftShift, IC, DL)); |
| 727 | return I; |
| 728 | |
| 729 | case Instruction::Shl: |
| 730 | case Instruction::LShr: |
| 731 | return foldShiftedShift(InnerShift: cast<BinaryOperator>(Val: I), OuterShAmt: NumBits, IsOuterShl: isLeftShift, |
| 732 | Builder&: IC.Builder); |
| 733 | |
| 734 | case Instruction::Select: |
| 735 | I->setOperand( |
| 736 | i: 1, Val: getShiftedValue(V: I->getOperand(i: 1), NumBits, isLeftShift, IC, DL)); |
| 737 | I->setOperand( |
| 738 | i: 2, Val: getShiftedValue(V: I->getOperand(i: 2), NumBits, isLeftShift, IC, DL)); |
| 739 | return I; |
| 740 | case Instruction::PHI: { |
| 741 | // We can change a phi if we can change all operands. Note that we never |
| 742 | // get into trouble with cyclic PHIs here because we only consider |
| 743 | // instructions with a single use. |
| 744 | PHINode *PN = cast<PHINode>(Val: I); |
| 745 | for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) |
| 746 | PN->setIncomingValue(i, V: getShiftedValue(V: PN->getIncomingValue(i), NumBits, |
| 747 | isLeftShift, IC, DL)); |
| 748 | return PN; |
| 749 | } |
| 750 | case Instruction::Mul: { |
    assert(!isLeftShift && "Unexpected shift direction!");
| 752 | auto *Neg = BinaryOperator::CreateNeg(Op: I->getOperand(i: 0)); |
| 753 | IC.InsertNewInstWith(New: Neg, Old: I->getIterator()); |
| 754 | unsigned TypeWidth = I->getType()->getScalarSizeInBits(); |
| 755 | APInt Mask = APInt::getLowBitsSet(numBits: TypeWidth, loBitsSet: TypeWidth - NumBits); |
| 756 | auto *And = BinaryOperator::CreateAnd(V1: Neg, |
| 757 | V2: ConstantInt::get(Ty: I->getType(), V: Mask)); |
| 758 | And->takeName(V: I); |
| 759 | return IC.InsertNewInstWith(New: And, Old: I->getIterator()); |
| 760 | } |
| 761 | } |
| 762 | } |
| 763 | |
// If this is a bitwise operator or add with a constant RHS, we might be able
// to pull it through a shift.
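// For example (illustrative): shl (add X, 7), 2 --> add (shl X, 2), 28.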
| 766 | static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift, |
| 767 | BinaryOperator *BO) { |
| 768 | switch (BO->getOpcode()) { |
| 769 | default: |
| 770 | return false; // Do not perform transform! |
| 771 | case Instruction::Add: |
| 772 | return Shift.getOpcode() == Instruction::Shl; |
| 773 | case Instruction::Or: |
| 774 | case Instruction::And: |
| 775 | return true; |
| 776 | case Instruction::Xor: |
| 777 | // Do not change a 'not' of logical shift because that would create a normal |
| 778 | // 'xor'. The 'not' is likely better for analysis, SCEV, and codegen. |
| 779 | return !(Shift.isLogicalShift() && match(V: BO, P: m_Not(V: m_Value()))); |
| 780 | } |
| 781 | } |
| 782 | |
| 783 | Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *C1, |
| 784 | BinaryOperator &I) { |
| 785 | // (C2 << X) << C1 --> (C2 << C1) << X |
| 786 | // (C2 >> X) >> C1 --> (C2 >> C1) >> X |
| 787 | Constant *C2; |
| 788 | Value *X; |
| 789 | bool IsLeftShift = I.getOpcode() == Instruction::Shl; |
| 790 | if (match(V: Op0, P: m_BinOp(Opcode: I.getOpcode(), L: m_ImmConstant(C&: C2), R: m_Value(V&: X)))) { |
| 791 | Instruction *R = BinaryOperator::Create( |
| 792 | Op: I.getOpcode(), S1: Builder.CreateBinOp(Opc: I.getOpcode(), LHS: C2, RHS: C1), S2: X); |
| 793 | BinaryOperator *BO0 = cast<BinaryOperator>(Val: Op0); |
| 794 | if (IsLeftShift) { |
| 795 | R->setHasNoUnsignedWrap(I.hasNoUnsignedWrap() && |
| 796 | BO0->hasNoUnsignedWrap()); |
| 797 | R->setHasNoSignedWrap(I.hasNoSignedWrap() && BO0->hasNoSignedWrap()); |
| 798 | } else |
| 799 | R->setIsExact(I.isExact() && BO0->isExact()); |
| 800 | return R; |
| 801 | } |
| 802 | |
| 803 | Type *Ty = I.getType(); |
| 804 | unsigned TypeBits = Ty->getScalarSizeInBits(); |
| 805 | |
| 806 | // (X / +DivC) >> (Width - 1) --> ext (X <= -DivC) |
| 807 | // (X / -DivC) >> (Width - 1) --> ext (X >= +DivC) |
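  // For example (illustrative, i8): '(X sdiv 2) ashr 7' --> 'sext (icmp sle
  // X, -2)', since X sdiv 2 is negative exactly when X s<= -2.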
| 808 | const APInt *DivC; |
| 809 | if (!IsLeftShift && match(V: C1, P: m_SpecificIntAllowPoison(V: TypeBits - 1)) && |
| 810 | match(V: Op0, P: m_SDiv(L: m_Value(V&: X), R: m_APInt(Res&: DivC))) && !DivC->isZero() && |
| 811 | !DivC->isMinSignedValue()) { |
| 812 | Constant *NegDivC = ConstantInt::get(Ty, V: -(*DivC)); |
| 813 | ICmpInst::Predicate Pred = |
| 814 | DivC->isNegative() ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_SLE; |
| 815 | Value *Cmp = Builder.CreateICmp(P: Pred, LHS: X, RHS: NegDivC); |
| 816 | auto ExtOpcode = (I.getOpcode() == Instruction::AShr) ? Instruction::SExt |
| 817 | : Instruction::ZExt; |
| 818 | return CastInst::Create(ExtOpcode, S: Cmp, Ty); |
| 819 | } |
| 820 | |
| 821 | const APInt *Op1C; |
| 822 | if (!match(V: C1, P: m_APInt(Res&: Op1C))) |
| 823 | return nullptr; |
| 824 | |
| 825 | assert(!Op1C->uge(TypeBits) && |
| 826 | "Shift over the type width should have been removed already" ); |
| 827 | |
  // See if we can propagate this shift into the input; this covers the trivial
  // case of lshr(shl(x,c1),c2) as well as other more complex cases.
| 830 | if (I.getOpcode() != Instruction::AShr && |
| 831 | canEvaluateShifted(V: Op0, NumBits: Op1C->getZExtValue(), IsLeftShift, IC&: *this, CxtI: &I)) { |
| 832 | LLVM_DEBUG( |
| 833 | dbgs() << "ICE: GetShiftedValue propagating shift through expression" |
| 834 | " to eliminate shift:\n IN: " |
               << *Op0 << "\n SH: " << I << "\n");
| 836 | |
| 837 | return replaceInstUsesWith( |
| 838 | I, V: getShiftedValue(V: Op0, NumBits: Op1C->getZExtValue(), isLeftShift: IsLeftShift, IC&: *this, DL)); |
| 839 | } |
| 840 | |
| 841 | if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I)) |
| 842 | return FoldedShift; |
| 843 | |
| 844 | if (!Op0->hasOneUse()) |
| 845 | return nullptr; |
| 846 | |
| 847 | if (auto *Op0BO = dyn_cast<BinaryOperator>(Val: Op0)) { |
| 848 | // If the operand is a bitwise operator with a constant RHS, and the |
| 849 | // shift is the only use, we can pull it out of the shift. |
| 850 | const APInt *Op0C; |
| 851 | if (match(V: Op0BO->getOperand(i_nocapture: 1), P: m_APInt(Res&: Op0C))) { |
| 852 | if (canShiftBinOpWithConstantRHS(Shift&: I, BO: Op0BO)) { |
| 853 | Value *NewRHS = |
| 854 | Builder.CreateBinOp(Opc: I.getOpcode(), LHS: Op0BO->getOperand(i_nocapture: 1), RHS: C1); |
| 855 | |
| 856 | Value *NewShift = |
| 857 | Builder.CreateBinOp(Opc: I.getOpcode(), LHS: Op0BO->getOperand(i_nocapture: 0), RHS: C1); |
| 858 | NewShift->takeName(V: Op0BO); |
| 859 | |
| 860 | return BinaryOperator::Create(Op: Op0BO->getOpcode(), S1: NewShift, S2: NewRHS); |
| 861 | } |
| 862 | } |
| 863 | } |
| 864 | |
| 865 | // If we have a select that conditionally executes some binary operator, |
  // see if we can pull the select and the operator through the shift.
| 867 | // |
| 868 | // For example, turning: |
| 869 | // shl (select C, (add X, C1), X), C2 |
| 870 | // Into: |
| 871 | // Y = shl X, C2 |
| 872 | // select C, (add Y, C1 << C2), Y |
| 873 | Value *Cond; |
| 874 | BinaryOperator *TBO; |
| 875 | Value *FalseVal; |
| 876 | if (match(V: Op0, P: m_Select(C: m_Value(V&: Cond), L: m_OneUse(SubPattern: m_BinOp(I&: TBO)), |
| 877 | R: m_Value(V&: FalseVal)))) { |
| 878 | const APInt *C; |
| 879 | if (!isa<Constant>(Val: FalseVal) && TBO->getOperand(i_nocapture: 0) == FalseVal && |
| 880 | match(V: TBO->getOperand(i_nocapture: 1), P: m_APInt(Res&: C)) && |
| 881 | canShiftBinOpWithConstantRHS(Shift&: I, BO: TBO)) { |
| 882 | Value *NewRHS = |
| 883 | Builder.CreateBinOp(Opc: I.getOpcode(), LHS: TBO->getOperand(i_nocapture: 1), RHS: C1); |
| 884 | |
| 885 | Value *NewShift = Builder.CreateBinOp(Opc: I.getOpcode(), LHS: FalseVal, RHS: C1); |
| 886 | Value *NewOp = Builder.CreateBinOp(Opc: TBO->getOpcode(), LHS: NewShift, RHS: NewRHS); |
| 887 | return SelectInst::Create(C: Cond, S1: NewOp, S2: NewShift); |
| 888 | } |
| 889 | } |
| 890 | |
| 891 | BinaryOperator *FBO; |
| 892 | Value *TrueVal; |
| 893 | if (match(V: Op0, P: m_Select(C: m_Value(V&: Cond), L: m_Value(V&: TrueVal), |
| 894 | R: m_OneUse(SubPattern: m_BinOp(I&: FBO))))) { |
| 895 | const APInt *C; |
| 896 | if (!isa<Constant>(Val: TrueVal) && FBO->getOperand(i_nocapture: 0) == TrueVal && |
| 897 | match(V: FBO->getOperand(i_nocapture: 1), P: m_APInt(Res&: C)) && |
| 898 | canShiftBinOpWithConstantRHS(Shift&: I, BO: FBO)) { |
| 899 | Value *NewRHS = |
| 900 | Builder.CreateBinOp(Opc: I.getOpcode(), LHS: FBO->getOperand(i_nocapture: 1), RHS: C1); |
| 901 | |
| 902 | Value *NewShift = Builder.CreateBinOp(Opc: I.getOpcode(), LHS: TrueVal, RHS: C1); |
| 903 | Value *NewOp = Builder.CreateBinOp(Opc: FBO->getOpcode(), LHS: NewShift, RHS: NewRHS); |
| 904 | return SelectInst::Create(C: Cond, S1: NewShift, S2: NewOp); |
| 905 | } |
| 906 | } |
| 907 | |
| 908 | return nullptr; |
| 909 | } |
| 910 | |
| 911 | // Tries to perform |
| 912 | // (lshr (add (zext X), (zext Y)), K) |
| 913 | // -> (icmp ult (add X, Y), X) |
| 914 | // where |
| 915 | // - The add's operands are zexts from a K-bits integer to a bigger type. |
| 916 | // - The add is only used by the shr, or by iK (or narrower) truncates. |
| 917 | // - The lshr type has more than 2 bits (other types are boolean math). |
| 918 | // - K > 1 |
| 919 | // note that |
| 920 | // - The resulting add cannot have nuw/nsw, else on overflow we get a |
| 921 | // poison value and the transform isn't legal anymore. |
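//
// For example (an illustrative K = 8 case):
//   %zx = zext i8 %x to i32
//   %zy = zext i8 %y to i32
//   %a  = add i32 %zx, %zy
//   %r  = lshr i32 %a, 8              ; carry-out of the 8-bit add
// becomes
//   %n  = add i8 %x, %y
//   %o  = icmp ult i8 %n, %x
//   %r  = zext i1 %o to i32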
| 922 | Instruction *InstCombinerImpl::foldLShrOverflowBit(BinaryOperator &I) { |
| 923 | assert(I.getOpcode() == Instruction::LShr); |
| 924 | |
| 925 | Value *Add = I.getOperand(i_nocapture: 0); |
| 926 | Value *ShiftAmt = I.getOperand(i_nocapture: 1); |
| 927 | Type *Ty = I.getType(); |
| 928 | |
| 929 | if (Ty->getScalarSizeInBits() < 3) |
| 930 | return nullptr; |
| 931 | |
| 932 | const APInt *ShAmtAPInt = nullptr; |
| 933 | Value *X = nullptr, *Y = nullptr; |
| 934 | if (!match(V: ShiftAmt, P: m_APInt(Res&: ShAmtAPInt)) || |
| 935 | !match(V: Add, |
| 936 | P: m_Add(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))), R: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: Y)))))) |
| 937 | return nullptr; |
| 938 | |
| 939 | const unsigned ShAmt = ShAmtAPInt->getZExtValue(); |
| 940 | if (ShAmt == 1) |
| 941 | return nullptr; |
| 942 | |
| 943 | // X/Y are zexts from `ShAmt`-sized ints. |
| 944 | if (X->getType()->getScalarSizeInBits() != ShAmt || |
| 945 | Y->getType()->getScalarSizeInBits() != ShAmt) |
| 946 | return nullptr; |
| 947 | |
| 948 | // Make sure that `Add` is only used by `I` and `ShAmt`-truncates. |
| 949 | if (!Add->hasOneUse()) { |
| 950 | for (User *U : Add->users()) { |
| 951 | if (U == &I) |
| 952 | continue; |
| 953 | |
| 954 | TruncInst *Trunc = dyn_cast<TruncInst>(Val: U); |
| 955 | if (!Trunc || Trunc->getType()->getScalarSizeInBits() > ShAmt) |
| 956 | return nullptr; |
| 957 | } |
| 958 | } |
| 959 | |
  // Insert at Add so that the newly created `NarrowAdd` will dominate its
| 961 | // users (i.e. `Add`'s users). |
| 962 | Instruction *AddInst = cast<Instruction>(Val: Add); |
| 963 | Builder.SetInsertPoint(AddInst); |
| 964 | |
  Value *NarrowAdd = Builder.CreateAdd(LHS: X, RHS: Y, Name: "add.narrowed");
| 966 | Value *Overflow = |
      Builder.CreateICmpULT(LHS: NarrowAdd, RHS: X, Name: "add.narrowed.overflow");
| 968 | |
| 969 | // Replace the uses of the original add with a zext of the |
| 970 | // NarrowAdd's result. Note that all users at this stage are known to |
| 971 | // be ShAmt-sized truncs, or the lshr itself. |
| 972 | if (!Add->hasOneUse()) { |
| 973 | replaceInstUsesWith(I&: *AddInst, V: Builder.CreateZExt(V: NarrowAdd, DestTy: Ty)); |
| 974 | eraseInstFromFunction(I&: *AddInst); |
| 975 | } |
| 976 | |
| 977 | // Replace the LShr with a zext of the overflow check. |
| 978 | return new ZExtInst(Overflow, Ty); |
| 979 | } |
| 980 | |
| 981 | // Try to set nuw/nsw flags on shl or exact flag on lshr/ashr using knownbits. |
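// For example (illustrative, i8): in 'shl (and X, 15), 2' the shifted operand
// has at least 4 leading zeros and at least 4 sign bits, both exceeding the
// maximum shift amount of 2, so both nuw and nsw can be inferred.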
| 982 | static bool setShiftFlags(BinaryOperator &I, const SimplifyQuery &Q) { |
  assert(I.isShift() && "Expected a shift as input");
| 984 | // We already have all the flags. |
| 985 | if (I.getOpcode() == Instruction::Shl) { |
| 986 | if (I.hasNoUnsignedWrap() && I.hasNoSignedWrap()) |
| 987 | return false; |
| 988 | } else { |
| 989 | if (I.isExact()) |
| 990 | return false; |
| 991 | |
| 992 | // shr (shl X, Y), Y |
| 993 | if (match(V: I.getOperand(i_nocapture: 0), P: m_Shl(L: m_Value(), R: m_Specific(V: I.getOperand(i_nocapture: 1))))) { |
| 994 | I.setIsExact(); |
| 995 | return true; |
| 996 | } |
| 997 | // Infer 'exact' flag if shift amount is cttz(x) on the same operand. |
| 998 | if (match(V: I.getOperand(i_nocapture: 1), P: m_Intrinsic<Intrinsic::cttz>( |
| 999 | Op0: m_Specific(V: I.getOperand(i_nocapture: 0)), Op1: m_Value()))) { |
| 1000 | I.setIsExact(); |
| 1001 | return true; |
| 1002 | } |
| 1003 | } |
| 1004 | |
| 1005 | // Compute what we know about shift count. |
| 1006 | KnownBits KnownCnt = computeKnownBits(V: I.getOperand(i_nocapture: 1), Q); |
| 1007 | unsigned BitWidth = KnownCnt.getBitWidth(); |
| 1008 | // Since shift produces a poison value if RHS is equal to or larger than the |
| 1009 | // bit width, we can safely assume that RHS is less than the bit width. |
| 1010 | uint64_t MaxCnt = KnownCnt.getMaxValue().getLimitedValue(Limit: BitWidth - 1); |
| 1011 | |
| 1012 | KnownBits KnownAmt = computeKnownBits(V: I.getOperand(i_nocapture: 0), Q); |
| 1013 | bool Changed = false; |
| 1014 | |
| 1015 | if (I.getOpcode() == Instruction::Shl) { |
    // If we have at least as many leading zeros as the maximum shift count,
    // we have nuw.
| 1017 | if (!I.hasNoUnsignedWrap() && MaxCnt <= KnownAmt.countMinLeadingZeros()) { |
| 1018 | I.setHasNoUnsignedWrap(); |
| 1019 | Changed = true; |
| 1020 | } |
    // If we have more sign bits than the maximum shift count, we have nsw.
| 1022 | if (!I.hasNoSignedWrap()) { |
| 1023 | if (MaxCnt < KnownAmt.countMinSignBits() || |
| 1024 | MaxCnt < |
| 1025 | ComputeNumSignBits(Op: I.getOperand(i_nocapture: 0), DL: Q.DL, AC: Q.AC, CxtI: Q.CxtI, DT: Q.DT)) { |
| 1026 | I.setHasNoSignedWrap(); |
| 1027 | Changed = true; |
| 1028 | } |
| 1029 | } |
| 1030 | return Changed; |
| 1031 | } |
| 1032 | |
  // If we have at least as many trailing zeros as the maximum shift count,
  // then we have exact.
| 1035 | Changed = MaxCnt <= KnownAmt.countMinTrailingZeros(); |
| 1036 | I.setIsExact(Changed); |
| 1037 | |
| 1038 | return Changed; |
| 1039 | } |
| 1040 | |
| 1041 | Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) { |
| 1042 | const SimplifyQuery Q = SQ.getWithInstruction(I: &I); |
| 1043 | |
| 1044 | if (Value *V = simplifyShlInst(Op0: I.getOperand(i_nocapture: 0), Op1: I.getOperand(i_nocapture: 1), |
| 1045 | IsNSW: I.hasNoSignedWrap(), IsNUW: I.hasNoUnsignedWrap(), Q)) |
| 1046 | return replaceInstUsesWith(I, V); |
| 1047 | |
| 1048 | if (Instruction *X = foldVectorBinop(Inst&: I)) |
| 1049 | return X; |
| 1050 | |
| 1051 | if (Instruction *V = commonShiftTransforms(I)) |
| 1052 | return V; |
| 1053 | |
| 1054 | if (Instruction *V = dropRedundantMaskingOfLeftShiftInput(OuterShift: &I, Q, Builder)) |
| 1055 | return V; |
| 1056 | |
| 1057 | Value *Op0 = I.getOperand(i_nocapture: 0), *Op1 = I.getOperand(i_nocapture: 1); |
| 1058 | Type *Ty = I.getType(); |
| 1059 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
| 1060 | |
| 1061 | const APInt *C; |
| 1062 | if (match(V: Op1, P: m_APInt(Res&: C))) { |
| 1063 | unsigned ShAmtC = C->getZExtValue(); |
| 1064 | |
| 1065 | // shl (zext X), C --> zext (shl X, C) |
| 1066 | // This is only valid if X would have zeros shifted out. |
| 1067 | Value *X; |
| 1068 | if (match(V: Op0, P: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))))) { |
| 1069 | unsigned SrcWidth = X->getType()->getScalarSizeInBits(); |
| 1070 | if (ShAmtC < SrcWidth && |
| 1071 | MaskedValueIsZero(V: X, Mask: APInt::getHighBitsSet(numBits: SrcWidth, hiBitsSet: ShAmtC), CxtI: &I)) |
| 1072 | return new ZExtInst(Builder.CreateShl(LHS: X, RHS: ShAmtC), Ty); |
| 1073 | } |
| 1074 | |
| 1075 | // (X >> C) << C --> X & (-1 << C) |
| 1076 | if (match(V: Op0, P: m_Shr(L: m_Value(V&: X), R: m_Specific(V: Op1)))) { |
| 1077 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
| 1078 | return BinaryOperator::CreateAnd(V1: X, V2: ConstantInt::get(Ty, V: Mask)); |
| 1079 | } |
| 1080 | |
| 1081 | const APInt *C1; |
| 1082 | if (match(V: Op0, P: m_Exact(SubPattern: m_Shr(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) && |
| 1083 | C1->ult(RHS: BitWidth)) { |
| 1084 | unsigned ShrAmt = C1->getZExtValue(); |
| 1085 | if (ShrAmt < ShAmtC) { |
| 1086 | // If C1 < C: (X >>?,exact C1) << C --> X << (C - C1) |
| 1087 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShAmtC - ShrAmt); |
| 1088 | auto *NewShl = BinaryOperator::CreateShl(V1: X, V2: ShiftDiff); |
| 1089 | NewShl->setHasNoUnsignedWrap( |
| 1090 | I.hasNoUnsignedWrap() || |
| 1091 | (ShrAmt && |
| 1092 | cast<Instruction>(Val: Op0)->getOpcode() == Instruction::LShr && |
| 1093 | I.hasNoSignedWrap())); |
| 1094 | NewShl->setHasNoSignedWrap(I.hasNoSignedWrap()); |
| 1095 | return NewShl; |
| 1096 | } |
| 1097 | if (ShrAmt > ShAmtC) { |
| 1098 | // If C1 > C: (X >>?exact C1) << C --> X >>?exact (C1 - C) |
| 1099 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShrAmt - ShAmtC); |
| 1100 | auto *NewShr = BinaryOperator::Create( |
| 1101 | Op: cast<BinaryOperator>(Val: Op0)->getOpcode(), S1: X, S2: ShiftDiff); |
| 1102 | NewShr->setIsExact(true); |
| 1103 | return NewShr; |
| 1104 | } |
| 1105 | } |
| 1106 | |
| 1107 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Shr(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) && |
| 1108 | C1->ult(RHS: BitWidth)) { |
| 1109 | unsigned ShrAmt = C1->getZExtValue(); |
| 1110 | if (ShrAmt < ShAmtC) { |
| 1111 | // If C1 < C: (X >>? C1) << C --> (X << (C - C1)) & (-1 << C) |
| 1112 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShAmtC - ShrAmt); |
| 1113 | auto *NewShl = BinaryOperator::CreateShl(V1: X, V2: ShiftDiff); |
| 1114 | NewShl->setHasNoUnsignedWrap( |
| 1115 | I.hasNoUnsignedWrap() || |
| 1116 | (ShrAmt && |
| 1117 | cast<Instruction>(Val: Op0)->getOpcode() == Instruction::LShr && |
| 1118 | I.hasNoSignedWrap())); |
| 1119 | NewShl->setHasNoSignedWrap(I.hasNoSignedWrap()); |
| 1120 | Builder.Insert(I: NewShl); |
| 1121 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
| 1122 | return BinaryOperator::CreateAnd(V1: NewShl, V2: ConstantInt::get(Ty, V: Mask)); |
| 1123 | } |
| 1124 | if (ShrAmt > ShAmtC) { |
| 1125 | // If C1 > C: (X >>? C1) << C --> (X >>? (C1 - C)) & (-1 << C) |
| 1126 | Constant *ShiftDiff = ConstantInt::get(Ty, V: ShrAmt - ShAmtC); |
| 1127 | auto *OldShr = cast<BinaryOperator>(Val: Op0); |
| 1128 | auto *NewShr = |
| 1129 | BinaryOperator::Create(Op: OldShr->getOpcode(), S1: X, S2: ShiftDiff); |
| 1130 | NewShr->setIsExact(OldShr->isExact()); |
| 1131 | Builder.Insert(I: NewShr); |
| 1132 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
| 1133 | return BinaryOperator::CreateAnd(V1: NewShr, V2: ConstantInt::get(Ty, V: Mask)); |
| 1134 | } |
| 1135 | } |
| 1136 | |
| 1137 | // Similar to above, but look through an intermediate trunc instruction. |
| 1138 | BinaryOperator *Shr; |
| 1139 | if (match(V: Op0, P: m_OneUse(SubPattern: m_Trunc(Op: m_OneUse(SubPattern: m_BinOp(I&: Shr))))) && |
| 1140 | match(V: Shr, P: m_Shr(L: m_Value(V&: X), R: m_APInt(Res&: C1)))) { |
| 1141 | // The larger shift direction survives through the transform. |
| 1142 | unsigned ShrAmtC = C1->getZExtValue(); |
| 1143 | unsigned ShDiff = ShrAmtC > ShAmtC ? ShrAmtC - ShAmtC : ShAmtC - ShrAmtC; |
| 1144 | Constant *ShiftDiffC = ConstantInt::get(Ty: X->getType(), V: ShDiff); |
| 1145 | auto ShiftOpc = ShrAmtC > ShAmtC ? Shr->getOpcode() : Instruction::Shl; |
| 1146 | |
      // If C1 > C:
      // (trunc (X >> C1)) << C --> (trunc (X >> (C1 - C))) & (-1 << C)
      // If C > C1:
      // (trunc (X >> C1)) << C --> (trunc (X << (C - C1))) & (-1 << C)
| 1151 | Value *NewShift = Builder.CreateBinOp(Opc: ShiftOpc, LHS: X, RHS: ShiftDiffC, Name: "sh.diff" ); |
| 1152 | Value *Trunc = Builder.CreateTrunc(V: NewShift, DestTy: Ty, Name: "tr.sh.diff" ); |
| 1153 | APInt Mask(APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - ShAmtC)); |
| 1154 | return BinaryOperator::CreateAnd(V1: Trunc, V2: ConstantInt::get(Ty, V: Mask)); |
| 1155 | } |
| 1156 | |
    // If we have an opposite shift by the same amount, we may be able to
    // reorder binops and shifts to eliminate math/logic.
    auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
      switch (BinOpcode) {
      default:
        return false;
      case Instruction::Add:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor:
      case Instruction::Sub:
        // NOTE: Sub is not commutable and the transforms below may not be
        // valid when the shift-right is operand 1 (RHS) of the sub.
        return true;
      }
    };
    BinaryOperator *Op0BO;
    if (match(Op0, m_OneUse(m_BinOp(Op0BO))) &&
        isSuitableBinOpcode(Op0BO->getOpcode())) {
      // Commute so shift-right is on LHS of the binop.
      // (Y bop (X >> C)) << C -> ((X >> C) bop Y) << C
      // (Y bop ((X >> C) & CC)) << C -> (((X >> C) & CC) bop Y) << C
      Value *Shr = Op0BO->getOperand(0);
      Value *Y = Op0BO->getOperand(1);
      Value *X;
      const APInt *CC;
      if (Op0BO->isCommutative() && Y->hasOneUse() &&
          (match(Y, m_Shr(m_Value(), m_Specific(Op1))) ||
           match(Y, m_And(m_OneUse(m_Shr(m_Value(), m_Specific(Op1))),
                          m_APInt(CC)))))
        std::swap(Shr, Y);

      // ((X >> C) bop Y) << C -> (X bop (Y << C)) & (~0 << C)
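      // e.g. for i8: ((X >>u 4) + Y) << 4 --> (X + (Y << 4)) & 0xF0
      // (the low nibble of X cannot carry into bit 4 of the sum).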
      if (match(Shr, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
        // Y << C
        Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
        // (X bop (Y << C))
        Value *B =
            Builder.CreateBinOp(Op0BO->getOpcode(), X, YS, Shr->getName());
        unsigned Op1Val = C->getLimitedValue(BitWidth);
        APInt Bits = APInt::getHighBitsSet(BitWidth, BitWidth - Op1Val);
        Constant *Mask = ConstantInt::get(Ty, Bits);
        return BinaryOperator::CreateAnd(B, Mask);
      }

      // (((X >> C) & CC) bop Y) << C -> (X & (CC << C)) bop (Y << C)
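      // e.g. for i8: (((X >>u 4) & 3) | Y) << 4 --> (X & 0x30) | (Y << 4)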
      if (match(Shr,
                m_OneUse(m_And(m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))),
                               m_APInt(CC))))) {
        // Y << C
        Value *YS = Builder.CreateShl(Y, Op1, Op0BO->getName());
        // X & (CC << C)
        Value *M = Builder.CreateAnd(X, ConstantInt::get(Ty, CC->shl(*C)),
                                     X->getName() + ".mask");
        auto *NewOp = BinaryOperator::Create(Op0BO->getOpcode(), M, YS);
        if (auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Op0BO);
            Disjoint && Disjoint->isDisjoint())
          cast<PossiblyDisjointInst>(NewOp)->setIsDisjoint(true);
        return NewOp;
      }
    }

    // (C1 - X) << C --> (C1 << C) - (X << C)
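    // e.g. for i8: (32 - X) << 2 --> 0x80 - (X << 2) (32 << 2 wraps to 0x80).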
    if (match(Op0, m_OneUse(m_Sub(m_APInt(C1), m_Value(X))))) {
      Constant *NewLHS = ConstantInt::get(Ty, C1->shl(*C));
      Value *NewShift = Builder.CreateShl(X, Op1);
      return BinaryOperator::CreateSub(NewLHS, NewShift);
    }
  }
| 1226 | |
  if (setShiftFlags(I, Q))
    return &I;
| 1229 | |
  // Transform (x >> y) << y to x & (-1 << y)
  // Valid for any type of right-shift.
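  // e.g. for i8: (X a>> 3) << 3 --> X & 0xF8 (-1 << 3 == 0xF8).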
  Value *X;
  if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    Value *Mask = Builder.CreateShl(AllOnes, Op1);
    return BinaryOperator::CreateAnd(Mask, X);
  }
| 1238 | |
  // Transform (-1 >> y) << y to -1 << y
  if (match(Op0, m_LShr(m_AllOnes(), m_Specific(Op1)))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    return BinaryOperator::CreateShl(AllOnes, Op1);
  }
| 1244 | |
  Constant *C1;
  if (match(Op1, m_ImmConstant(C1))) {
    Constant *C2;
    Value *X;
    // (X * C2) << C1 --> X * (C2 << C1)
    if (match(Op0, m_Mul(m_Value(X), m_ImmConstant(C2))))
      return BinaryOperator::CreateMul(X, Builder.CreateShl(C2, C1));

    // shl (zext i1 X), C1 --> select (X, 1 << C1, 0)
    if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      auto *NewC = Builder.CreateShl(ConstantInt::get(Ty, 1), C1);
      return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
    }
  }
| 1259 | |
  if (match(Op0, m_One())) {
    // (1 << (C - x)) -> ((1 << C) >> x) if C is bitwidth - 1
    if (match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X))))
      return BinaryOperator::CreateLShr(
          ConstantInt::get(Ty, APInt::getSignMask(BitWidth)), X);

    // Canonicalize "extract lowest set bit" using cttz to and-with-negate:
    // 1 << (cttz X) --> -X & X
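    // e.g. X = 0b00110100: cttz(X) == 2, 1 << 2 == 0b00000100 == -X & X.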
    if (match(Op1,
              m_OneUse(m_Intrinsic<Intrinsic::cttz>(m_Value(X), m_Value())))) {
      Value *NegX = Builder.CreateNeg(X, "neg");
      return BinaryOperator::CreateAnd(NegX, X);
    }
  }
| 1274 | |
  return nullptr;
| 1276 | } |
| 1277 | |
| 1278 | Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) { |
  if (Value *V = simplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
| 1282 | |
  if (Instruction *X = foldVectorBinop(I))
    return X;
| 1285 | |
  if (Instruction *R = commonShiftTransforms(I))
    return R;
| 1288 | |
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  const APInt *C;
  unsigned BitWidth = Ty->getScalarSizeInBits();
| 1294 | |
  // (iN (~X) u>> (N - 1)) --> zext (X > -1)
  if (match(Op0, m_OneUse(m_Not(m_Value(X)))) &&
      match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)))
    return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
| 1299 | |
  // ((X << nuw Z) sub nuw Y) >>u exact Z --> X sub nuw (Y >>u exact Z)
  Value *Y;
  if (I.isExact() &&
      match(Op0, m_OneUse(m_NUWSub(m_NUWShl(m_Value(X), m_Specific(Op1)),
                                   m_Value(Y))))) {
    Value *NewLshr = Builder.CreateLShr(Y, Op1, "", /*isExact=*/true);
    auto *NewSub = BinaryOperator::CreateNUWSub(X, NewLshr);
    NewSub->setHasNoSignedWrap(
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
    return NewSub;
  }
| 1311 | |
  // Fold (X + Y) / 2 --> (X & Y) iff (X u<= 1) && (Y u<= 1)
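  // e.g. with X and Y both in {0, 1}: (1 + 1) >>u 1 == 1 == (1 & 1) and
  // (1 + 0) >>u 1 == 0 == (1 & 0).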
  if (match(Op0, m_Add(m_Value(X), m_Value(Y))) && match(Op1, m_One()) &&
      computeKnownBits(X, &I).countMaxActiveBits() <= 1 &&
      computeKnownBits(Y, &I).countMaxActiveBits() <= 1)
    return BinaryOperator::CreateAnd(X, Y);
| 1317 | |
  // (sub nuw X, (Y << nuw Z)) >>u exact Z --> (X >>u exact Z) sub nuw Y
  if (I.isExact() &&
      match(Op0, m_OneUse(m_NUWSub(m_Value(X),
                                   m_NUWShl(m_Value(Y), m_Specific(Op1)))))) {
    Value *NewLshr = Builder.CreateLShr(X, Op1, "", /*isExact=*/true);
    auto *NewSub = BinaryOperator::CreateNUWSub(NewLshr, Y);
    NewSub->setHasNoSignedWrap(
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
    return NewSub;
  }
| 1328 | |
  auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
    switch (BinOpcode) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      // Sub is handled separately.
      return true;
    }
  };
| 1341 | |
  // If both the binop and the shift are nuw, then:
  // ((X << nuw Z) binop nuw Y) >>u Z --> X binop nuw (Y >>u Z)
  if (match(Op0, m_OneUse(m_c_BinOp(m_NUWShl(m_Value(X), m_Specific(Op1)),
                                    m_Value(Y))))) {
    BinaryOperator *Op0OB = cast<BinaryOperator>(Op0);
    if (isSuitableBinOpcode(Op0OB->getOpcode())) {
      if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op0);
          !OBO || OBO->hasNoUnsignedWrap()) {
        Value *NewLshr = Builder.CreateLShr(
            Y, Op1, "", I.isExact() && Op0OB->getOpcode() != Instruction::And);
        auto *NewBinOp = BinaryOperator::Create(Op0OB->getOpcode(), NewLshr, X);
        if (OBO) {
          NewBinOp->setHasNoUnsignedWrap(true);
          NewBinOp->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        } else if (auto *Disjoint = dyn_cast<PossiblyDisjointInst>(Op0)) {
          cast<PossiblyDisjointInst>(NewBinOp)->setIsDisjoint(
              Disjoint->isDisjoint());
        }
        return NewBinOp;
      }
    }
  }
| 1364 | |
  if (match(Op1, m_APInt(C))) {
    unsigned ShAmtC = C->getZExtValue();
    auto *II = dyn_cast<IntrinsicInst>(Op0);
    if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmtC &&
        (II->getIntrinsicID() == Intrinsic::ctlz ||
         II->getIntrinsicID() == Intrinsic::cttz ||
         II->getIntrinsicID() == Intrinsic::ctpop)) {
      // ctlz.i32(x)>>5 --> zext(x == 0)
      // cttz.i32(x)>>5 --> zext(x == 0)
      // ctpop.i32(x)>>5 --> zext(x == -1)
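      // e.g. ctlz.i32(0) == 32 == 0b100000, while any nonzero input gives a
      // count u< 32, so bit 5 of the result is set iff the input is zero.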
      bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
      Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
      Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
      return new ZExtInst(Cmp, Ty);
    }
| 1380 | |
    const APInt *C1;
    if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
      if (C1->ult(ShAmtC)) {
        unsigned ShlAmtC = C1->getZExtValue();
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmtC - ShlAmtC);
        if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C --> X >>u (C - C1)
          auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
          NewLShr->setIsExact(I.isExact());
          return NewLShr;
        }
        if (Op0->hasOneUse()) {
          // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
          Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
          APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
          return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
        }
      } else if (C1->ugt(ShAmtC)) {
        unsigned ShlAmtC = C1->getZExtValue();
        Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
        if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C --> X <<nuw/nsw (C1 - C)
          auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
          NewShl->setHasNoUnsignedWrap(true);
          NewShl->setHasNoSignedWrap(ShAmtC > 0);
          return NewShl;
        }
        if (Op0->hasOneUse()) {
          // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
          Value *NewShl = Builder.CreateShl(X, ShiftDiff);
          APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
          return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
        }
      } else {
        assert(*C1 == ShAmtC);
        // (X << C) >>u C --> X & (-1 >>u C)
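        // e.g. for i8: (X << 3) >>u 3 --> X & 0x1F (-1 >>u 3 == 0x1F).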
        APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
        return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
      }
    }
| 1421 | |
    // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
    // TODO: Consolidate with the more general transform that starts from shl
    // (the shifts are in the opposite order).
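    // e.g. for i8: ((X << 4) + Y) >>u 4 --> (X + (Y >>u 4)) & 0x0F
    // (the low nibble of Y cannot carry into bit 4 of the sum).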
    if (match(Op0,
              m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
                               m_Value(Y))))) {
      Value *NewLshr = Builder.CreateLShr(Y, Op1);
      Value *NewAdd = Builder.CreateAdd(NewLshr, X);
      unsigned Op1Val = C->getLimitedValue(BitWidth);
      APInt Bits = APInt::getLowBitsSet(BitWidth, BitWidth - Op1Val);
      Constant *Mask = ConstantInt::get(Ty, Bits);
      return BinaryOperator::CreateAnd(NewAdd, Mask);
    }
| 1435 | |
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
        (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
      assert(ShAmtC < X->getType()->getScalarSizeInBits() &&
             "Big shift not simplified to zero?");
      // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
      Value *NewLShr = Builder.CreateLShr(X, ShAmtC);
      return new ZExtInst(NewLShr, Ty);
    }
| 1444 | |
    if (match(Op0, m_SExt(m_Value(X)))) {
      unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
      // lshr (sext i1 X to iN), C --> select (X, -1 >> C, 0)
      if (SrcTyBitWidth == 1) {
        auto *NewC = ConstantInt::get(
            Ty, APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
        return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
      }

      if ((!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType())) &&
          Op0->hasOneUse()) {
        // Are we moving the sign bit to the low bit and widening with high
        // zeros? lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
        if (ShAmtC == BitWidth - 1) {
          Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
          return new ZExtInst(NewLShr, Ty);
        }

        // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
        if (ShAmtC == BitWidth - SrcTyBitWidth) {
          // The new shift amount can't be more than the narrow source type.
          unsigned NewShAmt = std::min(ShAmtC, SrcTyBitWidth - 1);
          Value *AShr = Builder.CreateAShr(X, NewShAmt);
          return new ZExtInst(AShr, Ty);
        }
      }
    }
| 1472 | |
    if (ShAmtC == BitWidth - 1) {
      // lshr i32 or(X,-X), 31 --> zext (X != 0)
      if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
        return new ZExtInst(Builder.CreateIsNotNull(X), Ty);

      // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
      if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
        return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);

      // Check if a number is negative and odd:
      // lshr i32 (srem X, 2), 31 --> and (X >> 31), X
      if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) {
        Value *Signbit = Builder.CreateLShr(X, ShAmtC);
        return BinaryOperator::CreateAnd(Signbit, X);
      }

      // lshr iN (X - 1) & ~X, N-1 --> zext (X == 0)
      if (match(Op0, m_OneUse(m_c_And(m_Add(m_Value(X), m_AllOnes()),
                                      m_Not(m_Deferred(X))))))
        return new ZExtInst(Builder.CreateIsNull(X), Ty);
    }
| 1494 | |
    Instruction *TruncSrc;
    if (match(Op0, m_OneUse(m_Trunc(m_Instruction(TruncSrc)))) &&
        match(TruncSrc, m_LShr(m_Value(X), m_APInt(C1)))) {
      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
      unsigned AmtSum = ShAmtC + C1->getZExtValue();

      // If the combined shift fits in the source width:
      // (trunc (X >>u C1)) >>u C --> and (trunc (X >>u (C1 + C)), MaskC)
      //
      // If the first shift covers the number of bits truncated, then the
      // mask instruction is eliminated (and so the use check is relaxed).
      if (AmtSum < SrcWidth &&
          (TruncSrc->hasOneUse() || C1->uge(SrcWidth - BitWidth))) {
        Value *SumShift = Builder.CreateLShr(X, AmtSum, "sum.shift");
        Value *Trunc = Builder.CreateTrunc(SumShift, Ty, I.getName());

        // If the first shift does not cover the number of bits truncated, then
        // we require a mask to get rid of high bits in the result.
        APInt MaskC = APInt::getAllOnes(BitWidth).lshr(ShAmtC);
        return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(Ty, MaskC));
      }
    }
| 1517 | |
    const APInt *MulC;
    if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
      if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
          MulC->logBase2() == ShAmtC) {
        // Look for a "splat" mul pattern - it replicates bits across each half
        // of a value, so a right shift simplifies back to just X:
        // lshr i[2N] (mul nuw X, (2^N)+1), N --> X
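        // e.g. for i8: 0x0A * 0x11 == 0xAA, and 0xAA >>u 4 == 0x0A again.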
        if (ShAmtC * 2 == BitWidth)
          return replaceInstUsesWith(I, X);

        // lshr (mul nuw (X, 2^N + 1)), N -> add nuw (X, lshr(X, N))
        if (Op0->hasOneUse()) {
          auto *NewAdd = BinaryOperator::CreateNUWAdd(
              X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
                                    I.isExact()));
          NewAdd->setHasNoSignedWrap(
              cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
          return NewAdd;
        }
      }

      // The one-use check is not strictly necessary, but codegen may not be
      // able to invert the transform and perf may suffer with an extra mul
      // instruction.
      if (Op0->hasOneUse()) {
        APInt NewMulC = MulC->lshr(ShAmtC);
        // if c is divisible by (1 << ShAmtC):
        // lshr (mul nuw x, MulC), ShAmtC -> mul nuw nsw x, (MulC >> ShAmtC)
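        // e.g. lshr (mul nuw X, 48), 4 --> mul nuw nsw X, 3 (48 == 3 << 4).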
        if (MulC->eq(NewMulC.shl(ShAmtC))) {
          auto *NewMul =
              BinaryOperator::CreateNUWMul(X, ConstantInt::get(Ty, NewMulC));
          assert(ShAmtC != 0 &&
                 "lshr X, 0 should be handled by simplifyLShrInst.");
          NewMul->setHasNoSignedWrap(true);
          return NewMul;
        }
      }
    }
| 1556 | |
    // lshr (mul nsw (X, 2^N + 1)), N -> add nsw (X, lshr(X, N))
    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC))))) {
      if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
          MulC->logBase2() == ShAmtC) {
        return BinaryOperator::CreateNSWAdd(
            X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
                                  I.isExact()));
      }
    }
| 1566 | |
    // Try to narrow bswap.
    // In the case where the shift amount equals the bitwidth difference, the
    // shift is eliminated.
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::bswap>(
                       m_OneUse(m_ZExt(m_Value(X))))))) {
      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
      unsigned WidthDiff = BitWidth - SrcWidth;
      if (SrcWidth % 16 == 0) {
        Value *NarrowSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
        if (ShAmtC >= WidthDiff) {
          // (bswap (zext X)) >> C --> zext (bswap X >> C')
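          // e.g. (bswap (zext i16 X to i32)) >>u 16 --> zext (bswap X) to i32
          // (0x0000ABCD swaps to 0xCDAB0000; >>u 16 leaves 0x0000CDAB).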
          Value *NewShift = Builder.CreateLShr(NarrowSwap, ShAmtC - WidthDiff);
          return new ZExtInst(NewShift, Ty);
        } else {
          // (bswap (zext X)) >> C --> (zext (bswap X)) << C'
          Value *NewZExt = Builder.CreateZExt(NarrowSwap, Ty);
          Constant *ShiftDiff = ConstantInt::get(Ty, WidthDiff - ShAmtC);
          return BinaryOperator::CreateShl(NewZExt, ShiftDiff);
        }
      }
    }
| 1588 | |
    // Reduce add-carry of bools to logic:
    // ((zext BoolX) + (zext BoolY)) >> 1 --> zext (BoolX && BoolY)
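    // The sum is at most 2, so bit 1 is set iff both bools are true.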
    Value *BoolX, *BoolY;
    if (ShAmtC == 1 && match(Op0, m_Add(m_Value(X), m_Value(Y))) &&
        match(X, m_ZExt(m_Value(BoolX))) && match(Y, m_ZExt(m_Value(BoolY))) &&
        BoolX->getType()->isIntOrIntVectorTy(1) &&
        BoolY->getType()->isIntOrIntVectorTy(1) &&
        (X->hasOneUse() || Y->hasOneUse() || Op0->hasOneUse())) {
      Value *And = Builder.CreateAnd(BoolX, BoolY);
      return new ZExtInst(And, Ty);
    }
  }
| 1601 | |
  const SimplifyQuery Q = SQ.getWithInstruction(&I);
  if (setShiftFlags(I, Q))
    return &I;
| 1605 | |
  // Transform (x << y) >> y to x & (-1 >> y)
  if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    Value *Mask = Builder.CreateLShr(AllOnes, Op1);
    return BinaryOperator::CreateAnd(Mask, X);
  }
| 1612 | |
  // Transform (-1 << y) >> y to -1 >> y
  if (match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1)))) {
    Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
    return BinaryOperator::CreateLShr(AllOnes, Op1);
  }
| 1618 | |
  if (Instruction *Overflow = foldLShrOverflowBit(I))
    return Overflow;
| 1621 | |
  // Transform ((pow2 << x) >> cttz(pow2 << y)) -> ((1 << x) >> y)
  Value *Shl0_Op0, *Shl0_Op1, *Shl1_Op1;
  BinaryOperator *Shl1;
  if (match(Op0, m_Shl(m_Value(Shl0_Op0), m_Value(Shl0_Op1))) &&
      match(Op1, m_Intrinsic<Intrinsic::cttz>(m_BinOp(Shl1))) &&
      match(Shl1, m_Shl(m_Specific(Shl0_Op0), m_Value(Shl1_Op1))) &&
      isKnownToBeAPowerOfTwo(Shl0_Op0, /*OrZero=*/true, &I)) {
    auto *Shl0 = cast<BinaryOperator>(Op0);
    bool HasNUW = Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap();
    bool HasNSW = Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap();
    if (HasNUW || HasNSW) {
      Value *NewShl = Builder.CreateShl(ConstantInt::get(Shl1->getType(), 1),
                                        Shl0_Op1, "", HasNUW, HasNSW);
      return BinaryOperator::CreateLShr(NewShl, Shl1_Op1);
    }
  }
  return nullptr;
}
| 1640 | |
Instruction *
InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract(
    BinaryOperator &OldAShr) {
  assert(OldAShr.getOpcode() == Instruction::AShr &&
         "Must be called with arithmetic right-shift instruction only.");

  // Check that constant C is a splat of the element-wise bitwidth of V.
  auto BitWidthSplat = [](Constant *C, Value *V) {
    return match(
        C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                              APInt(C->getType()->getScalarSizeInBits(),
                                    V->getType()->getScalarSizeInBits())));
  };
| 1654 | |
  // It should look like variable-length sign-extension on the outside:
  // (Val << (bitwidth(Val)-Nbits)) a>> (bitwidth(Val)-Nbits)
  Value *NBits;
  Instruction *MaybeTrunc;
  Constant *C1, *C2;
  if (!match(&OldAShr,
             m_AShr(m_Shl(m_Instruction(MaybeTrunc),
                          m_ZExtOrSelf(m_Sub(m_Constant(C1),
                                             m_ZExtOrSelf(m_Value(NBits))))),
                    m_ZExtOrSelf(m_Sub(m_Constant(C2),
                                       m_ZExtOrSelf(m_Deferred(NBits)))))) ||
      !BitWidthSplat(C1, &OldAShr) || !BitWidthSplat(C2, &OldAShr))
    return nullptr;
| 1668 | |
  // There may or may not be a truncation after outer two shifts.
  Instruction *HighBitExtract;
  match(MaybeTrunc, m_TruncOrSelf(m_Instruction(HighBitExtract)));
  bool HadTrunc = MaybeTrunc != HighBitExtract;
| 1673 | |
  // And finally, the innermost part of the pattern must be a right-shift.
  Value *X, *NumLowBitsToSkip;
  if (!match(HighBitExtract, m_Shr(m_Value(X), m_Value(NumLowBitsToSkip))))
    return nullptr;
| 1678 | |
  // Said right-shift must extract high NBits bits - C0 must be its bitwidth.
  Constant *C0;
  if (!match(NumLowBitsToSkip,
             m_ZExtOrSelf(
                 m_Sub(m_Constant(C0), m_ZExtOrSelf(m_Specific(NBits))))) ||
      !BitWidthSplat(C0, HighBitExtract))
    return nullptr;
| 1686 | |
  // Since the NBits is identical for all shifts, if the outermost and
  // innermost shifts are identical, then outermost shifts are redundant.
  // If we had truncation, do keep it though.
  if (HighBitExtract->getOpcode() == OldAShr.getOpcode())
    return replaceInstUsesWith(OldAShr, MaybeTrunc);
| 1692 | |
  // Else, if there was a truncation, then we need to ensure that one
  // instruction will go away.
  if (HadTrunc && !match(&OldAShr, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
    return nullptr;
| 1697 | |
  // Finally, bypass two innermost shifts, and perform the outermost shift on
  // the operands of the innermost shift.
  Instruction *NewAShr =
      BinaryOperator::Create(OldAShr.getOpcode(), X, NumLowBitsToSkip);
  NewAShr->copyIRFlags(HighBitExtract); // We can preserve 'exact'-ness.
  if (!HadTrunc)
    return NewAShr;

  Builder.Insert(NewAShr);
  return TruncInst::CreateTruncOrBitCast(NewAShr, OldAShr.getType());
| 1708 | } |
| 1709 | |
| 1710 | Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) { |
  if (Value *V = simplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);
| 1714 | |
  if (Instruction *X = foldVectorBinop(I))
    return X;
| 1717 | |
  if (Instruction *R = commonShiftTransforms(I))
    return R;
| 1720 | |
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  const APInt *ShAmtAPInt;
  if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
    unsigned ShAmt = ShAmtAPInt->getZExtValue();
| 1727 | |
    // If the shift amount equals the difference in width of the destination
    // and source scalar types:
    // ashr (shl (zext X), C), C --> sext X
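    // e.g. ashr (shl (zext i8 X to i32), 24), 24 --> sext i8 X to i32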
    Value *X;
    if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
        ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
      return new SExtInst(X, Ty);
| 1735 | |
    // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
    // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
    const APInt *ShOp1;
    if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
        ShOp1->ult(BitWidth)) {
      unsigned ShlAmt = ShOp1->getZExtValue();
      if (ShlAmt < ShAmt) {
        // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
        auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
        NewAShr->setIsExact(I.isExact());
        return NewAShr;
      }
      if (ShlAmt > ShAmt) {
        // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
        auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
        NewShl->setHasNoSignedWrap(true);
        return NewShl;
      }
    }
| 1757 | |
    if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
        ShOp1->ult(BitWidth)) {
      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
      // Oversized arithmetic shifts replicate the sign bit.
      AmtSum = std::min(AmtSum, BitWidth - 1);
      // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
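      // e.g. for i8: (X a>> 6) a>> 3 --> X a>> 7 (the sum 9 clamps to 7).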
      return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
    }
| 1766 | |
    if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
        (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
      // ashr (sext X), C --> sext (ashr X, C')
      Type *SrcTy = X->getType();
      ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
      Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
      return new SExtInst(NewSh, Ty);
    }
| 1775 | |
    if (ShAmt == BitWidth - 1) {
      // ashr i32 or(X,-X), 31 --> sext (X != 0)
      if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
        return new SExtInst(Builder.CreateIsNotNull(X), Ty);

      // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
      Value *Y;
      if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
        return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);

      // ashr iN (X - 1) & ~X, N-1 --> sext (X == 0)
      if (match(Op0, m_OneUse(m_c_And(m_Add(m_Value(X), m_AllOnes()),
                                      m_Not(m_Deferred(X))))))
        return new SExtInst(Builder.CreateIsNull(X), Ty);
    }
| 1791 | |
    const APInt *MulC;
    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC)))) &&
        (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
         MulC->logBase2() == ShAmt &&
         (ShAmt < BitWidth - 1))) /* Minus 1 for the sign bit */ {

      // ashr (mul nsw (X, 2^N + 1)), N -> add nsw (X, ashr(X, N))
      auto *NewAdd = BinaryOperator::CreateNSWAdd(
          X,
          Builder.CreateAShr(X, ConstantInt::get(Ty, ShAmt), "", I.isExact()));
      NewAdd->setHasNoUnsignedWrap(
          cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap());
      return NewAdd;
    }
  }
| 1807 | |
  const SimplifyQuery Q = SQ.getWithInstruction(&I);
  if (setShiftFlags(I, Q))
    return &I;
| 1811 | |
  // Prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
  // as the pattern to splat the lowest bit.
  // FIXME: iff X is already masked, we don't need the one-use check.
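  // e.g. for i8: (X << 7) a>> 7 is 0xFF when bit 0 of X is set and 0 when it
  // is clear, which is exactly -(X & 1).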
  Value *X;
  if (match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)) &&
      match(Op0, m_OneUse(m_Shl(m_Value(X),
                                m_SpecificIntAllowPoison(BitWidth - 1))))) {
    Constant *Mask = ConstantInt::get(Ty, 1);
    // Retain the knowledge about the ignored lanes.
    Mask = Constant::mergeUndefsWith(
        Constant::mergeUndefsWith(Mask, cast<Constant>(Op1)),
        cast<Constant>(cast<Instruction>(Op0)->getOperand(1)));
    X = Builder.CreateAnd(X, Mask);
    return BinaryOperator::CreateNeg(X);
  }
| 1827 | |
  if (Instruction *R =
          foldVariableSignZeroExtensionOfVariableHighBitExtract(I))
    return R;
| 1830 | |
  // See if we can turn a signed shr into an unsigned shr.
  if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), &I)) {
    Instruction *Lshr = BinaryOperator::CreateLShr(Op0, Op1);
    Lshr->setIsExact(I.isExact());
    return Lshr;
  }
| 1837 | |
  // ashr (xor %x, -1), %y --> xor (ashr %x, %y), -1
  if (match(Op0, m_OneUse(m_Not(m_Value(X))))) {
    // Note that we must drop 'exact'-ness of the shift!
    // Note that we can't keep undef's in -1 vector constant!
    auto *NewAShr = Builder.CreateAShr(X, Op1, Op0->getName() + ".not");
    return BinaryOperator::CreateNot(NewAShr);
  }
| 1845 | |
  return nullptr;
| 1847 | } |
| 1848 | |