| 1 | //===- InstCombineSimplifyDemanded.cpp ------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file contains logic for simplifying instructions based on information |
| 10 | // about how they are used. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "InstCombineInternal.h" |
| 15 | #include "llvm/Analysis/ValueTracking.h" |
| 16 | #include "llvm/IR/GetElementPtrTypeIterator.h" |
| 17 | #include "llvm/IR/IntrinsicInst.h" |
| 18 | #include "llvm/IR/PatternMatch.h" |
| 19 | #include "llvm/IR/ProfDataUtils.h" |
| 20 | #include "llvm/Support/KnownBits.h" |
| 21 | #include "llvm/Transforms/InstCombine/InstCombiner.h" |
| 22 | |
| 23 | using namespace llvm; |
| 24 | using namespace llvm::PatternMatch; |
| 25 | |
| 26 | #define DEBUG_TYPE "instcombine" |
| 27 | |
static cl::opt<bool>
    VerifyKnownBits("instcombine-verify-known-bits",
                    cl::desc("Verify that computeKnownBits() and "
                             "SimplifyDemandedBits() are consistent"),
                    cl::Hidden, cl::init(false));

static cl::opt<unsigned> SimplifyDemandedVectorEltsDepthLimit(
    "instcombine-simplify-vector-elts-depth",
    cl::desc(
        "Depth limit when simplifying vector instructions and their operands"),
    cl::Hidden, cl::init(10));
| 39 | |
| 40 | /// Check to see if the specified operand of the specified instruction is a |
| 41 | /// constant integer. If so, check to see if there are any bits set in the |
| 42 | /// constant that are not demanded. If so, shrink the constant and return true. |
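/// For example (illustrative): for 'and i8 %x, 60' (0b00111100) with only the
/// low 4 bits demanded, the constant would be shrunk to 12 (0b00001100).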
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  Value *Op = I->getOperand(OpNo);
  const APInt *C;
  if (!match(Op, m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  if (C->isSubsetOf(Demanded))
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));

  return true;
}
| 63 | |
/// Let N = 2 * M.
/// Given an N-bit integer representing a pack of two M-bit integers,
/// we can select one of the packed integers by right-shifting by either
/// zero or M (checking the shift amount is simplest when M is a power of 2),
/// and then isolating the lower M bits. In this case, we can represent the
/// shift as a select on whether the shr amount is nonzero.
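/// For example (illustrative), with N = 16 and M = 8:
///   %pack = or disjoint i16 (shl i16 %hi, 8), %lo  ; %lo known to fit in 8 bits
///   %res  = lshr i16 %pack, %amt                   ; %amt known to be 0 or 8
/// If only the low 8 bits of %res are demanded, this selects one of the two
/// packed bytes and can be rewritten as:
///   %amt.z = icmp eq i16 %amt, 0
///   %res   = select i1 %amt.z, i16 %lo, i16 %hi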
| 70 | static Value *simplifyShiftSelectingPackedElement(Instruction *I, |
| 71 | const APInt &DemandedMask, |
| 72 | InstCombinerImpl &IC, |
| 73 | unsigned Depth) { |
| 74 | assert(I->getOpcode() == Instruction::LShr && |
| 75 | "Only lshr instruction supported" ); |
| 76 | |
| 77 | uint64_t ShlAmt; |
| 78 | Value *Upper, *Lower; |
| 79 | if (!match(V: I->getOperand(i: 0), |
| 80 | P: m_OneUse(SubPattern: m_c_DisjointOr( |
| 81 | L: m_OneUse(SubPattern: m_Shl(L: m_Value(V&: Upper), R: m_ConstantInt(V&: ShlAmt))), |
| 82 | R: m_Value(V&: Lower))))) |
| 83 | return nullptr; |
| 84 | |
| 85 | if (!isPowerOf2_64(Value: ShlAmt)) |
| 86 | return nullptr; |
| 87 | |
| 88 | const uint64_t DemandedBitWidth = DemandedMask.getActiveBits(); |
| 89 | if (DemandedBitWidth > ShlAmt) |
| 90 | return nullptr; |
| 91 | |
  // Check that the demanded upper bits are not lost by the left shift.
| 93 | if (Upper->getType()->getScalarSizeInBits() < ShlAmt + DemandedBitWidth) |
| 94 | return nullptr; |
| 95 | |
| 96 | KnownBits KnownLowerBits = IC.computeKnownBits(V: Lower, CxtI: I, Depth); |
| 97 | if (!KnownLowerBits.getMaxValue().isIntN(N: ShlAmt)) |
| 98 | return nullptr; |
| 99 | |
| 100 | Value *ShrAmt = I->getOperand(i: 1); |
| 101 | KnownBits KnownShrBits = IC.computeKnownBits(V: ShrAmt, CxtI: I, Depth); |
| 102 | |
| 103 | // Verify that ShrAmt is either exactly ShlAmt (which is a power of 2) or |
| 104 | // zero. |
| 105 | if (~KnownShrBits.Zero != ShlAmt) |
| 106 | return nullptr; |
| 107 | |
| 108 | IRBuilderBase::InsertPointGuard Guard(IC.Builder); |
| 109 | IC.Builder.SetInsertPoint(I); |
| 110 | Value *ShrAmtZ = |
| 111 | IC.Builder.CreateICmpEQ(LHS: ShrAmt, RHS: Constant::getNullValue(Ty: ShrAmt->getType()), |
| 112 | Name: ShrAmt->getName() + ".z" ); |
  // There is no existing !prof metadata from which we can derive the !prof
  // metadata for this select.
| 115 | Value *Select = IC.Builder.CreateSelectWithUnknownProfile(C: ShrAmtZ, True: Lower, |
| 116 | False: Upper, DEBUG_TYPE); |
| 117 | Select->takeName(V: I); |
| 118 | return Select; |
| 119 | } |
| 120 | |
| 121 | /// Returns the bitwidth of the given scalar or pointer type. For vector types, |
| 122 | /// returns the element type's bitwidth. |
| 123 | static unsigned getBitWidth(Type *Ty, const DataLayout &DL) { |
| 124 | if (unsigned BitWidth = Ty->getScalarSizeInBits()) |
| 125 | return BitWidth; |
| 126 | |
| 127 | return DL.getPointerTypeSizeInBits(Ty); |
| 128 | } |
| 129 | |
| 130 | /// Inst is an integer instruction that SimplifyDemandedBits knows about. See if |
| 131 | /// the instruction has any properties that allow us to simplify its operands. |
| 132 | bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst, |
| 133 | KnownBits &Known) { |
| 134 | APInt DemandedMask(APInt::getAllOnes(numBits: Known.getBitWidth())); |
| 135 | Value *V = SimplifyDemandedUseBits(I: &Inst, DemandedMask, Known, |
| 136 | Q: SQ.getWithInstruction(I: &Inst)); |
| 137 | if (!V) return false; |
| 138 | if (V == &Inst) return true; |
| 139 | replaceInstUsesWith(I&: Inst, V); |
| 140 | return true; |
| 141 | } |
| 142 | |
| 143 | /// Inst is an integer instruction that SimplifyDemandedBits knows about. See if |
| 144 | /// the instruction has any properties that allow us to simplify its operands. |
| 145 | bool InstCombinerImpl::SimplifyDemandedInstructionBits(Instruction &Inst) { |
| 146 | KnownBits Known(getBitWidth(Ty: Inst.getType(), DL)); |
| 147 | return SimplifyDemandedInstructionBits(Inst, Known); |
| 148 | } |
| 149 | |
| 150 | /// This form of SimplifyDemandedBits simplifies the specified instruction |
| 151 | /// operand if possible, updating it in place. It returns true if it made any |
| 152 | /// change and false otherwise. |
| 153 | bool InstCombinerImpl::SimplifyDemandedBits(Instruction *I, unsigned OpNo, |
| 154 | const APInt &DemandedMask, |
| 155 | KnownBits &Known, |
| 156 | const SimplifyQuery &Q, |
| 157 | unsigned Depth) { |
| 158 | Use &U = I->getOperandUse(i: OpNo); |
| 159 | Value *V = U.get(); |
| 160 | if (isa<Constant>(Val: V)) { |
| 161 | llvm::computeKnownBits(V, Known, Q, Depth); |
| 162 | return false; |
| 163 | } |
| 164 | |
| 165 | Known.resetAll(); |
| 166 | if (DemandedMask.isZero()) { |
| 167 | // Not demanding any bits from V. |
| 168 | replaceUse(U, NewValue: UndefValue::get(T: V->getType())); |
| 169 | return true; |
| 170 | } |
| 171 | |
| 172 | Instruction *VInst = dyn_cast<Instruction>(Val: V); |
| 173 | if (!VInst) { |
| 174 | llvm::computeKnownBits(V, Known, Q, Depth); |
| 175 | return false; |
| 176 | } |
| 177 | |
| 178 | if (Depth == MaxAnalysisRecursionDepth) |
| 179 | return false; |
| 180 | |
| 181 | Value *NewVal; |
| 182 | if (VInst->hasOneUse()) { |
| 183 | // If the instruction has one use, we can directly simplify it. |
| 184 | NewVal = SimplifyDemandedUseBits(I: VInst, DemandedMask, Known, Q, Depth); |
| 185 | } else { |
| 186 | // If there are multiple uses of this instruction, then we can simplify |
| 187 | // VInst to some other value, but not modify the instruction. |
| 188 | NewVal = |
| 189 | SimplifyMultipleUseDemandedBits(I: VInst, DemandedMask, Known, Q, Depth); |
| 190 | } |
| 191 | if (!NewVal) return false; |
| 192 | if (Instruction* OpInst = dyn_cast<Instruction>(Val&: U)) |
| 193 | salvageDebugInfo(I&: *OpInst); |
| 194 | |
| 195 | replaceUse(U, NewValue: NewVal); |
| 196 | return true; |
| 197 | } |
| 198 | |
| 199 | /// This function attempts to replace V with a simpler value based on the |
| 200 | /// demanded bits. When this function is called, it is known that only the bits |
| 201 | /// set in DemandedMask of the result of V are ever used downstream. |
| 202 | /// Consequently, depending on the mask and V, it may be possible to replace V |
/// with a constant or one of its operands. In such cases, this function
/// returns the simpler value. In all other cases, it analyzes the expression
/// and sets Known: Known.One contains all the bits that are known to be one
/// in the expression, and Known.Zero contains all the bits that are known to
/// be zero. These are provided to potentially allow the caller (which might
/// recursively be SimplifyDemandedBits itself) to simplify the
/// expression.
| 210 | /// Known.One and Known.Zero always follow the invariant that: |
| 211 | /// Known.One & Known.Zero == 0. |
| 212 | /// That is, a bit can't be both 1 and 0. The bits in Known.One and Known.Zero |
| 213 | /// are accurate even for bits not in DemandedMask. Note |
| 214 | /// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all |
| 215 | /// be the same. |
| 216 | /// |
| 217 | /// This returns null if it did not change anything and it permits no |
| 218 | /// simplification. This returns V itself if it did some simplification of V's |
| 219 | /// operands based on the information about what bits are demanded. This returns |
| 220 | /// some other non-null value if it found out that V is equal to another value |
| 221 | /// in the context where the specified bits are demanded, but not for all users. |
| 222 | Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I, |
| 223 | const APInt &DemandedMask, |
| 224 | KnownBits &Known, |
| 225 | const SimplifyQuery &Q, |
| 226 | unsigned Depth) { |
| 227 | assert(I != nullptr && "Null pointer of Value???" ); |
| 228 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth" ); |
| 229 | uint32_t BitWidth = DemandedMask.getBitWidth(); |
| 230 | Type *VTy = I->getType(); |
| 231 | assert( |
| 232 | (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) && |
| 233 | Known.getBitWidth() == BitWidth && |
| 234 | "Value *V, DemandedMask and Known must have same BitWidth" ); |
| 235 | |
| 236 | KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth); |
| 237 | |
| 238 | // Update flags after simplifying an operand based on the fact that some high |
| 239 | // order bits are not demanded. |
| 240 | auto disableWrapFlagsBasedOnUnusedHighBits = [](Instruction *I, |
| 241 | unsigned NLZ) { |
| 242 | if (NLZ > 0) { |
| 243 | // Disable the nsw and nuw flags here: We can no longer guarantee that |
| 244 | // we won't wrap after simplification. Removing the nsw/nuw flags is |
| 245 | // legal here because the top bit is not demanded. |
| 246 | I->setHasNoSignedWrap(false); |
| 247 | I->setHasNoUnsignedWrap(false); |
| 248 | } |
| 249 | return I; |
| 250 | }; |
| 251 | |
| 252 | // If the high-bits of an ADD/SUB/MUL are not demanded, then we do not care |
| 253 | // about the high bits of the operands. |
| 254 | auto simplifyOperandsBasedOnUnusedHighBits = [&](APInt &DemandedFromOps) { |
| 255 | unsigned NLZ = DemandedMask.countl_zero(); |
| 256 | // Right fill the mask of bits for the operands to demand the most |
| 257 | // significant bit and all those below it. |
| 258 | DemandedFromOps = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: BitWidth - NLZ); |
| 259 | if (ShrinkDemandedConstant(I, OpNo: 0, Demanded: DemandedFromOps) || |
| 260 | SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedFromOps, Known&: LHSKnown, Q, Depth: Depth + 1) || |
| 261 | ShrinkDemandedConstant(I, OpNo: 1, Demanded: DemandedFromOps) || |
| 262 | SimplifyDemandedBits(I, OpNo: 1, DemandedMask: DemandedFromOps, Known&: RHSKnown, Q, Depth: Depth + 1)) { |
| 263 | disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); |
| 264 | return true; |
| 265 | } |
| 266 | return false; |
| 267 | }; |
| 268 | |
| 269 | switch (I->getOpcode()) { |
| 270 | default: |
| 271 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 272 | break; |
| 273 | case Instruction::And: { |
    // If either the LHS or the RHS is zero, the result is zero.
| 275 | if (SimplifyDemandedBits(I, OpNo: 1, DemandedMask, Known&: RHSKnown, Q, Depth: Depth + 1) || |
| 276 | SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMask & ~RHSKnown.Zero, Known&: LHSKnown, Q, |
| 277 | Depth: Depth + 1)) |
| 278 | return I; |
| 279 | |
| 280 | Known = analyzeKnownBitsFromAndXorOr(I: cast<Operator>(Val: I), KnownLHS: LHSKnown, KnownRHS: RHSKnown, |
| 281 | SQ: Q, Depth); |
| 282 | |
| 283 | // If the client is only demanding bits that we know, return the known |
| 284 | // constant. |
| 285 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 286 | return Constant::getIntegerValue(Ty: VTy, V: Known.One); |
| 287 | |
| 288 | // If all of the demanded bits are known 1 on one side, return the other. |
| 289 | // These bits cannot contribute to the result of the 'and'. |
| 290 | if (DemandedMask.isSubsetOf(RHS: LHSKnown.Zero | RHSKnown.One)) |
| 291 | return I->getOperand(i: 0); |
| 292 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.Zero | LHSKnown.One)) |
| 293 | return I->getOperand(i: 1); |
| 294 | |
| 295 | // If the RHS is a constant, see if we can simplify it. |
| 296 | if (ShrinkDemandedConstant(I, OpNo: 1, Demanded: DemandedMask & ~LHSKnown.Zero)) |
| 297 | return I; |
| 298 | |
| 299 | break; |
| 300 | } |
| 301 | case Instruction::Or: { |
    // If either the LHS or the RHS is one, the result is one.
| 303 | if (SimplifyDemandedBits(I, OpNo: 1, DemandedMask, Known&: RHSKnown, Q, Depth: Depth + 1) || |
| 304 | SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMask & ~RHSKnown.One, Known&: LHSKnown, Q, |
| 305 | Depth: Depth + 1)) { |
      // The disjoint flag may no longer hold.
| 307 | I->dropPoisonGeneratingFlags(); |
| 308 | return I; |
| 309 | } |
| 310 | |
| 311 | Known = analyzeKnownBitsFromAndXorOr(I: cast<Operator>(Val: I), KnownLHS: LHSKnown, KnownRHS: RHSKnown, |
| 312 | SQ: Q, Depth); |
| 313 | |
| 314 | // If the client is only demanding bits that we know, return the known |
| 315 | // constant. |
| 316 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 317 | return Constant::getIntegerValue(Ty: VTy, V: Known.One); |
| 318 | |
| 319 | // If all of the demanded bits are known zero on one side, return the other. |
| 320 | // These bits cannot contribute to the result of the 'or'. |
| 321 | if (DemandedMask.isSubsetOf(RHS: LHSKnown.One | RHSKnown.Zero)) |
| 322 | return I->getOperand(i: 0); |
| 323 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.One | LHSKnown.Zero)) |
| 324 | return I->getOperand(i: 1); |
| 325 | |
| 326 | // If the RHS is a constant, see if we can simplify it. |
| 327 | if (ShrinkDemandedConstant(I, OpNo: 1, Demanded: DemandedMask)) |
| 328 | return I; |
| 329 | |
| 330 | // Infer disjoint flag if no common bits are set. |
| 331 | if (!cast<PossiblyDisjointInst>(Val: I)->isDisjoint()) { |
| 332 | WithCache<const Value *> LHSCache(I->getOperand(i: 0), LHSKnown), |
| 333 | RHSCache(I->getOperand(i: 1), RHSKnown); |
| 334 | if (haveNoCommonBitsSet(LHSCache, RHSCache, SQ: Q)) { |
| 335 | cast<PossiblyDisjointInst>(Val: I)->setIsDisjoint(true); |
| 336 | return I; |
| 337 | } |
| 338 | } |
| 339 | |
| 340 | break; |
| 341 | } |
| 342 | case Instruction::Xor: { |
| 343 | if (SimplifyDemandedBits(I, OpNo: 1, DemandedMask, Known&: RHSKnown, Q, Depth: Depth + 1) || |
| 344 | SimplifyDemandedBits(I, OpNo: 0, DemandedMask, Known&: LHSKnown, Q, Depth: Depth + 1)) |
| 345 | return I; |
| 346 | Value *LHS, *RHS; |
| 347 | if (DemandedMask == 1 && |
| 348 | match(V: I->getOperand(i: 0), P: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: LHS))) && |
| 349 | match(V: I->getOperand(i: 1), P: m_Intrinsic<Intrinsic::ctpop>(Op0: m_Value(V&: RHS)))) { |
| 350 | // (ctpop(X) ^ ctpop(Y)) & 1 --> ctpop(X^Y) & 1 |
| 351 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 352 | Builder.SetInsertPoint(I); |
| 353 | auto *Xor = Builder.CreateXor(LHS, RHS); |
| 354 | return Builder.CreateUnaryIntrinsic(ID: Intrinsic::ctpop, V: Xor); |
| 355 | } |
| 356 | |
| 357 | Known = analyzeKnownBitsFromAndXorOr(I: cast<Operator>(Val: I), KnownLHS: LHSKnown, KnownRHS: RHSKnown, |
| 358 | SQ: Q, Depth); |
| 359 | |
| 360 | // If the client is only demanding bits that we know, return the known |
| 361 | // constant. |
| 362 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 363 | return Constant::getIntegerValue(Ty: VTy, V: Known.One); |
| 364 | |
| 365 | // If all of the demanded bits are known zero on one side, return the other. |
| 366 | // These bits cannot contribute to the result of the 'xor'. |
| 367 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.Zero)) |
| 368 | return I->getOperand(i: 0); |
| 369 | if (DemandedMask.isSubsetOf(RHS: LHSKnown.Zero)) |
| 370 | return I->getOperand(i: 1); |
| 371 | |
| 372 | // If all of the demanded bits are known to be zero on one side or the |
| 373 | // other, turn this into an *inclusive* or. |
| 374 | // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 |
| 375 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.Zero | LHSKnown.Zero)) { |
| 376 | Instruction *Or = |
| 377 | BinaryOperator::CreateOr(V1: I->getOperand(i: 0), V2: I->getOperand(i: 1)); |
| 378 | if (DemandedMask.isAllOnes()) |
| 379 | cast<PossiblyDisjointInst>(Val: Or)->setIsDisjoint(true); |
| 380 | Or->takeName(V: I); |
| 381 | return InsertNewInstWith(New: Or, Old: I->getIterator()); |
| 382 | } |
| 383 | |
| 384 | // If all of the demanded bits on one side are known, and all of the set |
| 385 | // bits on that side are also known to be set on the other side, turn this |
| 386 | // into an AND, as we know the bits will be cleared. |
| 387 | // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 |
| 388 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.Zero|RHSKnown.One) && |
| 389 | RHSKnown.One.isSubsetOf(RHS: LHSKnown.One)) { |
| 390 | Constant *AndC = Constant::getIntegerValue(Ty: VTy, |
| 391 | V: ~RHSKnown.One & DemandedMask); |
| 392 | Instruction *And = BinaryOperator::CreateAnd(V1: I->getOperand(i: 0), V2: AndC); |
| 393 | return InsertNewInstWith(New: And, Old: I->getIterator()); |
| 394 | } |
| 395 | |
| 396 | // If the RHS is a constant, see if we can change it. Don't alter a -1 |
| 397 | // constant because that's a canonical 'not' op, and that is better for |
| 398 | // combining, SCEV, and codegen. |
| 399 | const APInt *C; |
| 400 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: C)) && !C->isAllOnes()) { |
| 401 | if ((*C | ~DemandedMask).isAllOnes()) { |
| 402 | // Force bits to 1 to create a 'not' op. |
| 403 | I->setOperand(i: 1, Val: ConstantInt::getAllOnesValue(Ty: VTy)); |
| 404 | return I; |
| 405 | } |
| 406 | // If we can't turn this into a 'not', try to shrink the constant. |
| 407 | if (ShrinkDemandedConstant(I, OpNo: 1, Demanded: DemandedMask)) |
| 408 | return I; |
| 409 | } |
| 410 | |
| 411 | // If our LHS is an 'and' and if it has one use, and if any of the bits we |
| 412 | // are flipping are known to be set, then the xor is just resetting those |
| 413 | // bits to zero. We can just knock out bits from the 'and' and the 'xor', |
| 414 | // simplifying both of them. |
| 415 | if (Instruction *LHSInst = dyn_cast<Instruction>(Val: I->getOperand(i: 0))) { |
| 416 | ConstantInt *AndRHS, *XorRHS; |
| 417 | if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() && |
| 418 | match(V: I->getOperand(i: 1), P: m_ConstantInt(CI&: XorRHS)) && |
| 419 | match(V: LHSInst->getOperand(i: 1), P: m_ConstantInt(CI&: AndRHS)) && |
| 420 | (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) { |
| 421 | APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask); |
| 422 | |
| 423 | Constant *AndC = ConstantInt::get(Ty: VTy, V: NewMask & AndRHS->getValue()); |
| 424 | Instruction *NewAnd = BinaryOperator::CreateAnd(V1: I->getOperand(i: 0), V2: AndC); |
| 425 | InsertNewInstWith(New: NewAnd, Old: I->getIterator()); |
| 426 | |
| 427 | Constant *XorC = ConstantInt::get(Ty: VTy, V: NewMask & XorRHS->getValue()); |
| 428 | Instruction *NewXor = BinaryOperator::CreateXor(V1: NewAnd, V2: XorC); |
| 429 | return InsertNewInstWith(New: NewXor, Old: I->getIterator()); |
| 430 | } |
| 431 | } |
| 432 | break; |
| 433 | } |
| 434 | case Instruction::Select: { |
| 435 | if (SimplifyDemandedBits(I, OpNo: 2, DemandedMask, Known&: RHSKnown, Q, Depth: Depth + 1) || |
| 436 | SimplifyDemandedBits(I, OpNo: 1, DemandedMask, Known&: LHSKnown, Q, Depth: Depth + 1)) |
| 437 | return I; |
| 438 | |
| 439 | // If the operands are constants, see if we can simplify them. |
| 440 | // This is similar to ShrinkDemandedConstant, but for a select we want to |
| 441 | // try to keep the selected constants the same as icmp value constants, if |
    // we can. This helps avoid breaking apart (or helps put back together)
    // canonical patterns like min and max.
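    // Illustrative example (assuming only the low 4 bits are demanded):
    //   %c = icmp ult i32 %x, 24
    //   %s = select i1 %c, i32 40, i32 %y
    // Since (24 & 0xF) == (40 & 0xF), we prefer to replace 40 with the icmp
    // constant 24 rather than shrink it to 8, keeping the select constant
    // aligned with the compare constant (e.g. for min/max-like patterns).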
| 444 | auto CanonicalizeSelectConstant = [](Instruction *I, unsigned OpNo, |
| 445 | const APInt &DemandedMask) { |
| 446 | const APInt *SelC; |
| 447 | if (!match(V: I->getOperand(i: OpNo), P: m_APInt(Res&: SelC))) |
| 448 | return false; |
| 449 | |
| 450 | // Get the constant out of the ICmp, if there is one. |
| 451 | // Only try this when exactly 1 operand is a constant (if both operands |
| 452 | // are constant, the icmp should eventually simplify). Otherwise, we may |
| 453 | // invert the transform that reduces set bits and infinite-loop. |
| 454 | Value *X; |
| 455 | const APInt *CmpC; |
| 456 | if (!match(V: I->getOperand(i: 0), P: m_ICmp(L: m_Value(V&: X), R: m_APInt(Res&: CmpC))) || |
| 457 | isa<Constant>(Val: X) || CmpC->getBitWidth() != SelC->getBitWidth()) |
| 458 | return ShrinkDemandedConstant(I, OpNo, Demanded: DemandedMask); |
| 459 | |
| 460 | // If the constant is already the same as the ICmp, leave it as-is. |
| 461 | if (*CmpC == *SelC) |
| 462 | return false; |
| 463 | // If the constants are not already the same, but can be with the demand |
| 464 | // mask, use the constant value from the ICmp. |
| 465 | if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) { |
| 466 | I->setOperand(i: OpNo, Val: ConstantInt::get(Ty: I->getType(), V: *CmpC)); |
| 467 | return true; |
| 468 | } |
| 469 | return ShrinkDemandedConstant(I, OpNo, Demanded: DemandedMask); |
| 470 | }; |
| 471 | if (CanonicalizeSelectConstant(I, 1, DemandedMask) || |
| 472 | CanonicalizeSelectConstant(I, 2, DemandedMask)) |
| 473 | return I; |
| 474 | |
| 475 | // Only known if known in both the LHS and RHS. |
| 476 | adjustKnownBitsForSelectArm(Known&: LHSKnown, Cond: I->getOperand(i: 0), Arm: I->getOperand(i: 1), |
| 477 | /*Invert=*/false, Q, Depth); |
| 478 | adjustKnownBitsForSelectArm(Known&: RHSKnown, Cond: I->getOperand(i: 0), Arm: I->getOperand(i: 2), |
| 479 | /*Invert=*/true, Q, Depth); |
| 480 | Known = LHSKnown.intersectWith(RHS: RHSKnown); |
| 481 | break; |
| 482 | } |
| 483 | case Instruction::Trunc: { |
| 484 | // If we do not demand the high bits of a right-shifted and truncated value, |
| 485 | // then we may be able to truncate it before the shift. |
| 486 | Value *X; |
| 487 | const APInt *C; |
| 488 | if (match(V: I->getOperand(i: 0), P: m_OneUse(SubPattern: m_LShr(L: m_Value(V&: X), R: m_APInt(Res&: C))))) { |
      // The shift amount must be valid (not poison) in the narrow type, and
      // it must not exceed the number of high bits that are not demanded.
| 491 | if (C->ult(RHS: VTy->getScalarSizeInBits()) && |
| 492 | C->ule(RHS: DemandedMask.countl_zero())) { |
| 493 | // trunc (lshr X, C) --> lshr (trunc X), C |
| 494 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 495 | Builder.SetInsertPoint(I); |
| 496 | Value *Trunc = Builder.CreateTrunc(V: X, DestTy: VTy); |
| 497 | return Builder.CreateLShr(LHS: Trunc, RHS: C->getZExtValue()); |
| 498 | } |
| 499 | } |
| 500 | } |
| 501 | [[fallthrough]]; |
| 502 | case Instruction::ZExt: { |
| 503 | unsigned SrcBitWidth = I->getOperand(i: 0)->getType()->getScalarSizeInBits(); |
| 504 | |
| 505 | APInt InputDemandedMask = DemandedMask.zextOrTrunc(width: SrcBitWidth); |
| 506 | KnownBits InputKnown(SrcBitWidth); |
| 507 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: InputDemandedMask, Known&: InputKnown, Q, |
| 508 | Depth: Depth + 1)) { |
| 509 | // For zext nneg, we may have dropped the instruction which made the |
| 510 | // input non-negative. |
| 511 | I->dropPoisonGeneratingFlags(); |
| 512 | return I; |
| 513 | } |
| 514 | assert(InputKnown.getBitWidth() == SrcBitWidth && "Src width changed?" ); |
| 515 | if (I->getOpcode() == Instruction::ZExt && I->hasNonNeg() && |
| 516 | !InputKnown.isNegative()) |
| 517 | InputKnown.makeNonNegative(); |
| 518 | Known = InputKnown.zextOrTrunc(BitWidth); |
| 519 | |
| 520 | break; |
| 521 | } |
| 522 | case Instruction::SExt: { |
| 523 | // Compute the bits in the result that are not present in the input. |
| 524 | unsigned SrcBitWidth = I->getOperand(i: 0)->getType()->getScalarSizeInBits(); |
| 525 | |
| 526 | APInt InputDemandedBits = DemandedMask.trunc(width: SrcBitWidth); |
| 527 | |
| 528 | // If any of the sign extended bits are demanded, we know that the sign |
| 529 | // bit is demanded. |
| 530 | if (DemandedMask.getActiveBits() > SrcBitWidth) |
| 531 | InputDemandedBits.setBit(SrcBitWidth-1); |
| 532 | |
| 533 | KnownBits InputKnown(SrcBitWidth); |
| 534 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: InputDemandedBits, Known&: InputKnown, Q, Depth: Depth + 1)) |
| 535 | return I; |
| 536 | |
    // If the input sign bit is known zero, or if none of the sign-extended
    // bits are demanded, convert this into a zero extension.
| 539 | if (InputKnown.isNonNegative() || |
| 540 | DemandedMask.getActiveBits() <= SrcBitWidth) { |
| 541 | // Convert to ZExt cast. |
| 542 | CastInst *NewCast = new ZExtInst(I->getOperand(i: 0), VTy); |
| 543 | NewCast->takeName(V: I); |
| 544 | return InsertNewInstWith(New: NewCast, Old: I->getIterator()); |
| 545 | } |
| 546 | |
| 547 | // If the sign bit of the input is known set or clear, then we know the |
| 548 | // top bits of the result. |
| 549 | Known = InputKnown.sext(BitWidth); |
| 550 | break; |
| 551 | } |
| 552 | case Instruction::Add: { |
| 553 | if ((DemandedMask & 1) == 0) { |
| 554 | // If we do not need the low bit, try to convert bool math to logic: |
| 555 | // add iN (zext i1 X), (sext i1 Y) --> sext (~X & Y) to iN |
| 556 | Value *X, *Y; |
| 557 | if (match(V: I, P: m_c_Add(L: m_OneUse(SubPattern: m_ZExt(Op: m_Value(V&: X))), |
| 558 | R: m_OneUse(SubPattern: m_SExt(Op: m_Value(V&: Y))))) && |
| 559 | X->getType()->isIntOrIntVectorTy(BitWidth: 1) && X->getType() == Y->getType()) { |
| 560 | // Truth table for inputs and output signbits: |
| 561 | // X:0 | X:1 |
| 562 | // ---------- |
| 563 | // Y:0 | 0 | 0 | |
| 564 | // Y:1 | -1 | 0 | |
| 565 | // ---------- |
| 566 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 567 | Builder.SetInsertPoint(I); |
| 568 | Value *AndNot = Builder.CreateAnd(LHS: Builder.CreateNot(V: X), RHS: Y); |
| 569 | return Builder.CreateSExt(V: AndNot, DestTy: VTy); |
| 570 | } |
| 571 | |
| 572 | // add iN (sext i1 X), (sext i1 Y) --> sext (X | Y) to iN |
| 573 | if (match(V: I, P: m_Add(L: m_SExt(Op: m_Value(V&: X)), R: m_SExt(Op: m_Value(V&: Y)))) && |
| 574 | X->getType()->isIntOrIntVectorTy(BitWidth: 1) && X->getType() == Y->getType() && |
| 575 | (I->getOperand(i: 0)->hasOneUse() || I->getOperand(i: 1)->hasOneUse())) { |
| 576 | |
| 577 | // Truth table for inputs and output signbits: |
| 578 | // X:0 | X:1 |
| 579 | // ----------- |
| 580 | // Y:0 | -1 | -1 | |
| 581 | // Y:1 | -1 | 0 | |
| 582 | // ----------- |
| 583 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 584 | Builder.SetInsertPoint(I); |
| 585 | Value *Or = Builder.CreateOr(LHS: X, RHS: Y); |
| 586 | return Builder.CreateSExt(V: Or, DestTy: VTy); |
| 587 | } |
| 588 | } |
| 589 | |
| 590 | // Right fill the mask of bits for the operands to demand the most |
| 591 | // significant bit and all those below it. |
| 592 | unsigned NLZ = DemandedMask.countl_zero(); |
| 593 | APInt DemandedFromOps = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: BitWidth - NLZ); |
| 594 | if (ShrinkDemandedConstant(I, OpNo: 1, Demanded: DemandedFromOps) || |
| 595 | SimplifyDemandedBits(I, OpNo: 1, DemandedMask: DemandedFromOps, Known&: RHSKnown, Q, Depth: Depth + 1)) |
| 596 | return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); |
| 597 | |
| 598 | // If low order bits are not demanded and known to be zero in one operand, |
| 599 | // then we don't need to demand them from the other operand, since they |
| 600 | // can't cause overflow into any bits that are demanded in the result. |
| 601 | unsigned NTZ = (~DemandedMask & RHSKnown.Zero).countr_one(); |
| 602 | APInt DemandedFromLHS = DemandedFromOps; |
| 603 | DemandedFromLHS.clearLowBits(loBits: NTZ); |
| 604 | if (ShrinkDemandedConstant(I, OpNo: 0, Demanded: DemandedFromLHS) || |
| 605 | SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedFromLHS, Known&: LHSKnown, Q, Depth: Depth + 1)) |
| 606 | return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); |
| 607 | |
| 608 | // If we are known to be adding zeros to every bit below |
| 609 | // the highest demanded bit, we just return the other side. |
| 610 | if (DemandedFromOps.isSubsetOf(RHS: RHSKnown.Zero)) |
| 611 | return I->getOperand(i: 0); |
| 612 | if (DemandedFromOps.isSubsetOf(RHS: LHSKnown.Zero)) |
| 613 | return I->getOperand(i: 1); |
| 614 | |
    // (add X, C) --> (xor X, C) IFF C is equal to the top bit of the DemandedMask
| 616 | { |
| 617 | const APInt *C; |
| 618 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: C)) && |
| 619 | C->isOneBitSet(BitNo: DemandedMask.getActiveBits() - 1)) { |
| 620 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 621 | Builder.SetInsertPoint(I); |
| 622 | return Builder.CreateXor(LHS: I->getOperand(i: 0), RHS: ConstantInt::get(Ty: VTy, V: *C)); |
| 623 | } |
| 624 | } |
| 625 | |
| 626 | // Otherwise just compute the known bits of the result. |
| 627 | bool NSW = cast<OverflowingBinaryOperator>(Val: I)->hasNoSignedWrap(); |
| 628 | bool NUW = cast<OverflowingBinaryOperator>(Val: I)->hasNoUnsignedWrap(); |
| 629 | Known = KnownBits::add(LHS: LHSKnown, RHS: RHSKnown, NSW, NUW); |
| 630 | break; |
| 631 | } |
| 632 | case Instruction::Sub: { |
| 633 | // Right fill the mask of bits for the operands to demand the most |
| 634 | // significant bit and all those below it. |
| 635 | unsigned NLZ = DemandedMask.countl_zero(); |
| 636 | APInt DemandedFromOps = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: BitWidth - NLZ); |
| 637 | if (ShrinkDemandedConstant(I, OpNo: 1, Demanded: DemandedFromOps) || |
| 638 | SimplifyDemandedBits(I, OpNo: 1, DemandedMask: DemandedFromOps, Known&: RHSKnown, Q, Depth: Depth + 1)) |
| 639 | return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); |
| 640 | |
| 641 | // If low order bits are not demanded and are known to be zero in RHS, |
| 642 | // then we don't need to demand them from LHS, since they can't cause a |
| 643 | // borrow from any bits that are demanded in the result. |
| 644 | unsigned NTZ = (~DemandedMask & RHSKnown.Zero).countr_one(); |
| 645 | APInt DemandedFromLHS = DemandedFromOps; |
| 646 | DemandedFromLHS.clearLowBits(loBits: NTZ); |
| 647 | if (ShrinkDemandedConstant(I, OpNo: 0, Demanded: DemandedFromLHS) || |
| 648 | SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedFromLHS, Known&: LHSKnown, Q, Depth: Depth + 1)) |
| 649 | return disableWrapFlagsBasedOnUnusedHighBits(I, NLZ); |
| 650 | |
| 651 | // If we are known to be subtracting zeros from every bit below |
| 652 | // the highest demanded bit, we just return the other side. |
| 653 | if (DemandedFromOps.isSubsetOf(RHS: RHSKnown.Zero)) |
| 654 | return I->getOperand(i: 0); |
| 655 | // We can't do this with the LHS for subtraction, unless we are only |
| 656 | // demanding the LSB. |
| 657 | if (DemandedFromOps.isOne() && DemandedFromOps.isSubsetOf(RHS: LHSKnown.Zero)) |
| 658 | return I->getOperand(i: 1); |
| 659 | |
| 660 | // Canonicalize sub mask, X -> ~X |
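    // Illustrative example: 'sub i32 255, %x' with only the low 8 bits
    // demanded becomes 'xor i32 %x, -1' (a 'not'), since subtraction from an
    // all-ones low-bit mask cannot borrow within those bits.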
| 661 | const APInt *LHSC; |
| 662 | if (match(V: I->getOperand(i: 0), P: m_LowBitMask(V&: LHSC)) && |
| 663 | DemandedFromOps.isSubsetOf(RHS: *LHSC)) { |
| 664 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 665 | Builder.SetInsertPoint(I); |
| 666 | return Builder.CreateNot(V: I->getOperand(i: 1)); |
| 667 | } |
| 668 | |
| 669 | // Otherwise just compute the known bits of the result. |
| 670 | bool NSW = cast<OverflowingBinaryOperator>(Val: I)->hasNoSignedWrap(); |
| 671 | bool NUW = cast<OverflowingBinaryOperator>(Val: I)->hasNoUnsignedWrap(); |
| 672 | Known = KnownBits::sub(LHS: LHSKnown, RHS: RHSKnown, NSW, NUW); |
| 673 | break; |
| 674 | } |
| 675 | case Instruction::Mul: { |
| 676 | APInt DemandedFromOps; |
| 677 | if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps)) |
| 678 | return I; |
| 679 | |
| 680 | if (DemandedMask.isPowerOf2()) { |
| 681 | // The LSB of X*Y is set only if (X & 1) == 1 and (Y & 1) == 1. |
| 682 | // If we demand exactly one bit N and we have "X * (C' << N)" where C' is |
| 683 | // odd (has LSB set), then the left-shifted low bit of X is the answer. |
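      // Illustrative example: if only bit 4 is demanded and the constant is
      // 48 (3 << 4), then bit 4 of 'mul %x, 48' equals bit 0 of %x, so
      // 'shl %x, 4' produces the demanded bit.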
| 684 | unsigned CTZ = DemandedMask.countr_zero(); |
| 685 | const APInt *C; |
| 686 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: C)) && C->countr_zero() == CTZ) { |
| 687 | Constant *ShiftC = ConstantInt::get(Ty: VTy, V: CTZ); |
| 688 | Instruction *Shl = BinaryOperator::CreateShl(V1: I->getOperand(i: 0), V2: ShiftC); |
| 689 | return InsertNewInstWith(New: Shl, Old: I->getIterator()); |
| 690 | } |
| 691 | } |
| 692 | // For a squared value "X * X", the bottom 2 bits are 0 and X[0] because: |
| 693 | // X * X is odd iff X is odd. |
| 694 | // 'Quadratic Reciprocity': X * X -> 0 for bit[1] |
| 695 | if (I->getOperand(i: 0) == I->getOperand(i: 1) && DemandedMask.ult(RHS: 4)) { |
| 696 | Constant *One = ConstantInt::get(Ty: VTy, V: 1); |
| 697 | Instruction *And1 = BinaryOperator::CreateAnd(V1: I->getOperand(i: 0), V2: One); |
| 698 | return InsertNewInstWith(New: And1, Old: I->getIterator()); |
| 699 | } |
| 700 | |
| 701 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 702 | break; |
| 703 | } |
| 704 | case Instruction::Shl: { |
| 705 | const APInt *SA; |
| 706 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: SA))) { |
| 707 | const APInt *ShrAmt; |
| 708 | if (match(V: I->getOperand(i: 0), P: m_Shr(L: m_Value(), R: m_APInt(Res&: ShrAmt)))) |
| 709 | if (Instruction *Shr = dyn_cast<Instruction>(Val: I->getOperand(i: 0))) |
| 710 | if (Value *R = simplifyShrShlDemandedBits(Shr, ShrOp1: *ShrAmt, Shl: I, ShlOp1: *SA, |
| 711 | DemandedMask, Known)) |
| 712 | return R; |
| 713 | |
| 714 | // Do not simplify if shl is part of funnel-shift pattern |
| 715 | if (I->hasOneUse()) { |
| 716 | auto *Inst = dyn_cast<Instruction>(Val: I->user_back()); |
| 717 | if (Inst && Inst->getOpcode() == BinaryOperator::Or) { |
| 718 | if (auto Opt = convertOrOfShiftsToFunnelShift(Or&: *Inst)) { |
| 719 | auto [IID, FShiftArgs] = *Opt; |
| 720 | if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) && |
| 721 | FShiftArgs[0] == FShiftArgs[1]) { |
| 722 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 723 | break; |
| 724 | } |
| 725 | } |
| 726 | } |
| 727 | } |
| 728 | |
      // If we only want bits that already match the sign bit, then we don't
      // need to shift.
| 731 | uint64_t ShiftAmt = SA->getLimitedValue(Limit: BitWidth - 1); |
| 732 | if (DemandedMask.countr_zero() >= ShiftAmt) { |
| 733 | if (I->hasNoSignedWrap()) { |
| 734 | unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero(); |
| 735 | unsigned SignBits = |
| 736 | ComputeNumSignBits(Op: I->getOperand(i: 0), CxtI: Q.CxtI, Depth: Depth + 1); |
| 737 | if (SignBits > ShiftAmt && SignBits - ShiftAmt >= NumHiDemandedBits) |
| 738 | return I->getOperand(i: 0); |
| 739 | } |
| 740 | |
| 741 | // If we can pre-shift a right-shifted constant to the left without |
| 742 | // losing any high bits and we don't demand the low bits, then eliminate |
| 743 | // the left-shift: |
| 744 | // (C >> X) << LeftShiftAmtC --> (C << LeftShiftAmtC) >> X |
| 745 | Value *X; |
| 746 | Constant *C; |
| 747 | if (match(V: I->getOperand(i: 0), P: m_LShr(L: m_ImmConstant(C), R: m_Value(V&: X)))) { |
| 748 | Constant *LeftShiftAmtC = ConstantInt::get(Ty: VTy, V: ShiftAmt); |
| 749 | Constant *NewC = ConstantFoldBinaryOpOperands(Opcode: Instruction::Shl, LHS: C, |
| 750 | RHS: LeftShiftAmtC, DL); |
| 751 | if (ConstantFoldBinaryOpOperands(Opcode: Instruction::LShr, LHS: NewC, |
| 752 | RHS: LeftShiftAmtC, DL) == C) { |
| 753 | Instruction *Lshr = BinaryOperator::CreateLShr(V1: NewC, V2: X); |
| 754 | return InsertNewInstWith(New: Lshr, Old: I->getIterator()); |
| 755 | } |
| 756 | } |
| 757 | } |
| 758 | |
| 759 | APInt DemandedMaskIn(DemandedMask.lshr(shiftAmt: ShiftAmt)); |
| 760 | |
| 761 | // If the shift is NUW/NSW, then it does demand the high bits. |
| 762 | ShlOperator *IOp = cast<ShlOperator>(Val: I); |
| 763 | if (IOp->hasNoSignedWrap()) |
| 764 | DemandedMaskIn.setHighBits(ShiftAmt+1); |
| 765 | else if (IOp->hasNoUnsignedWrap()) |
| 766 | DemandedMaskIn.setHighBits(ShiftAmt); |
| 767 | |
| 768 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMaskIn, Known, Q, Depth: Depth + 1)) |
| 769 | return I; |
| 770 | |
| 771 | Known = KnownBits::shl(LHS: Known, |
| 772 | RHS: KnownBits::makeConstant(C: APInt(BitWidth, ShiftAmt)), |
| 773 | /* NUW */ IOp->hasNoUnsignedWrap(), |
| 774 | /* NSW */ IOp->hasNoSignedWrap()); |
| 775 | } else { |
| 776 | // This is a variable shift, so we can't shift the demand mask by a known |
| 777 | // amount. But if we are not demanding high bits, then we are not |
| 778 | // demanding those bits from the pre-shifted operand either. |
| 779 | if (unsigned CTLZ = DemandedMask.countl_zero()) { |
| 780 | APInt DemandedFromOp(APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: BitWidth - CTLZ)); |
| 781 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedFromOp, Known, Q, Depth: Depth + 1)) { |
| 782 | // We can't guarantee that nsw/nuw hold after simplifying the operand. |
| 783 | I->dropPoisonGeneratingFlags(); |
| 784 | return I; |
| 785 | } |
| 786 | } |
| 787 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 788 | } |
| 789 | break; |
| 790 | } |
| 791 | case Instruction::LShr: { |
| 792 | const APInt *SA; |
| 793 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: SA))) { |
| 794 | uint64_t ShiftAmt = SA->getLimitedValue(Limit: BitWidth-1); |
| 795 | |
| 796 | // Do not simplify if lshr is part of funnel-shift pattern |
| 797 | if (I->hasOneUse()) { |
| 798 | auto *Inst = dyn_cast<Instruction>(Val: I->user_back()); |
| 799 | if (Inst && Inst->getOpcode() == BinaryOperator::Or) { |
| 800 | if (auto Opt = convertOrOfShiftsToFunnelShift(Or&: *Inst)) { |
| 801 | auto [IID, FShiftArgs] = *Opt; |
| 802 | if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) && |
| 803 | FShiftArgs[0] == FShiftArgs[1]) { |
| 804 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 805 | break; |
| 806 | } |
| 807 | } |
| 808 | } |
| 809 | } |
| 810 | |
| 811 | // If we are just demanding the shifted sign bit and below, then this can |
| 812 | // be treated as an ASHR in disguise. |
| 813 | if (DemandedMask.countl_zero() >= ShiftAmt) { |
| 814 | // If we only want bits that already match the signbit then we don't |
| 815 | // need to shift. |
| 816 | unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero(); |
| 817 | unsigned SignBits = |
| 818 | ComputeNumSignBits(Op: I->getOperand(i: 0), CxtI: Q.CxtI, Depth: Depth + 1); |
| 819 | if (SignBits >= NumHiDemandedBits) |
| 820 | return I->getOperand(i: 0); |
| 821 | |
| 822 | // If we can pre-shift a left-shifted constant to the right without |
| 823 | // losing any low bits (we already know we don't demand the high bits), |
| 824 | // then eliminate the right-shift: |
| 825 | // (C << X) >> RightShiftAmtC --> (C >> RightShiftAmtC) << X |
| 826 | Value *X; |
| 827 | Constant *C; |
| 828 | if (match(V: I->getOperand(i: 0), P: m_Shl(L: m_ImmConstant(C), R: m_Value(V&: X)))) { |
| 829 | Constant *RightShiftAmtC = ConstantInt::get(Ty: VTy, V: ShiftAmt); |
| 830 | Constant *NewC = ConstantFoldBinaryOpOperands(Opcode: Instruction::LShr, LHS: C, |
| 831 | RHS: RightShiftAmtC, DL); |
| 832 | if (ConstantFoldBinaryOpOperands(Opcode: Instruction::Shl, LHS: NewC, |
| 833 | RHS: RightShiftAmtC, DL) == C) { |
| 834 | Instruction *Shl = BinaryOperator::CreateShl(V1: NewC, V2: X); |
| 835 | return InsertNewInstWith(New: Shl, Old: I->getIterator()); |
| 836 | } |
| 837 | } |
| 838 | |
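        // If the constant factor has at least ShiftAmt trailing zero bits,
        // then (X * Factor) >> ShiftAmt equals X * (Factor >> ShiftAmt) on
        // all of the demanded (low) bits, so fold the shift into the multiply.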
| 839 | const APInt *Factor; |
| 840 | if (match(V: I->getOperand(i: 0), |
| 841 | P: m_OneUse(SubPattern: m_Mul(L: m_Value(V&: X), R: m_APInt(Res&: Factor)))) && |
| 842 | Factor->countr_zero() >= ShiftAmt) { |
| 843 | BinaryOperator *Mul = BinaryOperator::CreateMul( |
| 844 | V1: X, V2: ConstantInt::get(Ty: X->getType(), V: Factor->lshr(shiftAmt: ShiftAmt))); |
| 845 | return InsertNewInstWith(New: Mul, Old: I->getIterator()); |
| 846 | } |
| 847 | } |
| 848 | |
| 849 | // Unsigned shift right. |
| 850 | APInt DemandedMaskIn(DemandedMask.shl(shiftAmt: ShiftAmt)); |
| 851 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMaskIn, Known, Q, Depth: Depth + 1)) { |
        // The exact flag may no longer hold.
| 853 | I->dropPoisonGeneratingFlags(); |
| 854 | return I; |
| 855 | } |
| 856 | Known >>= ShiftAmt; |
| 857 | if (ShiftAmt) |
| 858 | Known.Zero.setHighBits(ShiftAmt); // high bits known zero. |
| 859 | break; |
| 860 | } |
| 861 | if (Value *V = |
| 862 | simplifyShiftSelectingPackedElement(I, DemandedMask, IC&: *this, Depth)) |
| 863 | return V; |
| 864 | |
| 865 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 866 | break; |
| 867 | } |
| 868 | case Instruction::AShr: { |
| 869 | unsigned SignBits = ComputeNumSignBits(Op: I->getOperand(i: 0), CxtI: Q.CxtI, Depth: Depth + 1); |
| 870 | |
| 871 | // If we only want bits that already match the signbit then we don't need |
| 872 | // to shift. |
| 873 | unsigned NumHiDemandedBits = BitWidth - DemandedMask.countr_zero(); |
| 874 | if (SignBits >= NumHiDemandedBits) |
| 875 | return I->getOperand(i: 0); |
| 876 | |
| 877 | // If this is an arithmetic shift right and only the low-bit is set, we can |
| 878 | // always convert this into a logical shr, even if the shift amount is |
| 879 | // variable. The low bit of the shift cannot be an input sign bit unless |
| 880 | // the shift amount is >= the size of the datatype, which is undefined. |
| 881 | if (DemandedMask.isOne()) { |
| 882 | // Perform the logical shift right. |
| 883 | Instruction *NewVal = BinaryOperator::CreateLShr( |
| 884 | V1: I->getOperand(i: 0), V2: I->getOperand(i: 1), Name: I->getName()); |
| 885 | return InsertNewInstWith(New: NewVal, Old: I->getIterator()); |
| 886 | } |
| 887 | |
| 888 | const APInt *SA; |
| 889 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: SA))) { |
| 890 | uint32_t ShiftAmt = SA->getLimitedValue(Limit: BitWidth-1); |
| 891 | |
| 892 | // Signed shift right. |
| 893 | APInt DemandedMaskIn(DemandedMask.shl(shiftAmt: ShiftAmt)); |
| 894 | // If any of the bits being shifted in are demanded, then we should set |
| 895 | // the sign bit as demanded. |
| 896 | bool ShiftedInBitsDemanded = DemandedMask.countl_zero() < ShiftAmt; |
| 897 | if (ShiftedInBitsDemanded) |
| 898 | DemandedMaskIn.setSignBit(); |
| 899 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMaskIn, Known, Q, Depth: Depth + 1)) { |
        // The exact flag may no longer hold.
| 901 | I->dropPoisonGeneratingFlags(); |
| 902 | return I; |
| 903 | } |
| 904 | |
| 905 | // If the input sign bit is known to be zero, or if none of the shifted in |
| 906 | // bits are demanded, turn this into an unsigned shift right. |
| 907 | if (Known.Zero[BitWidth - 1] || !ShiftedInBitsDemanded) { |
| 908 | BinaryOperator *LShr = BinaryOperator::CreateLShr(V1: I->getOperand(i: 0), |
| 909 | V2: I->getOperand(i: 1)); |
| 910 | LShr->setIsExact(cast<BinaryOperator>(Val: I)->isExact()); |
| 911 | LShr->takeName(V: I); |
| 912 | return InsertNewInstWith(New: LShr, Old: I->getIterator()); |
| 913 | } |
| 914 | |
| 915 | Known = KnownBits::ashr( |
| 916 | LHS: Known, RHS: KnownBits::makeConstant(C: APInt(BitWidth, ShiftAmt)), |
| 917 | ShAmtNonZero: ShiftAmt != 0, Exact: I->isExact()); |
| 918 | } else { |
| 919 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 920 | } |
| 921 | break; |
| 922 | } |
| 923 | case Instruction::UDiv: { |
| 924 | // UDiv doesn't demand low bits that are zero in the divisor. |
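    // e.g. for 'udiv i32 %x, 24' the divisor has 3 trailing zero bits, so the
    // low 3 bits of %x cannot affect the quotient.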
| 925 | const APInt *SA; |
| 926 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: SA))) { |
| 927 | // TODO: Take the demanded mask of the result into account. |
| 928 | unsigned RHSTrailingZeros = SA->countr_zero(); |
| 929 | APInt DemandedMaskIn = |
| 930 | APInt::getHighBitsSet(numBits: BitWidth, hiBitsSet: BitWidth - RHSTrailingZeros); |
| 931 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMaskIn, Known&: LHSKnown, Q, Depth: Depth + 1)) { |
        // We can't guarantee that "exact" is still true after changing the
        // dividend.
| 934 | I->dropPoisonGeneratingFlags(); |
| 935 | return I; |
| 936 | } |
| 937 | |
| 938 | Known = KnownBits::udiv(LHS: LHSKnown, RHS: KnownBits::makeConstant(C: *SA), |
| 939 | Exact: cast<BinaryOperator>(Val: I)->isExact()); |
| 940 | } else { |
| 941 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 942 | } |
| 943 | break; |
| 944 | } |
| 945 | case Instruction::SRem: { |
| 946 | const APInt *Rem; |
| 947 | if (match(V: I->getOperand(i: 1), P: m_APInt(Res&: Rem)) && Rem->isPowerOf2()) { |
| 948 | if (DemandedMask.ult(RHS: *Rem)) // srem won't affect demanded bits |
| 949 | return I->getOperand(i: 0); |
| 950 | |
| 951 | APInt LowBits = *Rem - 1; |
| 952 | APInt Mask2 = LowBits | APInt::getSignMask(BitWidth); |
| 953 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: Mask2, Known&: LHSKnown, Q, Depth: Depth + 1)) |
| 954 | return I; |
| 955 | Known = KnownBits::srem(LHS: LHSKnown, RHS: KnownBits::makeConstant(C: *Rem)); |
| 956 | break; |
| 957 | } |
| 958 | |
| 959 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 960 | break; |
| 961 | } |
| 962 | case Instruction::Call: { |
| 963 | bool KnownBitsComputed = false; |
| 964 | if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I)) { |
| 965 | switch (II->getIntrinsicID()) { |
| 966 | case Intrinsic::abs: { |
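      // abs(X) has the same lowest bit as X (negation does not change bit 0),
      // so if only bit 0 is demanded we can use X directly.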
| 967 | if (DemandedMask == 1) |
| 968 | return II->getArgOperand(i: 0); |
| 969 | break; |
| 970 | } |
| 971 | case Intrinsic::ctpop: { |
| 972 | // Checking if the number of clear bits is odd (parity)? If the type has |
| 973 | // an even number of bits, that's the same as checking if the number of |
| 974 | // set bits is odd, so we can eliminate the 'not' op. |
| 975 | Value *X; |
| 976 | if (DemandedMask == 1 && VTy->getScalarSizeInBits() % 2 == 0 && |
| 977 | match(V: II->getArgOperand(i: 0), P: m_Not(V: m_Value(V&: X)))) { |
| 978 | Function *Ctpop = Intrinsic::getOrInsertDeclaration( |
| 979 | M: II->getModule(), id: Intrinsic::ctpop, Tys: VTy); |
| 980 | return InsertNewInstWith(New: CallInst::Create(Func: Ctpop, Args: {X}), Old: I->getIterator()); |
| 981 | } |
| 982 | break; |
| 983 | } |
| 984 | case Intrinsic::bswap: { |
| 985 | // If the only bits demanded come from one byte of the bswap result, |
| 986 | // just shift the input byte into position to eliminate the bswap. |
| 987 | unsigned NLZ = DemandedMask.countl_zero(); |
| 988 | unsigned NTZ = DemandedMask.countr_zero(); |
| 989 | |
| 990 | // Round NTZ down to the next byte. If we have 11 trailing zeros, then |
| 991 | // we need all the bits down to bit 8. Likewise, round NLZ. If we |
| 992 | // have 14 leading zeros, round to 8. |
| 993 | NLZ = alignDown(Value: NLZ, Align: 8); |
| 994 | NTZ = alignDown(Value: NTZ, Align: 8); |
| 995 | // If we need exactly one byte, we can do this transformation. |
| 996 | if (BitWidth - NLZ - NTZ == 8) { |
| 997 | // Replace this with either a left or right shift to get the byte into |
| 998 | // the right place. |
| 999 | Instruction *NewVal; |
| 1000 | if (NLZ > NTZ) |
| 1001 | NewVal = BinaryOperator::CreateLShr( |
| 1002 | V1: II->getArgOperand(i: 0), V2: ConstantInt::get(Ty: VTy, V: NLZ - NTZ)); |
| 1003 | else |
| 1004 | NewVal = BinaryOperator::CreateShl( |
| 1005 | V1: II->getArgOperand(i: 0), V2: ConstantInt::get(Ty: VTy, V: NTZ - NLZ)); |
| 1006 | NewVal->takeName(V: I); |
| 1007 | return InsertNewInstWith(New: NewVal, Old: I->getIterator()); |
| 1008 | } |
| 1009 | break; |
| 1010 | } |
| 1011 | case Intrinsic::ptrmask: { |
| 1012 | unsigned MaskWidth = I->getOperand(i: 1)->getType()->getScalarSizeInBits(); |
| 1013 | RHSKnown = KnownBits(MaskWidth); |
      // If either the LHS or the RHS is zero, the result is zero.
| 1015 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask, Known&: LHSKnown, Q, Depth: Depth + 1) || |
| 1016 | SimplifyDemandedBits( |
| 1017 | I, OpNo: 1, DemandedMask: (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(width: MaskWidth), |
| 1018 | Known&: RHSKnown, Q, Depth: Depth + 1)) |
| 1019 | return I; |
| 1020 | |
| 1021 | // TODO: Should be 1-extend |
| 1022 | RHSKnown = RHSKnown.anyextOrTrunc(BitWidth); |
| 1023 | |
| 1024 | Known = LHSKnown & RHSKnown; |
| 1025 | KnownBitsComputed = true; |
| 1026 | |
| 1027 | // If the client is only demanding bits we know to be zero, return |
| 1028 | // `llvm.ptrmask(p, 0)`. We can't return `null` here due to pointer |
| 1029 | // provenance, but making the mask zero will be easily optimizable in |
| 1030 | // the backend. |
| 1031 | if (DemandedMask.isSubsetOf(RHS: Known.Zero) && |
| 1032 | !match(V: I->getOperand(i: 1), P: m_Zero())) |
| 1033 | return replaceOperand( |
| 1034 | I&: *I, OpNum: 1, V: Constant::getNullValue(Ty: I->getOperand(i: 1)->getType())); |
| 1035 | |
| 1036 | // Mask in demanded space does nothing. |
| 1037 | // NOTE: We may have attributes associated with the return value of the |
| 1038 | // llvm.ptrmask intrinsic that will be lost when we just return the |
| 1039 | // operand. We should try to preserve them. |
| 1040 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.One | LHSKnown.Zero)) |
| 1041 | return I->getOperand(i: 0); |
| 1042 | |
| 1043 | // If the RHS is a constant, see if we can simplify it. |
| 1044 | if (ShrinkDemandedConstant( |
| 1045 | I, OpNo: 1, Demanded: (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(width: MaskWidth))) |
| 1046 | return I; |
| 1047 | |
| 1048 | // Combine: |
| 1049 | // (ptrmask (getelementptr i8, ptr p, imm i), imm mask) |
| 1050 | // -> (ptrmask (getelementptr i8, ptr p, imm (i & mask)), imm mask) |
| 1051 | // where only the low bits known to be zero in the pointer are changed |
| 1052 | Value *InnerPtr; |
| 1053 | uint64_t GEPIndex; |
| 1054 | uint64_t PtrMaskImmediate; |
| 1055 | if (match(V: I, P: m_Intrinsic<Intrinsic::ptrmask>( |
| 1056 | Op0: m_PtrAdd(PointerOp: m_Value(V&: InnerPtr), OffsetOp: m_ConstantInt(V&: GEPIndex)), |
| 1057 | Op1: m_ConstantInt(V&: PtrMaskImmediate)))) { |
| 1058 | |
| 1059 | LHSKnown = computeKnownBits(V: InnerPtr, CxtI: I, Depth: Depth + 1); |
| 1060 | if (!LHSKnown.isZero()) { |
| 1061 | const unsigned trailingZeros = LHSKnown.countMinTrailingZeros(); |
| 1062 | uint64_t PointerAlignBits = (uint64_t(1) << trailingZeros) - 1; |
| 1063 | |
| 1064 | uint64_t HighBitsGEPIndex = GEPIndex & ~PointerAlignBits; |
| 1065 | uint64_t MaskedLowBitsGEPIndex = |
| 1066 | GEPIndex & PointerAlignBits & PtrMaskImmediate; |
| 1067 | |
| 1068 | uint64_t MaskedGEPIndex = HighBitsGEPIndex | MaskedLowBitsGEPIndex; |
| 1069 | |
| 1070 | if (MaskedGEPIndex != GEPIndex) { |
| 1071 | auto *GEP = cast<GEPOperator>(Val: II->getArgOperand(i: 0)); |
| 1072 | Builder.SetInsertPoint(I); |
| 1073 | Type *GEPIndexType = |
| 1074 | DL.getIndexType(PtrTy: GEP->getPointerOperand()->getType()); |
| 1075 | Value *MaskedGEP = Builder.CreateGEP( |
| 1076 | Ty: GEP->getSourceElementType(), Ptr: InnerPtr, |
| 1077 | IdxList: ConstantInt::get(Ty: GEPIndexType, V: MaskedGEPIndex), |
| 1078 | Name: GEP->getName(), NW: GEP->isInBounds()); |
| 1079 | |
| 1080 | replaceOperand(I&: *I, OpNum: 0, V: MaskedGEP); |
| 1081 | return I; |
| 1082 | } |
| 1083 | } |
| 1084 | } |
| 1085 | |
| 1086 | break; |
| 1087 | } |
| 1088 | |
| 1089 | case Intrinsic::fshr: |
| 1090 | case Intrinsic::fshl: { |
| 1091 | const APInt *SA; |
| 1092 | if (!match(V: I->getOperand(i: 2), P: m_APInt(Res&: SA))) |
| 1093 | break; |
| 1094 | |
| 1095 | // Normalize to funnel shift left. APInt shifts of BitWidth are well- |
| 1096 | // defined, so no need to special-case zero shifts here. |
| 1097 | uint64_t ShiftAmt = SA->urem(RHS: BitWidth); |
| 1098 | if (II->getIntrinsicID() == Intrinsic::fshr) |
| 1099 | ShiftAmt = BitWidth - ShiftAmt; |
| 1100 | |
| 1101 | APInt DemandedMaskLHS(DemandedMask.lshr(shiftAmt: ShiftAmt)); |
| 1102 | APInt DemandedMaskRHS(DemandedMask.shl(shiftAmt: BitWidth - ShiftAmt)); |
| 1103 | if (I->getOperand(i: 0) != I->getOperand(i: 1)) { |
| 1104 | if (SimplifyDemandedBits(I, OpNo: 0, DemandedMask: DemandedMaskLHS, Known&: LHSKnown, Q, |
| 1105 | Depth: Depth + 1) || |
| 1106 | SimplifyDemandedBits(I, OpNo: 1, DemandedMask: DemandedMaskRHS, Known&: RHSKnown, Q, |
| 1107 | Depth: Depth + 1)) { |
| 1108 | // Range attribute or metadata may no longer hold. |
| 1109 | I->dropPoisonGeneratingAnnotations(); |
| 1110 | return I; |
| 1111 | } |
| 1112 | } else { // fshl is a rotate |
| 1113 | // Avoid converting rotate into funnel shift. |
| 1114 | // Only simplify if one operand is constant. |
| 1115 | LHSKnown = computeKnownBits(V: I->getOperand(i: 0), CxtI: I, Depth: Depth + 1); |
| 1116 | if (DemandedMaskLHS.isSubsetOf(RHS: LHSKnown.Zero | LHSKnown.One) && |
| 1117 | !match(V: I->getOperand(i: 0), P: m_SpecificInt(V: LHSKnown.One))) { |
| 1118 | replaceOperand(I&: *I, OpNum: 0, V: Constant::getIntegerValue(Ty: VTy, V: LHSKnown.One)); |
| 1119 | return I; |
| 1120 | } |
| 1121 | |
| 1122 | RHSKnown = computeKnownBits(V: I->getOperand(i: 1), CxtI: I, Depth: Depth + 1); |
| 1123 | if (DemandedMaskRHS.isSubsetOf(RHS: RHSKnown.Zero | RHSKnown.One) && |
| 1124 | !match(V: I->getOperand(i: 1), P: m_SpecificInt(V: RHSKnown.One))) { |
| 1125 | replaceOperand(I&: *I, OpNum: 1, V: Constant::getIntegerValue(Ty: VTy, V: RHSKnown.One)); |
| 1126 | return I; |
| 1127 | } |
| 1128 | } |
| 1129 | |
| 1130 | LHSKnown <<= ShiftAmt; |
| 1131 | RHSKnown >>= BitWidth - ShiftAmt; |
| 1132 | Known = LHSKnown.unionWith(RHS: RHSKnown); |
| 1133 | KnownBitsComputed = true; |
| 1134 | break; |
| 1135 | } |
| 1136 | case Intrinsic::umax: { |
| 1137 | // UMax(A, C) == A if ... |
      // The lowest non-zero bit of DemandedMask is higher than the highest
| 1139 | // non-zero bit of C. |
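      // e.g. 'umax(%a, 7)' with DemandedMask 0xF8: when the constant is
      // selected, %a is at most 7, so %a and the result agree on all
      // demanded bits.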
| 1140 | const APInt *C; |
| 1141 | unsigned CTZ = DemandedMask.countr_zero(); |
| 1142 | if (match(V: II->getArgOperand(i: 1), P: m_APInt(Res&: C)) && |
| 1143 | CTZ >= C->getActiveBits()) |
| 1144 | return II->getArgOperand(i: 0); |
| 1145 | break; |
| 1146 | } |
| 1147 | case Intrinsic::umin: { |
| 1148 | // UMin(A, C) == A if ... |
      // The lowest non-zero bit of DemandedMask is higher than the highest
| 1150 | // non-one bit of C. |
| 1151 | // This comes from using DeMorgans on the above umax example. |
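      // e.g. 'umin(%a, 0xFFFFFFF0)' with DemandedMask 0xFFFFFFF0: when the
      // constant is selected, %a is larger than it, so %a and the result
      // agree on all demanded bits (they are all ones in both).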
| 1152 | const APInt *C; |
| 1153 | unsigned CTZ = DemandedMask.countr_zero(); |
| 1154 | if (match(V: II->getArgOperand(i: 1), P: m_APInt(Res&: C)) && |
| 1155 | CTZ >= C->getBitWidth() - C->countl_one()) |
| 1156 | return II->getArgOperand(i: 0); |
| 1157 | break; |
| 1158 | } |
| 1159 | default: { |
| 1160 | // Handle target specific intrinsics |
| 1161 | std::optional<Value *> V = targetSimplifyDemandedUseBitsIntrinsic( |
| 1162 | II&: *II, DemandedMask, Known, KnownBitsComputed); |
| 1163 | if (V) |
| 1164 | return *V; |
| 1165 | break; |
| 1166 | } |
| 1167 | } |
| 1168 | } |
| 1169 | |
| 1170 | if (!KnownBitsComputed) |
| 1171 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 1172 | break; |
| 1173 | } |
| 1174 | } |
| 1175 | |
| 1176 | if (I->getType()->isPointerTy()) { |
| 1177 | Align Alignment = I->getPointerAlignment(DL); |
| 1178 | Known.Zero.setLowBits(Log2(A: Alignment)); |
| 1179 | } |
| 1180 | |
| 1181 | // If the client is only demanding bits that we know, return the known |
| 1182 | // constant. We can't directly simplify pointers as a constant because of |
| 1183 | // pointer provenance. |
| 1184 | // TODO: We could return `(inttoptr const)` for pointers. |
| 1185 | if (!I->getType()->isPointerTy() && |
| 1186 | DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 1187 | return Constant::getIntegerValue(Ty: VTy, V: Known.One); |
| 1188 | |
| 1189 | if (VerifyKnownBits) { |
| 1190 | KnownBits ReferenceKnown = llvm::computeKnownBits(V: I, Q, Depth); |
| 1191 | if (Known != ReferenceKnown) { |
| 1192 | errs() << "Mismatched known bits for " << *I << " in " |
| 1193 | << I->getFunction()->getName() << "\n" ; |
| 1194 | errs() << "computeKnownBits(): " << ReferenceKnown << "\n" ; |
| 1195 | errs() << "SimplifyDemandedBits(): " << Known << "\n" ; |
| 1196 | std::abort(); |
| 1197 | } |
| 1198 | } |
| 1199 | |
| 1200 | return nullptr; |
| 1201 | } |
| 1202 | |
| 1203 | /// Helper routine of SimplifyDemandedUseBits. It computes the known bits of
| 1204 | /// \p I and tries to handle simplifications that can be done based on
| 1205 | /// DemandedMask without modifying the instruction.
| 1206 | Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits( |
| 1207 | Instruction *I, const APInt &DemandedMask, KnownBits &Known, |
| 1208 | const SimplifyQuery &Q, unsigned Depth) { |
| 1209 | unsigned BitWidth = DemandedMask.getBitWidth(); |
| 1210 | Type *ITy = I->getType(); |
| 1211 | |
| 1212 | KnownBits LHSKnown(BitWidth); |
| 1213 | KnownBits RHSKnown(BitWidth); |
| 1214 | |
| 1215 |   // Even though we can't simplify this instruction in every user's context,
| 1216 |   // we can at least compute the known bits, and we can do simplifications
| 1217 |   // that apply to *just* the one user if we know that this instruction has a
| 1218 |   // simpler value in that context.
| 1219 | switch (I->getOpcode()) { |
| 1220 | case Instruction::And: { |
| 1221 | llvm::computeKnownBits(V: I->getOperand(i: 1), Known&: RHSKnown, Q, Depth: Depth + 1); |
| 1222 | llvm::computeKnownBits(V: I->getOperand(i: 0), Known&: LHSKnown, Q, Depth: Depth + 1); |
| 1223 | Known = analyzeKnownBitsFromAndXorOr(I: cast<Operator>(Val: I), KnownLHS: LHSKnown, KnownRHS: RHSKnown, |
| 1224 | SQ: Q, Depth); |
| 1225 | computeKnownBitsFromContext(V: I, Known, Q, Depth); |
| 1226 | |
| 1227 | // If the client is only demanding bits that we know, return the known |
| 1228 | // constant. |
| 1229 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 1230 | return Constant::getIntegerValue(Ty: ITy, V: Known.One); |
| 1231 | |
| 1232 | // If all of the demanded bits are known 1 on one side, return the other. |
| 1233 | // These bits cannot contribute to the result of the 'and' in this context. |
| 1234 | if (DemandedMask.isSubsetOf(RHS: LHSKnown.Zero | RHSKnown.One)) |
| 1235 | return I->getOperand(i: 0); |
| 1236 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.Zero | LHSKnown.One)) |
| 1237 | return I->getOperand(i: 1); |
| 1238 | |
| 1239 | break; |
| 1240 | } |
| 1241 | case Instruction::Or: { |
| 1242 | llvm::computeKnownBits(V: I->getOperand(i: 1), Known&: RHSKnown, Q, Depth: Depth + 1); |
| 1243 | llvm::computeKnownBits(V: I->getOperand(i: 0), Known&: LHSKnown, Q, Depth: Depth + 1); |
| 1244 | Known = analyzeKnownBitsFromAndXorOr(I: cast<Operator>(Val: I), KnownLHS: LHSKnown, KnownRHS: RHSKnown, |
| 1245 | SQ: Q, Depth); |
| 1246 | computeKnownBitsFromContext(V: I, Known, Q, Depth); |
| 1247 | |
| 1248 | // If the client is only demanding bits that we know, return the known |
| 1249 | // constant. |
| 1250 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 1251 | return Constant::getIntegerValue(Ty: ITy, V: Known.One); |
| 1252 | |
| 1253 | // We can simplify (X|Y) -> X or Y in the user's context if we know that |
| 1254 | // only bits from X or Y are demanded. |
| 1255 | // If all of the demanded bits are known zero on one side, return the other. |
| 1256 | // These bits cannot contribute to the result of the 'or' in this context. |
| 1257 | if (DemandedMask.isSubsetOf(RHS: LHSKnown.One | RHSKnown.Zero)) |
| 1258 | return I->getOperand(i: 0); |
| 1259 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.One | LHSKnown.Zero)) |
| 1260 | return I->getOperand(i: 1); |
| 1261 | |
| 1262 | break; |
| 1263 | } |
| 1264 | case Instruction::Xor: { |
| 1265 | llvm::computeKnownBits(V: I->getOperand(i: 1), Known&: RHSKnown, Q, Depth: Depth + 1); |
| 1266 | llvm::computeKnownBits(V: I->getOperand(i: 0), Known&: LHSKnown, Q, Depth: Depth + 1); |
| 1267 | Known = analyzeKnownBitsFromAndXorOr(I: cast<Operator>(Val: I), KnownLHS: LHSKnown, KnownRHS: RHSKnown, |
| 1268 | SQ: Q, Depth); |
| 1269 | computeKnownBitsFromContext(V: I, Known, Q, Depth); |
| 1270 | |
| 1271 | // If the client is only demanding bits that we know, return the known |
| 1272 | // constant. |
| 1273 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 1274 | return Constant::getIntegerValue(Ty: ITy, V: Known.One); |
| 1275 | |
| 1276 | // We can simplify (X^Y) -> X or Y in the user's context if we know that |
| 1277 | // only bits from X or Y are demanded. |
| 1278 | // If all of the demanded bits are known zero on one side, return the other. |
| 1279 | if (DemandedMask.isSubsetOf(RHS: RHSKnown.Zero)) |
| 1280 | return I->getOperand(i: 0); |
| 1281 | if (DemandedMask.isSubsetOf(RHS: LHSKnown.Zero)) |
| 1282 | return I->getOperand(i: 1); |
| 1283 | |
| 1284 | break; |
| 1285 | } |
| 1286 | case Instruction::Add: { |
| 1287 | unsigned NLZ = DemandedMask.countl_zero(); |
| 1288 | APInt DemandedFromOps = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: BitWidth - NLZ); |
| 1289 | |
| 1290 | // If an operand adds zeros to every bit below the highest demanded bit, |
| 1291 | // that operand doesn't change the result. Return the other side. |
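|      |     // E.g., if only the low nibble is demanded (DemandedMask = 0xF) and the
|      |     // low 4 bits of op1 are known to be zero, then (op0 + op1) and op0 agree
|      |     // on all demanded bits, so op0 can be used directly.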
| 1292 | llvm::computeKnownBits(V: I->getOperand(i: 1), Known&: RHSKnown, Q, Depth: Depth + 1); |
| 1293 | if (DemandedFromOps.isSubsetOf(RHS: RHSKnown.Zero)) |
| 1294 | return I->getOperand(i: 0); |
| 1295 | |
| 1296 | llvm::computeKnownBits(V: I->getOperand(i: 0), Known&: LHSKnown, Q, Depth: Depth + 1); |
| 1297 | if (DemandedFromOps.isSubsetOf(RHS: LHSKnown.Zero)) |
| 1298 | return I->getOperand(i: 1); |
| 1299 | |
| 1300 | bool NSW = cast<OverflowingBinaryOperator>(Val: I)->hasNoSignedWrap(); |
| 1301 | bool NUW = cast<OverflowingBinaryOperator>(Val: I)->hasNoUnsignedWrap(); |
| 1302 | Known = KnownBits::add(LHS: LHSKnown, RHS: RHSKnown, NSW, NUW); |
| 1303 | computeKnownBitsFromContext(V: I, Known, Q, Depth); |
| 1304 | break; |
| 1305 | } |
| 1306 | case Instruction::Sub: { |
| 1307 | unsigned NLZ = DemandedMask.countl_zero(); |
| 1308 | APInt DemandedFromOps = APInt::getLowBitsSet(numBits: BitWidth, loBitsSet: BitWidth - NLZ); |
| 1309 | |
| 1310 | // If an operand subtracts zeros from every bit below the highest demanded |
| 1311 | // bit, that operand doesn't change the result. Return the other side. |
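|      |     // E.g., if DemandedMask = 0xF and the low 4 bits of op1 are known zero,
|      |     // (op0 - op1) has the same low 4 bits as op0, so op0 can be used directly.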
| 1312 | llvm::computeKnownBits(V: I->getOperand(i: 1), Known&: RHSKnown, Q, Depth: Depth + 1); |
| 1313 | if (DemandedFromOps.isSubsetOf(RHS: RHSKnown.Zero)) |
| 1314 | return I->getOperand(i: 0); |
| 1315 | |
| 1316 | bool NSW = cast<OverflowingBinaryOperator>(Val: I)->hasNoSignedWrap(); |
| 1317 | bool NUW = cast<OverflowingBinaryOperator>(Val: I)->hasNoUnsignedWrap(); |
| 1318 | llvm::computeKnownBits(V: I->getOperand(i: 0), Known&: LHSKnown, Q, Depth: Depth + 1); |
| 1319 | Known = KnownBits::sub(LHS: LHSKnown, RHS: RHSKnown, NSW, NUW); |
| 1320 | computeKnownBitsFromContext(V: I, Known, Q, Depth); |
| 1321 | break; |
| 1322 | } |
| 1323 | case Instruction::AShr: { |
| 1324 | // Compute the Known bits to simplify things downstream. |
| 1325 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 1326 | |
| 1327 | // If this user is only demanding bits that we know, return the known |
| 1328 | // constant. |
| 1329 | if (DemandedMask.isSubsetOf(RHS: Known.Zero | Known.One)) |
| 1330 | return Constant::getIntegerValue(Ty: ITy, V: Known.One); |
| 1331 | |
| 1332 |     // If operand 0 of the right shift is the result of a left shift by the
| 1333 |     // same amount, this is probably a zero/sign extension, which is
| 1334 |     // unnecessary if we do not demand any of the new sign bits. In that case,
| 1335 |     // return the original operand instead.
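|      |     // E.g., (ashr (shl X, 24), 24) on i32 sign-extends the low 8 bits of X;
|      |     // if only those low 8 bits are demanded, X can be returned unchanged.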
| 1336 | const APInt *ShiftRC; |
| 1337 | const APInt *ShiftLC; |
| 1338 | Value *X; |
| 1339 | unsigned BitWidth = DemandedMask.getBitWidth(); |
| 1340 | if (match(V: I, |
| 1341 | P: m_AShr(L: m_Shl(L: m_Value(V&: X), R: m_APInt(Res&: ShiftLC)), R: m_APInt(Res&: ShiftRC))) && |
| 1342 | ShiftLC == ShiftRC && ShiftLC->ult(RHS: BitWidth) && |
| 1343 | DemandedMask.isSubsetOf(RHS: APInt::getLowBitsSet( |
| 1344 | numBits: BitWidth, loBitsSet: BitWidth - ShiftRC->getZExtValue()))) { |
| 1345 | return X; |
| 1346 | } |
| 1347 | |
| 1348 | break; |
| 1349 | } |
| 1350 | default: |
| 1351 | // Compute the Known bits to simplify things downstream. |
| 1352 | llvm::computeKnownBits(V: I, Known, Q, Depth); |
| 1353 | |
| 1354 | // If this user is only demanding bits that we know, return the known |
| 1355 | // constant. |
| 1356 | if (DemandedMask.isSubsetOf(RHS: Known.Zero|Known.One)) |
| 1357 | return Constant::getIntegerValue(Ty: ITy, V: Known.One); |
| 1358 | |
| 1359 | break; |
| 1360 | } |
| 1361 | |
| 1362 | return nullptr; |
| 1363 | } |
| 1364 | |
| 1365 | /// Helper routine of SimplifyDemandedUseBits. It tries to simplify |
| 1366 | /// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into |
| 1367 | /// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign |
| 1368 | /// of "C2-C1". |
| 1369 | /// |
| 1370 | /// Suppose E1 and E2 may differ in the bits S = {bm, bm+1, ..., bn},
| 1371 | /// regardless of the specific value X holds.
| 1372 | /// This transformation is legal iff one of the following conditions holds:
| 1373 | ///  1) All the bits in S are 0; in this case E1 == E2.
| 1374 | ///  2) We don't care about the bits in S, per the input DemandedMask.
| 1375 | ///  3) A combination of 1) and 2): some bits in S are 0, and we don't care
| 1376 | ///     about the rest.
| 1377 | /// |
| 1378 | /// Currently we only test condition 2). |
| 1379 | /// |
| 1380 | /// As with SimplifyDemandedUseBits, it returns NULL if the simplification was |
| 1381 | /// not successful. |
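|      | ///
|      | /// For example, with i8 values, E1 = (X lshr 2) << 4 and E2 = X << 2 differ
|      | /// only in bits 3..2 (zero in E1, the low two bits of X in E2), so the
|      | /// rewrite is legal whenever DemandedMask does not include bits 3..2.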
| 1382 | Value *InstCombinerImpl::simplifyShrShlDemandedBits( |
| 1383 | Instruction *Shr, const APInt &ShrOp1, Instruction *Shl, |
| 1384 | const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known) { |
| 1385 | if (!ShlOp1 || !ShrOp1) |
| 1386 | return nullptr; // No-op. |
| 1387 | |
| 1388 | Value *VarX = Shr->getOperand(i: 0); |
| 1389 | Type *Ty = VarX->getType(); |
| 1390 | unsigned BitWidth = Ty->getScalarSizeInBits(); |
| 1391 | if (ShlOp1.uge(RHS: BitWidth) || ShrOp1.uge(RHS: BitWidth)) |
| 1392 | return nullptr; // Undef. |
| 1393 | |
| 1394 | unsigned ShlAmt = ShlOp1.getZExtValue(); |
| 1395 | unsigned ShrAmt = ShrOp1.getZExtValue(); |
| 1396 | |
| 1397 | Known.One.clearAllBits(); |
| 1398 | Known.Zero.setLowBits(ShlAmt - 1); |
| 1399 | Known.Zero &= DemandedMask; |
| 1400 | |
| 1401 | APInt BitMask1(APInt::getAllOnes(numBits: BitWidth)); |
| 1402 | APInt BitMask2(APInt::getAllOnes(numBits: BitWidth)); |
| 1403 | |
| 1404 | bool isLshr = (Shr->getOpcode() == Instruction::LShr); |
| 1405 | BitMask1 = isLshr ? (BitMask1.lshr(shiftAmt: ShrAmt) << ShlAmt) : |
| 1406 | (BitMask1.ashr(ShiftAmt: ShrAmt) << ShlAmt); |
| 1407 | |
| 1408 | if (ShrAmt <= ShlAmt) { |
| 1409 | BitMask2 <<= (ShlAmt - ShrAmt); |
| 1410 | } else { |
| 1411 | BitMask2 = isLshr ? BitMask2.lshr(shiftAmt: ShrAmt - ShlAmt): |
| 1412 | BitMask2.ashr(ShiftAmt: ShrAmt - ShlAmt); |
| 1413 | } |
| 1414 | |
| 1415 |   // Check if condition 2) (see the comment on this function) is satisfied.
| 1416 | if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) { |
| 1417 | if (ShrAmt == ShlAmt) |
| 1418 | return VarX; |
| 1419 | |
| 1420 | if (!Shr->hasOneUse()) |
| 1421 | return nullptr; |
| 1422 | |
| 1423 | BinaryOperator *New; |
| 1424 | if (ShrAmt < ShlAmt) { |
| 1425 | Constant *Amt = ConstantInt::get(Ty: VarX->getType(), V: ShlAmt - ShrAmt); |
| 1426 | New = BinaryOperator::CreateShl(V1: VarX, V2: Amt); |
| 1427 | BinaryOperator *Orig = cast<BinaryOperator>(Val: Shl); |
| 1428 | New->setHasNoSignedWrap(Orig->hasNoSignedWrap()); |
| 1429 | New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap()); |
| 1430 | } else { |
| 1431 | Constant *Amt = ConstantInt::get(Ty: VarX->getType(), V: ShrAmt - ShlAmt); |
| 1432 | New = isLshr ? BinaryOperator::CreateLShr(V1: VarX, V2: Amt) : |
| 1433 | BinaryOperator::CreateAShr(V1: VarX, V2: Amt); |
| 1434 | if (cast<BinaryOperator>(Val: Shr)->isExact()) |
| 1435 | New->setIsExact(true); |
| 1436 | } |
| 1437 | |
| 1438 | return InsertNewInstWith(New, Old: Shl->getIterator()); |
| 1439 | } |
| 1440 | |
| 1441 | return nullptr; |
| 1442 | } |
| 1443 | |
| 1444 | /// The specified value produces a vector with any number of elements. |
| 1445 | /// This method analyzes which elements of the operand are poison and |
| 1446 | /// returns that information in PoisonElts. |
| 1447 | /// |
| 1448 | /// DemandedElts contains the set of elements that are actually used by the |
| 1449 | /// caller, and by default (AllowMultipleUsers equals false) the value is |
| 1450 | /// simplified only if it has a single caller. If AllowMultipleUsers is set |
| 1451 | /// to true, DemandedElts refers to the union of sets of elements that are |
| 1452 | /// used by all callers. |
| 1453 | /// |
| 1454 | /// If the information about demanded elements can be used to simplify the |
| 1455 | /// operation, the operation is simplified and the resultant value is
| 1456 | /// returned. This returns null if no change was made. |
| 1457 | Value *InstCombinerImpl::SimplifyDemandedVectorElts(Value *V, |
| 1458 | APInt DemandedElts, |
| 1459 | APInt &PoisonElts, |
| 1460 | unsigned Depth, |
| 1461 | bool AllowMultipleUsers) { |
| 1462 | // Cannot analyze scalable type. The number of vector elements is not a |
| 1463 | // compile-time constant. |
| 1464 | if (isa<ScalableVectorType>(Val: V->getType())) |
| 1465 | return nullptr; |
| 1466 | |
| 1467 | unsigned VWidth = cast<FixedVectorType>(Val: V->getType())->getNumElements(); |
| 1468 | APInt EltMask(APInt::getAllOnes(numBits: VWidth)); |
| 1469 | assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!" ); |
| 1470 | |
| 1471 | if (match(V, P: m_Poison())) { |
| 1472 | // If the entire vector is poison, just return this info. |
| 1473 | PoisonElts = EltMask; |
| 1474 | return nullptr; |
| 1475 | } |
| 1476 | |
| 1477 | if (DemandedElts.isZero()) { // If nothing is demanded, provide poison. |
| 1478 | PoisonElts = EltMask; |
| 1479 | return PoisonValue::get(T: V->getType()); |
| 1480 | } |
| 1481 | |
| 1482 | PoisonElts = 0; |
| 1483 | |
| 1484 | if (auto *C = dyn_cast<Constant>(Val: V)) { |
| 1485 | // Check if this is identity. If so, return 0 since we are not simplifying |
| 1486 | // anything. |
| 1487 | if (DemandedElts.isAllOnes()) |
| 1488 | return nullptr; |
| 1489 | |
| 1490 | Type *EltTy = cast<VectorType>(Val: V->getType())->getElementType(); |
| 1491 | Constant *Poison = PoisonValue::get(T: EltTy); |
| 1492 | SmallVector<Constant*, 16> Elts; |
| 1493 | for (unsigned i = 0; i != VWidth; ++i) { |
| 1494 | if (!DemandedElts[i]) { // If not demanded, set to poison. |
| 1495 | Elts.push_back(Elt: Poison); |
| 1496 | PoisonElts.setBit(i); |
| 1497 | continue; |
| 1498 | } |
| 1499 | |
| 1500 | Constant *Elt = C->getAggregateElement(Elt: i); |
| 1501 | if (!Elt) return nullptr; |
| 1502 | |
| 1503 | Elts.push_back(Elt); |
| 1504 | if (isa<PoisonValue>(Val: Elt)) // Already poison. |
| 1505 | PoisonElts.setBit(i); |
| 1506 | } |
| 1507 | |
| 1508 | // If we changed the constant, return it. |
| 1509 | Constant *NewCV = ConstantVector::get(V: Elts); |
| 1510 | return NewCV != C ? NewCV : nullptr; |
| 1511 | } |
| 1512 | |
| 1513 | // Limit search depth. |
| 1514 | if (Depth == SimplifyDemandedVectorEltsDepthLimit) |
| 1515 | return nullptr; |
| 1516 | |
| 1517 | if (!AllowMultipleUsers) { |
| 1518 | // If multiple users are using the root value, proceed with |
| 1519 | // simplification conservatively assuming that all elements |
| 1520 | // are needed. |
| 1521 | if (!V->hasOneUse()) { |
| 1522 | // Quit if we find multiple users of a non-root value though. |
| 1523 | // They'll be handled when it's their turn to be visited by |
| 1524 | // the main instcombine process. |
| 1525 | if (Depth != 0) |
| 1526 | // TODO: Just compute the PoisonElts information recursively. |
| 1527 | return nullptr; |
| 1528 | |
| 1529 | // Conservatively assume that all elements are needed. |
| 1530 | DemandedElts = EltMask; |
| 1531 | } |
| 1532 | } |
| 1533 | |
| 1534 | Instruction *I = dyn_cast<Instruction>(Val: V); |
| 1535 | if (!I) return nullptr; // Only analyze instructions. |
| 1536 | |
| 1537 | bool MadeChange = false; |
| 1538 | auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum, |
| 1539 | APInt Demanded, APInt &Undef) { |
| 1540 | auto *II = dyn_cast<IntrinsicInst>(Val: Inst); |
| 1541 | Value *Op = II ? II->getArgOperand(i: OpNum) : Inst->getOperand(i: OpNum); |
| 1542 | if (Value *V = SimplifyDemandedVectorElts(V: Op, DemandedElts: Demanded, PoisonElts&: Undef, Depth: Depth + 1)) { |
| 1543 | replaceOperand(I&: *Inst, OpNum, V); |
| 1544 | MadeChange = true; |
| 1545 | } |
| 1546 | }; |
| 1547 | |
| 1548 | APInt PoisonElts2(VWidth, 0); |
| 1549 | APInt PoisonElts3(VWidth, 0); |
| 1550 | switch (I->getOpcode()) { |
| 1551 | default: break; |
| 1552 | |
| 1553 | case Instruction::GetElementPtr: { |
| 1554 | // The LangRef requires that struct geps have all constant indices. As |
| 1555 | // such, we can't convert any operand to partial undef. |
| 1556 | auto mayIndexStructType = [](GetElementPtrInst &GEP) { |
| 1557 | for (auto I = gep_type_begin(GEP), E = gep_type_end(GEP); |
| 1558 | I != E; I++) |
| 1559 | if (I.isStruct()) |
| 1560 | return true; |
| 1561 | return false; |
| 1562 | }; |
| 1563 | if (mayIndexStructType(cast<GetElementPtrInst>(Val&: *I))) |
| 1564 | break; |
| 1565 | |
| 1566 | // Conservatively track the demanded elements back through any vector |
| 1567 | // operands we may have. We know there must be at least one, or we |
| 1568 | // wouldn't have a vector result to get here. Note that we intentionally |
| 1569 |     // merge the undef bits here since gepping with either a poison base or
| 1570 | // index results in poison. |
| 1571 | for (unsigned i = 0; i < I->getNumOperands(); i++) { |
| 1572 | if (i == 0 ? match(V: I->getOperand(i), P: m_Undef()) |
| 1573 | : match(V: I->getOperand(i), P: m_Poison())) { |
| 1574 | // If the entire vector is undefined, just return this info. |
| 1575 | PoisonElts = EltMask; |
| 1576 | return nullptr; |
| 1577 | } |
| 1578 | if (I->getOperand(i)->getType()->isVectorTy()) { |
| 1579 | APInt PoisonEltsOp(VWidth, 0); |
| 1580 | simplifyAndSetOp(I, i, DemandedElts, PoisonEltsOp); |
| 1581 | // gep(x, undef) is not undef, so skip considering idx ops here |
| 1582 | // Note that we could propagate poison, but we can't distinguish between |
| 1583 | // undef & poison bits ATM |
| 1584 | if (i == 0) |
| 1585 | PoisonElts |= PoisonEltsOp; |
| 1586 | } |
| 1587 | } |
| 1588 | |
| 1589 | break; |
| 1590 | } |
| 1591 | case Instruction::InsertElement: { |
| 1592 |     // If this is a variable index, we don't know which element it overwrites,
| 1593 |     // so demand exactly the same input as we produce.
| 1594 | ConstantInt *Idx = dyn_cast<ConstantInt>(Val: I->getOperand(i: 2)); |
| 1595 | if (!Idx) { |
| 1596 | // Note that we can't propagate undef elt info, because we don't know |
| 1597 | // which elt is getting updated. |
| 1598 | simplifyAndSetOp(I, 0, DemandedElts, PoisonElts2); |
| 1599 | break; |
| 1600 | } |
| 1601 | |
| 1602 | // The element inserted overwrites whatever was there, so the input demanded |
| 1603 | // set is simpler than the output set. |
| 1604 | unsigned IdxNo = Idx->getZExtValue(); |
| 1605 | APInt PreInsertDemandedElts = DemandedElts; |
| 1606 | if (IdxNo < VWidth) |
| 1607 | PreInsertDemandedElts.clearBit(BitPosition: IdxNo); |
| 1608 | |
| 1609 | // If we only demand the element that is being inserted and that element |
| 1610 | // was extracted from the same index in another vector with the same type, |
| 1611 | // replace this insert with that other vector. |
| 1612 | // Note: This is attempted before the call to simplifyAndSetOp because that |
| 1613 | // may change PoisonElts to a value that does not match with Vec. |
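|      |     // E.g., when only lane 2 is demanded,
|      |     //   insertelement V1, (extractelement V2, 2), 2
|      |     // can be replaced by V2 if V1 and V2 have the same type.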
| 1614 | Value *Vec; |
| 1615 | if (PreInsertDemandedElts == 0 && |
| 1616 | match(V: I->getOperand(i: 1), |
| 1617 | P: m_ExtractElt(Val: m_Value(V&: Vec), Idx: m_SpecificInt(V: IdxNo))) && |
| 1618 | Vec->getType() == I->getType()) { |
| 1619 | return Vec; |
| 1620 | } |
| 1621 | |
| 1622 | simplifyAndSetOp(I, 0, PreInsertDemandedElts, PoisonElts); |
| 1623 | |
| 1624 | // If this is inserting an element that isn't demanded, remove this |
| 1625 | // insertelement. |
| 1626 | if (IdxNo >= VWidth || !DemandedElts[IdxNo]) { |
| 1627 | Worklist.push(I); |
| 1628 | return I->getOperand(i: 0); |
| 1629 | } |
| 1630 | |
| 1631 | // The inserted element is defined. |
| 1632 | PoisonElts.clearBit(BitPosition: IdxNo); |
| 1633 | break; |
| 1634 | } |
| 1635 | case Instruction::ShuffleVector: { |
| 1636 | auto *Shuffle = cast<ShuffleVectorInst>(Val: I); |
| 1637 | assert(Shuffle->getOperand(0)->getType() == |
| 1638 | Shuffle->getOperand(1)->getType() && |
| 1639 | "Expected shuffle operands to have same type" ); |
| 1640 | unsigned OpWidth = cast<FixedVectorType>(Val: Shuffle->getOperand(i_nocapture: 0)->getType()) |
| 1641 | ->getNumElements(); |
| 1642 | // Handle trivial case of a splat. Only check the first element of LHS |
| 1643 | // operand. |
| 1644 | if (all_of(Range: Shuffle->getShuffleMask(), P: equal_to(Arg: 0)) && |
| 1645 | DemandedElts.isAllOnes()) { |
| 1646 | if (!isa<PoisonValue>(Val: I->getOperand(i: 1))) { |
| 1647 | I->setOperand(i: 1, Val: PoisonValue::get(T: I->getOperand(i: 1)->getType())); |
| 1648 | MadeChange = true; |
| 1649 | } |
| 1650 | APInt LeftDemanded(OpWidth, 1); |
| 1651 | APInt LHSPoisonElts(OpWidth, 0); |
| 1652 | simplifyAndSetOp(I, 0, LeftDemanded, LHSPoisonElts); |
| 1653 | if (LHSPoisonElts[0]) |
| 1654 | PoisonElts = EltMask; |
| 1655 | else |
| 1656 | PoisonElts.clearAllBits(); |
| 1657 | break; |
| 1658 | } |
| 1659 | |
| 1660 | APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0); |
| 1661 | for (unsigned i = 0; i < VWidth; i++) { |
| 1662 | if (DemandedElts[i]) { |
| 1663 | unsigned MaskVal = Shuffle->getMaskValue(Elt: i); |
| 1664 | if (MaskVal != -1u) { |
| 1665 | assert(MaskVal < OpWidth * 2 && |
| 1666 | "shufflevector mask index out of range!" ); |
| 1667 | if (MaskVal < OpWidth) |
| 1668 | LeftDemanded.setBit(MaskVal); |
| 1669 | else |
| 1670 | RightDemanded.setBit(MaskVal - OpWidth); |
| 1671 | } |
| 1672 | } |
| 1673 | } |
| 1674 | |
| 1675 | APInt LHSPoisonElts(OpWidth, 0); |
| 1676 | simplifyAndSetOp(I, 0, LeftDemanded, LHSPoisonElts); |
| 1677 | |
| 1678 | APInt RHSPoisonElts(OpWidth, 0); |
| 1679 | simplifyAndSetOp(I, 1, RightDemanded, RHSPoisonElts); |
| 1680 | |
| 1681 | // If this shuffle does not change the vector length and the elements |
| 1682 | // demanded by this shuffle are an identity mask, then this shuffle is |
| 1683 | // unnecessary. |
| 1684 | // |
| 1685 | // We are assuming canonical form for the mask, so the source vector is |
| 1686 | // operand 0 and operand 1 is not used. |
| 1687 | // |
| 1688 | // Note that if an element is demanded and this shuffle mask is undefined |
| 1689 | // for that element, then the shuffle is not considered an identity |
| 1690 | // operation. The shuffle prevents poison from the operand vector from |
| 1691 | // leaking to the result by replacing poison with an undefined value. |
| 1692 | if (VWidth == OpWidth) { |
| 1693 | bool IsIdentityShuffle = true; |
| 1694 | for (unsigned i = 0; i < VWidth; i++) { |
| 1695 | unsigned MaskVal = Shuffle->getMaskValue(Elt: i); |
| 1696 | if (DemandedElts[i] && i != MaskVal) { |
| 1697 | IsIdentityShuffle = false; |
| 1698 | break; |
| 1699 | } |
| 1700 | } |
| 1701 | if (IsIdentityShuffle) |
| 1702 | return Shuffle->getOperand(i_nocapture: 0); |
| 1703 | } |
| 1704 | |
| 1705 | bool NewPoisonElts = false; |
| 1706 | unsigned LHSIdx = -1u, LHSValIdx = -1u; |
| 1707 | unsigned RHSIdx = -1u, RHSValIdx = -1u; |
| 1708 | bool LHSUniform = true; |
| 1709 | bool RHSUniform = true; |
| 1710 | for (unsigned i = 0; i < VWidth; i++) { |
| 1711 | unsigned MaskVal = Shuffle->getMaskValue(Elt: i); |
| 1712 | if (MaskVal == -1u) { |
| 1713 | PoisonElts.setBit(i); |
| 1714 | } else if (!DemandedElts[i]) { |
| 1715 | NewPoisonElts = true; |
| 1716 | PoisonElts.setBit(i); |
| 1717 | } else if (MaskVal < OpWidth) { |
| 1718 | if (LHSPoisonElts[MaskVal]) { |
| 1719 | NewPoisonElts = true; |
| 1720 | PoisonElts.setBit(i); |
| 1721 | } else { |
| 1722 | LHSIdx = LHSIdx == -1u ? i : OpWidth; |
| 1723 | LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth; |
| 1724 | LHSUniform = LHSUniform && (MaskVal == i); |
| 1725 | } |
| 1726 | } else { |
| 1727 | if (RHSPoisonElts[MaskVal - OpWidth]) { |
| 1728 | NewPoisonElts = true; |
| 1729 | PoisonElts.setBit(i); |
| 1730 | } else { |
| 1731 | RHSIdx = RHSIdx == -1u ? i : OpWidth; |
| 1732 | RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth; |
| 1733 | RHSUniform = RHSUniform && (MaskVal - OpWidth == i); |
| 1734 | } |
| 1735 | } |
| 1736 | } |
| 1737 | |
| 1738 |     // Try to transform a shuffle with a constant vector, where only a single
| 1739 |     // element of that constant is used, into a single insertelement instruction.
| 1740 | // shufflevector V, C, <v1, v2, .., ci, .., vm> -> |
| 1741 | // insertelement V, C[ci], ci-n |
| 1742 | if (OpWidth == |
| 1743 | cast<FixedVectorType>(Val: Shuffle->getType())->getNumElements()) { |
| 1744 | Value *Op = nullptr; |
| 1745 | Constant *Value = nullptr; |
| 1746 | unsigned Idx = -1u; |
| 1747 | |
| 1748 | // Find constant vector with the single element in shuffle (LHS or RHS). |
| 1749 | if (LHSIdx < OpWidth && RHSUniform) { |
| 1750 | if (auto *CV = dyn_cast<ConstantVector>(Val: Shuffle->getOperand(i_nocapture: 0))) { |
| 1751 | Op = Shuffle->getOperand(i_nocapture: 1); |
| 1752 | Value = CV->getOperand(i_nocapture: LHSValIdx); |
| 1753 | Idx = LHSIdx; |
| 1754 | } |
| 1755 | } |
| 1756 | if (RHSIdx < OpWidth && LHSUniform) { |
| 1757 | if (auto *CV = dyn_cast<ConstantVector>(Val: Shuffle->getOperand(i_nocapture: 1))) { |
| 1758 | Op = Shuffle->getOperand(i_nocapture: 0); |
| 1759 | Value = CV->getOperand(i_nocapture: RHSValIdx); |
| 1760 | Idx = RHSIdx; |
| 1761 | } |
| 1762 | } |
| 1763 | // Found constant vector with single element - convert to insertelement. |
| 1764 | if (Op && Value) { |
| 1765 | Instruction *New = InsertElementInst::Create( |
| 1766 | Vec: Op, NewElt: Value, Idx: ConstantInt::get(Ty: Type::getInt64Ty(C&: I->getContext()), V: Idx), |
| 1767 | NameStr: Shuffle->getName()); |
| 1768 | InsertNewInstWith(New, Old: Shuffle->getIterator()); |
| 1769 | return New; |
| 1770 | } |
| 1771 | } |
| 1772 | if (NewPoisonElts) { |
| 1773 |       // Add the additional discovered poison elements to the mask.
| 1774 | SmallVector<int, 16> Elts; |
| 1775 | for (unsigned i = 0; i < VWidth; ++i) { |
| 1776 | if (PoisonElts[i]) |
| 1777 | Elts.push_back(Elt: PoisonMaskElem); |
| 1778 | else |
| 1779 | Elts.push_back(Elt: Shuffle->getMaskValue(Elt: i)); |
| 1780 | } |
| 1781 | Shuffle->setShuffleMask(Elts); |
| 1782 | MadeChange = true; |
| 1783 | } |
| 1784 | break; |
| 1785 | } |
| 1786 | case Instruction::Select: { |
| 1787 | // If this is a vector select, try to transform the select condition based |
| 1788 | // on the current demanded elements. |
| 1789 | SelectInst *Sel = cast<SelectInst>(Val: I); |
| 1790 | if (Sel->getCondition()->getType()->isVectorTy()) { |
| 1791 | // TODO: We are not doing anything with PoisonElts based on this call. |
| 1792 | // It is overwritten below based on the other select operands. If an |
| 1793 | // element of the select condition is known undef, then we are free to |
| 1794 | // choose the output value from either arm of the select. If we know that |
| 1795 | // one of those values is undef, then the output can be undef. |
| 1796 | simplifyAndSetOp(I, 0, DemandedElts, PoisonElts); |
| 1797 | } |
| 1798 | |
| 1799 | // Next, see if we can transform the arms of the select. |
| 1800 | APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts); |
| 1801 | if (auto *CV = dyn_cast<ConstantVector>(Val: Sel->getCondition())) { |
| 1802 | for (unsigned i = 0; i < VWidth; i++) { |
| 1803 | Constant *CElt = CV->getAggregateElement(Elt: i); |
| 1804 | |
| 1805 | // isNullValue() always returns false when called on a ConstantExpr. |
| 1806 | if (CElt->isNullValue()) |
| 1807 | DemandedLHS.clearBit(BitPosition: i); |
| 1808 | else if (CElt->isOneValue()) |
| 1809 | DemandedRHS.clearBit(BitPosition: i); |
| 1810 | } |
| 1811 | } |
| 1812 | |
| 1813 | simplifyAndSetOp(I, 1, DemandedLHS, PoisonElts2); |
| 1814 | simplifyAndSetOp(I, 2, DemandedRHS, PoisonElts3); |
| 1815 | |
| 1816 | // Output elements are undefined if the element from each arm is undefined. |
| 1817 | // TODO: This can be improved. See comment in select condition handling. |
| 1818 | PoisonElts = PoisonElts2 & PoisonElts3; |
| 1819 | break; |
| 1820 | } |
| 1821 | case Instruction::BitCast: { |
| 1822 | // Vector->vector casts only. |
| 1823 | VectorType *VTy = dyn_cast<VectorType>(Val: I->getOperand(i: 0)->getType()); |
| 1824 | if (!VTy) break; |
| 1825 | unsigned InVWidth = cast<FixedVectorType>(Val: VTy)->getNumElements(); |
| 1826 | APInt InputDemandedElts(InVWidth, 0); |
| 1827 | PoisonElts2 = APInt(InVWidth, 0); |
| 1828 | unsigned Ratio; |
| 1829 | |
| 1830 | if (VWidth == InVWidth) { |
| 1831 | // If we are converting from <4 x i32> -> <4 x f32>, we demand the same |
| 1832 | // elements as are demanded of us. |
| 1833 | Ratio = 1; |
| 1834 | InputDemandedElts = DemandedElts; |
| 1835 | } else if ((VWidth % InVWidth) == 0) { |
| 1836 | // If the number of elements in the output is a multiple of the number of |
| 1837 | // elements in the input then an input element is live if any of the |
| 1838 | // corresponding output elements are live. |
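|      |       // E.g., for a bitcast from <2 x i64> to <4 x i32>, Ratio is 2 and
|      |       // demanding output element 3 demands input element 1 (= 3 / 2).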
| 1839 | Ratio = VWidth / InVWidth; |
| 1840 | for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) |
| 1841 | if (DemandedElts[OutIdx]) |
| 1842 | InputDemandedElts.setBit(OutIdx / Ratio); |
| 1843 | } else if ((InVWidth % VWidth) == 0) { |
| 1844 | // If the number of elements in the input is a multiple of the number of |
| 1845 | // elements in the output then an input element is live if the |
| 1846 | // corresponding output element is live. |
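|      |       // E.g., for a bitcast from <4 x i32> to <2 x i64>, Ratio is 2 and
|      |       // demanding output element 0 demands input elements 0 and 1.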
| 1847 | Ratio = InVWidth / VWidth; |
| 1848 | for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx) |
| 1849 | if (DemandedElts[InIdx / Ratio]) |
| 1850 | InputDemandedElts.setBit(InIdx); |
| 1851 | } else { |
| 1852 | // Unsupported so far. |
| 1853 | break; |
| 1854 | } |
| 1855 | |
| 1856 | simplifyAndSetOp(I, 0, InputDemandedElts, PoisonElts2); |
| 1857 | |
| 1858 | if (VWidth == InVWidth) { |
| 1859 | PoisonElts = PoisonElts2; |
| 1860 | } else if ((VWidth % InVWidth) == 0) { |
| 1861 | // If the number of elements in the output is a multiple of the number of |
| 1862 | // elements in the input then an output element is undef if the |
| 1863 | // corresponding input element is undef. |
| 1864 | for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) |
| 1865 | if (PoisonElts2[OutIdx / Ratio]) |
| 1866 | PoisonElts.setBit(OutIdx); |
| 1867 | } else if ((InVWidth % VWidth) == 0) { |
| 1868 | // If the number of elements in the input is a multiple of the number of |
| 1869 | // elements in the output then an output element is undef if all of the |
| 1870 | // corresponding input elements are undef. |
| 1871 | for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) { |
| 1872 | APInt SubUndef = PoisonElts2.lshr(shiftAmt: OutIdx * Ratio).zextOrTrunc(width: Ratio); |
| 1873 | if (SubUndef.popcount() == Ratio) |
| 1874 | PoisonElts.setBit(OutIdx); |
| 1875 | } |
| 1876 | } else { |
| 1877 | llvm_unreachable("Unimp" ); |
| 1878 | } |
| 1879 | break; |
| 1880 | } |
| 1881 | case Instruction::FPTrunc: |
| 1882 | case Instruction::FPExt: |
| 1883 | simplifyAndSetOp(I, 0, DemandedElts, PoisonElts); |
| 1884 | break; |
| 1885 | |
| 1886 | case Instruction::Call: { |
| 1887 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(Val: I); |
| 1888 | if (!II) break; |
| 1889 | switch (II->getIntrinsicID()) { |
| 1890 | case Intrinsic::masked_gather: // fallthrough |
| 1891 | case Intrinsic::masked_load: { |
| 1892 | // Subtlety: If we load from a pointer, the pointer must be valid |
| 1893 | // regardless of whether the element is demanded. Doing otherwise risks |
| 1894 | // segfaults which didn't exist in the original program. |
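|      |       // With a constant mask, a false lane never dereferences its pointer
|      |       // (so, for a gather, that pointer lane is not demanded), and a true
|      |       // lane never reads the passthrough value for that lane.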
| 1895 | APInt DemandedPtrs(APInt::getAllOnes(numBits: VWidth)), |
| 1896 | DemandedPassThrough(DemandedElts); |
| 1897 | if (auto *CMask = dyn_cast<Constant>(Val: II->getOperand(i_nocapture: 1))) { |
| 1898 | for (unsigned i = 0; i < VWidth; i++) { |
| 1899 | if (Constant *CElt = CMask->getAggregateElement(Elt: i)) { |
| 1900 | if (CElt->isNullValue()) |
| 1901 | DemandedPtrs.clearBit(BitPosition: i); |
| 1902 | else if (CElt->isAllOnesValue()) |
| 1903 | DemandedPassThrough.clearBit(BitPosition: i); |
| 1904 | } |
| 1905 | } |
| 1906 | } |
| 1907 | |
| 1908 | if (II->getIntrinsicID() == Intrinsic::masked_gather) |
| 1909 | simplifyAndSetOp(II, 0, DemandedPtrs, PoisonElts2); |
| 1910 | simplifyAndSetOp(II, 2, DemandedPassThrough, PoisonElts3); |
| 1911 | |
| 1912 |       // Output elements are undefined if the elements from both sources are.
| 1913 | // TODO: can strengthen via mask as well. |
| 1914 | PoisonElts = PoisonElts2 & PoisonElts3; |
| 1915 | break; |
| 1916 | } |
| 1917 | default: { |
| 1918 | // Handle target specific intrinsics |
| 1919 | std::optional<Value *> V = targetSimplifyDemandedVectorEltsIntrinsic( |
| 1920 | II&: *II, DemandedElts, UndefElts&: PoisonElts, UndefElts2&: PoisonElts2, UndefElts3&: PoisonElts3, |
| 1921 | SimplifyAndSetOp: simplifyAndSetOp); |
| 1922 | if (V) |
| 1923 | return *V; |
| 1924 | break; |
| 1925 | } |
| 1926 | } // switch on IntrinsicID |
| 1927 | break; |
| 1928 | } // case Call |
| 1929 | } // switch on Opcode |
| 1930 | |
| 1931 | // TODO: We bail completely on integer div/rem and shifts because they have |
| 1932 | // UB/poison potential, but that should be refined. |
| 1933 | BinaryOperator *BO; |
| 1934 | if (match(V: I, P: m_BinOp(I&: BO)) && !BO->isIntDivRem() && !BO->isShift()) { |
| 1935 | Value *X = BO->getOperand(i_nocapture: 0); |
| 1936 | Value *Y = BO->getOperand(i_nocapture: 1); |
| 1937 | |
| 1938 | // Look for an equivalent binop except that one operand has been shuffled. |
| 1939 | // If the demand for this binop only includes elements that are the same as |
| 1940 | // the other binop, then we may be able to replace this binop with a use of |
| 1941 | // the earlier one. |
| 1942 | // |
| 1943 | // Example: |
| 1944 | // %other_bo = bo (shuf X, {0}), Y |
| 1945 | // %this_extracted_bo = extelt (bo X, Y), 0 |
| 1946 | // --> |
| 1947 | // %other_bo = bo (shuf X, {0}), Y |
| 1948 | // %this_extracted_bo = extelt %other_bo, 0 |
| 1949 | // |
| 1950 | // TODO: Handle demand of an arbitrary single element or more than one |
| 1951 | // element instead of just element 0. |
| 1952 | // TODO: Unlike general demanded elements transforms, this should be safe |
| 1953 | // for any (div/rem/shift) opcode too. |
| 1954 | if (DemandedElts == 1 && !X->hasOneUse() && !Y->hasOneUse() && |
| 1955 | BO->hasOneUse() ) { |
| 1956 | |
| 1957 | auto findShufBO = [&](bool MatchShufAsOp0) -> User * { |
| 1958 | // Try to use shuffle-of-operand in place of an operand: |
| 1959 | // bo X, Y --> bo (shuf X), Y |
| 1960 | // bo X, Y --> bo X, (shuf Y) |
| 1961 | |
| 1962 | Value *OtherOp = MatchShufAsOp0 ? Y : X; |
| 1963 | if (!OtherOp->hasUseList()) |
| 1964 | return nullptr; |
| 1965 | |
| 1966 | BinaryOperator::BinaryOps Opcode = BO->getOpcode(); |
| 1967 | Value *ShufOp = MatchShufAsOp0 ? X : Y; |
| 1968 | |
| 1969 | for (User *U : OtherOp->users()) { |
| 1970 | ArrayRef<int> Mask; |
| 1971 | auto Shuf = m_Shuffle(v1: m_Specific(V: ShufOp), v2: m_Value(), mask: m_Mask(Mask)); |
| 1972 | if (BO->isCommutative() |
| 1973 | ? match(V: U, P: m_c_BinOp(Opcode, L: Shuf, R: m_Specific(V: OtherOp))) |
| 1974 | : MatchShufAsOp0 |
| 1975 | ? match(V: U, P: m_BinOp(Opcode, L: Shuf, R: m_Specific(V: OtherOp))) |
| 1976 | : match(V: U, P: m_BinOp(Opcode, L: m_Specific(V: OtherOp), R: Shuf))) |
| 1977 | if (match(Mask, P: m_ZeroMask()) && Mask[0] != PoisonMaskElem) |
| 1978 | if (DT.dominates(Def: U, User: I)) |
| 1979 | return U; |
| 1980 | } |
| 1981 | return nullptr; |
| 1982 | }; |
| 1983 | |
| 1984 | if (User *ShufBO = findShufBO(/* MatchShufAsOp0 */ true)) |
| 1985 | return ShufBO; |
| 1986 | if (User *ShufBO = findShufBO(/* MatchShufAsOp0 */ false)) |
| 1987 | return ShufBO; |
| 1988 | } |
| 1989 | |
| 1990 | simplifyAndSetOp(I, 0, DemandedElts, PoisonElts); |
| 1991 | simplifyAndSetOp(I, 1, DemandedElts, PoisonElts2); |
| 1992 | |
| 1993 | // Output elements are undefined if both are undefined. Consider things |
| 1994 | // like undef & 0. The result is known zero, not undef. |
| 1995 | PoisonElts &= PoisonElts2; |
| 1996 | } |
| 1997 | |
| 1998 | // If we've proven all of the lanes poison, return a poison value. |
| 1999 | // TODO: Intersect w/demanded lanes |
| 2000 | if (PoisonElts.isAllOnes()) |
| 2001 | return PoisonValue::get(T: I->getType()); |
| 2002 | |
| 2003 | return MadeChange ? I : nullptr; |
| 2004 | } |
| 2005 | |
| 2006 | /// For floating-point classes that resolve to a single bit pattern, return that |
| 2007 | /// value. |
| 2008 | static Constant *getFPClassConstant(Type *Ty, FPClassTest Mask, |
| 2009 | bool IsCanonicalizing = false) { |
| 2010 | if (Mask == fcNone) |
| 2011 | return PoisonValue::get(T: Ty); |
| 2012 | |
| 2013 | if (Mask == fcPosZero) |
| 2014 | return Constant::getNullValue(Ty); |
| 2015 | |
| 2016 | // TODO: Support aggregate types that are allowed by FPMathOperator. |
| 2017 | if (Ty->isAggregateType()) |
| 2018 | return nullptr; |
| 2019 | |
| 2020 |   // Turn any possible snans into quiet nans if we can.
| 2021 | if (Mask == fcNan && IsCanonicalizing) |
| 2022 | return ConstantFP::getQNaN(Ty); |
| 2023 | |
| 2024 | switch (Mask) { |
| 2025 | case fcNegZero: |
| 2026 | return ConstantFP::getZero(Ty, Negative: true); |
| 2027 | case fcPosInf: |
| 2028 | return ConstantFP::getInfinity(Ty); |
| 2029 | case fcNegInf: |
| 2030 | return ConstantFP::getInfinity(Ty, Negative: true); |
| 2031 | case fcQNan: |
| 2032 | // Payload bits cannot be dropped for pure signbit operations. |
| 2033 | return IsCanonicalizing ? ConstantFP::getQNaN(Ty) : nullptr; |
| 2034 | default: |
| 2035 | return nullptr; |
| 2036 | } |
| 2037 | } |
| 2038 | |
| 2039 | /// Perform multiple-use aware simplifications for fabs(\p Src). Returns a
| 2040 | /// replacement value if it's simplified, otherwise nullptr. Updates \p Known |
| 2041 | /// with the known fpclass if not simplified. |
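|      | /// For example, if \p Src is known to have a clear sign bit, or (under nsz)
|      | /// is known never to be a NaN or strictly less than zero, fabs(\p Src) can
|      | /// simply be replaced by \p Src.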
| 2042 | static Value *simplifyDemandedFPClassFabs(KnownFPClass &Known, Value *Src, |
| 2043 | FPClassTest DemandedMask, |
| 2044 | KnownFPClass KnownSrc, bool NSZ) { |
| 2045 | if ((DemandedMask & fcNan) == fcNone) |
| 2046 | KnownSrc.knownNot(RuleOut: fcNan); |
| 2047 | if ((DemandedMask & fcInf) == fcNone) |
| 2048 | KnownSrc.knownNot(RuleOut: fcInf); |
| 2049 | |
| 2050 | if (KnownSrc.SignBit == false || |
| 2051 | ((DemandedMask & fcNan) == fcNone && KnownSrc.isKnownNever(Mask: fcNegative))) |
| 2052 | return Src; |
| 2053 | |
| 2054 | // If the only sign bit difference is due to -0, ignore it with nsz |
| 2055 | if (NSZ && |
| 2056 | KnownSrc.isKnownNever(Mask: KnownFPClass::OrderedLessThanZeroMask | fcNan)) |
| 2057 | return Src; |
| 2058 | |
| 2059 | Known = KnownFPClass::fabs(Src: KnownSrc); |
| 2060 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2061 | return nullptr; |
| 2062 | } |
| 2063 | |
| 2064 | /// Try to set inferred no-nans or no-infs flags in \p FMF. \p ValidResults is
| 2065 | /// the mask of results the operator can actually produce (already computed
| 2066 | /// from the result), and \p Known holds the known classes of the operand inputs.
| 2067 | static FastMathFlags inferFastMathValueFlags(FastMathFlags FMF, |
| 2068 | FPClassTest ValidResults, |
| 2069 | ArrayRef<KnownFPClass> Known) { |
| 2070 | if (!FMF.noNaNs() && (ValidResults & fcNan) == fcNone) { |
| 2071 | if (all_of(Range&: Known, P: [](const KnownFPClass KnownSrc) { |
| 2072 | return KnownSrc.isKnownNeverNaN(); |
| 2073 | })) |
| 2074 | FMF.setNoNaNs(); |
| 2075 | } |
| 2076 | |
| 2077 | if (!FMF.noInfs() && (ValidResults & fcInf) == fcNone) { |
| 2078 | if (all_of(Range&: Known, P: [](const KnownFPClass KnownSrc) { |
| 2079 | return KnownSrc.isKnownNeverInfinity(); |
| 2080 | })) |
| 2081 | FMF.setNoInfs(); |
| 2082 | } |
| 2083 | |
| 2084 | return FMF; |
| 2085 | } |
| 2086 | |
| 2087 | static FPClassTest adjustDemandedMaskFromFlags(FPClassTest DemandedMask, |
| 2088 | FastMathFlags FMF) { |
| 2089 | if (FMF.noNaNs()) |
| 2090 | DemandedMask &= ~fcNan; |
| 2091 | |
| 2092 | if (FMF.noInfs()) |
| 2093 | DemandedMask &= ~fcInf; |
| 2094 | return DemandedMask; |
| 2095 | } |
| 2096 | |
| 2097 | /// Apply epilogue fixups to a floating-point operation: see if the result can
| 2098 | /// be folded to a constant, or whether inferred fast math flags can be applied.
| 2099 | static Value *simplifyDemandedFPClassResult(Instruction *FPOp, |
| 2100 | FastMathFlags FMF, |
| 2101 | FPClassTest DemandedMask, |
| 2102 | KnownFPClass &Known, |
| 2103 | ArrayRef<KnownFPClass> KnownSrcs) { |
| 2104 | FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses; |
| 2105 | Constant *SingleVal = getFPClassConstant(Ty: FPOp->getType(), Mask: ValidResults, |
| 2106 | /*IsCanonicalizing=*/true); |
| 2107 | if (SingleVal) |
| 2108 | return SingleVal; |
| 2109 | |
| 2110 | FastMathFlags InferredFMF = |
| 2111 | inferFastMathValueFlags(FMF, ValidResults, Known: KnownSrcs); |
| 2112 | if (InferredFMF != FMF) { |
| 2113 | FPOp->dropUBImplyingAttrsAndMetadata(); |
| 2114 | FPOp->setFastMathFlags(InferredFMF); |
| 2115 | return FPOp; |
| 2116 | } |
| 2117 | |
| 2118 | return nullptr; |
| 2119 | } |
| 2120 | |
| 2121 | /// Perform multiple-use aware simplifications for fneg(fabs(\p Src)). Returns a
| 2122 | /// replacement value if it's simplified, otherwise nullptr. Updates \p Known |
| 2123 | /// with the known fpclass if not simplified. |
| 2124 | static Value *simplifyDemandedFPClassFnegFabs(KnownFPClass &Known, Value *Src, |
| 2125 | FPClassTest DemandedMask, |
| 2126 | KnownFPClass KnownSrc, bool NSZ) { |
| 2127 | if ((DemandedMask & fcNan) == fcNone) |
| 2128 | KnownSrc.knownNot(RuleOut: fcNan); |
| 2129 | if ((DemandedMask & fcInf) == fcNone) |
| 2130 | KnownSrc.knownNot(RuleOut: fcInf); |
| 2131 | |
| 2132 | // If the source value is known negative, we can directly fold to it. |
| 2133 | if (KnownSrc.SignBit == true) |
| 2134 | return Src; |
| 2135 | |
| 2136 | // If the only sign bit difference is for 0, ignore it with nsz. |
| 2137 | if (NSZ && |
| 2138 | KnownSrc.isKnownNever(Mask: KnownFPClass::OrderedGreaterThanZeroMask | fcNan)) |
| 2139 | return Src; |
| 2140 | |
| 2141 | Known = KnownFPClass::fneg(Src: KnownFPClass::fabs(Src: KnownSrc)); |
| 2142 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2143 | return nullptr; |
| 2144 | } |
| 2145 | |
| 2146 | static Value *simplifyDemandedFPClassCopysignMag(Value *MagSrc, |
| 2147 | FPClassTest DemandedMask, |
| 2148 | KnownFPClass KnownSrc, |
| 2149 | bool NSZ) { |
| 2150 | if (NSZ) { |
| 2151 | constexpr FPClassTest NegOrZero = fcNegative | fcPosZero; |
| 2152 | constexpr FPClassTest PosOrZero = fcPositive | fcNegZero; |
| 2153 | |
| 2154 | if ((DemandedMask & ~NegOrZero) == fcNone && |
| 2155 | KnownSrc.isKnownAlways(Mask: NegOrZero)) |
| 2156 | return MagSrc; |
| 2157 | |
| 2158 | if ((DemandedMask & ~PosOrZero) == fcNone && |
| 2159 | KnownSrc.isKnownAlways(Mask: PosOrZero)) |
| 2160 | return MagSrc; |
| 2161 | } else { |
| 2162 | if ((DemandedMask & ~fcNegative) == fcNone && KnownSrc.SignBit == true) |
| 2163 | return MagSrc; |
| 2164 | |
| 2165 | if ((DemandedMask & ~fcPositive) == fcNone && KnownSrc.SignBit == false) |
| 2166 | return MagSrc; |
| 2167 | } |
| 2168 | |
| 2169 | return nullptr; |
| 2170 | } |
| 2171 | |
| 2172 | static Value * |
| 2173 | simplifyDemandedFPClassMinMax(KnownFPClass &Known, Intrinsic::ID IID, |
| 2174 | const CallInst *CI, FPClassTest DemandedMask, |
| 2175 | KnownFPClass KnownLHS, KnownFPClass KnownRHS, |
| 2176 | const Function &F, bool NSZ) { |
| 2177 | bool OrderedZeroSign = !NSZ; |
| 2178 | |
| 2179 | KnownFPClass::MinMaxKind OpKind; |
| 2180 | switch (IID) { |
| 2181 | case Intrinsic::maximum: { |
| 2182 | OpKind = KnownFPClass::MinMaxKind::maximum; |
| 2183 | |
| 2184 | // If one operand is known greater than the other, it must be that |
| 2185 | // operand unless the other is a nan. |
| 2186 | if (cannotOrderStrictlyLess(LHS: KnownLHS.KnownFPClasses, |
| 2187 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2188 | KnownRHS.isKnownNever(Mask: fcNan)) |
| 2189 | return CI->getArgOperand(i: 0); |
| 2190 | |
| 2191 | if (cannotOrderStrictlyGreater(LHS: KnownLHS.KnownFPClasses, |
| 2192 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2193 | KnownLHS.isKnownNever(Mask: fcNan)) |
| 2194 | return CI->getArgOperand(i: 1); |
| 2195 | |
| 2196 | break; |
| 2197 | } |
| 2198 | case Intrinsic::minimum: { |
| 2199 | OpKind = KnownFPClass::MinMaxKind::minimum; |
| 2200 | |
| 2201 | // If one operand is known less than the other, it must be that operand |
| 2202 | // unless the other is a nan. |
| 2203 | if (cannotOrderStrictlyGreater(LHS: KnownLHS.KnownFPClasses, |
| 2204 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2205 | KnownRHS.isKnownNever(Mask: fcNan)) |
| 2206 | return CI->getArgOperand(i: 0); |
| 2207 | |
| 2208 | if (cannotOrderStrictlyLess(LHS: KnownLHS.KnownFPClasses, |
| 2209 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2210 | KnownLHS.isKnownNever(Mask: fcNan)) |
| 2211 | return CI->getArgOperand(i: 1); |
| 2212 | |
| 2213 | break; |
| 2214 | } |
| 2215 | case Intrinsic::maxnum: |
| 2216 | case Intrinsic::maximumnum: { |
| 2217 | OpKind = IID == Intrinsic::maxnum ? KnownFPClass::MinMaxKind::maxnum |
| 2218 | : KnownFPClass::MinMaxKind::maximumnum; |
| 2219 | |
| 2220 | if (cannotOrderStrictlyLess(LHS: KnownLHS.KnownFPClasses, |
| 2221 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2222 | KnownLHS.isKnownNever(Mask: fcNan)) |
| 2223 | return CI->getArgOperand(i: 0); |
| 2224 | |
| 2225 | if (cannotOrderStrictlyGreater(LHS: KnownLHS.KnownFPClasses, |
| 2226 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2227 | KnownRHS.isKnownNever(Mask: fcNan)) |
| 2228 | return CI->getArgOperand(i: 1); |
| 2229 | |
| 2230 | break; |
| 2231 | } |
| 2232 | case Intrinsic::minnum: |
| 2233 | case Intrinsic::minimumnum: { |
| 2234 | OpKind = IID == Intrinsic::minnum ? KnownFPClass::MinMaxKind::minnum |
| 2235 | : KnownFPClass::MinMaxKind::minimumnum; |
| 2236 | |
| 2237 | if (cannotOrderStrictlyGreater(LHS: KnownLHS.KnownFPClasses, |
| 2238 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2239 | KnownLHS.isKnownNever(Mask: fcNan)) |
| 2240 | return CI->getArgOperand(i: 0); |
| 2241 | |
| 2242 | if (cannotOrderStrictlyLess(LHS: KnownLHS.KnownFPClasses, |
| 2243 | RHS: KnownRHS.KnownFPClasses, OrderedZeroSign) && |
| 2244 | KnownRHS.isKnownNever(Mask: fcNan)) |
| 2245 | return CI->getArgOperand(i: 1); |
| 2246 | |
| 2247 | break; |
| 2248 | } |
| 2249 | default: |
| 2250 | llvm_unreachable("not a min/max intrinsic" ); |
| 2251 | } |
| 2252 | |
| 2253 | Type *EltTy = CI->getType()->getScalarType(); |
| 2254 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2255 | Known = KnownFPClass::minMaxLike(LHS: KnownLHS, RHS: KnownRHS, Kind: OpKind, DenormMode: Mode); |
| 2256 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2257 | |
| 2258 | return getFPClassConstant(Ty: CI->getType(), Mask: Known.KnownFPClasses, |
| 2259 | /*IsCanonicalizing=*/true); |
| 2260 | } |
| 2261 | |
| 2262 | static Value * |
| 2263 | simplifyDemandedUseFPClassFPTrunc(InstCombinerImpl &IC, Instruction &I, |
| 2264 | FastMathFlags FMF, FPClassTest DemandedMask, |
| 2265 | KnownFPClass &Known, unsigned Depth) { |
| 2266 | |
| 2267 | FPClassTest SrcDemandedMask = DemandedMask; |
| 2268 | if (DemandedMask & fcNan) |
| 2269 | SrcDemandedMask |= fcNan; |
| 2270 | |
| 2271 | // Zero results may have been rounded from subnormal or normal sources. |
| 2272 | if (DemandedMask & fcNegZero) |
| 2273 | SrcDemandedMask |= fcNegSubnormal | fcNegNormal; |
| 2274 | if (DemandedMask & fcPosZero) |
| 2275 | SrcDemandedMask |= fcPosSubnormal | fcPosNormal; |
| 2276 | |
| 2277 | // Subnormal results may have been normal in the source type |
| 2278 | if (DemandedMask & fcNegSubnormal) |
| 2279 | SrcDemandedMask |= fcNegNormal; |
| 2280 | if (DemandedMask & fcPosSubnormal) |
| 2281 | SrcDemandedMask |= fcPosNormal; |
| 2282 | |
| 2283 | if (DemandedMask & fcPosInf) |
| 2284 | SrcDemandedMask |= fcPosNormal; |
| 2285 | if (DemandedMask & fcNegInf) |
| 2286 | SrcDemandedMask |= fcNegNormal; |
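|      |   // E.g., demanding only fcPosZero from the fptrunc result still requires
|      |   // demanding fcPosZero, fcPosSubnormal and fcPosNormal from the source,
|      |   // since a small positive source value may round to +0 in the smaller type.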
| 2287 | |
| 2288 | KnownFPClass KnownSrc; |
| 2289 | if (IC.SimplifyDemandedFPClass(I: &I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 2290 | return &I; |
| 2291 | |
| 2292 | Known = KnownFPClass::fptrunc(KnownSrc); |
| 2293 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2294 | |
| 2295 | return simplifyDemandedFPClassResult(FPOp: &I, FMF, DemandedMask, Known, |
| 2296 | KnownSrcs: {KnownSrc}); |
| 2297 | } |
| 2298 | |
| 2299 | Value *InstCombinerImpl::SimplifyDemandedUseFPClass(Instruction *I, |
| 2300 | FPClassTest DemandedMask, |
| 2301 | KnownFPClass &Known, |
| 2302 | Instruction *CxtI, |
| 2303 | unsigned Depth) { |
| 2304 | assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth" ); |
| 2305 | assert(Known == KnownFPClass() && "expected uninitialized state" ); |
| 2306 | assert(I->hasOneUse() && "wrong version called" ); |
| 2307 | |
| 2308 | Type *VTy = I->getType(); |
| 2309 | |
| 2310 | FastMathFlags FMF; |
| 2311 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: I)) { |
| 2312 | FMF = FPOp->getFastMathFlags(); |
| 2313 | DemandedMask = adjustDemandedMaskFromFlags(DemandedMask, FMF); |
| 2314 | } |
| 2315 | |
| 2316 | switch (I->getOpcode()) { |
| 2317 | case Instruction::FNeg: { |
| 2318 | // Special case fneg(fabs(x)) |
| 2319 | |
| 2320 | Value *FNegSrc = I->getOperand(i: 0); |
| 2321 | Value *FNegFAbsSrc; |
| 2322 | if (match(V: FNegSrc, P: m_OneUse(SubPattern: m_FAbs(Op0: m_Value(V&: FNegFAbsSrc))))) { |
| 2323 | KnownFPClass KnownSrc; |
| 2324 | if (SimplifyDemandedFPClass(I: cast<Instruction>(Val: FNegSrc), Op: 0, |
| 2325 | DemandedMask: llvm::unknown_sign(Mask: DemandedMask), Known&: KnownSrc, |
| 2326 | Depth: Depth + 1)) |
| 2327 | return I; |
| 2328 | |
| 2329 | FastMathFlags FabsFMF = cast<FPMathOperator>(Val: FNegSrc)->getFastMathFlags(); |
| 2330 | FPClassTest ThisDemandedMask = |
| 2331 | adjustDemandedMaskFromFlags(DemandedMask, FMF: FabsFMF); |
| 2332 | |
| 2333 | bool IsNSZ = FMF.noSignedZeros() || FabsFMF.noSignedZeros(); |
| 2334 | if (Value *Simplified = simplifyDemandedFPClassFnegFabs( |
| 2335 | Known, Src: FNegFAbsSrc, DemandedMask: ThisDemandedMask, KnownSrc, NSZ: IsNSZ)) |
| 2336 | return Simplified; |
| 2337 | |
| 2338 | if ((ThisDemandedMask & fcNan) == fcNone) |
| 2339 | KnownSrc.knownNot(RuleOut: fcNan); |
| 2340 | if ((ThisDemandedMask & fcInf) == fcNone) |
| 2341 | KnownSrc.knownNot(RuleOut: fcInf); |
| 2342 | |
| 2343 | // fneg(fabs(x)) => fneg(x) |
| 2344 | if (KnownSrc.SignBit == false) |
| 2345 | return replaceOperand(I&: *I, OpNum: 0, V: FNegFAbsSrc); |
| 2346 | |
| 2347 | // fneg(fabs(x)) => fneg(x), ignoring -0 if nsz. |
| 2348 | if (IsNSZ && |
| 2349 | KnownSrc.isKnownNever(Mask: KnownFPClass::OrderedLessThanZeroMask | fcNan)) |
| 2350 | return replaceOperand(I&: *I, OpNum: 0, V: FNegFAbsSrc); |
| 2351 | |
| 2352 | break; |
| 2353 | } |
| 2354 | |
| 2355 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: llvm::fneg(Mask: DemandedMask), Known, |
| 2356 | Depth: Depth + 1)) |
| 2357 | return I; |
| 2358 | Known.fneg(); |
| 2359 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2360 | break; |
| 2361 | } |
| 2362 | case Instruction::FAdd: |
| 2363 | case Instruction::FSub: { |
| 2364 | KnownFPClass KnownLHS, KnownRHS; |
| 2365 | |
| 2366 | // fadd x, x can be handled more aggressively. |
| 2367 | if (I->getOperand(i: 0) == I->getOperand(i: 1) && |
| 2368 | I->getOpcode() == Instruction::FAdd && |
| 2369 | isGuaranteedNotToBeUndef(V: I->getOperand(i: 0), AC: SQ.AC, CtxI: CxtI, DT: SQ.DT, |
| 2370 | Depth: Depth + 1)) { |
| 2371 | Type *EltTy = VTy->getScalarType(); |
| 2372 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2373 | |
| 2374 | FPClassTest SrcDemandedMask = DemandedMask; |
| 2375 | if (DemandedMask & fcNan) |
| 2376 | SrcDemandedMask |= fcNan; |
| 2377 | |
| 2378 | // Doubling a subnormal could have resulted in a normal value. |
| 2379 | if (DemandedMask & fcPosNormal) |
| 2380 | SrcDemandedMask |= fcPosSubnormal; |
| 2381 | if (DemandedMask & fcNegNormal) |
| 2382 | SrcDemandedMask |= fcNegSubnormal; |
| 2383 | |
| 2384 | // Doubling a subnormal may produce 0 if FTZ/DAZ. |
| 2385 | if (Mode != DenormalMode::getIEEE()) { |
| 2386 | if (DemandedMask & fcPosZero) { |
| 2387 | SrcDemandedMask |= fcPosSubnormal; |
| 2388 | |
| 2389 | if (Mode.inputsMayBePositiveZero() || Mode.outputsMayBePositiveZero()) |
| 2390 | SrcDemandedMask |= fcNegSubnormal; |
| 2391 | } |
| 2392 | |
| 2393 | if (DemandedMask & fcNegZero) |
| 2394 | SrcDemandedMask |= fcNegSubnormal; |
| 2395 | } |
| 2396 | |
| 2397 | // Doubling a normal could have resulted in an infinity. |
| 2398 | if (DemandedMask & fcPosInf) |
| 2399 | SrcDemandedMask |= fcPosNormal; |
| 2400 | if (DemandedMask & fcNegInf) |
| 2401 | SrcDemandedMask |= fcNegNormal; |
| 2402 | |
| 2403 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 2404 | return I; |
| 2405 | |
| 2406 | Known = KnownFPClass::fadd_self(Src: KnownLHS, Mode); |
| 2407 | KnownRHS = KnownLHS; |
| 2408 | } else { |
| 2409 | FPClassTest SrcDemandedMask = fcFinite; |
| 2410 | |
| 2411 | // inf + (-inf) = nan |
| 2412 | if (DemandedMask & fcNan) |
| 2413 | SrcDemandedMask |= fcNan | fcInf; |
| 2414 | |
| 2415 | if (DemandedMask & fcInf) |
| 2416 | SrcDemandedMask |= fcInf; |
| 2417 | |
| 2418 | if (SimplifyDemandedFPClass(I, Op: 1, DemandedMask: SrcDemandedMask, Known&: KnownRHS, Depth: Depth + 1) || |
| 2419 | SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 2420 | return I; |
| 2421 | |
| 2422 | Type *EltTy = VTy->getScalarType(); |
| 2423 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2424 | |
| 2425 | Known = I->getOpcode() == Instruction::FAdd |
| 2426 | ? KnownFPClass::fadd(LHS: KnownLHS, RHS: KnownRHS, Mode) |
| 2427 | : KnownFPClass::fsub(LHS: KnownLHS, RHS: KnownRHS, Mode); |
| 2428 | } |
| 2429 | |
| 2430 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2431 | |
| 2432 | if (Constant *SingleVal = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses, |
| 2433 | /*IsCanonicalizing=*/true)) |
| 2434 | return SingleVal; |
| 2435 | |
| 2436 | // Propagate known result to simplify edge case checks. |
| 2437 | bool ResultNotNan = (DemandedMask & fcNan) == fcNone; |
| 2438 | |
| 2439 | // With nnan: X + {+/-}Inf --> {+/-}Inf |
| 2440 | if (ResultNotNan && I->getOpcode() == Instruction::FAdd && |
| 2441 | KnownRHS.isKnownAlways(Mask: fcInf | fcNan) && KnownLHS.isKnownNever(Mask: fcNan)) |
| 2442 | return I->getOperand(i: 1); |
| 2443 | |
| 2444 | // With nnan: {+/-}Inf + X --> {+/-}Inf |
| 2445 | // With nnan: {+/-}Inf - X --> {+/-}Inf |
| 2446 | if (ResultNotNan && KnownLHS.isKnownAlways(Mask: fcInf | fcNan) && |
| 2447 | KnownRHS.isKnownNever(Mask: fcNan)) |
| 2448 | return I->getOperand(i: 0); |
| 2449 | |
| 2450 | FastMathFlags InferredFMF = inferFastMathValueFlags( |
| 2451 | FMF, ValidResults: Known.KnownFPClasses, Known: {KnownLHS, KnownRHS}); |
| 2452 | if (InferredFMF != FMF) { |
| 2453 | I->setFastMathFlags(InferredFMF); |
| 2454 | return I; |
| 2455 | } |
| 2456 | |
| 2457 | return nullptr; |
| 2458 | } |
| 2459 | case Instruction::FMul: { |
| 2460 | KnownFPClass KnownLHS, KnownRHS; |
| 2461 | |
| 2462 | Value *X = I->getOperand(i: 0); |
| 2463 | Value *Y = I->getOperand(i: 1); |
| 2464 | |
| 2465 | FPClassTest SrcDemandedMask = |
| 2466 | DemandedMask & (fcNan | fcZero | fcSubnormal | fcNormal); |
| 2467 | |
| 2468 | if (DemandedMask & fcInf) { |
| 2469 | // mul x, inf = inf |
| 2470 | // mul large_x, large_y = inf |
| 2471 | SrcDemandedMask |= fcSubnormal | fcNormal | fcInf; |
| 2472 | } |
| 2473 | |
| 2474 | if (DemandedMask & fcNan) { |
| 2475 | // mul +/-inf, 0 => nan |
| 2476 | SrcDemandedMask |= fcZero | fcInf | fcNan; |
| 2477 | |
| 2478 | // TODO: Mode check |
| 2479 | // mul +/-inf, sub => nan if daz |
| 2480 | SrcDemandedMask |= fcSubnormal; |
| 2481 | } |
| 2482 | |
| 2483 | // mul normal, subnormal = normal |
| 2484 | // Normal inputs may result in underflow. |
| 2485 | if (DemandedMask & (fcNormal | fcSubnormal)) |
| 2486 | SrcDemandedMask |= fcNormal | fcSubnormal; |
| 2487 | |
| 2488 | if (DemandedMask & fcZero) |
| 2489 | SrcDemandedMask |= fcNormal | fcSubnormal; |
| 2490 | |
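|      | // fmul x, x is a square and can never produce a result in a negative
|      | // class, so it can be handled more precisely. The not-undef check
|      | // guarantees both operands see the same value.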
| 2491 | if (X == Y && isGuaranteedNotToBeUndef(V: X, AC: SQ.AC, CtxI: CxtI, DT: SQ.DT, Depth: Depth + 1)) { |
| 2492 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 2493 | return I; |
| 2494 | Type *EltTy = VTy->getScalarType(); |
| 2495 | |
| 2496 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2497 | Known = KnownFPClass::square(Src: KnownLHS, Mode); |
| 2498 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2499 | |
| 2500 | if (Constant *Folded = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses, |
| 2501 | /*IsCanonicalizing=*/true)) |
| 2502 | return Folded; |
| 2503 | |
| 2504 | if (Known.isKnownAlways(Mask: fcPosZero | fcPosInf | fcNan) && |
| 2505 | KnownLHS.isKnownNever(Mask: fcSubnormal | fcNormal)) { |
| 2506 | // We can skip the fabs if the source was already known positive. |
| 2507 | if (KnownLHS.isKnownAlways(Mask: fcPositive)) |
| 2508 | return X; |
| 2509 | |
| 2510 | // => fabs(x), in case this was a -inf or -0. |
| 2511 | // Note: Dropping canonicalize. |
| 2512 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 2513 | Builder.SetInsertPoint(I); |
| 2514 | Value *Fabs = Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: X, FMFSource: FMF); |
| 2515 | Fabs->takeName(V: I); |
| 2516 | return Fabs; |
| 2517 | } |
| 2518 | |
| 2519 | return nullptr; |
| 2520 | } |
| 2521 | |
| 2522 | if (SimplifyDemandedFPClass(I, Op: 1, DemandedMask: SrcDemandedMask, Known&: KnownRHS, Depth: Depth + 1) || |
| 2523 | SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 2524 | return I; |
| 2525 | |
| 2526 | if (FMF.noInfs()) { |
| 2527 | // Flag implies inputs cannot be infinity. |
| 2528 | KnownLHS.knownNot(RuleOut: fcInf); |
| 2529 | KnownRHS.knownNot(RuleOut: fcInf); |
| 2530 | } |
| 2531 | |
| 2532 | bool NonNanResult = (DemandedMask & fcNan) == fcNone; |
| 2533 | |
| 2534 | // With no-nans/no-infs: |
| 2535 | // X * 0.0 --> copysign(0.0, X) |
| 2536 | // X * -0.0 --> copysign(0.0, -X) |
| 2537 | if ((NonNanResult || KnownLHS.isKnownNeverInfOrNaN()) && |
| 2538 | KnownRHS.isKnownAlways(Mask: fcPosZero | fcNan)) { |
| 2539 | // => copysign(+0, lhs) |
| 2540 | // Note: Dropping canonicalize |
| 2541 | Value *Copysign = Builder.CreateCopySign(LHS: Y, RHS: X, FMFSource: FMF); |
| 2542 | Copysign->takeName(V: I); |
| 2543 | return Copysign; |
| 2544 | } |
| 2545 | |
| 2546 | if (KnownLHS.isKnownAlways(Mask: fcPosZero | fcNan) && |
| 2547 | (NonNanResult || KnownRHS.isKnownNeverInfOrNaN())) { |
| 2548 | // => copysign(+0, rhs) |
| 2549 | // Note: Dropping canonicalize |
| 2550 | Value *Copysign = Builder.CreateCopySign(LHS: X, RHS: Y, FMFSource: FMF); |
| 2551 | Copysign->takeName(V: I); |
| 2552 | return Copysign; |
| 2553 | } |
| 2554 | |
| 2555 | if ((NonNanResult || KnownLHS.isKnownNeverInfOrNaN()) && |
| 2556 | KnownRHS.isKnownAlways(Mask: fcNegZero | fcNan)) { |
| 2557 | // => copysign(0, fneg(lhs)) |
| 2558 | // Note: Dropping canonicalize |
| 2559 | Value *Copysign = |
| 2560 | Builder.CreateCopySign(LHS: Y, RHS: Builder.CreateFNegFMF(V: X, FMFSource: FMF), FMFSource: FMF); |
| 2561 | Copysign->takeName(V: I); |
| 2562 | return Copysign; |
| 2563 | } |
| 2564 | |
| 2565 | if (KnownLHS.isKnownAlways(Mask: fcNegZero | fcNan) && |
| 2566 | (NonNanResult || KnownRHS.isKnownNeverInfOrNaN())) { |
| 2567 | // => copysign(+0, fneg(rhs)) |
| 2568 | // Note: Dropping canonicalize |
| 2569 | Value *Copysign = |
| 2570 | Builder.CreateCopySign(LHS: X, RHS: Builder.CreateFNegFMF(V: Y, FMFSource: FMF), FMFSource: FMF); |
| 2571 | Copysign->takeName(V: I); |
| 2572 | return Copysign; |
| 2573 | } |
| 2574 | |
| 2575 | Type *EltTy = VTy->getScalarType(); |
| 2576 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2577 | |
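|      | // If one operand is +/-inf (or nan) and the other is strictly negative,
|      | // the product is the inf/nan operand with its sign flipped (a nan stays
|      | // a nan), i.e. just an fneg of that operand.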
| 2578 | if (KnownLHS.isKnownAlways(Mask: fcInf | fcNan) && |
| 2579 | (KnownRHS.isKnownNeverNaN() && |
| 2580 | KnownRHS.cannotBeOrderedGreaterEqZero(Mode))) { |
| 2581 | // Note: Dropping canonicalize |
| 2582 | Value *Neg = Builder.CreateFNegFMF(V: X, FMFSource: FMF); |
| 2583 | Neg->takeName(V: I); |
| 2584 | return Neg; |
| 2585 | } |
| 2586 | |
| 2587 | if (KnownRHS.isKnownAlways(Mask: fcInf | fcNan) && |
| 2588 | (KnownLHS.isKnownNeverNaN() && |
| 2589 | KnownLHS.cannotBeOrderedGreaterEqZero(Mode))) { |
| 2590 | // Note: Dropping canonicalize |
| 2591 | Value *Neg = Builder.CreateFNegFMF(V: Y, FMFSource: FMF); |
| 2592 | Neg->takeName(V: I); |
| 2593 | return Neg; |
| 2594 | } |
| 2595 | |
| 2596 | Known = KnownFPClass::fmul(LHS: KnownLHS, RHS: KnownRHS, Mode); |
| 2597 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2598 | |
| 2599 | if (Constant *SingleVal = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses, |
| 2600 | /*IsCanonicalizing=*/true)) |
| 2601 | return SingleVal; |
| 2602 | |
| 2603 | FastMathFlags InferredFMF = inferFastMathValueFlags( |
| 2604 | FMF, ValidResults: Known.KnownFPClasses, Known: {KnownLHS, KnownRHS}); |
| 2605 | if (InferredFMF != FMF) { |
| 2606 | I->setFastMathFlags(InferredFMF); |
| 2607 | return I; |
| 2608 | } |
| 2609 | |
| 2610 | return nullptr; |
| 2611 | } |
| 2612 | case Instruction::FDiv: { |
| 2613 | Value *X = I->getOperand(i: 0); |
| 2614 | Value *Y = I->getOperand(i: 1); |
| 2615 | if (X == Y && isGuaranteedNotToBeUndef(V: X, AC: SQ.AC, CtxI: CxtI, DT: SQ.DT, Depth: Depth + 1)) { |
| 2616 | // If the source is 0, inf or nan, the result is a nan |
| 2617 | |
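|      | // i.e. x / x --> (x == 0 || |x| == inf || isnan(x)) ? qnan : 1.0.
|      | // The unordered (ueq) compares fold the nan check into both tests.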
| 2618 | Value *IsZeroOrNan = Builder.CreateFCmpFMF( |
| 2619 | P: FCmpInst::FCMP_UEQ, LHS: I->getOperand(i: 0), RHS: ConstantFP::getZero(Ty: VTy), FMFSource: FMF); |
| 2620 | |
| 2621 | Value *Fabs = |
| 2622 | Builder.CreateUnaryIntrinsic(ID: Intrinsic::fabs, V: I->getOperand(i: 0), FMFSource: FMF); |
| 2623 | Value *IsInfOrNan = Builder.CreateFCmpFMF( |
| 2624 | P: FCmpInst::FCMP_UEQ, LHS: Fabs, RHS: ConstantFP::getInfinity(Ty: VTy), FMFSource: FMF); |
| 2625 | |
| 2626 | Value *IsInfOrZeroOrNan = Builder.CreateOr(LHS: IsInfOrNan, RHS: IsZeroOrNan); |
| 2627 | |
| 2628 | return Builder.CreateSelectFMFWithUnknownProfile( |
| 2629 | C: IsInfOrZeroOrNan, True: ConstantFP::getQNaN(Ty: VTy), |
| 2630 | False: ConstantFP::get( |
| 2631 | Ty: VTy, V: APFloat::getOne(Sem: VTy->getScalarType()->getFltSemantics())), |
| 2632 | FMFSource: FMF, DEBUG_TYPE); |
| 2633 | } |
| 2634 | |
| 2635 | Type *EltTy = VTy->getScalarType(); |
| 2636 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2637 | |
| 2638 | // Every output class could require denormal inputs (except for the |
| 2639 | // degenerate case of only-nan results, without DAZ). |
| 2640 | FPClassTest SrcDemandedMask = (DemandedMask & fcNan) | fcSubnormal; |
| 2641 | |
| 2642 | // Normal inputs may result in underflow. |
| 2643 | // x / x = 1.0 when x is not 0/inf/nan.
| 2644 | // -x = +y / -z |
| 2645 | // -x = -y / +z |
| 2646 | if (DemandedMask & (fcSubnormal | fcNormal)) |
| 2647 | SrcDemandedMask |= fcNormal; |
| 2648 | |
| 2649 | if (DemandedMask & fcNan) { |
| 2650 | // 0 / 0 = nan |
| 2651 | // inf / inf = nan |
| 2652 | |
| 2653 | // Subnormal is added in case of DAZ, but this isn't strictly |
| 2654 | // necessary. Every other input class implies a possible subnormal source, |
| 2655 | // so this could only matter in the degenerate case of only-nan results.
| 2656 | SrcDemandedMask |= fcZero | fcInf | fcNan; |
| 2657 | } |
| 2658 | |
| 2659 | // Zero outputs may be the result of underflow. |
| 2660 | if (DemandedMask & fcZero) |
| 2661 | SrcDemandedMask |= fcNormal | fcSubnormal; |
| 2662 | |
| 2663 | FPClassTest LHSDemandedMask = SrcDemandedMask; |
| 2664 | FPClassTest RHSDemandedMask = SrcDemandedMask; |
| 2665 | |
| 2666 | // 0 / inf = 0 |
| 2667 | if (DemandedMask & fcZero) { |
| 2668 | assert((LHSDemandedMask & fcSubnormal) && |
| 2669 | "should not have to worry about daz here" ); |
| 2670 | LHSDemandedMask |= fcZero; |
| 2671 | RHSDemandedMask |= fcInf; |
| 2672 | } |
| 2673 | |
| 2674 | // x / 0 = inf |
| 2675 | // large_normal / small_normal = inf |
| 2676 | // inf / 1 = inf |
| 2677 | // large_normal / subnormal = inf |
| 2678 | if (DemandedMask & fcInf) { |
| 2679 | LHSDemandedMask |= fcInf | fcNormal | fcSubnormal; |
| 2680 | RHSDemandedMask |= fcZero | fcSubnormal | fcNormal; |
| 2681 | } |
| 2682 | |
| 2683 | KnownFPClass KnownLHS, KnownRHS; |
| 2684 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: LHSDemandedMask, Known&: KnownLHS, Depth: Depth + 1) || |
| 2685 | SimplifyDemandedFPClass(I, Op: 1, DemandedMask: RHSDemandedMask, Known&: KnownRHS, Depth: Depth + 1)) |
| 2686 | return I; |
| 2687 | |
| 2688 | // nsz [+-]0 / x -> 0 |
| 2689 | if (FMF.noSignedZeros() && KnownLHS.isKnownAlways(Mask: fcZero) && |
| 2690 | KnownRHS.isKnownNeverNaN()) |
| 2691 | return ConstantFP::getZero(Ty: VTy); |
| 2692 | |
| 2693 | if (KnownLHS.isKnownAlways(Mask: fcPosZero) && KnownRHS.isKnownNeverNaN()) { |
| 2694 | // nnan +0 / x -> copysign(0, rhs) |
| 2695 | // TODO: -0 / x => copysign(0, fneg(rhs)) |
| 2696 | Value *Copysign = Builder.CreateCopySign(LHS: X, RHS: Y, FMFSource: FMF); |
| 2697 | Copysign->takeName(V: I); |
| 2698 | return Copysign; |
| 2699 | } |
| 2700 | |
| 2701 | bool ResultNotNan = (DemandedMask & fcNan) == fcNone; |
| 2702 | bool ResultNotInf = (DemandedMask & fcInf) == fcNone; |
| 2703 | |
| 2704 | if (!ResultNotInf && |
| 2705 | ((ResultNotNan || (KnownLHS.isKnownNeverNaN() && |
| 2706 | KnownLHS.isKnownNeverLogicalZero(Mode))) && |
| 2707 | (KnownRHS.isKnownAlways(Mask: fcPosZero) || |
| 2708 | (FMF.noSignedZeros() && KnownRHS.isKnownAlways(Mask: fcZero))))) { |
| 2709 | // nnan x / 0 => copysign(inf, x); |
| 2710 | // nnan nsz x / -0 => copysign(inf, x); |
| 2711 | Value *Copysign = |
| 2712 | Builder.CreateCopySign(LHS: ConstantFP::getInfinity(Ty: VTy), RHS: X, FMFSource: FMF); |
| 2713 | Copysign->takeName(V: I); |
| 2714 | return Copysign; |
| 2715 | } |
| 2716 | |
| 2717 | // nnan ninf X / [-]0.0 -> poison |
| 2718 | if (ResultNotNan && ResultNotInf && KnownRHS.isKnownAlways(Mask: fcZero)) |
| 2719 | return PoisonValue::get(T: VTy); |
| 2720 | |
| 2721 | Known = KnownFPClass::fdiv(LHS: KnownLHS, RHS: KnownRHS, Mode); |
| 2722 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2723 | |
| 2724 | if (Constant *SingleVal = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses, |
| 2725 | /*IsCanonicalizing=*/true)) |
| 2726 | return SingleVal; |
| 2727 | |
| 2728 | FastMathFlags InferredFMF = inferFastMathValueFlags( |
| 2729 | FMF, ValidResults: Known.KnownFPClasses, Known: {KnownLHS, KnownRHS}); |
| 2730 | if (InferredFMF != FMF) { |
| 2731 | I->setFastMathFlags(InferredFMF); |
| 2732 | return I; |
| 2733 | } |
| 2734 | |
| 2735 | return nullptr; |
| 2736 | } |
| 2737 | case Instruction::FPTrunc: |
| 2738 | return simplifyDemandedUseFPClassFPTrunc(IC&: *this, I&: *I, FMF, DemandedMask, |
| 2739 | Known, Depth); |
| 2740 | case Instruction::FPExt: { |
| 2741 | FPClassTest SrcDemandedMask = DemandedMask; |
| 2742 | if (DemandedMask & fcNan) |
| 2743 | SrcDemandedMask |= fcNan; |
| 2744 | |
| 2745 | // A normal result may come from a value that is subnormal in the narrower source type.
| 2746 | if ((DemandedMask & fcNegNormal) != fcNone) |
| 2747 | SrcDemandedMask |= fcNegSubnormal; |
| 2748 | if ((DemandedMask & fcPosNormal) != fcNone) |
| 2749 | SrcDemandedMask |= fcPosSubnormal; |
| 2750 | |
| 2751 | KnownFPClass KnownSrc; |
| 2752 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 2753 | return I; |
| 2754 | |
| 2755 | const fltSemantics &DstTy = VTy->getScalarType()->getFltSemantics(); |
| 2756 | const fltSemantics &SrcTy = |
| 2757 | I->getOperand(i: 0)->getType()->getScalarType()->getFltSemantics(); |
| 2758 | |
| 2759 | Known = KnownFPClass::fpext(KnownSrc, DstTy, SrcTy); |
| 2760 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2761 | |
| 2762 | return simplifyDemandedFPClassResult(FPOp: I, FMF, DemandedMask, Known, |
| 2763 | KnownSrcs: {KnownSrc}); |
| 2764 | } |
| 2765 | case Instruction::Call: { |
| 2766 | CallInst *CI = cast<CallInst>(Val: I); |
| 2767 | const Intrinsic::ID IID = CI->getIntrinsicID(); |
| 2768 | switch (IID) { |
| 2769 | case Intrinsic::fabs: { |
| 2770 | KnownFPClass KnownSrc; |
| 2771 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: llvm::inverse_fabs(Mask: DemandedMask), |
| 2772 | Known&: KnownSrc, Depth: Depth + 1)) |
| 2773 | return I; |
| 2774 | |
| 2775 | if (Value *Simplified = simplifyDemandedFPClassFabs( |
| 2776 | Known, Src: CI->getArgOperand(i: 0), DemandedMask, KnownSrc, |
| 2777 | NSZ: FMF.noSignedZeros())) |
| 2778 | return Simplified; |
| 2779 | break; |
| 2780 | } |
| 2781 | case Intrinsic::arithmetic_fence: |
| 2782 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask, Known, Depth: Depth + 1)) |
| 2783 | return I; |
| 2784 | break; |
| 2785 | case Intrinsic::copysign: { |
| 2786 | // Flip on more potentially demanded classes |
| 2787 | const FPClassTest DemandedMaskAnySign = llvm::unknown_sign(Mask: DemandedMask); |
| 2788 | KnownFPClass KnownMag; |
| 2789 | if (SimplifyDemandedFPClass(I: CI, Op: 0, DemandedMask: DemandedMaskAnySign, Known&: KnownMag, |
| 2790 | Depth: Depth + 1)) |
| 2791 | return I; |
| 2792 | |
| 2793 | if ((DemandedMask & fcNegative) == DemandedMask) { |
| 2794 | // Roundabout way of replacing with fneg(fabs) |
| 2795 | CI->setOperand(i_nocapture: 1, Val_nocapture: ConstantFP::get(Ty: VTy, V: -1.0)); |
| 2796 | return I; |
| 2797 | } |
| 2798 | |
| 2799 | if ((DemandedMask & fcPositive) == DemandedMask) { |
| 2800 | // Roundabout way of replacing with fabs |
| 2801 | CI->setOperand(i_nocapture: 1, Val_nocapture: ConstantFP::getZero(Ty: VTy)); |
| 2802 | return I; |
| 2803 | } |
| 2804 | |
| 2805 | if (Value *Simplified = simplifyDemandedFPClassCopysignMag( |
| 2806 | MagSrc: CI->getArgOperand(i: 0), DemandedMask, KnownSrc: KnownMag, |
| 2807 | NSZ: FMF.noSignedZeros())) |
| 2808 | return Simplified; |
| 2809 | |
| 2810 | KnownFPClass KnownSign = computeKnownFPClass(Val: CI->getArgOperand(i: 1), |
| 2811 | Interested: fcAllFlags, CtxI: CxtI, Depth: Depth + 1); |
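|      | // If the magnitude operand is already known to have the same sign as the
|      | // sign operand, the copysign is a no-op.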
| 2812 | if (KnownMag.SignBit && KnownSign.SignBit && |
| 2813 | *KnownMag.SignBit == *KnownSign.SignBit) |
| 2814 | return CI->getOperand(i_nocapture: 0); |
| 2815 | |
| 2816 | // TODO: Call argument attribute not considered |
| 2817 | // Input implied not-nan from flag. |
| 2818 | if (FMF.noNaNs()) |
| 2819 | KnownSign.knownNot(RuleOut: fcNan); |
| 2820 | |
| 2821 | if (KnownSign.SignBit == false) { |
| 2822 | CI->dropUBImplyingAttrsAndMetadata(); |
| 2823 | CI->setOperand(i_nocapture: 1, Val_nocapture: ConstantFP::getZero(Ty: VTy)); |
| 2824 | return I; |
| 2825 | } |
| 2826 | |
| 2827 | if (KnownSign.SignBit == true) { |
| 2828 | CI->dropUBImplyingAttrsAndMetadata(); |
| 2829 | CI->setOperand(i_nocapture: 1, Val_nocapture: ConstantFP::get(Ty: VTy, V: -1.0)); |
| 2830 | return I; |
| 2831 | } |
| 2832 | |
| 2833 | Known = KnownFPClass::copysign(KnownMag, KnownSign); |
| 2834 | Known.knownNot(RuleOut: ~DemandedMask); |
| 2835 | break; |
| 2836 | } |
| 2837 | case Intrinsic::fma: |
| 2838 | case Intrinsic::fmuladd: { |
| 2839 | // We can't do any simplification on the source besides stripping out |
| 2840 | // unneeded nans. |
| 2841 | FPClassTest SrcDemandedMask = DemandedMask | ~fcNan; |
| 2842 | if (DemandedMask & fcNan) |
| 2843 | SrcDemandedMask |= fcNan; |
| 2844 | |
| 2845 | KnownFPClass KnownSrc[3]; |
| 2846 | |
| 2847 | Type *EltTy = VTy->getScalarType(); |
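|      | // fma(x, x, y): the product term is a square, which gives tighter known
|      | // classes than treating the two multiplicands independently.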
| 2848 | if (CI->getArgOperand(i: 0) == CI->getArgOperand(i: 1) && |
| 2849 | isGuaranteedNotToBeUndef(V: CI->getArgOperand(i: 0), AC: SQ.AC, CtxI: CxtI, DT: SQ.DT, |
| 2850 | Depth: Depth + 1)) { |
| 2851 | if (SimplifyDemandedFPClass(I: CI, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownSrc[0], |
| 2852 | Depth: Depth + 1) || |
| 2853 | SimplifyDemandedFPClass(I: CI, Op: 2, DemandedMask: SrcDemandedMask, Known&: KnownSrc[2], |
| 2854 | Depth: Depth + 1)) |
| 2855 | return I; |
| 2856 | |
| 2857 | KnownSrc[1] = KnownSrc[0]; |
| 2858 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2859 | Known = KnownFPClass::fma_square(Squared: KnownSrc[0], Addend: KnownSrc[2], Mode); |
| 2860 | } else { |
| 2861 | for (int OpIdx = 0; OpIdx != 3; ++OpIdx) { |
| 2862 | if (SimplifyDemandedFPClass(I: CI, Op: OpIdx, DemandedMask: SrcDemandedMask, |
| 2863 | Known&: KnownSrc[OpIdx], Depth: Depth + 1)) |
| 2864 | return CI; |
| 2865 | } |
| 2866 | |
| 2867 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2868 | Known = KnownFPClass::fma(LHS: KnownSrc[0], RHS: KnownSrc[1], Addend: KnownSrc[2], Mode); |
| 2869 | } |
| 2870 | |
| 2871 | return simplifyDemandedFPClassResult(FPOp: CI, FMF, DemandedMask, Known, |
| 2872 | KnownSrcs: {KnownSrc}); |
| 2873 | } |
| 2874 | case Intrinsic::maximum: |
| 2875 | case Intrinsic::minimum: |
| 2876 | case Intrinsic::maximumnum: |
| 2877 | case Intrinsic::minimumnum: |
| 2878 | case Intrinsic::maxnum: |
| 2879 | case Intrinsic::minnum: { |
| 2880 | const bool PropagateNaN = |
| 2881 | IID == Intrinsic::maximum || IID == Intrinsic::minimum; |
| 2882 | |
| 2883 | // We can't tell much based on the demanded result without inspecting the |
| 2884 | // operands (e.g., a known-positive result could have been clamped), but |
| 2885 | // we can still prune known-nan inputs. |
| 2886 | FPClassTest SrcDemandedMask = |
| 2887 | PropagateNaN && ((DemandedMask & fcNan) == fcNone) |
| 2888 | ? DemandedMask | ~fcNan |
| 2889 | : fcAllFlags; |
| 2890 | |
| 2891 | KnownFPClass KnownLHS, KnownRHS; |
| 2892 | if (SimplifyDemandedFPClass(I: CI, Op: 1, DemandedMask: SrcDemandedMask, Known&: KnownRHS, |
| 2893 | Depth: Depth + 1) || |
| 2894 | SimplifyDemandedFPClass(I: CI, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 2895 | return I; |
| 2896 | |
| 2897 | Value *Simplified = |
| 2898 | simplifyDemandedFPClassMinMax(Known, IID, CI, DemandedMask, KnownLHS, |
| 2899 | KnownRHS, F, NSZ: FMF.noSignedZeros()); |
| 2900 | if (Simplified) |
| 2901 | return Simplified; |
| 2902 | |
| 2903 | auto *FPOp = cast<FPMathOperator>(Val: CI); |
| 2904 | |
| 2905 | FPClassTest ValidResults = DemandedMask & Known.KnownFPClasses; |
| 2906 | FastMathFlags InferredFMF = FMF; |
| 2907 | |
| 2908 | if (!FMF.noSignedZeros()) { |
| 2909 | // Add NSZ flag if we know the result will not be sensitive to the sign |
| 2910 | // of 0. |
| 2911 | FPClassTest ZeroMask = fcZero; |
| 2912 | |
| 2913 | Type *EltTy = VTy->getScalarType(); |
| 2914 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 2915 | if (Mode != DenormalMode::getIEEE()) |
| 2916 | ZeroMask |= fcSubnormal; |
| 2917 | |
| 2918 | bool ResultNotLogical0 = (ValidResults & ZeroMask) == fcNone; |
| 2919 | if (ResultNotLogical0 || ((KnownLHS.isKnownNeverLogicalNegZero(Mode) || |
| 2920 | KnownRHS.isKnownNeverLogicalPosZero(Mode)) && |
| 2921 | (KnownLHS.isKnownNeverLogicalPosZero(Mode) || |
| 2922 | KnownRHS.isKnownNeverLogicalNegZero(Mode)))) |
| 2923 | InferredFMF.setNoSignedZeros(true); |
| 2924 | } |
| 2925 | |
| 2926 | if (!FMF.noNaNs() && |
| 2927 | ((PropagateNaN && (ValidResults & fcNan) == fcNone) || |
| 2928 | (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN()))) { |
| 2929 | CI->dropUBImplyingAttrsAndMetadata(); |
| 2930 | InferredFMF.setNoNaNs(true); |
| 2931 | } |
| 2932 | |
| 2933 | if (InferredFMF != FMF) { |
| 2934 | CI->setFastMathFlags(InferredFMF); |
| 2935 | return FPOp; |
| 2936 | } |
| 2937 | |
| 2938 | return nullptr; |
| 2939 | } |
| 2940 | case Intrinsic::exp: |
| 2941 | case Intrinsic::exp2: |
| 2942 | case Intrinsic::exp10: { |
| 2943 | if ((DemandedMask & fcPositive) == fcNone) { |
| 2944 | // Only returns positive values or nans. |
| 2945 | if ((DemandedMask & fcNan) == fcNone) |
| 2946 | return PoisonValue::get(T: VTy); |
| 2947 | |
| 2948 | // Only need nan propagation. |
| 2949 | if ((DemandedMask & ~fcNan) == fcNone) |
| 2950 | return ConstantFP::getQNaN(Ty: VTy); |
| 2951 | |
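|      | // exp only produces positive values or nans, and no positive class is
|      | // demanded here, so only nan propagation matters; the operand itself
|      | // already provides that.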
| 2952 | return CI->getArgOperand(i: 0); |
| 2953 | } |
| 2954 | |
| 2955 | FPClassTest SrcDemandedMask = DemandedMask & fcNan; |
| 2956 | if (DemandedMask & fcNan) |
| 2957 | SrcDemandedMask |= fcNan; |
| 2958 | |
| 2959 | if (DemandedMask & fcZero) { |
| 2960 | // exp(-infinity) = 0 |
| 2961 | SrcDemandedMask |= fcNegInf; |
| 2962 | |
| 2963 | // exp(-largest_normal) = 0 |
| 2964 | // |
| 2965 | // Negative numbers of sufficiently large magnitude underflow to 0. No |
| 2966 | // subnormal input has a 0 result. |
| 2967 | SrcDemandedMask |= fcNegNormal; |
| 2968 | } |
| 2969 | |
| 2970 | if (DemandedMask & fcPosSubnormal) { |
| 2971 | // exp of a negative normal of sufficiently large magnitude can underflow
| 2972 | // to a positive subnormal result.
| 2973 | SrcDemandedMask |= fcNegNormal; |
| 2974 | } |
| 2975 | |
| 2976 | if (DemandedMask & fcPosNormal) { |
| 2977 | // exp(0) = 1 |
| 2978 | // exp(+/- smallest_normal) = 1 |
| 2979 | // exp(+/- largest_denormal) = 1 |
| 2980 | // exp(+/- smallest_denormal) = 1 |
| 2981 | // exp(-1) = pos normal |
| 2982 | SrcDemandedMask |= fcNormal | fcSubnormal | fcZero; |
| 2983 | } |
| 2984 | |
| 2985 | // exp(inf), exp(largest_normal) = inf |
| 2986 | if (DemandedMask & fcPosInf) |
| 2987 | SrcDemandedMask |= fcPosInf | fcPosNormal; |
| 2988 | |
| 2989 | KnownFPClass KnownSrc; |
| 2990 | |
| 2991 | // TODO: This could really make use of KnownFPClass of specific value |
| 2992 | // range, (i.e., close enough to 1) |
| 2993 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 2994 | return I; |
| 2995 | |
| 2996 | // exp(+/-0) = 1 |
| 2997 | if (KnownSrc.isKnownAlways(Mask: fcZero)) |
| 2998 | return ConstantFP::get(Ty: VTy, V: 1.0); |
| 2999 | |
| 3000 | // Only perform nan propagation. |
| 3001 | // Note: Dropping canonicalize / quiet of signaling nan. |
| 3002 | if (KnownSrc.isKnownAlways(Mask: fcNan)) |
| 3003 | return CI->getArgOperand(i: 0); |
| 3004 | |
| 3005 | // exp(0 | nan) => x == 0.0 ? 1.0 : x |
| 3006 | if (KnownSrc.isKnownAlways(Mask: fcZero | fcNan)) { |
| 3007 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 3008 | Builder.SetInsertPoint(CI); |
| 3009 | |
| 3010 | // fadd +/-0, 1.0 => 1.0 |
| 3011 | // fadd nan, 1.0 => nan |
| 3012 | return Builder.CreateFAddFMF(L: CI->getArgOperand(i: 0), |
| 3013 | R: ConstantFP::get(Ty: VTy, V: 1.0), FMFSource: FMF); |
| 3014 | } |
| 3015 | |
| 3016 | if (KnownSrc.isKnownAlways(Mask: fcInf | fcNan)) { |
| 3017 | // exp(-inf) = 0 |
| 3018 | // exp(+inf) = +inf |
| 3019 | IRBuilderBase::InsertPointGuard Guard(Builder); |
| 3020 | Builder.SetInsertPoint(CI); |
| 3021 | |
| 3022 | // Note: Dropping canonicalize / quiet of signaling nan. |
| 3023 | Value *X = CI->getArgOperand(i: 0); |
| 3024 | Value *IsPosInfOrNan = Builder.CreateFCmpFMF( |
| 3025 | P: FCmpInst::FCMP_UEQ, LHS: X, RHS: ConstantFP::getInfinity(Ty: VTy), FMFSource: FMF); |
| 3026 | // We do not know whether an infinity or a NaN is more likely here, |
| 3027 | // so mark the branch weights as unknown.
| 3028 | Value *ZeroOrInf = Builder.CreateSelectFMFWithUnknownProfile( |
| 3029 | C: IsPosInfOrNan, True: X, False: ConstantFP::getZero(Ty: VTy), FMFSource: FMF, DEBUG_TYPE); |
| 3030 | return ZeroOrInf; |
| 3031 | } |
| 3032 | |
| 3033 | Known = KnownFPClass::exp(Src: KnownSrc); |
| 3034 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3035 | |
| 3036 | return simplifyDemandedFPClassResult(FPOp: CI, FMF, DemandedMask, Known, |
| 3037 | KnownSrcs: KnownSrc); |
| 3038 | } |
| 3039 | case Intrinsic::log: |
| 3040 | case Intrinsic::log2: |
| 3041 | case Intrinsic::log10: { |
| 3042 | FPClassTest DemandedSrcMask = DemandedMask & (fcNan | fcPosInf); |
| 3043 | if (DemandedMask & fcNan) |
| 3044 | DemandedSrcMask |= fcNan; |
| 3045 | |
| 3046 | Type *EltTy = VTy->getScalarType(); |
| 3047 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 3048 | |
| 3049 | // log(x < 0) = nan |
| 3050 | if (DemandedMask & fcNan) |
| 3051 | DemandedSrcMask |= (fcNegative & ~fcNegZero); |
| 3052 | |
| 3053 | // log(0) = -inf |
| 3054 | if (DemandedMask & fcNegInf) { |
| 3055 | DemandedSrcMask |= fcZero; |
| 3056 | |
| 3057 | // No value produces subnormal result. |
| 3058 | if (Mode.inputsMayBeZero()) |
| 3059 | DemandedSrcMask |= fcSubnormal; |
| 3060 | } |
| 3061 | |
| 3062 | if (DemandedMask & fcNormal) |
| 3063 | DemandedSrcMask |= fcNormal | fcSubnormal; |
| 3064 | |
| 3065 | // log(1) = 0 |
| 3066 | if (DemandedMask & fcZero) |
| 3067 | DemandedSrcMask |= fcPosNormal; |
| 3068 | |
| 3069 | KnownFPClass KnownSrc; |
| 3070 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: DemandedSrcMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 3071 | return I; |
| 3072 | |
| 3073 | Known = KnownFPClass::log(Src: KnownSrc, Mode); |
| 3074 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3075 | |
| 3076 | return simplifyDemandedFPClassResult(FPOp: CI, FMF, DemandedMask, Known, |
| 3077 | KnownSrcs: KnownSrc); |
| 3078 | } |
| 3079 | case Intrinsic::sqrt: { |
| 3080 | FPClassTest DemandedSrcMask = |
| 3081 | DemandedMask & (fcNegZero | fcPositive | fcNan); |
| 3082 | |
| 3083 | if (DemandedMask & fcNan) |
| 3084 | DemandedSrcMask |= fcNan | (fcNegative & ~fcNegZero); |
| 3085 | |
| 3086 | // sqrt(max_subnormal) is a normal value |
| 3087 | if (DemandedMask & fcPosNormal) |
| 3088 | DemandedSrcMask |= fcPosSubnormal; |
| 3089 | |
| 3090 | KnownFPClass KnownSrc; |
| 3091 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: DemandedSrcMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 3092 | return I; |
| 3093 | |
| 3094 | // Infer the source cannot be negative if the result cannot be nan. |
| 3095 | if ((DemandedMask & fcNan) == fcNone) |
| 3096 | KnownSrc.knownNot(RuleOut: (fcNegative & ~fcNegZero) | fcNan); |
| 3097 | |
| 3098 | // Infer the source cannot be +inf if the result is not +inf.
| 3099 | if ((DemandedMask & fcPosInf) == fcNone) |
| 3100 | KnownSrc.knownNot(RuleOut: fcPosInf); |
| 3101 | |
| 3102 | Type *EltTy = VTy->getScalarType(); |
| 3103 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 3104 | |
| 3105 | // sqrt(-x) = nan, but be careful of negative subnormals flushed to 0. |
| 3106 | if (KnownSrc.isKnownNever(Mask: fcPositive) && |
| 3107 | KnownSrc.isKnownNeverLogicalZero(Mode)) |
| 3108 | return ConstantFP::getQNaN(Ty: VTy); |
| 3109 | |
| 3110 | Known = KnownFPClass::sqrt(Src: KnownSrc, Mode); |
| 3111 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3112 | |
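|      | // If the only possible results are zeros, materialize the zero directly.
|      | // Without nsz the sign of the source still matters (sqrt(-0) = -0), so
|      | // use a copysign.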
| 3113 | if (Known.KnownFPClasses == fcZero) { |
| 3114 | if (FMF.noSignedZeros()) |
| 3115 | return ConstantFP::getZero(Ty: VTy); |
| 3116 | |
| 3117 | Value *Copysign = Builder.CreateCopySign(LHS: ConstantFP::getZero(Ty: VTy), |
| 3118 | RHS: CI->getArgOperand(i: 0), FMFSource: FMF); |
| 3119 | Copysign->takeName(V: CI); |
| 3120 | return Copysign; |
| 3121 | } |
| 3122 | |
| 3123 | return simplifyDemandedFPClassResult(FPOp: CI, FMF, DemandedMask, Known, |
| 3124 | KnownSrcs: {KnownSrc}); |
| 3125 | } |
| 3126 | case Intrinsic::trunc: |
| 3127 | case Intrinsic::floor: |
| 3128 | case Intrinsic::ceil: |
| 3129 | case Intrinsic::rint: |
| 3130 | case Intrinsic::nearbyint: |
| 3131 | case Intrinsic::round: |
| 3132 | case Intrinsic::roundeven: { |
| 3133 | FPClassTest DemandedSrcMask = DemandedMask; |
| 3134 | if (DemandedMask & fcNan) |
| 3135 | DemandedSrcMask |= fcNan; |
| 3136 | |
| 3137 | // Zero results can come from subnormal or normal sources that round to zero.
| 3138 | if (DemandedMask & fcNegZero) |
| 3139 | DemandedSrcMask |= fcNegSubnormal | fcNegNormal; |
| 3140 | |
| 3141 | if (DemandedMask & fcPosZero) |
| 3142 | DemandedSrcMask |= fcPosSubnormal | fcPosNormal; |
| 3143 | |
| 3144 | KnownFPClass KnownSrc; |
| 3145 | if (SimplifyDemandedFPClass(I: CI, Op: 0, DemandedMask: DemandedSrcMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 3146 | return I; |
| 3147 | |
| 3148 | // Note: Possibly dropping snan quiet. |
| 3149 | if (KnownSrc.isKnownAlways(Mask: fcInf | fcNan | fcZero)) |
| 3150 | return CI->getArgOperand(i: 0); |
| 3151 | |
| 3152 | bool IsRoundNearestOrTrunc = |
| 3153 | IID == Intrinsic::round || IID == Intrinsic::roundeven || |
| 3154 | IID == Intrinsic::nearbyint || IID == Intrinsic::rint || |
| 3155 | IID == Intrinsic::trunc; |
| 3156 | |
| 3157 | // Ignore denormals-as-zero, as canonicalization is not mandated. |
| 3158 | if ((IID == Intrinsic::floor || IsRoundNearestOrTrunc) && |
| 3159 | KnownSrc.isKnownAlways(Mask: fcPosZero | fcPosSubnormal)) |
| 3160 | return ConstantFP::getZero(Ty: VTy); |
| 3161 | |
| 3162 | if ((IID == Intrinsic::ceil || IsRoundNearestOrTrunc) && |
| 3163 | KnownSrc.isKnownAlways(Mask: fcNegZero | fcNegSubnormal)) |
| 3164 | return ConstantFP::getZero(Ty: VTy, Negative: true); |
| 3165 | |
| 3166 | if (IID == Intrinsic::floor && KnownSrc.isKnownAlways(Mask: fcNegSubnormal)) |
| 3167 | return ConstantFP::get(Ty: VTy, V: -1.0); |
| 3168 | |
| 3169 | if (IID == Intrinsic::ceil && KnownSrc.isKnownAlways(Mask: fcPosSubnormal)) |
| 3170 | return ConstantFP::get(Ty: VTy, V: 1.0); |
| 3171 | |
| 3172 | Known = KnownFPClass::roundToIntegral( |
| 3173 | Src: KnownSrc, IsTrunc: IID == Intrinsic::trunc, |
| 3174 | IsMultiUnitFPType: VTy->getScalarType()->isMultiUnitFPType()); |
| 3175 | |
| 3176 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3177 | |
| 3178 | if (Constant *SingleVal = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses, |
| 3179 | /*IsCanonicalizing=*/true)) |
| 3180 | return SingleVal; |
| 3181 | |
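|      | // Rounding a zero or subnormal toward zero or to nearest always produces
|      | // a zero with the sign of the source.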
| 3182 | if ((IID == Intrinsic::trunc || IsRoundNearestOrTrunc) && |
| 3183 | KnownSrc.isKnownAlways(Mask: fcZero | fcSubnormal)) { |
| 3184 | Value *Copysign = Builder.CreateCopySign(LHS: ConstantFP::getZero(Ty: VTy), |
| 3185 | RHS: CI->getArgOperand(i: 0)); |
| 3186 | Copysign->takeName(V: CI); |
| 3187 | return Copysign; |
| 3188 | } |
| 3189 | |
| 3190 | FastMathFlags InferredFMF = |
| 3191 | inferFastMathValueFlags(FMF, ValidResults: Known.KnownFPClasses, Known: KnownSrc); |
| 3192 | if (InferredFMF != FMF) { |
| 3193 | CI->dropUBImplyingAttrsAndMetadata(); |
| 3194 | CI->setFastMathFlags(InferredFMF); |
| 3195 | return CI; |
| 3196 | } |
| 3197 | |
| 3198 | return nullptr; |
| 3199 | } |
| 3200 | case Intrinsic::fptrunc_round: |
| 3201 | return simplifyDemandedUseFPClassFPTrunc(IC&: *this, I&: *CI, FMF, DemandedMask, |
| 3202 | Known, Depth); |
| 3203 | case Intrinsic::canonicalize: { |
| 3204 | Type *EltTy = VTy->getScalarType(); |
| 3205 | |
| 3206 | // TODO: This could have more refined support for PositiveZero denormal |
| 3207 | // mode. |
| 3208 | if (EltTy->isIEEELikeFPTy()) { |
| 3209 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 3210 | |
| 3211 | FPClassTest SrcDemandedMask = DemandedMask; |
| 3212 | |
| 3213 | // A demanded quiet nan result may have come from a signaling nan, so we |
| 3214 | // need to expand the demanded mask. |
| 3215 | if ((DemandedMask & fcQNan) != fcNone) |
| 3216 | SrcDemandedMask |= fcSNan; |
| 3217 | |
| 3218 | if (Mode != DenormalMode::getIEEE()) { |
| 3219 | // Any zero results may have come from flushed denormals. |
| 3220 | if (DemandedMask & fcPosZero) |
| 3221 | SrcDemandedMask |= fcPosSubnormal; |
| 3222 | if (DemandedMask & fcNegZero) |
| 3223 | SrcDemandedMask |= fcNegSubnormal; |
| 3224 | } |
| 3225 | |
| 3226 | if (Mode == DenormalMode::getPreserveSign()) { |
| 3227 | // If a denormal input will be flushed, and we don't need zeros, we |
| 3228 | // don't need denormals either. |
| 3229 | if ((DemandedMask & fcPosZero) == fcNone) |
| 3230 | SrcDemandedMask &= ~fcPosSubnormal; |
| 3231 | |
| 3232 | if ((DemandedMask & fcNegZero) == fcNone) |
| 3233 | SrcDemandedMask &= ~fcNegSubnormal; |
| 3234 | } |
| 3235 | |
| 3236 | KnownFPClass KnownSrc; |
| 3237 | |
| 3238 | // Simplify upstream operations before trying to simplify this call. |
| 3239 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownSrc, Depth: Depth + 1)) |
| 3240 | return I; |
| 3241 | |
| 3242 | // Perform the canonicalization to see if this folded to a constant. |
| 3243 | Known = KnownFPClass::canonicalize(Src: KnownSrc, DenormMode: Mode); |
| 3244 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3245 | |
| 3246 | if (Constant *SingleVal = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses)) |
| 3247 | return SingleVal; |
| 3248 | |
| 3249 | // For IEEE handling, there is only a bit change for nan inputs, so we |
| 3250 | // can drop it if we do not demand nan results or we know the input |
| 3251 | // isn't a nan. |
| 3252 | // Otherwise, we also need to avoid denormal inputs to drop the |
| 3253 | // canonicalize. |
| 3254 | if (KnownSrc.isKnownNeverNaN() && (Mode == DenormalMode::getIEEE() || |
| 3255 | KnownSrc.isKnownNeverSubnormal())) |
| 3256 | return CI->getArgOperand(i: 0); |
| 3257 | |
| 3258 | FastMathFlags InferredFMF = |
| 3259 | inferFastMathValueFlags(FMF, ValidResults: Known.KnownFPClasses, Known: KnownSrc); |
| 3260 | if (InferredFMF != FMF) { |
| 3261 | CI->dropUBImplyingAttrsAndMetadata(); |
| 3262 | CI->setFastMathFlags(InferredFMF); |
| 3263 | return CI; |
| 3264 | } |
| 3265 | |
| 3266 | return nullptr; |
| 3267 | } |
| 3268 | |
| 3269 | [[fallthrough]]; |
| 3270 | } |
| 3271 | default: |
| 3272 | Known = computeKnownFPClass(Val: I, Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3273 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3274 | break; |
| 3275 | } |
| 3276 | |
| 3277 | break; |
| 3278 | } |
| 3279 | case Instruction::Select: { |
| 3280 | KnownFPClass KnownLHS, KnownRHS; |
| 3281 | if (SimplifyDemandedFPClass(I, Op: 2, DemandedMask, Known&: KnownRHS, Depth: Depth + 1) || |
| 3282 | SimplifyDemandedFPClass(I, Op: 1, DemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 3283 | return I; |
| 3284 | |
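|      | // If one arm can never produce a demanded class, only the other arm can
|      | // matter for the demanded result.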
| 3285 | if (KnownLHS.isKnownNever(Mask: DemandedMask)) |
| 3286 | return I->getOperand(i: 2); |
| 3287 | if (KnownRHS.isKnownNever(Mask: DemandedMask)) |
| 3288 | return I->getOperand(i: 1); |
| 3289 | |
| 3290 | adjustKnownFPClassForSelectArm(Known&: KnownLHS, Cond: I->getOperand(i: 0), Arm: I->getOperand(i: 1), |
| 3291 | /*Invert=*/false, Q: SQ, Depth); |
| 3292 | adjustKnownFPClassForSelectArm(Known&: KnownRHS, Cond: I->getOperand(i: 0), Arm: I->getOperand(i: 2), |
| 3293 | /*Invert=*/true, Q: SQ, Depth); |
| 3294 | Known = KnownLHS.intersectWith(RHS: KnownRHS); |
| 3295 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3296 | break; |
| 3297 | } |
| 3298 | case Instruction::ExtractElement: { |
| 3299 | // TODO: Handle demanded element mask |
| 3300 | if (SimplifyDemandedFPClass(I, Op: 0, DemandedMask, Known, Depth: Depth + 1)) |
| 3301 | return I; |
| 3302 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3303 | break; |
| 3304 | } |
| 3305 | case Instruction::InsertElement: { |
| 3306 | KnownFPClass KnownInserted, KnownVec; |
| 3307 | if (SimplifyDemandedFPClass(I, Op: 1, DemandedMask, Known&: KnownInserted, Depth: Depth + 1) || |
| 3308 | SimplifyDemandedFPClass(I, Op: 0, DemandedMask, Known&: KnownVec, Depth: Depth + 1)) |
| 3309 | return I; |
| 3310 | |
| 3311 | // TODO: Use demanded elements logic from computeKnownFPClass |
| 3312 | Known = KnownVec | KnownInserted; |
| 3313 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3314 | break; |
| 3315 | } |
| 3316 | case Instruction::ShuffleVector: { |
| 3317 | KnownFPClass KnownLHS, KnownRHS; |
| 3318 | if (SimplifyDemandedFPClass(I, Op: 1, DemandedMask, Known&: KnownRHS, Depth: Depth + 1) || |
| 3319 | SimplifyDemandedFPClass(I, Op: 0, DemandedMask, Known&: KnownLHS, Depth: Depth + 1)) |
| 3320 | return I; |
| 3321 | |
| 3322 | // TODO: This is overly conservative and should consider demanded elements, |
| 3323 | // and splats. |
| 3324 | Known = KnownLHS | KnownRHS; |
| 3325 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3326 | break; |
| 3327 | } |
| 3328 | case Instruction::ExtractValue: { |
| 3329 | Value *ExtractSrc;
| 3330 | if (match(V: I, P: m_ExtractValue<0>(V: m_OneUse(SubPattern: m_Value(V&: ExtractSrc))))) { |
| 3331 | if (auto *II = dyn_cast<IntrinsicInst>(Val: ExtractSrc)) { |
| 3332 | const Intrinsic::ID IID = II->getIntrinsicID(); |
| 3333 | switch (IID) { |
| 3334 | case Intrinsic::frexp: { |
| 3335 | FPClassTest SrcDemandedMask = fcNone; |
| 3336 | if (DemandedMask & fcNan) |
| 3337 | SrcDemandedMask |= fcNan; |
| 3338 | if (DemandedMask & fcNegFinite) |
| 3339 | SrcDemandedMask |= fcNegFinite; |
| 3340 | if (DemandedMask & fcPosFinite) |
| 3341 | SrcDemandedMask |= fcPosFinite; |
| 3342 | if (DemandedMask & fcPosInf) |
| 3343 | SrcDemandedMask |= fcPosInf; |
| 3344 | if (DemandedMask & fcNegInf) |
| 3345 | SrcDemandedMask |= fcNegInf; |
| 3346 | |
| 3347 | KnownFPClass KnownSrc; |
| 3348 | if (SimplifyDemandedFPClass(I: II, Op: 0, DemandedMask: SrcDemandedMask, Known&: KnownSrc, |
| 3349 | Depth: Depth + 1)) |
| 3350 | return I; |
| 3351 | |
| 3352 | Type *EltTy = VTy->getScalarType(); |
| 3353 | DenormalMode Mode = F.getDenormalMode(FPType: EltTy->getFltSemantics()); |
| 3354 | |
| 3355 | Known = KnownFPClass::frexp_mant(Src: KnownSrc, Mode); |
| 3356 | Known.KnownFPClasses &= DemandedMask; |
| 3357 | |
| 3358 | if (Constant *SingleVal = |
| 3359 | getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses, |
| 3360 | /*IsCanonicalizing=*/true)) |
| 3361 | return SingleVal; |
| 3362 | |
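|      | // frexp returns inf and nan inputs unchanged as the mantissa result, so
|      | // the argument can be used directly.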
| 3363 | if (Known.isKnownAlways(Mask: fcInf | fcNan)) |
| 3364 | return II->getArgOperand(i: 0); |
| 3365 | |
| 3366 | return nullptr; |
| 3367 | } |
| 3368 | default: |
| 3369 | break; |
| 3370 | } |
| 3371 | } |
| 3372 | } |
| 3373 | [[fallthrough]]; |
| 3374 | } |
| 3375 | default: |
| 3376 | Known = computeKnownFPClass(Val: I, Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3377 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3378 | break; |
| 3379 | } |
| 3380 | |
| 3381 | return getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses); |
| 3382 | } |
| 3383 | |
| 3384 | /// Helper routine of SimplifyDemandedUseFPClass. It computes Known |
| 3385 | /// floating-point classes. It also tries to handle simplifications that can be |
| 3386 | /// done based on DemandedMask, but without modifying the Instruction. |
| 3387 | Value *InstCombinerImpl::SimplifyMultipleUseDemandedFPClass( |
| 3388 | Instruction *I, FPClassTest DemandedMask, KnownFPClass &Known, |
| 3389 | Instruction *CxtI, unsigned Depth) { |
| 3390 | FastMathFlags FMF; |
| 3391 | if (auto *FPOp = dyn_cast<FPMathOperator>(Val: I)) { |
| 3392 | FMF = FPOp->getFastMathFlags(); |
| 3393 | DemandedMask = adjustDemandedMaskFromFlags(DemandedMask, FMF); |
| 3394 | } |
| 3395 | |
| 3396 | switch (I->getOpcode()) { |
| 3397 | case Instruction::Select: { |
| 3398 | // TODO: Can we infer which side it came from based on adjusted result |
| 3399 | // class? |
| 3400 | KnownFPClass KnownRHS = |
| 3401 | computeKnownFPClass(Val: I->getOperand(i: 2), Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3402 | if (KnownRHS.isKnownNever(Mask: DemandedMask)) |
| 3403 | return I->getOperand(i: 1); |
| 3404 | |
| 3405 | KnownFPClass KnownLHS = |
| 3406 | computeKnownFPClass(Val: I->getOperand(i: 1), Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3407 | if (KnownLHS.isKnownNever(Mask: DemandedMask)) |
| 3408 | return I->getOperand(i: 2); |
| 3409 | |
| 3410 | adjustKnownFPClassForSelectArm(Known&: KnownLHS, Cond: I->getOperand(i: 0), Arm: I->getOperand(i: 1), |
| 3411 | /*Invert=*/false, Q: SQ, Depth); |
| 3412 | adjustKnownFPClassForSelectArm(Known&: KnownRHS, Cond: I->getOperand(i: 0), Arm: I->getOperand(i: 2), |
| 3413 | /*Invert=*/true, Q: SQ, Depth); |
| 3414 | Known = KnownLHS.intersectWith(RHS: KnownRHS); |
| 3415 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3416 | break; |
| 3417 | } |
| 3418 | case Instruction::FNeg: { |
| 3419 | // Special case fneg(fabs(x)) |
| 3420 | Value *Src; |
| 3421 | |
| 3422 | Value *FNegSrc = I->getOperand(i: 0); |
| 3423 | if (!match(V: FNegSrc, P: m_FAbs(Op0: m_Value(V&: Src)))) { |
| 3424 | Known = computeKnownFPClass(Val: I, Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3425 | break; |
| 3426 | } |
| 3427 | |
| 3428 | KnownFPClass KnownSrc = |
| 3429 | computeKnownFPClass(Val: Src, Interested: fcAllFlags, CtxI: CxtI, Depth: Depth + 1); |
| 3430 | |
| 3431 | FastMathFlags FabsFMF = cast<FPMathOperator>(Val: FNegSrc)->getFastMathFlags(); |
| 3432 | FPClassTest ThisDemandedMask = |
| 3433 | adjustDemandedMaskFromFlags(DemandedMask, FMF: FabsFMF); |
| 3434 | |
| 3435 | // We cannot apply the NSZ logic with multiple uses. We can apply it if the |
| 3436 | // inner fabs has it and this is the only use. |
| 3437 | if (Value *Simplified = simplifyDemandedFPClassFnegFabs( |
| 3438 | Known, Src, DemandedMask: ThisDemandedMask, KnownSrc, /*NSZ=*/false)) |
| 3439 | return Simplified; |
| 3440 | break; |
| 3441 | } |
| 3442 | case Instruction::Call: { |
| 3443 | const CallInst *CI = cast<CallInst>(Val: I); |
| 3444 | const Intrinsic::ID IID = CI->getIntrinsicID(); |
| 3445 | switch (IID) { |
| 3446 | case Intrinsic::fabs: { |
| 3447 | Value *Src = CI->getArgOperand(i: 0); |
| 3448 | KnownFPClass KnownSrc = |
| 3449 | computeKnownFPClass(Val: Src, Interested: fcAllFlags, CtxI: CxtI, Depth: Depth + 1); |
| 3450 | |
| 3451 | // NSZ cannot be applied in the multiple-use case (maybe it could if all
| 3452 | // uses were known nsz).
| 3453 | if (Value *Simplified = simplifyDemandedFPClassFabs( |
| 3454 | Known, Src: CI->getArgOperand(i: 0), DemandedMask, KnownSrc, |
| 3455 | /*NSZ=*/false)) |
| 3456 | return Simplified; |
| 3457 | break; |
| 3458 | } |
| 3459 | case Intrinsic::copysign: { |
| 3460 | Value *Mag = CI->getArgOperand(i: 0); |
| 3461 | Value *Sign = CI->getArgOperand(i: 1); |
| 3462 | KnownFPClass KnownMag = |
| 3463 | computeKnownFPClass(Val: Mag, Interested: fcAllFlags, CtxI: CxtI, Depth: Depth + 1); |
| 3464 | |
| 3465 | // Rule out some cases by magnitude, which may help prove the sign bit is |
| 3466 | // one direction or the other. |
| 3467 | KnownMag.knownNot(RuleOut: ~llvm::unknown_sign(Mask: DemandedMask)); |
| 3468 | |
| 3469 | // Cannot use nsz in the multiple use case. |
| 3470 | if (Value *Simplified = simplifyDemandedFPClassCopysignMag( |
| 3471 | MagSrc: Mag, DemandedMask, KnownSrc: KnownMag, /*NSZ=*/false)) |
| 3472 | return Simplified; |
| 3473 | |
| 3474 | KnownFPClass KnownSign = |
| 3475 | computeKnownFPClass(Val: Sign, Interested: fcAllFlags, CtxI: CxtI, Depth: Depth + 1); |
| 3476 | |
| 3477 | if (FMF.noInfs()) |
| 3478 | KnownSign.knownNot(RuleOut: fcInf); |
| 3479 | if (FMF.noNaNs()) |
| 3480 | KnownSign.knownNot(RuleOut: fcNan); |
| 3481 | |
| 3482 | if (KnownSign.SignBit && KnownMag.SignBit && |
| 3483 | *KnownSign.SignBit == *KnownMag.SignBit) |
| 3484 | return Mag; |
| 3485 | |
| 3486 | Known = KnownFPClass::copysign(KnownMag, KnownSign); |
| 3487 | break; |
| 3488 | } |
| 3489 | case Intrinsic::maxnum: |
| 3490 | case Intrinsic::minnum: |
| 3491 | case Intrinsic::maximum: |
| 3492 | case Intrinsic::minimum: |
| 3493 | case Intrinsic::maximumnum: |
| 3494 | case Intrinsic::minimumnum: { |
| 3495 | KnownFPClass KnownRHS = computeKnownFPClass( |
| 3496 | Val: CI->getArgOperand(i: 1), Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3497 | if (KnownRHS.isUnknown()) |
| 3498 | return nullptr; |
| 3499 | |
| 3500 | KnownFPClass KnownLHS = computeKnownFPClass( |
| 3501 | Val: CI->getArgOperand(i: 0), Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3502 | |
| 3503 | // Cannot use NSZ in the multiple use case. |
| 3504 | return simplifyDemandedFPClassMinMax(Known, IID, CI, DemandedMask, |
| 3505 | KnownLHS, KnownRHS, F, |
| 3506 | /*NSZ=*/false); |
| 3507 | } |
| 3508 | default: |
| 3509 | break; |
| 3510 | } |
| 3511 | |
| 3512 | [[fallthrough]]; |
| 3513 | } |
| 3514 | default: |
| 3515 | Known = computeKnownFPClass(Val: I, Interested: DemandedMask, CtxI: CxtI, Depth: Depth + 1); |
| 3516 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3517 | break; |
| 3518 | } |
| 3519 | |
| 3520 | return getFPClassConstant(Ty: I->getType(), Mask: Known.KnownFPClasses); |
| 3521 | } |
| 3522 | |
| 3523 | bool InstCombinerImpl::SimplifyDemandedFPClass(Instruction *I, unsigned OpNo, |
| 3524 | FPClassTest DemandedMask, |
| 3525 | KnownFPClass &Known, |
| 3526 | unsigned Depth) { |
| 3527 | Use &U = I->getOperandUse(i: OpNo); |
| 3528 | Value *V = U.get(); |
| 3529 | Type *VTy = V->getType(); |
| 3530 | |
| 3531 | if (DemandedMask == fcNone) { |
| 3532 | if (isa<PoisonValue>(Val: V)) |
| 3533 | return false; |
| 3534 | replaceUse(U, NewValue: PoisonValue::get(T: VTy)); |
| 3535 | return true; |
| 3536 | } |
| 3537 | |
| 3538 | // Handle constant |
| 3539 | Instruction *VInst = dyn_cast<Instruction>(Val: V); |
| 3540 | if (!VInst) { |
| 3541 | // Handle constants and arguments |
| 3542 | Known = computeKnownFPClass(Val: V, Interested: fcAllFlags, CtxI: I, Depth); |
| 3543 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3544 | |
| 3545 | if (Known.KnownFPClasses == fcNone) { |
| 3546 | if (isa<PoisonValue>(Val: V)) |
| 3547 | return false; |
| 3548 | replaceUse(U, NewValue: PoisonValue::get(T: VTy)); |
| 3549 | return true; |
| 3550 | } |
| 3551 | |
| 3552 | // Do not try to replace values which are already constants (unless we are |
| 3553 | // folding to poison). Doing so could promote poison elements to non-poison |
| 3554 | // constants. |
| 3555 | if (isa<Constant>(Val: V)) |
| 3556 | return false; |
| 3557 | |
| 3558 | Value *FoldedToConst = getFPClassConstant(Ty: VTy, Mask: Known.KnownFPClasses); |
| 3559 | if (!FoldedToConst || FoldedToConst == V) |
| 3560 | return false; |
| 3561 | |
| 3562 | replaceUse(U, NewValue: FoldedToConst); |
| 3563 | return true; |
| 3564 | } |
| 3565 | |
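|      | // Classes excluded by a nofpclass attribute on the user call's parameter
|      | // would be poison anyway, so they are not really demanded.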
| 3566 | if (const CallBase *CB = dyn_cast<CallBase>(Val: I)) {
| 3567 | FPClassTest NoFPClass = CB->getParamNoFPClass(i: U.getOperandNo()); |
| 3568 | DemandedMask &= ~NoFPClass; |
| 3569 | } |
| 3570 | |
| 3571 | if (Depth == MaxAnalysisRecursionDepth) { |
| 3572 | Known.knownNot(RuleOut: ~DemandedMask); |
| 3573 | return false; |
| 3574 | } |
| 3575 | |
| 3576 | Value *NewVal; |
| 3577 | |
| 3578 | if (VInst->hasOneUse()) { |
| 3579 | // If the instruction has one use, we can directly simplify it. |
| 3580 | NewVal = SimplifyDemandedUseFPClass(I: VInst, DemandedMask, Known, CxtI: I, Depth); |
| 3581 | } else { |
| 3582 | // If there are multiple uses of this instruction, then we can simplify |
| 3583 | // VInst to some other value, but not modify the instruction. |
| 3584 | NewVal = SimplifyMultipleUseDemandedFPClass(I: VInst, DemandedMask, Known, CxtI: I, |
| 3585 | Depth); |
| 3586 | } |
| 3587 | |
| 3588 | if (!NewVal) |
| 3589 | return false; |
| 3590 | if (Instruction *OpInst = dyn_cast<Instruction>(Val&: U)) |
| 3591 | salvageDebugInfo(I&: *OpInst); |
| 3592 | |
| 3593 | replaceUse(U, NewValue: NewVal); |
| 3594 | return true; |
| 3595 | } |
| 3596 | |