| 1 | //===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file "describes" induction and recurrence variables. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #include "llvm/Analysis/IVDescriptors.h" |
| 14 | #include "llvm/Analysis/DemandedBits.h" |
| 15 | #include "llvm/Analysis/LoopInfo.h" |
| 16 | #include "llvm/Analysis/ScalarEvolution.h" |
| 17 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" |
| 18 | #include "llvm/Analysis/ScalarEvolutionPatternMatch.h" |
| 19 | #include "llvm/Analysis/ValueTracking.h" |
| 20 | #include "llvm/IR/Dominators.h" |
| 21 | #include "llvm/IR/Instructions.h" |
| 22 | #include "llvm/IR/PatternMatch.h" |
| 23 | #include "llvm/IR/ValueHandle.h" |
| 24 | #include "llvm/Support/Debug.h" |
| 25 | #include "llvm/Support/KnownBits.h" |
| 26 | |
| 27 | using namespace llvm; |
| 28 | using namespace llvm::PatternMatch; |
| 29 | using namespace llvm::SCEVPatternMatch; |
| 30 | |
| 31 | #define DEBUG_TYPE "iv-descriptors" |
| 32 | |
| 33 | bool RecurrenceDescriptor::areAllUsesIn(Instruction *I, |
| 34 | SmallPtrSetImpl<Instruction *> &Set) { |
| 35 | for (const Use &Use : I->operands()) |
| 36 | if (!Set.count(Ptr: dyn_cast<Instruction>(Val: Use))) |
| 37 | return false; |
| 38 | return true; |
| 39 | } |
| 40 | |
| 41 | bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) { |
| 42 | switch (Kind) { |
| 43 | default: |
| 44 | break; |
| 45 | case RecurKind::AddChainWithSubs: |
| 46 | case RecurKind::Sub: |
| 47 | case RecurKind::Add: |
| 48 | case RecurKind::Mul: |
| 49 | case RecurKind::Or: |
| 50 | case RecurKind::And: |
| 51 | case RecurKind::Xor: |
| 52 | case RecurKind::SMax: |
| 53 | case RecurKind::SMin: |
| 54 | case RecurKind::UMax: |
| 55 | case RecurKind::UMin: |
| 56 | case RecurKind::AnyOf: |
| 57 | case RecurKind::FindIV: |
| 58 | case RecurKind::FindLast: |
| 59 | return true; |
| 60 | } |
| 61 | return false; |
| 62 | } |
| 63 | |
| 64 | bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) { |
| 65 | return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind); |
| 66 | } |
| 67 | |
| 68 | /// Determines if Phi may have been type-promoted. If Phi has a single user |
| 69 | /// that ANDs the Phi with a type mask, return the user. RT is updated to |
| 70 | /// account for the narrower bit width represented by the mask, and the AND |
| 71 | /// instruction is added to CI. |
| 72 | static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT, |
| 73 | SmallPtrSetImpl<Instruction *> &Visited, |
| 74 | SmallPtrSetImpl<Instruction *> &CI) { |
| 75 | if (!Phi->hasOneUse()) |
| 76 | return Phi; |
| 77 | |
| 78 | const APInt *M = nullptr; |
| 79 | Instruction *I, *J = cast<Instruction>(Val: Phi->use_begin()->getUser()); |
| 80 | |
| 81 | // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT |
| 82 | // with a new integer type of the corresponding bit width. |
| 83 | if (match(V: J, P: m_And(L: m_Instruction(I), R: m_APInt(Res&: M)))) { |
| 84 | int32_t Bits = (*M + 1).exactLogBase2(); |
| 85 | if (Bits > 0) { |
| 86 | RT = IntegerType::get(C&: Phi->getContext(), NumBits: Bits); |
| 87 | Visited.insert(Ptr: Phi); |
| 88 | CI.insert(Ptr: J); |
| 89 | return J; |
| 90 | } |
| 91 | } |
| 92 | return Phi; |
| 93 | } |
| 94 | |
| 95 | /// Compute the minimal bit width needed to represent a reduction whose exit |
| 96 | /// instruction is given by Exit. |
| 97 | static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit, |
| 98 | DemandedBits *DB, |
| 99 | AssumptionCache *AC, |
| 100 | DominatorTree *DT) { |
| 101 | bool IsSigned = false; |
| 102 | const DataLayout &DL = Exit->getDataLayout(); |
| 103 | uint64_t MaxBitWidth = DL.getTypeSizeInBits(Ty: Exit->getType()); |
| 104 | |
| 105 | if (DB) { |
| 106 | // Use the demanded bits analysis to determine the bits that are live out |
| 107 | // of the exit instruction, rounding up to the nearest power of two. If the |
| 108 | // use of demanded bits results in a smaller bit width, we know the value |
| 109 | // must be positive (i.e., IsSigned = false), because if this were not the |
| 110 | // case, the sign bit would have been demanded. |
| 111 | auto Mask = DB->getDemandedBits(I: Exit); |
| 112 | MaxBitWidth = Mask.getBitWidth() - Mask.countl_zero(); |
| 113 | } |
| 114 | |
| 115 | if (MaxBitWidth == DL.getTypeSizeInBits(Ty: Exit->getType()) && AC && DT) { |
| 116 | // If demanded bits wasn't able to limit the bit width, we can try to use |
| 117 | // value tracking instead. This can be the case, for example, if the value |
| 118 | // may be negative. |
| 119 | auto NumSignBits = ComputeNumSignBits(Op: Exit, DL, AC, CxtI: nullptr, DT); |
| 120 | auto NumTypeBits = DL.getTypeSizeInBits(Ty: Exit->getType()); |
| 121 | MaxBitWidth = NumTypeBits - NumSignBits; |
| 122 | KnownBits Bits = computeKnownBits(V: Exit, DL); |
| 123 | if (!Bits.isNonNegative()) { |
| 124 | // If the value is not known to be non-negative, we set IsSigned to true, |
| 125 | // meaning that we will use sext instructions instead of zext |
| 126 | // instructions to restore the original type. |
| 127 | IsSigned = true; |
| 128 | // Make sure at least one sign bit is included in the result, so it |
| 129 | // will get properly sign-extended. |
| 130 | ++MaxBitWidth; |
| 131 | } |
| 132 | } |
| 133 | MaxBitWidth = llvm::bit_ceil(Value: MaxBitWidth); |
| 134 | |
| 135 | return std::make_pair(x: Type::getIntNTy(C&: Exit->getContext(), N: MaxBitWidth), |
| 136 | y&: IsSigned); |
| 137 | } |
| 138 | |
| 139 | /// Collect cast instructions that can be ignored in the vectorizer's cost |
| 140 | /// model, given a reduction exit value and the minimal type in which the |
| 141 | // reduction can be represented. Also search casts to the recurrence type |
| 142 | // to find the minimum width used by the recurrence. |
| 143 | static void collectCastInstrs(Loop *TheLoop, Instruction *Exit, |
| 144 | Type *RecurrenceType, |
| 145 | SmallPtrSetImpl<Instruction *> &Casts, |
| 146 | unsigned &MinWidthCastToRecurTy) { |
| 147 | |
| 148 | SmallVector<Instruction *, 8> Worklist; |
| 149 | SmallPtrSet<Instruction *, 8> Visited; |
| 150 | Worklist.push_back(Elt: Exit); |
| 151 | MinWidthCastToRecurTy = -1U; |
| 152 | |
| 153 | while (!Worklist.empty()) { |
| 154 | Instruction *Val = Worklist.pop_back_val(); |
| 155 | Visited.insert(Ptr: Val); |
| 156 | if (auto *Cast = dyn_cast<CastInst>(Val)) { |
| 157 | if (Cast->getSrcTy() == RecurrenceType) { |
| 158 | // If the source type of a cast instruction is equal to the recurrence |
| 159 | // type, it will be eliminated, and should be ignored in the vectorizer |
| 160 | // cost model. |
| 161 | Casts.insert(Ptr: Cast); |
| 162 | continue; |
| 163 | } |
| 164 | if (Cast->getDestTy() == RecurrenceType) { |
| 165 | // The minimum width used by the recurrence is found by checking for |
| 166 | // casts on its operands. The minimum width is used by the vectorizer |
| 167 | // when finding the widest type for in-loop reductions without any |
| 168 | // loads/stores. |
| 169 | MinWidthCastToRecurTy = std::min<unsigned>( |
| 170 | a: MinWidthCastToRecurTy, b: Cast->getSrcTy()->getScalarSizeInBits()); |
| 171 | continue; |
| 172 | } |
| 173 | } |
| 174 | // Add all operands to the work list if they are loop-varying values that |
| 175 | // we haven't yet visited. |
| 176 | for (Value *O : cast<User>(Val)->operands()) |
| 177 | if (auto *I = dyn_cast<Instruction>(Val: O)) |
| 178 | if (TheLoop->contains(Inst: I) && !Visited.count(Ptr: I)) |
| 179 | Worklist.push_back(Elt: I); |
| 180 | } |
| 181 | } |
| 182 | |
| 183 | // Check if a given Phi node can be recognized as an ordered reduction for |
| 184 | // vectorizing floating point operations without unsafe math. |
| 185 | static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst, |
| 186 | Instruction *Exit, PHINode *Phi) { |
| 187 | // Currently only FAdd and FMulAdd are supported. |
| 188 | if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd) |
| 189 | return false; |
| 190 | |
| 191 | if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd) |
| 192 | return false; |
| 193 | |
| 194 | if (Kind == RecurKind::FMulAdd && |
| 195 | !RecurrenceDescriptor::isFMulAddIntrinsic(I: Exit)) |
| 196 | return false; |
| 197 | |
| 198 | // Ensure the exit instruction has only one user other than the reduction PHI |
| 199 | if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(N: 3)) |
| 200 | return false; |
| 201 | |
| 202 | // The only pattern accepted is the one in which the reduction PHI |
| 203 | // is used as one of the operands of the exit instruction |
| 204 | auto *Op0 = Exit->getOperand(i: 0); |
| 205 | auto *Op1 = Exit->getOperand(i: 1); |
| 206 | if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi) |
| 207 | return false; |
| 208 | if (Kind == RecurKind::FMulAdd && Exit->getOperand(i: 2) != Phi) |
| 209 | return false; |
| 210 | |
| 211 | LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi |
| 212 | << ", ExitInst: " << *Exit << "\n" ); |
| 213 | |
| 214 | return true; |
| 215 | } |
| 216 | |
| 217 | // Collect FMF from a value and its associated fcmp in select patterns |
| 218 | static FastMathFlags collectMinMaxFMF(Value *V) { |
| 219 | FastMathFlags FMF = cast<FPMathOperator>(Val: V)->getFastMathFlags(); |
| 220 | if (auto *Sel = dyn_cast<SelectInst>(Val: V)) { |
| 221 | // Accept FMF from either fcmp or select in a min/max idiom. |
| 222 | // TODO: Remove this when FMF propagation is fixed or we standardize on |
| 223 | // intrinsics. |
| 224 | if (auto *FCmp = dyn_cast<FCmpInst>(Val: Sel->getCondition())) |
| 225 | FMF |= FCmp->getFastMathFlags(); |
| 226 | } |
| 227 | return FMF; |
| 228 | } |
| 229 | |
| 230 | static std::optional<FastMathFlags> |
| 231 | hasRequiredFastMathFlags(FPMathOperator *FPOp, RecurKind &RK) { |
| 232 | bool HasRequiredFMF = FPOp && FPOp->hasNoNaNs() && FPOp->hasNoSignedZeros(); |
| 233 | if (HasRequiredFMF) |
| 234 | return collectMinMaxFMF(V: FPOp); |
| 235 | |
| 236 | switch (RK) { |
| 237 | case RecurKind::FMinimum: |
| 238 | case RecurKind::FMaximum: |
| 239 | case RecurKind::FMinimumNum: |
| 240 | case RecurKind::FMaximumNum: |
| 241 | break; |
| 242 | |
| 243 | case RecurKind::FMax: |
| 244 | if (!match(V: FPOp, P: m_Intrinsic<Intrinsic::maxnum>(Op0: m_Value(), Op1: m_Value()))) |
| 245 | return std::nullopt; |
| 246 | RK = RecurKind::FMaxNum; |
| 247 | break; |
| 248 | case RecurKind::FMin: |
| 249 | if (!match(V: FPOp, P: m_Intrinsic<Intrinsic::minnum>(Op0: m_Value(), Op1: m_Value()))) |
| 250 | return std::nullopt; |
| 251 | RK = RecurKind::FMinNum; |
| 252 | break; |
| 253 | default: |
| 254 | return std::nullopt; |
| 255 | } |
| 256 | return collectMinMaxFMF(V: FPOp); |
| 257 | } |
| 258 | |
/// Try to recognize \p Phi as the header phi of a min/max recurrence in
/// \p TheLoop, including argmin/argmax-style patterns and recurrences whose
/// intermediate values are stored to a loop-invariant address. Returns an
/// empty (invalid) RecurrenceDescriptor when no supported pattern is found.
static RecurrenceDescriptor getMinMaxRecurrence(PHINode *Phi, Loop *TheLoop,
                                                ScalarEvolution *SE) {
  // Only header phis with two incoming values of scalar int/FP type, in a
  // loop with a latch, can form the recurrence we look for.
  Type *Ty = Phi->getType();
  BasicBlock *Latch = TheLoop->getLoopLatch();
  if (Phi->getNumIncomingValues() != 2 ||
      Phi->getParent() != TheLoop->getHeader() ||
      (!Ty->isIntegerTy() && !Ty->isFloatingPointTy()) || !Latch)
    return {};

  // Classify V as one of the supported min/max operations, binding its two
  // operands to A and B. Returns RecurKind::None if V is not a min/max.
  auto GetMinMaxRK = [](Value *V, Value *&A, Value *&B) -> RecurKind {
    if (match(V, P: m_UMin(L: m_Value(V&: A), R: m_Value(V&: B))))
      return RecurKind::UMin;
    if (match(V, P: m_UMax(L: m_Value(V&: A), R: m_Value(V&: B))))
      return RecurKind::UMax;
    if (match(V, P: m_SMax(L: m_Value(V&: A), R: m_Value(V&: B))))
      return RecurKind::SMax;
    if (match(V, P: m_SMin(L: m_Value(V&: A), R: m_Value(V&: B))))
      return RecurKind::SMin;
    if (match(V, P: m_OrdOrUnordFMin(L: m_Value(V&: A), R: m_Value(V&: B))) ||
        match(V, P: m_Intrinsic<Intrinsic::minnum>(Op0: m_Value(V&: A), Op1: m_Value(V&: B))))
      return RecurKind::FMin;
    if (match(V, P: m_OrdOrUnordFMax(L: m_Value(V&: A), R: m_Value(V&: B))) ||
        match(V, P: m_Intrinsic<Intrinsic::maxnum>(Op0: m_Value(V&: A), Op1: m_Value(V&: B))))
      return RecurKind::FMax;
    if (match(V, P: m_FMinimum(Op0: m_Value(V&: A), Op1: m_Value(V&: B))))
      return RecurKind::FMinimum;
    if (match(V, P: m_FMaximum(Op0: m_Value(V&: A), Op1: m_Value(V&: B))))
      return RecurKind::FMaximum;
    if (match(V, P: m_Intrinsic<Intrinsic::minimumnum>(Op0: m_Value(V&: A), Op1: m_Value(V&: B))))
      return RecurKind::FMinimumNum;
    if (match(V, P: m_Intrinsic<Intrinsic::maximumnum>(Op0: m_Value(V&: A), Op1: m_Value(V&: B))))
      return RecurKind::FMaximumNum;
    return RecurKind::None;
  };

  // FMF starts fully set and is intersected with the flags of every FP
  // min/max seen in the chain.
  FastMathFlags FMF = FastMathFlags::getFast();
  Value *BackedgeValue = Phi->getIncomingValueForBlock(BB: Latch);
  RecurKind RK = RecurKind::None;
  // Walk def-use chains upwards from BackedgeValue to identify min/max
  // recurrences. Chain accumulates every value that is part of the
  // recurrence (seeded with the header phi itself).
  SmallVector<Value *> WorkList({BackedgeValue});
  SmallPtrSet<Value *, 8> Chain({Phi});
  while (!WorkList.empty()) {
    Value *Cur = WorkList.pop_back_val();
    if (!Chain.insert(Ptr: Cur).second)
      continue;
    // Every chain member must be an instruction inside the loop.
    auto *I = dyn_cast<Instruction>(Val: Cur);
    if (!I || !TheLoop->contains(Inst: I))
      return {};
    // Intermediate phis merge chain values; follow all their inputs.
    if (auto *PN = dyn_cast<PHINode>(Val: I)) {
      append_range(C&: WorkList, R: PN->operands());
      continue;
    }
    Value *A, *B;
    RecurKind CurRK = GetMinMaxRK(Cur, A, B);
    // All non-phi chain members must be min/max ops of one consistent kind.
    if (CurRK == RecurKind::None || (RK != RecurKind::None && CurRK != RK))
      return {};

    RK = CurRK;
    // Check required fast-math flags for FP recurrences.
    if (RecurrenceDescriptor::isFPMinMaxRecurrenceKind(Kind: CurRK)) {
      auto CurFMF = hasRequiredFastMathFlags(FPOp: cast<FPMathOperator>(Val: Cur), RK);
      if (!CurFMF)
        return {};
      FMF &= *CurFMF;
    }

    // The select's condition (the fcmp/icmp) belongs to the chain too, so it
    // is not later mistaken for an external user.
    if (auto *SI = dyn_cast<SelectInst>(Val: I))
      Chain.insert(Ptr: SI->getCondition());

    // Reaching the header phi closes the recurrence cycle.
    if (A == Phi || B == Phi)
      continue;

    // Add operand to worklist if it matches the pattern (exactly one must
    // match)
    Value *X, *Y;
    auto *IA = dyn_cast<Instruction>(Val: A);
    auto *IB = dyn_cast<Instruction>(Val: B);
    bool AMatches = IA && TheLoop->contains(Inst: IA) && GetMinMaxRK(A, X, Y) == RK;
    bool BMatches = IB && TheLoop->contains(Inst: IB) && GetMinMaxRK(B, X, Y) == RK;
    if (AMatches == BMatches) // Both or neither match
      return {};
    WorkList.push_back(Elt: AMatches ? A : B);
  }

  // Handle argmin/argmax pattern: PHI has uses outside the reduction chain
  // that are not intermediate min/max operations (which are handled below).
  // Requires integer min/max, and single-use BackedgeValue (so vectorizer can
  // handle both PHIs together).
  bool PhiHasInvalidUses = any_of(Range: Phi->users(), P: [&](User *U) {
    Value *A, *B;
    return !Chain.contains(Ptr: U) && TheLoop->contains(Inst: cast<Instruction>(Val: U)) &&
           GetMinMaxRK(U, A, B) == RecurKind::None;
  });
  if (PhiHasInvalidUses) {
    if (!RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind: RK) ||
        !BackedgeValue->hasOneUse())
      return {};
    // Multi-use descriptor: no exit instruction or store is recorded.
    return RecurrenceDescriptor(
        Phi->getIncomingValueForBlock(BB: TheLoop->getLoopPreheader()),
        /*Exit=*/nullptr, /*Store=*/nullptr, RK, FastMathFlags(),
        /*ExactFP=*/nullptr, Phi->getType(), /*IsMultiUse=*/true);
  }

  // Validate chain entries and collect stores from chain entries and
  // intermediate ops.
  SmallVector<StoreInst *> Stores;
  unsigned OutOfLoopUses = 0;
  for (Value *V : Chain) {
    for (User *U : V->users()) {
      if (Chain.contains(Ptr: U))
        continue;
      // Only the backedge value may be used outside the loop, and only once.
      auto *I = dyn_cast<Instruction>(Val: U);
      if (!I || (!TheLoop->contains(Inst: I) &&
                 (V != BackedgeValue || ++OutOfLoopUses > 1)))
        return {};
      if (!TheLoop->contains(Inst: I))
        continue;
      // Stores of the recurrence value are collected and validated below.
      if (auto *SI = dyn_cast<StoreInst>(Val: I)) {
        Stores.push_back(Elt: SI);
        continue;
      }
      // Must be intermediate min/max of the same kind.
      Value *A, *B;
      if (GetMinMaxRK(I, A, B) != RK)
        return {};
      // An intermediate min/max may itself only feed stores or the chain.
      for (User *IU : I->users()) {
        if (auto *SI = dyn_cast<StoreInst>(Val: IU))
          Stores.push_back(Elt: SI);
        else if (!Chain.contains(Ptr: IU))
          return {};
      }
    }
  }

  // Validate all stores go to same invariant address and are in the same block.
  // IntermediateStore ends up as the last store (in program order).
  StoreInst *IntermediateStore = nullptr;
  const SCEV *StorePtrSCEV = nullptr;
  for (StoreInst *SI : Stores) {
    const SCEV *Ptr = SE->getSCEV(V: SI->getPointerOperand());
    if (!SE->isLoopInvariant(S: Ptr, L: TheLoop) ||
        (StorePtrSCEV && StorePtrSCEV != Ptr))
      return {};
    StorePtrSCEV = Ptr;
    if (!IntermediateStore)
      IntermediateStore = SI;
    else if (IntermediateStore->getParent() != SI->getParent())
      return {};
    else if (IntermediateStore->comesBefore(Other: SI))
      IntermediateStore = SI;
  }

  return RecurrenceDescriptor(
      Phi->getIncomingValueForBlock(BB: TheLoop->getLoopPreheader()),
      cast<Instruction>(Val: BackedgeValue), IntermediateStore, RK, FMF, nullptr,
      Phi->getType());
}
| 416 | |
| 417 | // This matches a phi that selects between the original value (HeaderPhi) and an |
| 418 | // arbitrary non-reduction value. |
| 419 | static bool isFindLastLikePhi(PHINode *Phi, PHINode *, |
| 420 | SmallPtrSetImpl<Instruction *> &ReductionInstrs) { |
| 421 | unsigned NumNonReduxInputs = 0; |
| 422 | for (const Value *Op : Phi->operands()) { |
| 423 | if (!ReductionInstrs.contains(Ptr: dyn_cast<Instruction>(Val: Op))) { |
| 424 | if (++NumNonReduxInputs > 1) |
| 425 | return false; |
| 426 | } else if (Op != HeaderPhi) { |
| 427 | // TODO: Remove this restriction once chained phis are supported. |
| 428 | return false; |
| 429 | } |
| 430 | } |
| 431 | return NumNonReduxInputs == 1; |
| 432 | } |
| 433 | |
| 434 | bool RecurrenceDescriptor::AddReductionVar( |
| 435 | PHINode *Phi, RecurKind Kind, Loop *TheLoop, RecurrenceDescriptor &RedDes, |
| 436 | DemandedBits *DB, AssumptionCache *AC, DominatorTree *DT, |
| 437 | ScalarEvolution *SE) { |
| 438 | if (Phi->getNumIncomingValues() != 2) |
| 439 | return false; |
| 440 | |
| 441 | // Reduction variables are only found in the loop header block. |
| 442 | if (Phi->getParent() != TheLoop->getHeader()) |
| 443 | return false; |
| 444 | |
| 445 | // Obtain the reduction start value from the value that comes from the loop |
| 446 | // preheader. |
| 447 | if (!TheLoop->getLoopPreheader()) |
| 448 | return false; |
| 449 | |
| 450 | Value *RdxStart = Phi->getIncomingValueForBlock(BB: TheLoop->getLoopPreheader()); |
| 451 | // ExitInstruction is the single value which is used outside the loop. |
| 452 | // We only allow for a single reduction value to be used outside the loop. |
| 453 | // This includes users of the reduction, variables (which form a cycle |
| 454 | // which ends in the phi node). |
| 455 | Instruction *ExitInstruction = nullptr; |
| 456 | |
| 457 | // Variable to keep last visited store instruction. By the end of the |
| 458 | // algorithm this variable will be either empty or having intermediate |
| 459 | // reduction value stored in invariant address. |
| 460 | StoreInst *IntermediateStore = nullptr; |
| 461 | |
| 462 | // Indicates that we found a reduction operation in our scan. |
| 463 | bool FoundReduxOp = false; |
| 464 | |
| 465 | // We start with the PHI node and scan for all of the users of this |
| 466 | // instruction. All users must be instructions that can be used as reduction |
| 467 | // variables (such as ADD). We must have a single out-of-block user. The cycle |
| 468 | // must include the original PHI. |
| 469 | bool FoundStartPHI = false; |
| 470 | |
| 471 | // To recognize AnyOf patterns formed by a icmp select sequence, we store |
| 472 | // the number of instruction we saw to make sure we only see one. |
| 473 | unsigned NumCmpSelectPatternInst = 0; |
| 474 | InstDesc ReduxDesc(false, nullptr); |
| 475 | |
| 476 | // To recognize find-lasts of conditional operations (such as loads or |
| 477 | // divides), that need masking, we track non-phi users and if we've found a |
| 478 | // "find-last-like" phi (see isFindLastLikePhi). We currently only support |
| 479 | // find-last reduction chains with a single "find-last-like" phi and do not |
| 480 | // allow any other operations. |
| 481 | [[maybe_unused]] unsigned NumNonPHIUsers = 0; |
| 482 | bool FoundFindLastLikePhi = false; |
| 483 | |
| 484 | // Data used for determining if the recurrence has been type-promoted. |
| 485 | Type *RecurrenceType = Phi->getType(); |
| 486 | SmallPtrSet<Instruction *, 4> CastInsts; |
| 487 | unsigned MinWidthCastToRecurrenceType; |
| 488 | Instruction *Start = Phi; |
| 489 | bool IsSigned = false; |
| 490 | |
| 491 | SmallPtrSet<Instruction *, 8> VisitedInsts; |
| 492 | SmallVector<Instruction *, 8> Worklist; |
| 493 | |
| 494 | // Return early if the recurrence kind does not match the type of Phi. If the |
| 495 | // recurrence kind is arithmetic, we attempt to look through AND operations |
| 496 | // resulting from the type promotion performed by InstCombine. Vector |
| 497 | // operations are not limited to the legal integer widths, so we may be able |
| 498 | // to evaluate the reduction in the narrower width. |
| 499 | // Check the scalar type to handle both scalar and vector types. |
| 500 | Type *ScalarTy = RecurrenceType->getScalarType(); |
| 501 | if (Kind == RecurKind::FindLast) { |
| 502 | // FindLast supports all primitive scalar types. |
| 503 | if (!ScalarTy->isFloatingPointTy() && !ScalarTy->isIntegerTy() && |
| 504 | !ScalarTy->isPointerTy()) |
| 505 | return false; |
| 506 | } else if (ScalarTy->isFloatingPointTy()) { |
| 507 | if (!isFloatingPointRecurrenceKind(Kind)) |
| 508 | return false; |
| 509 | } else if (ScalarTy->isIntegerTy()) { |
| 510 | if (!isIntegerRecurrenceKind(Kind)) |
| 511 | return false; |
| 512 | Start = lookThroughAnd(Phi, RT&: RecurrenceType, Visited&: VisitedInsts, CI&: CastInsts); |
| 513 | } else { |
| 514 | // Pointer min/max may exist, but it is not supported as a reduction op. |
| 515 | return false; |
| 516 | } |
| 517 | |
| 518 | Worklist.push_back(Elt: Start); |
| 519 | VisitedInsts.insert(Ptr: Start); |
| 520 | |
| 521 | // Start with all flags set because we will intersect this with the reduction |
| 522 | // flags from all the reduction operations. |
| 523 | FastMathFlags FMF = FastMathFlags::getFast(); |
| 524 | |
| 525 | // The first instruction in the use-def chain of the Phi node that requires |
| 526 | // exact floating point operations. |
| 527 | Instruction *ExactFPMathInst = nullptr; |
| 528 | |
| 529 | // A value in the reduction can be used: |
| 530 | // - By the reduction: |
| 531 | // - Reduction operation: |
| 532 | // - One use of reduction value (safe). |
| 533 | // - Multiple use of reduction value (not safe). |
| 534 | // - PHI: |
| 535 | // - All uses of the PHI must be the reduction (safe). |
| 536 | // - Otherwise, not safe. |
| 537 | // - By instructions outside of the loop (safe). |
| 538 | // * One value may have several outside users, but all outside |
| 539 | // uses must be of the same value. |
| 540 | // - By store instructions with a loop invariant address (safe with |
| 541 | // the following restrictions): |
| 542 | // * If there are several stores, all must have the same address. |
| 543 | // * Final value should be stored in that loop invariant address. |
| 544 | // - By an instruction that is not part of the reduction (not safe). |
| 545 | // This is either: |
| 546 | // * An instruction type other than PHI or the reduction operation. |
| 547 | // * A PHI in the header other than the initial PHI. |
| 548 | while (!Worklist.empty()) { |
| 549 | Instruction *Cur = Worklist.pop_back_val(); |
| 550 | |
| 551 | // Store instructions are allowed iff it is the store of the reduction |
| 552 | // value to the same loop invariant memory location. |
| 553 | if (auto *SI = dyn_cast<StoreInst>(Val: Cur)) { |
| 554 | if (!SE) { |
| 555 | LLVM_DEBUG(dbgs() << "Store instructions are not processed without " |
| 556 | << "Scalar Evolution Analysis\n" ); |
| 557 | return false; |
| 558 | } |
| 559 | |
| 560 | const SCEV *PtrScev = SE->getSCEV(V: SI->getPointerOperand()); |
| 561 | // Check it is the same address as previous stores |
| 562 | if (IntermediateStore) { |
| 563 | const SCEV *OtherScev = |
| 564 | SE->getSCEV(V: IntermediateStore->getPointerOperand()); |
| 565 | |
| 566 | if (OtherScev != PtrScev) { |
| 567 | LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses " |
| 568 | << "inside the loop: " << *SI->getPointerOperand() |
| 569 | << " and " |
| 570 | << *IntermediateStore->getPointerOperand() << '\n'); |
| 571 | return false; |
| 572 | } |
| 573 | } |
| 574 | |
| 575 | // Check the pointer is loop invariant |
| 576 | if (!SE->isLoopInvariant(S: PtrScev, L: TheLoop)) { |
| 577 | LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address " |
| 578 | << "inside the loop: " << *SI->getPointerOperand() |
| 579 | << '\n'); |
| 580 | return false; |
| 581 | } |
| 582 | |
| 583 | // IntermediateStore is always the last store in the loop. |
| 584 | IntermediateStore = SI; |
| 585 | continue; |
| 586 | } |
| 587 | |
| 588 | // No Users. |
| 589 | // If the instruction has no users then this is a broken chain and can't be |
| 590 | // a reduction variable. |
| 591 | if (Cur->use_empty()) |
| 592 | return false; |
| 593 | |
| 594 | bool IsAPhi = isa<PHINode>(Val: Cur); |
| 595 | if (!IsAPhi) |
| 596 | ++NumNonPHIUsers; |
| 597 | |
| 598 | // A header PHI use other than the original PHI. |
| 599 | if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent()) |
| 600 | return false; |
| 601 | |
| 602 | // Reductions of instructions such as Div, and Sub is only possible if the |
| 603 | // LHS is the reduction variable. |
| 604 | if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Val: Cur) && |
| 605 | !isa<ICmpInst>(Val: Cur) && !isa<FCmpInst>(Val: Cur) && |
| 606 | !VisitedInsts.count(Ptr: dyn_cast<Instruction>(Val: Cur->getOperand(i: 0)))) |
| 607 | return false; |
| 608 | |
| 609 | // Any reduction instruction must be of one of the allowed kinds. We ignore |
| 610 | // the starting value (the Phi or an AND instruction if the Phi has been |
| 611 | // type-promoted). |
| 612 | if (Cur != Start) { |
| 613 | ReduxDesc = isRecurrenceInstr(L: TheLoop, Phi, I: Cur, Kind, Prev&: ReduxDesc, SE); |
| 614 | ExactFPMathInst = ExactFPMathInst == nullptr |
| 615 | ? ReduxDesc.getExactFPMathInst() |
| 616 | : ExactFPMathInst; |
| 617 | if (!ReduxDesc.isRecurrence()) |
| 618 | return false; |
| 619 | // FIXME: FMF is allowed on phi, but propagation is not handled correctly. |
| 620 | if (isa<FPMathOperator>(Val: ReduxDesc.getPatternInst()) && !IsAPhi) |
| 621 | FMF &= collectMinMaxFMF(V: ReduxDesc.getPatternInst()); |
| 622 | // Update this reduction kind if we matched a new instruction. |
| 623 | // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind' |
| 624 | // state accurate while processing the worklist? |
| 625 | if (ReduxDesc.getRecKind() != RecurKind::None) |
| 626 | Kind = ReduxDesc.getRecKind(); |
| 627 | } |
| 628 | |
| 629 | bool IsASelect = isa<SelectInst>(Val: Cur); |
| 630 | |
| 631 | // A conditional reduction operation must only have 2 or less uses in |
| 632 | // VisitedInsts. |
| 633 | if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) && |
| 634 | hasMultipleUsesOf(I: Cur, Insts&: VisitedInsts, MaxNumUses: 2)) |
| 635 | return false; |
| 636 | |
| 637 | // A reduction operation must only have one use of the reduction value. |
| 638 | if (!IsAPhi && !IsASelect && !isAnyOfRecurrenceKind(Kind) && |
| 639 | hasMultipleUsesOf(I: Cur, Insts&: VisitedInsts, MaxNumUses: 1)) |
| 640 | return false; |
| 641 | |
| 642 | // All inputs to a PHI node must be a reduction value, unless the phi is a |
| 643 | // "FindLast-like" phi (described below). |
| 644 | if (IsAPhi && Cur != Phi) { |
| 645 | if (!areAllUsesIn(I: Cur, Set&: VisitedInsts)) { |
| 646 | // A "FindLast-like" phi acts like a conditional select between the |
| 647 | // previous reduction value, and an arbitrary value. Note: Multiple |
| 648 | // "FindLast-like" phis are not supported see: |
| 649 | // IVDescriptorsTest.UnsupportedFindLastPhi. |
| 650 | FoundFindLastLikePhi = |
| 651 | Kind == RecurKind::FindLast && !FoundFindLastLikePhi && |
| 652 | isFindLastLikePhi(Phi: cast<PHINode>(Val: Cur), HeaderPhi: Phi, ReductionInstrs&: VisitedInsts); |
| 653 | if (!FoundFindLastLikePhi) |
| 654 | return false; |
| 655 | } |
| 656 | } |
| 657 | |
| 658 | if (isAnyOfRecurrenceKind(Kind) && IsASelect) |
| 659 | ++NumCmpSelectPatternInst; |
| 660 | |
| 661 | // Check whether we found a reduction operator. |
| 662 | FoundReduxOp |= (!IsAPhi || FoundFindLastLikePhi) && Cur != Start; |
| 663 | |
| 664 | // Process users of current instruction. Push non-PHI nodes after PHI nodes |
| 665 | // onto the stack. This way we are going to have seen all inputs to PHI |
| 666 | // nodes once we get to them. |
| 667 | SmallVector<Instruction *, 8> NonPHIs; |
| 668 | SmallVector<Instruction *, 8> PHIs; |
| 669 | for (User *U : Cur->users()) { |
| 670 | Instruction *UI = cast<Instruction>(Val: U); |
| 671 | |
| 672 | // If the user is a call to llvm.fmuladd then the instruction can only be |
| 673 | // the final operand. |
| 674 | if (isFMulAddIntrinsic(I: UI)) |
| 675 | if (Cur == UI->getOperand(i: 0) || Cur == UI->getOperand(i: 1)) |
| 676 | return false; |
| 677 | |
| 678 | // Check if we found the exit user. |
| 679 | BasicBlock *Parent = UI->getParent(); |
| 680 | if (!TheLoop->contains(BB: Parent)) { |
| 681 | // If we already know this instruction is used externally, move on to |
| 682 | // the next user. |
| 683 | if (ExitInstruction == Cur) |
| 684 | continue; |
| 685 | |
| 686 | // Exit if you find multiple values used outside or if the header phi |
| 687 | // node is being used. In this case the user uses the value of the |
| 688 | // previous iteration, in which case we would loose "VF-1" iterations of |
| 689 | // the reduction operation if we vectorize. |
| 690 | if (ExitInstruction != nullptr || Cur == Phi) |
| 691 | return false; |
| 692 | |
| 693 | // The instruction used by an outside user must be the last instruction |
| 694 | // before we feed back to the reduction phi. Otherwise, we loose VF-1 |
| 695 | // operations on the value. |
| 696 | if (!is_contained(Range: Phi->operands(), Element: Cur)) |
| 697 | return false; |
| 698 | |
| 699 | ExitInstruction = Cur; |
| 700 | continue; |
| 701 | } |
| 702 | |
| 703 | // Process instructions only once (termination). Each reduction cycle |
| 704 | // value must only be used once, except by phi nodes and conditional |
| 705 | // reductions which are represented as a cmp followed by a select. |
| 706 | InstDesc IgnoredVal(false, nullptr); |
| 707 | if (VisitedInsts.insert(Ptr: UI).second) { |
| 708 | if (isa<PHINode>(Val: UI)) { |
| 709 | PHIs.push_back(Elt: UI); |
| 710 | } else { |
| 711 | StoreInst *SI = dyn_cast<StoreInst>(Val: UI); |
| 712 | if (SI && SI->getPointerOperand() == Cur) { |
| 713 | // Reduction variable chain can only be stored somewhere but it |
| 714 | // can't be used as an address. |
| 715 | return false; |
| 716 | } |
| 717 | NonPHIs.push_back(Elt: UI); |
| 718 | } |
| 719 | } else if (!isa<PHINode>(Val: UI) && |
| 720 | ((!isConditionalRdxPattern(I: UI).isRecurrence() && |
| 721 | !isAnyOfPattern(Loop: TheLoop, OrigPhi: Phi, I: UI, Prev&: IgnoredVal) |
| 722 | .isRecurrence()))) |
| 723 | return false; |
| 724 | |
| 725 | // Remember that we completed the cycle. |
| 726 | if (UI == Phi) |
| 727 | FoundStartPHI = true; |
| 728 | } |
| 729 | Worklist.append(in_start: PHIs.begin(), in_end: PHIs.end()); |
| 730 | Worklist.append(in_start: NonPHIs.begin(), in_end: NonPHIs.end()); |
| 731 | } |
| 732 | |
| 733 | // We only expect to match a single "find-last-like" phi per find-last |
| 734 | // reduction, with no non-phi operations in the reduction use chain. |
| 735 | assert((!FoundFindLastLikePhi || |
| 736 | (Kind == RecurKind::FindLast && NumNonPHIUsers == 0)) && |
| 737 | "Unexpectedly matched a 'find-last-like' phi" ); |
| 738 | |
| 739 | if (isAnyOfRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1) |
| 740 | return false; |
| 741 | |
| 742 | if (IntermediateStore) { |
| 743 | // Check that stored value goes to the phi node again. This way we make sure |
| 744 | // that the value stored in IntermediateStore is indeed the final reduction |
| 745 | // value. |
| 746 | if (!is_contained(Range: Phi->operands(), Element: IntermediateStore->getValueOperand())) { |
| 747 | LLVM_DEBUG(dbgs() << "Not a final reduction value stored: " |
| 748 | << *IntermediateStore << '\n'); |
| 749 | return false; |
| 750 | } |
| 751 | |
// If there is an exit instruction, its value should be stored in
// IntermediateStore.
| 754 | if (ExitInstruction && |
| 755 | IntermediateStore->getValueOperand() != ExitInstruction) { |
| 756 | LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not " |
| 757 | "store last calculated value of the reduction: " |
| 758 | << *IntermediateStore << '\n'); |
| 759 | return false; |
| 760 | } |
| 761 | |
| 762 | // If all uses are inside the loop (intermediate stores), then the |
| 763 | // reduction value after the loop will be the one used in the last store. |
| 764 | if (!ExitInstruction) |
| 765 | ExitInstruction = cast<Instruction>(Val: IntermediateStore->getValueOperand()); |
| 766 | } |
| 767 | |
| 768 | if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction) |
| 769 | return false; |
| 770 | |
| 771 | const bool IsOrdered = |
| 772 | checkOrderedReduction(Kind, ExactFPMathInst, Exit: ExitInstruction, Phi); |
| 773 | |
| 774 | if (Start != Phi) { |
| 775 | // If the starting value is not the same as the phi node, we speculatively |
| 776 | // looked through an 'and' instruction when evaluating a potential |
| 777 | // arithmetic reduction to determine if it may have been type-promoted. |
| 778 | // |
| 779 | // We now compute the minimal bit width that is required to represent the |
| 780 | // reduction. If this is the same width that was indicated by the 'and', we |
| 781 | // can represent the reduction in the smaller type. The 'and' instruction |
| 782 | // will be eliminated since it will essentially be a cast instruction that |
| 783 | // can be ignore in the cost model. If we compute a different type than we |
| 784 | // did when evaluating the 'and', the 'and' will not be eliminated, and we |
| 785 | // will end up with different kinds of operations in the recurrence |
| 786 | // expression (e.g., IntegerAND, IntegerADD). We give up if this is |
| 787 | // the case. |
| 788 | // |
| 789 | // The vectorizer relies on InstCombine to perform the actual |
| 790 | // type-shrinking. It does this by inserting instructions to truncate the |
| 791 | // exit value of the reduction to the width indicated by RecurrenceType and |
| 792 | // then extend this value back to the original width. If IsSigned is false, |
| 793 | // a 'zext' instruction will be generated; otherwise, a 'sext' will be |
| 794 | // used. |
| 795 | // |
| 796 | // TODO: We should not rely on InstCombine to rewrite the reduction in the |
| 797 | // smaller type. We should just generate a correctly typed expression |
| 798 | // to begin with. |
| 799 | Type *ComputedType; |
| 800 | std::tie(args&: ComputedType, args&: IsSigned) = |
| 801 | computeRecurrenceType(Exit: ExitInstruction, DB, AC, DT); |
| 802 | if (ComputedType != RecurrenceType) |
| 803 | return false; |
| 804 | } |
| 805 | |
| 806 | // Collect cast instructions and the minimum width used by the recurrence. |
| 807 | // If the starting value is not the same as the phi node and the computed |
| 808 | // recurrence type is equal to the recurrence type, the recurrence expression |
| 809 | // will be represented in a narrower or wider type. If there are any cast |
| 810 | // instructions that will be unnecessary, collect them in CastsFromRecurTy. |
| 811 | // Note that the 'and' instruction was already included in this list. |
| 812 | // |
| 813 | // TODO: A better way to represent this may be to tag in some way all the |
| 814 | // instructions that are a part of the reduction. The vectorizer cost |
| 815 | // model could then apply the recurrence type to these instructions, |
| 816 | // without needing a white list of instructions to ignore. |
| 817 | // This may also be useful for the inloop reductions, if it can be |
| 818 | // kept simple enough. |
| 819 | collectCastInstrs(TheLoop, Exit: ExitInstruction, RecurrenceType, Casts&: CastInsts, |
| 820 | MinWidthCastToRecurTy&: MinWidthCastToRecurrenceType); |
| 821 | |
| 822 | // We found a reduction var if we have reached the original phi node and we |
| 823 | // only have a single instruction with out-of-loop users. |
| 824 | |
| 825 | // The ExitInstruction(Instruction which is allowed to have out-of-loop users) |
| 826 | // is saved as part of the RecurrenceDescriptor. |
| 827 | |
| 828 | // Save the description of this reduction variable. |
| 829 | RedDes = |
| 830 | RecurrenceDescriptor(RdxStart, ExitInstruction, IntermediateStore, Kind, |
| 831 | FMF, ExactFPMathInst, RecurrenceType, IsSigned, |
| 832 | IsOrdered, CastInsts, MinWidthCastToRecurrenceType); |
| 833 | return true; |
| 834 | } |
| 835 | |
| 836 | // We are looking for loops that do something like this: |
| 837 | // int r = 0; |
| 838 | // for (int i = 0; i < n; i++) { |
| 839 | // if (src[i] > 3) |
| 840 | // r = 3; |
| 841 | // } |
| 842 | // where the reduction value (r) only has two states, in this example 0 or 3. |
| 843 | // The generated LLVM IR for this type of loop will be like this: |
| 844 | // for.body: |
| 845 | // %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ] |
| 846 | // ... |
| 847 | // %cmp = icmp sgt i32 %5, 3 |
| 848 | // %spec.select = select i1 %cmp, i32 3, i32 %r |
| 849 | // ... |
| 850 | // In general we can support vectorization of loops where 'r' flips between |
| 851 | // any two non-constants, provided they are loop invariant. The only thing |
| 852 | // we actually care about at the end of the loop is whether or not any lane |
| 853 | // in the selected vector is different from the start value. The final |
| 854 | // across-vector reduction after the loop simply involves choosing the start |
| 855 | // value if nothing changed (0 in the example above) or the other selected |
| 856 | // value (3 in the example above). |
| 857 | RecurrenceDescriptor::InstDesc |
| 858 | RecurrenceDescriptor::isAnyOfPattern(Loop *Loop, PHINode *OrigPhi, |
| 859 | Instruction *I, InstDesc &Prev) { |
| 860 | // We must handle the select(cmp(),x,y) as a single instruction. Advance to |
| 861 | // the select. |
| 862 | if (match(V: I, P: m_OneUse(SubPattern: m_Cmp()))) { |
| 863 | if (auto *Select = dyn_cast<SelectInst>(Val: *I->user_begin())) |
| 864 | return InstDesc(Select, Prev.getRecKind()); |
| 865 | } |
| 866 | |
| 867 | if (!match(V: I, P: m_Select(C: m_Cmp(), L: m_Value(), R: m_Value()))) |
| 868 | return InstDesc(false, I); |
| 869 | |
| 870 | SelectInst *SI = cast<SelectInst>(Val: I); |
| 871 | Value *NonPhi = nullptr; |
| 872 | |
| 873 | if (OrigPhi == dyn_cast<PHINode>(Val: SI->getTrueValue())) |
| 874 | NonPhi = SI->getFalseValue(); |
| 875 | else if (OrigPhi == dyn_cast<PHINode>(Val: SI->getFalseValue())) |
| 876 | NonPhi = SI->getTrueValue(); |
| 877 | else |
| 878 | return InstDesc(false, I); |
| 879 | |
| 880 | // We are looking for selects of the form: |
| 881 | // select(cmp(), phi, loop_invariant) or |
| 882 | // select(cmp(), loop_invariant, phi) |
| 883 | if (!Loop->isLoopInvariant(V: NonPhi)) |
| 884 | return InstDesc(false, I); |
| 885 | |
| 886 | return InstDesc(I, RecurKind::AnyOf); |
| 887 | } |
| 888 | |
| 889 | // We are looking for loops that do something like this: |
| 890 | // int r = 0; |
| 891 | // for (int i = 0; i < n; i++) { |
| 892 | // if (src[i] > 3) |
| 893 | // r = i; |
| 894 | // } |
| 895 | // or like this: |
| 896 | // int r = 0; |
| 897 | // for (int i = 0; i < n; i++) { |
| 898 | // if (src[i] > 3) |
| 899 | // r = <loop-varying value>; |
| 900 | // } |
| 901 | // The reduction value (r) is derived from either the values of an induction |
| 902 | // variable (i) sequence, an arbitrary loop-varying value, or from the start |
| 903 | // value (0). The LLVM IR generated for such loops would be as follows: |
| 904 | // for.body: |
| 905 | // %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ] |
| 906 | // %i = phi i32 [ %inc, %for.body ], [ 0, %entry ] |
| 907 | // ... |
| 908 | // %cmp = icmp sgt i32 %5, 3 |
| 909 | // %spec.select = select i1 %cmp, i32 %i, i32 %r |
| 910 | // %inc = add nsw i32 %i, 1 |
| 911 | // ... |
| 912 | // |
| 913 | // When searching for an arbitrary loop-varying value, the reduction value will |
| 914 | // either be the initial value (0) if the condition was never met, or the value |
| 915 | // of the loop-varying value in the most recent loop iteration where the |
| 916 | // condition was met. |
| 917 | RecurrenceDescriptor::InstDesc |
| 918 | RecurrenceDescriptor::isFindPattern(Loop *TheLoop, PHINode *OrigPhi, |
| 919 | Instruction *I, ScalarEvolution &SE) { |
| 920 | // TODO: Support the vectorization of FindLastIV when the reduction phi is |
| 921 | // used by more than one select instruction. This vectorization is only |
| 922 | // performed when the SCEV of each increasing induction variable used by the |
| 923 | // select instructions is identical. |
| 924 | if (!OrigPhi->hasOneUse()) |
| 925 | return InstDesc(false, I); |
| 926 | |
| 927 | // We are looking for selects of the form: |
| 928 | // select(cmp(), phi, value) or |
| 929 | // select(cmp(), value, phi) |
| 930 | if (!match(V: I, P: m_CombineOr(L: m_Select(C: m_Cmp(), L: m_Value(), R: m_Specific(V: OrigPhi)), |
| 931 | R: m_Select(C: m_Cmp(), L: m_Specific(V: OrigPhi), R: m_Value())))) |
| 932 | return InstDesc(false, I); |
| 933 | |
| 934 | return InstDesc(I, RecurKind::FindLast); |
| 935 | } |
| 936 | |
/// Returns true if the select instruction has users in the compare-and-add
/// reduction pattern below. The select instruction argument is the last one
/// in the sequence.
///
/// %sum.1 = phi ...
/// ...
/// %cmp = fcmp pred %0, %CFP
/// %add = fadd %0, %sum.1
/// %sum.2 = select %cmp, %add, %sum.1
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isConditionalRdxPattern(Instruction *I) {
  Value *TrueVal, *FalseVal;
  // Only handle single use cases for now.
  if (!match(V: I,
             P: m_Select(C: m_OneUse(SubPattern: m_Cmp()), L: m_Value(V&: TrueVal), R: m_Value(V&: FalseVal))))
    return InstDesc(false, I);

  // Handle only when either of operands of select instruction is a PHI
  // node for now. Exactly one arm must be a phi (the reduction accumulator);
  // the other arm carries the conditional update.
  if ((isa<PHINode>(Val: TrueVal) && isa<PHINode>(Val: FalseVal)) ||
      (!isa<PHINode>(Val: TrueVal) && !isa<PHINode>(Val: FalseVal)))
    return InstDesc(false, I);

  // I1 is the non-phi arm: it must be the binary op computing the update.
  Instruction *I1 = isa<PHINode>(Val: TrueVal) ? dyn_cast<Instruction>(Val: FalseVal)
                                           : dyn_cast<Instruction>(Val: TrueVal);
  if (!I1 || !I1->isBinaryOp())
    return InstDesc(false, I);

  // The update must be an fadd/fsub/fmul with fast-math flags, or a plain
  // integer add/sub/mul (integers need no reassociation flags).
  Value *Op1, *Op2;
  if (!(((m_FAdd(L: m_Value(V&: Op1), R: m_Value(V&: Op2)).match(V: I1) ||
          m_FSub(L: m_Value(V&: Op1), R: m_Value(V&: Op2)).match(V: I1)) &&
         I1->isFast()) ||
        (m_FMul(L: m_Value(V&: Op1), R: m_Value(V&: Op2)).match(V: I1) && (I1->isFast())) ||
        ((m_Add(L: m_Value(V&: Op1), R: m_Value(V&: Op2)).match(V: I1) ||
          m_Sub(L: m_Value(V&: Op1), R: m_Value(V&: Op2)).match(V: I1))) ||
        (m_Mul(L: m_Value(V&: Op1), R: m_Value(V&: Op2)).match(V: I1))))
    return InstDesc(false, I);

  // One operand of the update must be the phi, and that phi must also be the
  // select's false arm, so the select keeps the accumulator when the
  // predicate is false.
  Instruction *IPhi = isa<PHINode>(Val: Op1) ? dyn_cast<Instruction>(Val: Op1)
                                          : dyn_cast<Instruction>(Val: Op2);
  if (!IPhi || IPhi != FalseVal)
    return InstDesc(false, I);

  return InstDesc(true, I);
}
| 982 | |
// Classify instruction \p I against recurrence kind \p Kind: returns an
// InstDesc recording whether I can serve as a link in a reduction chain of
// that kind, and (for FP ops) whether exact FP math is required.
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
                                        Instruction *I, RecurKind Kind,
                                        InstDesc &Prev, ScalarEvolution *SE) {
  assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
  switch (I->getOpcode()) {
  default:
    return InstDesc(false, I);
  case Instruction::PHI:
    // Phis are transparent links in the chain: propagate the previously
    // matched kind and any exact-FP-math requirement.
    return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
  case Instruction::Sub:
    // Sub is accepted both for pure Sub reductions and for chained
    // add-with-subs reductions.
    return InstDesc(
        Kind == RecurKind::Sub || Kind == RecurKind::AddChainWithSubs, I);
  case Instruction::Add:
    return InstDesc(
        Kind == RecurKind::Add || Kind == RecurKind::AddChainWithSubs, I);
  case Instruction::Mul:
    return InstDesc(Kind == RecurKind::Mul, I);
  case Instruction::And:
    return InstDesc(Kind == RecurKind::And, I);
  case Instruction::Or:
    return InstDesc(Kind == RecurKind::Or, I);
  case Instruction::Xor:
    return InstDesc(Kind == RecurKind::Xor, I);
  case Instruction::FDiv:
  case Instruction::FMul:
    // Without allow-reassoc, the op itself becomes the exact-FP-math
    // instruction that constrains vectorization.
    return InstDesc(Kind == RecurKind::FMul, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::FSub:
  case Instruction::FAdd:
    return InstDesc(Kind == RecurKind::FAdd, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::Select:
    // Selects may encode conditional arithmetic reductions or find-like
    // patterns; otherwise they are treated like the cmp/call cases below.
    if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul ||
        Kind == RecurKind::Add || Kind == RecurKind::Mul ||
        Kind == RecurKind::Sub || Kind == RecurKind::AddChainWithSubs)
      return isConditionalRdxPattern(I);
    if (isFindRecurrenceKind(Kind) && SE)
      return isFindPattern(TheLoop: L, OrigPhi, I, SE&: *SE);
    [[fallthrough]];
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Call:
    if (isAnyOfRecurrenceKind(Kind))
      return isAnyOfPattern(Loop: L, OrigPhi, I, Prev);
    // llvm.fmuladd is accepted only for FMulAdd reductions.
    if (isFMulAddIntrinsic(I))
      return InstDesc(Kind == RecurKind::FMulAdd, I,
                      I->hasAllowReassoc() ? nullptr : I);
    return InstDesc(false, I);
  }
}
| 1034 | |
| 1035 | bool RecurrenceDescriptor::hasMultipleUsesOf( |
| 1036 | Instruction *I, SmallPtrSetImpl<Instruction *> &Insts, |
| 1037 | unsigned MaxNumUses) { |
| 1038 | unsigned NumUses = 0; |
| 1039 | for (const Use &U : I->operands()) { |
| 1040 | if (Insts.count(Ptr: dyn_cast<Instruction>(Val: U))) |
| 1041 | ++NumUses; |
| 1042 | if (NumUses > MaxNumUses) |
| 1043 | return true; |
| 1044 | } |
| 1045 | |
| 1046 | return false; |
| 1047 | } |
| 1048 | |
// Try to recognize \p Phi as a reduction of one of the supported kinds by
// attempting AddReductionVar for each candidate kind in turn; the first kind
// that matches wins and fills in \p RedDes. Note the order of attempts
// therefore determines how an ambiguous phi is classified.
bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                          RecurrenceDescriptor &RedDes,
                                          DemandedBits *DB, AssumptionCache *AC,
                                          DominatorTree *DT,
                                          ScalarEvolution *SE) {
  if (AddReductionVar(Phi, Kind: RecurKind::Add, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::Sub, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a SUB reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::AddChainWithSubs, TheLoop, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a chained ADD-SUB reduction PHI." << *Phi
                      << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::Mul, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::Or, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::And, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::Xor, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n" );
    return true;
  }
  // Min/max reductions go through a dedicated matcher rather than
  // AddReductionVar.
  auto RD = getMinMaxRecurrence(Phi, TheLoop, SE);
  if (RD.getRecurrenceKind() != RecurKind::None) {
    assert(
        RecurrenceDescriptor::isMinMaxRecurrenceKind(RD.getRecurrenceKind()) &&
        "Expected a min/max recurrence kind" );
    LLVM_DEBUG(dbgs() << "Found a min/max reduction PHI." << *Phi << "\n" );
    RedDes = std::move(RD);
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::AnyOf, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a conditional select reduction PHI." << *Phi
                      << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::FindLast, TheLoop, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a Find reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::FMul, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::FAdd, TheLoop, RedDes, DB, AC, DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n" );
    return true;
  }
  if (AddReductionVar(Phi, Kind: RecurKind::FMulAdd, TheLoop, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n" );
    return true;
  }

  // Not a reduction of known type.
  return false;
}
| 1120 | |
| 1121 | bool RecurrenceDescriptor::isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop, |
| 1122 | DominatorTree *DT) { |
| 1123 | |
| 1124 | // Ensure the phi node is in the loop header and has two incoming values. |
| 1125 | if (Phi->getParent() != TheLoop->getHeader() || |
| 1126 | Phi->getNumIncomingValues() != 2) |
| 1127 | return false; |
| 1128 | |
| 1129 | // Ensure the loop has a preheader and a single latch block. The loop |
| 1130 | // vectorizer will need the latch to set up the next iteration of the loop. |
| 1131 | auto * = TheLoop->getLoopPreheader(); |
| 1132 | auto *Latch = TheLoop->getLoopLatch(); |
| 1133 | if (!Preheader || !Latch) |
| 1134 | return false; |
| 1135 | |
| 1136 | // Ensure the phi node's incoming blocks are the loop preheader and latch. |
| 1137 | if (Phi->getBasicBlockIndex(BB: Preheader) < 0 || |
| 1138 | Phi->getBasicBlockIndex(BB: Latch) < 0) |
| 1139 | return false; |
| 1140 | |
| 1141 | // Get the previous value. The previous value comes from the latch edge while |
| 1142 | // the initial value comes from the preheader edge. |
| 1143 | auto *Previous = dyn_cast<Instruction>(Val: Phi->getIncomingValueForBlock(BB: Latch)); |
| 1144 | |
| 1145 | // If Previous is a phi in the header, go through incoming values from the |
| 1146 | // latch until we find a non-phi value. Use this as the new Previous, all uses |
| 1147 | // in the header will be dominated by the original phi, but need to be moved |
| 1148 | // after the non-phi previous value. |
| 1149 | SmallPtrSet<PHINode *, 4> SeenPhis; |
| 1150 | while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Val: Previous)) { |
| 1151 | if (PrevPhi->getParent() != Phi->getParent()) |
| 1152 | return false; |
| 1153 | if (!SeenPhis.insert(Ptr: PrevPhi).second) |
| 1154 | return false; |
| 1155 | Previous = dyn_cast<Instruction>(Val: PrevPhi->getIncomingValueForBlock(BB: Latch)); |
| 1156 | } |
| 1157 | |
| 1158 | if (!Previous || !TheLoop->contains(Inst: Previous) || isa<PHINode>(Val: Previous)) |
| 1159 | return false; |
| 1160 | |
| 1161 | // Ensure every user of the phi node (recursively) is dominated by the |
| 1162 | // previous value. The dominance requirement ensures the loop vectorizer will |
| 1163 | // not need to vectorize the initial value prior to the first iteration of the |
| 1164 | // loop. |
| 1165 | // TODO: Consider extending this sinking to handle memory instructions. |
| 1166 | |
| 1167 | SmallPtrSet<Value *, 8> Seen; |
| 1168 | BasicBlock *PhiBB = Phi->getParent(); |
| 1169 | SmallVector<Instruction *, 8> WorkList; |
| 1170 | auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) { |
| 1171 | // Cyclic dependence. |
| 1172 | if (Previous == SinkCandidate) |
| 1173 | return false; |
| 1174 | |
| 1175 | if (!Seen.insert(Ptr: SinkCandidate).second) |
| 1176 | return true; |
| 1177 | if (DT->dominates(Def: Previous, |
| 1178 | User: SinkCandidate)) // We already are good w/o sinking. |
| 1179 | return true; |
| 1180 | |
| 1181 | if (SinkCandidate->getParent() != PhiBB || |
| 1182 | SinkCandidate->mayHaveSideEffects() || |
| 1183 | SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator()) |
| 1184 | return false; |
| 1185 | |
| 1186 | // If we reach a PHI node that is not dominated by Previous, we reached a |
| 1187 | // header PHI. No need for sinking. |
| 1188 | if (isa<PHINode>(Val: SinkCandidate)) |
| 1189 | return true; |
| 1190 | |
| 1191 | // Sink User tentatively and check its users |
| 1192 | WorkList.push_back(Elt: SinkCandidate); |
| 1193 | return true; |
| 1194 | }; |
| 1195 | |
| 1196 | WorkList.push_back(Elt: Phi); |
| 1197 | // Try to recursively sink instructions and their users after Previous. |
| 1198 | while (!WorkList.empty()) { |
| 1199 | Instruction *Current = WorkList.pop_back_val(); |
| 1200 | for (User *User : Current->users()) { |
| 1201 | if (!TryToPushSinkCandidate(cast<Instruction>(Val: User))) |
| 1202 | return false; |
| 1203 | } |
| 1204 | } |
| 1205 | |
| 1206 | return true; |
| 1207 | } |
| 1208 | |
// Returns the IR opcode corresponding to the reduction operation of \p Kind.
// Min/max kinds map to their compare opcode; FMulAdd maps to FAdd and
// AddChainWithSubs to Add. Kinds without a single opcode are unreachable.
unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
  switch (Kind) {
  case RecurKind::Sub:
    return Instruction::Sub;
  case RecurKind::AddChainWithSubs:
  case RecurKind::Add:
    return Instruction::Add;
  case RecurKind::Mul:
    return Instruction::Mul;
  case RecurKind::Or:
    return Instruction::Or;
  case RecurKind::And:
    return Instruction::And;
  case RecurKind::Xor:
    return Instruction::Xor;
  case RecurKind::FMul:
    return Instruction::FMul;
  case RecurKind::FMulAdd:
  case RecurKind::FAdd:
    return Instruction::FAdd;
  case RecurKind::SMax:
  case RecurKind::SMin:
  case RecurKind::UMax:
  case RecurKind::UMin:
    return Instruction::ICmp;
  case RecurKind::FMax:
  case RecurKind::FMin:
  case RecurKind::FMaximum:
  case RecurKind::FMinimum:
  case RecurKind::FMaximumNum:
  case RecurKind::FMinimumNum:
    return Instruction::FCmp;
  case RecurKind::FindLast:
  case RecurKind::AnyOf:
  case RecurKind::FindIV:
    // TODO: Set AnyOf and FindIV to Instruction::Select once in-loop reductions
    // are supported.
  default:
    llvm_unreachable("Unknown recurrence operation" );
  }
}
| 1250 | |
| 1251 | SmallVector<Instruction *, 4> |
| 1252 | RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const { |
| 1253 | SmallVector<Instruction *, 4> ReductionOperations; |
| 1254 | const bool IsMinMax = isMinMaxRecurrenceKind(Kind); |
| 1255 | |
| 1256 | // Search down from the Phi to the LoopExitInstr, looking for instructions |
| 1257 | // with a single user of the correct type for the reduction. |
| 1258 | |
| 1259 | // Note that we check that the type of the operand is correct for each item in |
| 1260 | // the chain, including the last (the loop exit value). This can come up from |
| 1261 | // sub, which would otherwise be treated as an add reduction. MinMax also need |
| 1262 | // to check for a pair of icmp/select, for which we use getNextInstruction and |
| 1263 | // isCorrectOpcode functions to step the right number of instruction, and |
| 1264 | // check the icmp/select pair. |
| 1265 | // FIXME: We also do not attempt to look through Select's yet, which might |
| 1266 | // be part of the reduction chain, or attempt to looks through And's to find a |
| 1267 | // smaller bitwidth. Subs are also currently not allowed (which are usually |
| 1268 | // treated as part of a add reduction) as they are expected to generally be |
| 1269 | // more expensive than out-of-loop reductions, and need to be costed more |
| 1270 | // carefully. |
| 1271 | unsigned ExpectedUses = 1; |
| 1272 | if (IsMinMax) |
| 1273 | ExpectedUses = 2; |
| 1274 | |
| 1275 | auto getNextInstruction = [&](Instruction *Cur) -> Instruction * { |
| 1276 | for (auto *User : Cur->users()) { |
| 1277 | Instruction *UI = cast<Instruction>(Val: User); |
| 1278 | if (isa<PHINode>(Val: UI)) |
| 1279 | continue; |
| 1280 | if (IsMinMax) { |
| 1281 | // We are expecting a icmp/select pair, which we go to the next select |
| 1282 | // instruction if we can. We already know that Cur has 2 uses. |
| 1283 | if (isa<SelectInst>(Val: UI)) |
| 1284 | return UI; |
| 1285 | continue; |
| 1286 | } |
| 1287 | return UI; |
| 1288 | } |
| 1289 | return nullptr; |
| 1290 | }; |
| 1291 | auto isCorrectOpcode = [&](Instruction *Cur) { |
| 1292 | if (IsMinMax) { |
| 1293 | Value *LHS, *RHS; |
| 1294 | return SelectPatternResult::isMinOrMax( |
| 1295 | SPF: matchSelectPattern(V: Cur, LHS, RHS).Flavor); |
| 1296 | } |
| 1297 | // Recognize a call to the llvm.fmuladd intrinsic. |
| 1298 | if (isFMulAddIntrinsic(I: Cur)) |
| 1299 | return true; |
| 1300 | |
| 1301 | if (Cur->getOpcode() == Instruction::Sub && |
| 1302 | Kind == RecurKind::AddChainWithSubs) |
| 1303 | return true; |
| 1304 | |
| 1305 | return Cur->getOpcode() == getOpcode(); |
| 1306 | }; |
| 1307 | |
| 1308 | // Attempt to look through Phis which are part of the reduction chain |
| 1309 | unsigned = 0; |
| 1310 | Instruction *RdxInstr = LoopExitInstr; |
| 1311 | if (auto ExitPhi = dyn_cast<PHINode>(Val: LoopExitInstr)) { |
| 1312 | if (ExitPhi->getNumIncomingValues() != 2) |
| 1313 | return {}; |
| 1314 | |
| 1315 | Instruction *Inc0 = dyn_cast<Instruction>(Val: ExitPhi->getIncomingValue(i: 0)); |
| 1316 | Instruction *Inc1 = dyn_cast<Instruction>(Val: ExitPhi->getIncomingValue(i: 1)); |
| 1317 | |
| 1318 | Instruction *Chain = nullptr; |
| 1319 | if (Inc0 == Phi) |
| 1320 | Chain = Inc1; |
| 1321 | else if (Inc1 == Phi) |
| 1322 | Chain = Inc0; |
| 1323 | else |
| 1324 | return {}; |
| 1325 | |
| 1326 | RdxInstr = Chain; |
| 1327 | ExtraPhiUses = 1; |
| 1328 | } |
| 1329 | |
| 1330 | // The loop exit instruction we check first (as a quick test) but add last. We |
| 1331 | // check the opcode is correct (and dont allow them to be Subs) and that they |
| 1332 | // have expected to have the expected number of uses. They will have one use |
| 1333 | // from the phi and one from a LCSSA value, no matter the type. |
| 1334 | if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(N: 2)) |
| 1335 | return {}; |
| 1336 | |
| 1337 | // Check that the Phi has one (or two for min/max) uses, plus an extra use |
| 1338 | // for conditional reductions. |
| 1339 | if (!Phi->hasNUses(N: ExpectedUses + ExtraPhiUses)) |
| 1340 | return {}; |
| 1341 | |
| 1342 | Instruction *Cur = getNextInstruction(Phi); |
| 1343 | |
| 1344 | // Each other instruction in the chain should have the expected number of uses |
| 1345 | // and be the correct opcode. |
| 1346 | while (Cur != RdxInstr) { |
| 1347 | if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(N: ExpectedUses)) |
| 1348 | return {}; |
| 1349 | |
| 1350 | ReductionOperations.push_back(Elt: Cur); |
| 1351 | Cur = getNextInstruction(Cur); |
| 1352 | } |
| 1353 | |
| 1354 | ReductionOperations.push_back(Elt: Cur); |
| 1355 | return ReductionOperations; |
| 1356 | } |
| 1357 | |
// Constructs an induction descriptor, asserting the internal consistency of
// the (start value, kind, step, binary op) tuple supplied by the analysis
// entry points: the start value's type must match the kind, the step must be
// a non-zero integer (or FP for FP inductions), and FP inductions must carry
// their fadd/fsub update instruction.
InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
                                         const SCEV *Step, BinaryOperator *BOp,
                                         SmallVectorImpl<Instruction *> *Casts)
    : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp) {
  assert(IK != IK_NoInduction && "Not an induction" );

  // Start value type should match the induction kind and the value
  // itself should not be null.
  assert(StartValue && "StartValue is null" );
  assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
         "StartValue is not a pointer for pointer induction" );
  assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
         "StartValue is not an integer for integer induction" );

  // Check the Step Value. It should be non-zero integer value.
  assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
         "Step value is zero" );

  assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
         "StepValue is not an integer" );

  assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
         "StepValue is not FP for FpInduction" );
  assert((IK != IK_FpInduction ||
          (InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub))) &&
         "Binary opcode should be specified for FP induction" );

  // Record any casts that become redundant once the induction is recognized.
  if (Casts)
    llvm::append_range(C&: RedundantCasts, R&: *Casts);
}
| 1390 | |
| 1391 | ConstantInt *InductionDescriptor::getConstIntStepValue() const { |
| 1392 | if (isa<SCEVConstant>(Val: Step)) |
| 1393 | return dyn_cast<ConstantInt>(Val: cast<SCEVConstant>(Val: Step)->getValue()); |
| 1394 | return nullptr; |
| 1395 | } |
| 1396 | |
| 1397 | bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop, |
| 1398 | ScalarEvolution *SE, |
| 1399 | InductionDescriptor &D) { |
| 1400 | |
| 1401 | // Here we only handle FP induction variables. |
| 1402 | assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type" ); |
| 1403 | |
| 1404 | if (TheLoop->getHeader() != Phi->getParent()) |
| 1405 | return false; |
| 1406 | |
| 1407 | // The loop may have multiple entrances or multiple exits; we can analyze |
| 1408 | // this phi if it has a unique entry value and a unique backedge value. |
| 1409 | if (Phi->getNumIncomingValues() != 2) |
| 1410 | return false; |
| 1411 | Value *BEValue = nullptr, *StartValue = nullptr; |
| 1412 | if (TheLoop->contains(BB: Phi->getIncomingBlock(i: 0))) { |
| 1413 | BEValue = Phi->getIncomingValue(i: 0); |
| 1414 | StartValue = Phi->getIncomingValue(i: 1); |
| 1415 | } else { |
| 1416 | assert(TheLoop->contains(Phi->getIncomingBlock(1)) && |
| 1417 | "Unexpected Phi node in the loop" ); |
| 1418 | BEValue = Phi->getIncomingValue(i: 1); |
| 1419 | StartValue = Phi->getIncomingValue(i: 0); |
| 1420 | } |
| 1421 | |
| 1422 | BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val: BEValue); |
| 1423 | if (!BOp) |
| 1424 | return false; |
| 1425 | |
| 1426 | Value *Addend = nullptr; |
| 1427 | if (BOp->getOpcode() == Instruction::FAdd) { |
| 1428 | if (BOp->getOperand(i_nocapture: 0) == Phi) |
| 1429 | Addend = BOp->getOperand(i_nocapture: 1); |
| 1430 | else if (BOp->getOperand(i_nocapture: 1) == Phi) |
| 1431 | Addend = BOp->getOperand(i_nocapture: 0); |
| 1432 | } else if (BOp->getOpcode() == Instruction::FSub) |
| 1433 | if (BOp->getOperand(i_nocapture: 0) == Phi) |
| 1434 | Addend = BOp->getOperand(i_nocapture: 1); |
| 1435 | |
| 1436 | if (!Addend) |
| 1437 | return false; |
| 1438 | |
| 1439 | // The addend should be loop invariant |
| 1440 | if (auto *I = dyn_cast<Instruction>(Val: Addend)) |
| 1441 | if (TheLoop->contains(Inst: I)) |
| 1442 | return false; |
| 1443 | |
| 1444 | // FP Step has unknown SCEV |
| 1445 | const SCEV *Step = SE->getUnknown(V: Addend); |
| 1446 | D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp); |
| 1447 | return true; |
| 1448 | } |
| 1449 | |
| 1450 | /// This function is called when we suspect that the update-chain of a phi node |
/// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts,
| 1452 | /// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime |
| 1453 | /// predicate P under which the SCEV expression for the phi can be the |
| 1454 | /// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the |
| 1455 | /// cast instructions that are involved in the update-chain of this induction. |
| 1456 | /// A caller that adds the required runtime predicate can be free to drop these |
| 1457 | /// cast instructions, and compute the phi using \p AR (instead of some scev |
| 1458 | /// expression with casts). |
| 1459 | /// |
| 1460 | /// For example, without a predicate the scev expression can take the following |
| 1461 | /// form: |
| 1462 | /// (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy) |
| 1463 | /// |
| 1464 | /// It corresponds to the following IR sequence: |
| 1465 | /// %for.body: |
| 1466 | /// %x = phi i64 [ 0, %ph ], [ %add, %for.body ] |
| 1467 | /// %casted_phi = "ExtTrunc i64 %x" |
| 1468 | /// %add = add i64 %casted_phi, %step |
| 1469 | /// |
| 1470 | /// where %x is given in \p PN, |
| 1471 | /// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate, |
| 1472 | /// and the IR sequence that "ExtTrunc i64 %x" represents can take one of |
| 1473 | /// several forms, for example, such as: |
| 1474 | /// ExtTrunc1: %casted_phi = and %x, 2^n-1 |
| 1475 | /// or: |
| 1476 | /// ExtTrunc2: %t = shl %x, m |
| 1477 | /// %casted_phi = ashr %t, m |
| 1478 | /// |
| 1479 | /// If we are able to find such sequence, we return the instructions |
| 1480 | /// we found, namely %casted_phi and the instructions on its use-def chain up |
| 1481 | /// to the phi (not including the phi). |
static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
                                    const SCEVUnknown *PhiScev,
                                    const SCEVAddRecExpr *AR,
                                    SmallVectorImpl<Instruction *> &CastInsts) {

  assert(CastInsts.empty() && "CastInsts is expected to be empty." );
  auto *PN = cast<PHINode>(Val: PhiScev->getValue());
  assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression" );
  const Loop *L = AR->getLoop();

  // Find any cast instructions that participate in the def-use chain of
  // PhiScev in the loop.
  // FORNOW/TODO: We currently expect the def-use chain to include only
  // two-operand instructions, where one of the operands is an invariant.
  // createAddRecFromPHIWithCasts() currently does not support anything more
  // involved than that, so we keep the search simple. This can be
  // extended/generalized as needed.

  // Step one instruction backwards through the chain: for a two-operand
  // instruction with exactly one loop-variant operand, return that operand;
  // return null otherwise (which terminates the walk below).
  auto getDef = [&](const Value *Val) -> Value * {
    const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
    if (!BinOp)
      return nullptr;
    Value *Op0 = BinOp->getOperand(i_nocapture: 0);
    Value *Op1 = BinOp->getOperand(i_nocapture: 1);
    Value *Def = nullptr;
    if (L->isLoopInvariant(V: Op0))
      Def = Op1;
    else if (L->isLoopInvariant(V: Op1))
      Def = Op0;
    // Note: if both operands are loop-variant (or both invariant), Def stays
    // null and the caller bails out.
    return Def;
  };

  // Look for the instruction that defines the induction via the
  // loop backedge.
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;
  Value *Val = PN->getIncomingValueForBlock(BB: Latch);
  if (!Val)
    return false;

  // Follow the def-use chain until the induction phi is reached.
  // If on the way we encounter a Value that has the same SCEV Expr as the
  // phi node, we can consider the instructions we visit from that point
  // as part of the cast-sequence that can be ignored.
  bool InCastSequence = false;
  auto *Inst = dyn_cast<Instruction>(Val);
  while (Val != PN) {
    // If we encountered a phi node other than PN, or if we left the loop,
    // we bail out.
    if (!Inst || !L->contains(Inst)) {
      return false;
    }
    // Once a value on the chain is (predicated-)equivalent to the phi's
    // AddRec, everything from there down to the phi counts as the cast
    // sequence.
    auto *AddRec = dyn_cast<SCEVAddRecExpr>(Val: PSE.getSCEV(V: Val));
    if (AddRec && PSE.areAddRecsEqualWithPreds(AR1: AddRec, AR2: AR))
      InCastSequence = true;
    if (InCastSequence) {
      // Only the last instruction in the cast sequence is expected to have
      // uses outside the induction def-use chain.
      if (!CastInsts.empty())
        if (!Inst->hasOneUse())
          return false;
      CastInsts.push_back(Elt: Inst);
    }
    Val = getDef(Val);
    if (!Val)
      return false;
    Inst = dyn_cast<Instruction>(Val);
  }

  // Succeed only if a cast sequence was actually found on the chain.
  return InCastSequence;
}
| 1554 | |
| 1555 | bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop, |
| 1556 | PredicatedScalarEvolution &PSE, |
| 1557 | InductionDescriptor &D, bool Assume) { |
| 1558 | Type *PhiTy = Phi->getType(); |
| 1559 | |
| 1560 | // Handle integer and pointer inductions variables. |
| 1561 | // Now we handle also FP induction but not trying to make a |
| 1562 | // recurrent expression from the PHI node in-place. |
| 1563 | |
| 1564 | if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() && |
| 1565 | !PhiTy->isDoubleTy() && !PhiTy->isHalfTy()) |
| 1566 | return false; |
| 1567 | |
| 1568 | if (PhiTy->isFloatingPointTy()) |
| 1569 | return isFPInductionPHI(Phi, TheLoop, SE: PSE.getSE(), D); |
| 1570 | |
| 1571 | const SCEV *PhiScev = PSE.getSCEV(V: Phi); |
| 1572 | const auto *AR = dyn_cast<SCEVAddRecExpr>(Val: PhiScev); |
| 1573 | |
| 1574 | // We need this expression to be an AddRecExpr. |
| 1575 | if (Assume && !AR) |
| 1576 | AR = PSE.getAsAddRec(V: Phi); |
| 1577 | |
| 1578 | if (!AR) { |
| 1579 | LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n" ); |
| 1580 | return false; |
| 1581 | } |
| 1582 | |
| 1583 | // Record any Cast instructions that participate in the induction update |
| 1584 | const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(Val: PhiScev); |
| 1585 | // If we started from an UnknownSCEV, and managed to build an addRecurrence |
| 1586 | // only after enabling Assume with PSCEV, this means we may have encountered |
| 1587 | // cast instructions that required adding a runtime check in order to |
| 1588 | // guarantee the correctness of the AddRecurrence respresentation of the |
| 1589 | // induction. |
| 1590 | if (PhiScev != AR && SymbolicPhi) { |
| 1591 | SmallVector<Instruction *, 2> Casts; |
| 1592 | if (getCastsForInductionPHI(PSE, PhiScev: SymbolicPhi, AR, CastInsts&: Casts)) |
| 1593 | return isInductionPHI(Phi, L: TheLoop, SE: PSE.getSE(), D, Expr: AR, CastsToIgnore: &Casts); |
| 1594 | } |
| 1595 | |
| 1596 | return isInductionPHI(Phi, L: TheLoop, SE: PSE.getSE(), D, Expr: AR); |
| 1597 | } |
| 1598 | |
| 1599 | bool InductionDescriptor::isInductionPHI( |
| 1600 | PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE, |
| 1601 | InductionDescriptor &D, const SCEV *Expr, |
| 1602 | SmallVectorImpl<Instruction *> *CastsToIgnore) { |
| 1603 | Type *PhiTy = Phi->getType(); |
| 1604 | // isSCEVable returns true for integer and pointer types. |
| 1605 | if (!SE->isSCEVable(Ty: PhiTy)) |
| 1606 | return false; |
| 1607 | |
| 1608 | // Check that the PHI is consecutive. |
| 1609 | const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(V: Phi); |
| 1610 | const SCEV *Step; |
| 1611 | |
| 1612 | // FIXME: We are currently matching the specific loop TheLoop; if it doesn't |
| 1613 | // match, we should treat it as a uniform. Unfortunately, we don't currently |
| 1614 | // know how to handled uniform PHIs. |
| 1615 | if (!match(S: PhiScev, P: m_scev_AffineAddRec(Op0: m_SCEV(), Op1: m_SCEV(V&: Step), |
| 1616 | L: m_SpecificLoop(L: TheLoop)))) { |
| 1617 | LLVM_DEBUG( |
| 1618 | dbgs() << "LV: PHI is not a poly recurrence for requested loop.\n" ); |
| 1619 | return false; |
| 1620 | } |
| 1621 | |
| 1622 | // This function assumes that InductionPhi is called only on Phi nodes |
| 1623 | // present inside loop headers. Check for the same, and throw an assert if |
| 1624 | // the current Phi is not present inside the loop header. |
| 1625 | assert(Phi->getParent() == TheLoop->getHeader() && |
| 1626 | "Invalid Phi node, not present in loop header" ); |
| 1627 | |
| 1628 | if (!TheLoop->getLoopPreheader()) |
| 1629 | return false; |
| 1630 | |
| 1631 | Value *StartValue = |
| 1632 | Phi->getIncomingValueForBlock(BB: TheLoop->getLoopPreheader()); |
| 1633 | |
| 1634 | BasicBlock *Latch = TheLoop->getLoopLatch(); |
| 1635 | if (!Latch) |
| 1636 | return false; |
| 1637 | |
| 1638 | if (PhiTy->isIntegerTy()) { |
| 1639 | BinaryOperator *BOp = |
| 1640 | dyn_cast<BinaryOperator>(Val: Phi->getIncomingValueForBlock(BB: Latch)); |
| 1641 | D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp, |
| 1642 | CastsToIgnore); |
| 1643 | return true; |
| 1644 | } |
| 1645 | |
| 1646 | assert(PhiTy->isPointerTy() && "The PHI must be a pointer" ); |
| 1647 | |
| 1648 | // This allows induction variables w/non-constant steps. |
| 1649 | D = InductionDescriptor(StartValue, IK_PtrInduction, Step); |
| 1650 | return true; |
| 1651 | } |
| 1652 | |