1//===-- VPlanConstruction.cpp - Transforms for initial VPlan construction -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements transforms for initial VPlan construction.
11///
12//===----------------------------------------------------------------------===//
13
14#include "LoopVectorizationPlanner.h"
15#include "VPlan.h"
16#include "VPlanAnalysis.h"
17#include "VPlanCFG.h"
18#include "VPlanDominatorTree.h"
19#include "VPlanHelpers.h"
20#include "VPlanPatternMatch.h"
21#include "VPlanTransforms.h"
22#include "VPlanUtils.h"
23#include "llvm/Analysis/Loads.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/LoopIterator.h"
26#include "llvm/Analysis/OptimizationRemarkEmitter.h"
27#include "llvm/Analysis/ScalarEvolution.h"
28#include "llvm/Analysis/ScalarEvolutionExpressions.h"
29#include "llvm/Analysis/TargetTransformInfo.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Transforms/Utils/LoopUtils.h"
34#include "llvm/Transforms/Utils/LoopVersioning.h"
35
36#define DEBUG_TYPE "vplan"
37
38using namespace llvm;
39using namespace VPlanPatternMatch;
40
namespace {
// Class that is used to build the plain CFG for the incoming IR.
// The plain CFG is a 1-to-1 VPlan mirror of the input loop nest's CFG; it is
// built once up front and then refined by VPlan-to-VPlan transformations.
class PlainCFGBuilder {
  // The outermost loop of the input loop nest considered for vectorization.
  Loop *TheLoop;

  // Loop Info analysis.
  LoopInfo *LI;

  // Loop versioning for alias metadata.
  LoopVersioning *LVer;

  // Vectorization plan that we are working on.
  std::unique_ptr<VPlan> Plan;

  // Builder of the VPlan instruction-level representation.
  VPBuilder VPIRBuilder;

  // NOTE: The following maps are intentionally destroyed after the plain CFG
  // construction because subsequent VPlan-to-VPlan transformation may
  // invalidate them.
  // Map incoming BasicBlocks to their newly-created VPBasicBlocks.
  DenseMap<BasicBlock *, VPBasicBlock *> BB2VPBB;
  // Map incoming Value definitions to their newly-created VPValues.
  DenseMap<Value *, VPValue *> IRDef2VPValue;

  // Hold phi nodes that need to be fixed once the plain CFG has been built.
  // Only header phis are deferred; their latch operand is not available until
  // the whole CFG has been visited.
  SmallVector<PHINode *, 8> PhisToFix;

  // Utility functions.
  // Set the predecessors of \p VPBB to mirror the IR predecessors of \p BB.
  void setVPBBPredsFromBB(VPBasicBlock *VPBB, BasicBlock *BB);
  // Add the deferred operands to the header phis recorded in PhisToFix.
  void fixHeaderPhis();
  // Return the VPBasicBlock mirroring \p BB, creating it on first request.
  VPBasicBlock *getOrCreateVPBB(BasicBlock *BB);
#ifndef NDEBUG
  // Return true if \p Val is defined outside the loop nest being modeled.
  bool isExternalDef(Value *Val);
#endif
  // Return the VPValue modeling the IR operand \p IRVal, creating a live-in
  // for external definitions on first request.
  VPValue *getOrCreateVPOperand(Value *IRVal);
  // Translate all instructions of \p BB into recipes appended to \p VPBB.
  void createVPInstructionsForVPBB(VPBasicBlock *VPBB, BasicBlock *BB);

public:
  PlainCFGBuilder(Loop *Lp, LoopInfo *LI, LoopVersioning *LVer)
      : TheLoop(Lp), LI(LI), LVer(LVer), Plan(std::make_unique<VPlan>(args&: Lp)) {}

  /// Build plain CFG for TheLoop and connect it to Plan's entry.
  std::unique_ptr<VPlan> buildPlainCFG();
};
} // anonymous namespace
88
89// Set predecessors of \p VPBB in the same order as they are in \p BB. \p VPBB
90// must have no predecessors.
91void PlainCFGBuilder::setVPBBPredsFromBB(VPBasicBlock *VPBB, BasicBlock *BB) {
92 // Collect VPBB predecessors.
93 SmallVector<VPBlockBase *, 2> VPBBPreds;
94 for (BasicBlock *Pred : predecessors(BB))
95 VPBBPreds.push_back(Elt: getOrCreateVPBB(BB: Pred));
96 VPBB->setPredecessors(VPBBPreds);
97}
98
99static bool isHeaderBB(BasicBlock *BB, Loop *L) {
100 return L && BB == L->getHeader();
101}
102
// Add operands to VPInstructions representing phi nodes from the input IR.
// Runs after the whole plain CFG has been built, so both the preheader and
// latch incoming values are available by now.
void PlainCFGBuilder::fixHeaderPhis() {
  for (auto *Phi : PhisToFix) {
    assert(IRDef2VPValue.count(Phi) && "Missing VPInstruction for PHINode.");
    VPValue *VPVal = IRDef2VPValue[Phi];
    assert(isa<VPPhi>(VPVal) && "Expected VPPhi for phi node.");
    auto *PhiR = cast<VPPhi>(Val: VPVal);
    // The VPPhi was created empty in createVPInstructionsForVPBB; only header
    // phis are deferred like this, hence the checks below.
    assert(PhiR->getNumOperands() == 0 && "Expected VPPhi with no operands.");
    assert(isHeaderBB(Phi->getParent(), LI->getLoopFor(Phi->getParent())) &&
           "Expected Phi in header block.");
    assert(Phi->getNumOperands() == 2 &&
           "header phi must have exactly 2 operands");
    // Add incoming values in IR predecessor order, matching the order used by
    // setVPBBPredsFromBB for the header's VPlan predecessors.
    for (BasicBlock *Pred : predecessors(BB: Phi->getParent()))
      PhiR->addOperand(
          Operand: getOrCreateVPOperand(IRVal: Phi->getIncomingValueForBlock(BB: Pred)));
  }
}
120
121// Create a new empty VPBasicBlock for an incoming BasicBlock or retrieve an
122// existing one if it was already created.
123VPBasicBlock *PlainCFGBuilder::getOrCreateVPBB(BasicBlock *BB) {
124 if (auto *VPBB = BB2VPBB.lookup(Val: BB)) {
125 // Retrieve existing VPBB.
126 return VPBB;
127 }
128
129 // Create new VPBB.
130 StringRef Name = BB->getName();
131 LLVM_DEBUG(dbgs() << "Creating VPBasicBlock for " << Name << "\n");
132 VPBasicBlock *VPBB = Plan->createVPBasicBlock(Name);
133 BB2VPBB[BB] = VPBB;
134 return VPBB;
135}
136
137#ifndef NDEBUG
138// Return true if \p Val is considered an external definition. An external
139// definition is either:
140// 1. A Value that is not an Instruction. This will be refined in the future.
141// 2. An Instruction that is outside of the IR region represented in VPlan,
142// i.e., is not part of the loop nest.
143bool PlainCFGBuilder::isExternalDef(Value *Val) {
144 // All the Values that are not Instructions are considered external
145 // definitions for now.
146 Instruction *Inst = dyn_cast<Instruction>(Val);
147 if (!Inst)
148 return true;
149
150 // Check whether Instruction definition is in loop body.
151 return !TheLoop->contains(Inst);
152}
153#endif
154
155// Create a new VPValue or retrieve an existing one for the Instruction's
156// operand \p IRVal. This function must only be used to create/retrieve VPValues
157// for *Instruction's operands* and not to create regular VPInstruction's. For
158// the latter, please, look at 'createVPInstructionsForVPBB'.
159VPValue *PlainCFGBuilder::getOrCreateVPOperand(Value *IRVal) {
160 auto VPValIt = IRDef2VPValue.find(Val: IRVal);
161 if (VPValIt != IRDef2VPValue.end())
162 // Operand has an associated VPInstruction or VPValue that was previously
163 // created.
164 return VPValIt->second;
165
166 // Operand doesn't have a previously created VPInstruction/VPValue. This
167 // means that operand is:
168 // A) a definition external to VPlan,
169 // B) any other Value without specific representation in VPlan.
170 // For now, we use VPValue to represent A and B and classify both as external
171 // definitions. We may introduce specific VPValue subclasses for them in the
172 // future.
173 assert(isExternalDef(IRVal) && "Expected external definition as operand.");
174
175 // A and B: Create VPValue and add it to the pool of external definitions and
176 // to the Value->VPValue map.
177 VPValue *NewVPVal = Plan->getOrAddLiveIn(V: IRVal);
178 IRDef2VPValue[IRVal] = NewVPVal;
179 return NewVPVal;
180}
181
// Create new VPInstructions in a VPBasicBlock, given its BasicBlock
// counterpart. This function must be invoked in RPO so that the operands of a
// VPInstruction in \p BB have been visited before (except for Phi nodes).
void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
                                                  BasicBlock *BB) {
  VPIRBuilder.setInsertPoint(VPBB);
  // TODO: Model and preserve debug intrinsics in VPlan.
  for (Instruction &InstRef : *BB) {
    Instruction *Inst = &InstRef;

    // There shouldn't be any VPValue for Inst at this point. Otherwise, we
    // visited Inst when we shouldn't, breaking the RPO traversal order.
    assert(!IRDef2VPValue.count(Inst) &&
           "Instruction shouldn't have been visited.");

    if (isa<UncondBrInst>(Val: Inst))
      // Skip the rest of the Instruction processing for Branch instructions.
      // Unconditional successors are wired up separately via setOneSuccessor.
      continue;

    if (auto *Br = dyn_cast<CondBrInst>(Val: Inst)) {
      // Conditional branch instructions are represented using BranchOnCond
      // recipes.
      VPValue *Cond = getOrCreateVPOperand(IRVal: Br->getCondition());
      VPIRBuilder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: {Cond}, Inst, Flags: {},
                               MD: VPIRMetadata(*Inst), DL: Inst->getDebugLoc());
      continue;
    }

    if (auto *SI = dyn_cast<SwitchInst>(Val: Inst)) {
      // Don't emit recipes for unconditional switch instructions.
      if (SI->getNumCases() == 0)
        continue;
      // Operand layout: condition first, then one case value per case.
      SmallVector<VPValue *> Ops = {getOrCreateVPOperand(IRVal: SI->getCondition())};
      for (auto Case : SI->cases())
        Ops.push_back(Elt: getOrCreateVPOperand(IRVal: Case.getCaseValue()));
      VPIRBuilder.createNaryOp(Opcode: Instruction::Switch, Operands: Ops, Inst, Flags: {},
                               MD: VPIRMetadata(*Inst), DL: Inst->getDebugLoc());
      continue;
    }

    VPSingleDefRecipe *NewR;
    if (auto *Phi = dyn_cast<PHINode>(Val: Inst)) {
      // Phi node's operands may not have been visited at this point. We create
      // an empty VPInstruction that we will fix once the whole plain CFG has
      // been built.
      NewR =
          VPIRBuilder.createScalarPhi(IncomingValues: {}, DL: Phi->getDebugLoc(), Name: "vec.phi", Flags: *Phi);
      NewR->setUnderlyingValue(Phi);
      if (isHeaderBB(BB: Phi->getParent(), L: LI->getLoopFor(BB: Phi->getParent()))) {
        // Header phis need to be fixed after the VPBB for the latch has been
        // created.
        PhisToFix.push_back(Elt: Phi);
      } else {
        // Add operands for VPPhi in the order matching its predecessors in
        // VPlan. The IR incoming-block order may differ from the VPlan
        // predecessor order, hence the map from predecessor to incoming value.
        DenseMap<const VPBasicBlock *, VPValue *> VPPredToIncomingValue;
        for (unsigned I = 0; I != Phi->getNumOperands(); ++I) {
          VPPredToIncomingValue[BB2VPBB[Phi->getIncomingBlock(i: I)]] =
              getOrCreateVPOperand(IRVal: Phi->getIncomingValue(i: I));
        }
        for (VPBlockBase *Pred : VPBB->getPredecessors())
          NewR->addOperand(
              Operand: VPPredToIncomingValue.lookup(Val: Pred->getExitingBasicBlock()));
      }
    } else {
      // Build VPIRMetadata from the instruction and add loop versioning
      // metadata for loads and stores.
      VPIRMetadata MD(*Inst);
      if (isa<LoadInst, StoreInst>(Val: Inst) && LVer) {
        const auto &[AliasScopeMD, NoAliasMD] =
            LVer->getNoAliasMetadataFor(OrigInst: Inst);
        if (AliasScopeMD)
          MD.setMetadata(Kind: LLVMContext::MD_alias_scope, Node: AliasScopeMD);
        if (NoAliasMD)
          MD.setMetadata(Kind: LLVMContext::MD_noalias, Node: NoAliasMD);
      }

      // Translate LLVM-IR operands into VPValue operands and set them in the
      // new VPInstruction.
      SmallVector<VPValue *, 4> VPOperands;
      for (Value *Op : Inst->operands())
        VPOperands.push_back(Elt: getOrCreateVPOperand(IRVal: Op));

      if (auto *CI = dyn_cast<CastInst>(Val: Inst)) {
        NewR = VPIRBuilder.createScalarCast(Opcode: CI->getOpcode(), Op: VPOperands[0],
                                            ResultTy: CI->getType(), DL: CI->getDebugLoc(),
                                            Flags: VPIRFlags(*CI), Metadata: MD);
        NewR->setUnderlyingValue(CI);
      } else if (auto *LI = dyn_cast<LoadInst>(Val: Inst)) {
        NewR = VPIRBuilder.createScalarLoad(ResultTy: LI->getType(), Addr: VPOperands[0],
                                            DL: LI->getDebugLoc(), Metadata: MD);
        NewR->setUnderlyingValue(LI);
      } else {
        // Build VPInstruction for any arbitrary Instruction without specific
        // representation in VPlan.
        NewR =
            VPIRBuilder.createNaryOp(Opcode: Inst->getOpcode(), Operands: VPOperands, Inst,
                                     Flags: VPIRFlags(*Inst), MD, DL: Inst->getDebugLoc());
      }
    }

    // Record the mapping so later instructions can find this def as operand.
    IRDef2VPValue[Inst] = NewR;
  }
}
286
// Main interface to build the plain CFG.
std::unique_ptr<VPlan> PlainCFGBuilder::buildPlainCFG() {
  // Pre-seed the BB->VPBB map with the entry and exit VPIRBasicBlocks the
  // VPlan constructor already created, so getOrCreateVPBB reuses them.
  VPIRBasicBlock *Entry = cast<VPIRBasicBlock>(Val: Plan->getEntry());
  BB2VPBB[Entry->getIRBasicBlock()] = Entry;
  for (VPIRBasicBlock *ExitVPBB : Plan->getExitBlocks())
    BB2VPBB[ExitVPBB->getIRBasicBlock()] = ExitVPBB;

  // 1. Scan the body of the loop in a topological order to visit each basic
  // block after having visited its predecessor basic blocks. Create a VPBB for
  // each BB and link it to its successor and predecessor VPBBs. Note that
  // predecessors must be set in the same order as they are in the incoming IR.
  // Otherwise, there might be problems with existing phi nodes and algorithm
  // based on predecessors traversal.

  // Loop PH needs to be explicitly visited since it's not taken into account by
  // LoopBlocksDFS.
  BasicBlock *ThePreheaderBB = TheLoop->getLoopPreheader();
  assert((ThePreheaderBB->getTerminator()->getNumSuccessors() == 1) &&
         "Unexpected loop preheader");
  // Non-void preheader definitions become live-ins; they are defined outside
  // the region modeled by the plain CFG.
  for (auto &I : *ThePreheaderBB) {
    if (I.getType()->isVoidTy())
      continue;
    IRDef2VPValue[&I] = Plan->getOrAddLiveIn(V: &I);
  }

  LoopBlocksRPO RPO(TheLoop);
  RPO.perform(LI);

  for (BasicBlock *BB : RPO) {
    // Create or retrieve the VPBasicBlock for this BB.
    VPBasicBlock *VPBB = getOrCreateVPBB(BB);
    // Set VPBB predecessors in the same order as they are in the incoming BB.
    setVPBBPredsFromBB(VPBB, BB);

    // Create VPInstructions for BB.
    createVPInstructionsForVPBB(VPBB, BB);

    // Set VPBB successors. We create empty VPBBs for successors if they don't
    // exist already. Recipes will be created when the successor is visited
    // during the RPO traversal.
    if (auto *SI = dyn_cast<SwitchInst>(Val: BB->getTerminator())) {
      // Successor order: default destination first, then one per case,
      // matching the operand order of the Switch recipe.
      SmallVector<VPBlockBase *> Succs = {
          getOrCreateVPBB(BB: SI->getDefaultDest())};
      for (auto Case : SI->cases())
        Succs.push_back(Elt: getOrCreateVPBB(BB: Case.getCaseSuccessor()));
      VPBB->setSuccessors(Succs);
      continue;
    }
    if (auto *BI = dyn_cast<UncondBrInst>(Val: BB->getTerminator())) {
      VPBB->setOneSuccessor(getOrCreateVPBB(BB: BI->getSuccessor()));
      continue;
    }
    // Conditional branch: successor 0 corresponds to the true edge, successor
    // 1 to the false edge, mirroring the IR.
    auto *BI = cast<CondBrInst>(Val: BB->getTerminator());
    BasicBlock *IRSucc0 = BI->getSuccessor(i: 0);
    BasicBlock *IRSucc1 = BI->getSuccessor(i: 1);
    VPBasicBlock *Successor0 = getOrCreateVPBB(BB: IRSucc0);
    VPBasicBlock *Successor1 = getOrCreateVPBB(BB: IRSucc1);
    VPBB->setTwoSuccessors(IfTrue: Successor0, IfFalse: Successor1);
  }

  // Exit blocks are not visited by the RPO traversal above; wire up their
  // predecessors now that all interior VPBBs exist.
  for (auto *EB : Plan->getExitBlocks())
    setVPBBPredsFromBB(VPBB: EB, BB: EB->getIRBasicBlock());

  // 2. The whole CFG has been built at this point so all the input Values must
  // have a VPlan counterpart. Fix VPlan header phi by adding their
  // corresponding VPlan operands.
  fixHeaderPhis();

  Plan->getEntry()->setOneSuccessor(getOrCreateVPBB(BB: TheLoop->getHeader()));
  Plan->getEntry()->setPlan(&*Plan);

  // Fix VPlan loop-closed-ssa exit phi's by adding incoming operands to the
  // VPIRInstructions wrapping them.
  // Note that the operand order corresponds to IR predecessor order, and may
  // need adjusting when VPlan predecessors are added, if an exit block has
  // multiple predecessors.
  for (auto *EB : Plan->getExitBlocks()) {
    for (VPRecipeBase &R : EB->phis()) {
      auto *PhiR = cast<VPIRPhi>(Val: &R);
      PHINode &Phi = PhiR->getIRPhi();
      assert(PhiR->getNumOperands() == 0 &&
             "no phi operands should be added yet");
      for (BasicBlock *Pred : predecessors(BB: EB->getIRBasicBlock()))
        PhiR->addOperand(
            Operand: getOrCreateVPOperand(IRVal: Phi.getIncomingValueForBlock(BB: Pred)));
    }
  }

  LLVM_DEBUG(Plan->setName("Plain CFG\n"); dbgs() << *Plan);
  return std::move(Plan);
}
378
/// Checks if \p HeaderVPB is a loop header block in the plain CFG; that is, it
/// has exactly 2 predecessors (preheader and latch), where the block
/// dominates the latch and the preheader dominates the block. If it is a
/// header block return true and canonicalize the predecessors of the header
/// (making sure the preheader appears first and the latch second) and the
/// successors of the latch (making sure the loop exit comes first). Otherwise
/// return false.
static bool canonicalHeaderAndLatch(VPBlockBase *HeaderVPB,
                                    const VPDominatorTree &VPDT) {
  ArrayRef<VPBlockBase *> Preds = HeaderVPB->getPredecessors();
  if (Preds.size() != 2)
    return false;

  // Tentatively assume the current predecessor order is already
  // (preheader, latch) and verify via dominance.
  auto *PreheaderVPBB = Preds[0];
  auto *LatchVPBB = Preds[1];
  if (!VPDT.dominates(A: PreheaderVPBB, B: HeaderVPB) ||
      !VPDT.dominates(A: HeaderVPB, B: LatchVPBB)) {
    // Try the opposite assignment; if that fails too, this is not a header.
    std::swap(a&: PreheaderVPBB, b&: LatchVPBB);

    if (!VPDT.dominates(A: PreheaderVPBB, B: HeaderVPB) ||
        !VPDT.dominates(A: HeaderVPB, B: LatchVPBB))
      return false;

    // Canonicalize predecessors of header so that preheader is first and
    // latch second. Header phi operands must be swapped in lockstep to keep
    // the (start value, backedge value) correspondence.
    HeaderVPB->swapPredecessors();
    for (VPRecipeBase &R : cast<VPBasicBlock>(Val: HeaderVPB)->phis())
      R.swapOperands();
  }

  // The two successors of conditional branch match the condition, with the
  // first successor corresponding to true and the second to false. We
  // canonicalize the successors of the latch when introducing the region, such
  // that the latch exits the region when its condition is true; invert the
  // original condition if the original CFG branches to the header on true.
  // Note that the exit edge is not yet connected for top-level loops.
  if (LatchVPBB->getSingleSuccessor() ||
      LatchVPBB->getSuccessors()[0] != HeaderVPB)
    return true;

  assert(LatchVPBB->getNumSuccessors() == 2 && "Must have 2 successors");
  auto *Term = cast<VPBasicBlock>(Val: LatchVPBB)->getTerminator();
  assert(cast<VPInstruction>(Term)->getOpcode() ==
             VPInstruction::BranchOnCond &&
         "terminator must be a BranchOnCond");
  auto *Not = new VPInstruction(VPInstruction::Not, {Term->getOperand(N: 0)});
  Not->insertBefore(InsertPos: Term);
  Term->setOperand(I: 0, New: Not);
  LatchVPBB->swapSuccessors();

  return true;
}
431
/// Create a new VPRegionBlock for the loop starting at \p HeaderVPB.
/// \p HeaderVPB must already be canonicalized (preheader first, latch second;
/// see canonicalHeaderAndLatch), since the predecessors are read by position.
static void createLoopRegion(VPlan &Plan, VPBlockBase *HeaderVPB) {
  auto *PreheaderVPBB = HeaderVPB->getPredecessors()[0];
  auto *LatchVPBB = HeaderVPB->getPredecessors()[1];

  // Detach the header from its predecessors; the region will take their place
  // in the surrounding CFG.
  VPBlockUtils::disconnectBlocks(From: PreheaderVPBB, To: HeaderVPB);
  VPBlockUtils::disconnectBlocks(From: LatchVPBB, To: HeaderVPB);

  // Create an empty region first and insert it between PreheaderVPBB and
  // the exit blocks, taking care to preserve the original predecessor &
  // successor order of blocks. Set region entry and exiting after both
  // HeaderVPB and LatchVPBB have been disconnected from their
  // predecessors/successors.
  auto *R = Plan.createLoopRegion();

  // Transfer latch's successors to the region.
  VPBlockUtils::transferSuccessors(Old: LatchVPBB, New: R);

  VPBlockUtils::connectBlocks(From: PreheaderVPBB, To: R);
  R->setEntry(HeaderVPB);
  R->setExiting(LatchVPBB);

  // All VPBB's reachable shallowly from HeaderVPB belong to the current region.
  // (Shallow traversal stops at nested regions, which keep their own parent.)
  for (VPBlockBase *VPBB : vp_depth_first_shallow(G: HeaderVPB))
    VPBB->setParent(R);
}
458
// Add the necessary canonical IV and branch recipes required to control the
// loop: a canonical IV phi starting at 0 in \p HeaderVPBB, its increment by
// VF * UF in \p LatchVPBB, and a BranchOnCount terminator comparing the
// incremented IV against the vector trip count.
static void addCanonicalIVRecipes(VPlan &Plan, VPBasicBlock *HeaderVPBB,
                                  VPBasicBlock *LatchVPBB, Type *IdxTy,
                                  DebugLoc DL) {
  auto *StartV = Plan.getConstantInt(Ty: IdxTy, Val: 0);

  // Add a VPCanonicalIVPHIRecipe starting at 0 to the header.
  auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
  HeaderVPBB->insert(Recipe: CanonicalIVPHI, InsertPt: HeaderVPBB->begin());

  // We are about to replace the branch to exit the region. Remove the original
  // BranchOnCond, if there is any. Keep its debug location for the new
  // terminator created below.
  DebugLoc LatchDL = DL;
  if (!LatchVPBB->empty() && match(V: &LatchVPBB->back(), P: m_BranchOnCond())) {
    LatchDL = LatchVPBB->getTerminator()->getDebugLoc();
    LatchVPBB->getTerminator()->eraseFromParent();
  }

  VPBuilder Builder(LatchVPBB);
  // Add a VPInstruction to increment the scalar canonical IV by VF * UF.
  // Initially the induction increment is guaranteed to not wrap, but that may
  // change later, e.g. when tail-folding, when the flags need to be dropped.
  auto *CanonicalIVIncrement = Builder.createAdd(
      LHS: CanonicalIVPHI, RHS: &Plan.getVFxUF(), DL, Name: "index.next", WrapFlags: {true, false});
  // Close the phi cycle: the increment is the IV's backedge value.
  CanonicalIVPHI->addOperand(Operand: CanonicalIVIncrement);

  // Add the BranchOnCount VPInstruction to the latch.
  Builder.createNaryOp(Opcode: VPInstruction::BranchOnCount,
                       Operands: {CanonicalIVIncrement, &Plan.getVectorTripCount()},
                       DL: LatchDL);
}
491
/// Creates extracts for values in \p Plan defined in a loop region and used
/// outside a loop region. For each exit phi fed from \p MiddleVPBB, the
/// exiting vector value is reduced to a scalar via ExtractLastPart +
/// ExtractLastLane inserted in the middle block.
static void createExtractsForLiveOuts(VPlan &Plan, VPBasicBlock *MiddleVPBB) {
  VPBuilder B(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
  for (VPBasicBlock *EB : Plan.getExitBlocks()) {
    // Only exit blocks reached via the middle block are handled here.
    if (!is_contained(Range: EB->predecessors(), Element: MiddleVPBB))
      continue;

    for (VPRecipeBase &R : EB->phis()) {
      auto *ExitIRI = cast<VPIRPhi>(Val: &R);
      VPValue *Exiting = ExitIRI->getIncomingValueForBlock(VPBB: MiddleVPBB);
      // Live-ins are already scalar and loop-invariant; nothing to extract.
      if (isa<VPIRValue>(Val: Exiting))
        continue;
      Exiting = B.createNaryOp(Opcode: VPInstruction::ExtractLastPart, Operands: Exiting);
      Exiting = B.createNaryOp(Opcode: VPInstruction::ExtractLastLane, Operands: Exiting);
      ExitIRI->setIncomingValueForBlock(VPBB: MiddleVPBB, V: Exiting);
    }
  }
}
511
512/// Return an iterator range to iterate over pairs of matching phi nodes in
513/// \p Header and \p ScalarHeader, skipping the canonical IV in the former.
514static auto getMatchingPhisForScalarLoop(VPBasicBlock *Header,
515 VPBasicBlock *ScalarHeader) {
516 return zip_equal(t: drop_begin(RangeOrContainer: Header->phis()), u: ScalarHeader->phis());
517}
518
/// Surround the plain-CFG loop in \p Plan with the initial vectorization
/// skeleton: vector preheader, middle block and scalar preheader, plus the
/// canonical IV, trip count and resume phis connecting vector and scalar
/// loops.
static void addInitialSkeleton(VPlan &Plan, Type *InductionTy, DebugLoc IVDL,
                               PredicatedScalarEvolution &PSE, Loop *TheLoop) {
  VPDominatorTree VPDT(Plan);

  // Canonicalize the top-level loop header so that predecessor 0 is the
  // preheader and predecessor 1 the latch.
  auto *HeaderVPBB = cast<VPBasicBlock>(Val: Plan.getEntry()->getSingleSuccessor());
  canonicalHeaderAndLatch(HeaderVPB: HeaderVPBB, VPDT);
  auto *LatchVPBB = cast<VPBasicBlock>(Val: HeaderVPBB->getPredecessors()[1]);

  VPBasicBlock *VecPreheader = Plan.createVPBasicBlock(Name: "vector.ph");
  VPBlockUtils::insertBlockAfter(NewBlock: VecPreheader, BlockPtr: Plan.getEntry());

  VPBasicBlock *MiddleVPBB = Plan.createVPBasicBlock(Name: "middle.block");
  // The canonical LatchVPBB has the header block as last successor. If it has
  // another successor, this successor is an exit block - insert middle block on
  // its edge. Otherwise, add middle block as another successor retaining header
  // as last.
  if (LatchVPBB->getNumSuccessors() == 2) {
    VPBlockBase *LatchExitVPB = LatchVPBB->getSuccessors()[0];
    VPBlockUtils::insertOnEdge(From: LatchVPBB, To: LatchExitVPB, BlockPtr: MiddleVPBB);
  } else {
    VPBlockUtils::connectBlocks(From: LatchVPBB, To: MiddleVPBB);
    LatchVPBB->swapSuccessors();
  }

  addCanonicalIVRecipes(Plan, HeaderVPBB, LatchVPBB, IdxTy: InductionTy, DL: IVDL);

  // Create SCEV and VPValue for the trip count.
  // We use the symbolic max backedge-taken-count, which works also when
  // vectorizing loops with uncountable early exits.
  const SCEV *BackedgeTakenCountSCEV = PSE.getSymbolicMaxBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCountSCEV) &&
         "Invalid backedge-taken count");
  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *TripCount = SE.getTripCountFromExitCount(ExitCount: BackedgeTakenCountSCEV,
                                                       EvalTy: InductionTy, L: TheLoop);
  Plan.setTripCount(vputils::getOrCreateVPValueForSCEVExpr(Plan, Expr: TripCount));

  VPBasicBlock *ScalarPH = Plan.createVPBasicBlock(Name: "scalar.ph");
  VPBlockUtils::connectBlocks(From: ScalarPH, To: Plan.getScalarHeader());

  // The connection order corresponds to the operands of the conditional branch,
  // with the middle block already connected to the exit block.
  VPBlockUtils::connectBlocks(From: MiddleVPBB, To: ScalarPH);
  // Also connect the entry block to the scalar preheader.
  // TODO: Also introduce a branch recipe together with the minimum trip count
  // check.
  VPBlockUtils::connectBlocks(From: Plan.getEntry(), To: ScalarPH);
  Plan.getEntry()->swapSuccessors();

  createExtractsForLiveOuts(Plan, MiddleVPBB);

  // Create resume phis in the scalar preheader for each phi in the scalar loop.
  // Their incoming value from the vector loop will be the last lane of the
  // corresponding vector loop header phi.
  VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
  VPBuilder ScalarPHBuilder(ScalarPH);
  assert(equal(ScalarPH->getPredecessors(),
               ArrayRef<VPBlockBase *>({MiddleVPBB, Plan.getEntry()})) &&
         "unexpected predecessor order of scalar ph");
  for (const auto &[PhiR, ScalarPhiR] :
       getMatchingPhisForScalarLoop(Header: HeaderVPBB, ScalarHeader: Plan.getScalarHeader())) {
    auto *VectorPhiR = cast<VPPhi>(Val: &PhiR);
    // Operand 1 is the backedge value thanks to the canonicalization above.
    VPValue *BackedgeVal = VectorPhiR->getOperand(N: 1);
    VPValue *ResumeFromVectorLoop =
        MiddleBuilder.createNaryOp(Opcode: VPInstruction::ExtractLastPart, Operands: BackedgeVal);
    ResumeFromVectorLoop = MiddleBuilder.createNaryOp(
        Opcode: VPInstruction::ExtractLastLane, Operands: ResumeFromVectorLoop);
    // Create scalar resume phi, with the first operand being the incoming value
    // from the middle block and the second operand coming from the entry block.
    auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
        IncomingValues: {ResumeFromVectorLoop, VectorPhiR->getOperand(N: 0)},
        DL: VectorPhiR->getDebugLoc());
    cast<VPIRPhi>(Val: &ScalarPhiR)->addOperand(Operand: ResumePhiR);
  }
}
594
595/// Check \p Plan's live-in and replace them with constants, if they can be
596/// simplified via SCEV.
597static void simplifyLiveInsWithSCEV(VPlan &Plan,
598 PredicatedScalarEvolution &PSE) {
599 auto GetSimplifiedLiveInViaSCEV = [&](VPValue *VPV) -> VPValue * {
600 const SCEV *Expr = vputils::getSCEVExprForVPValue(V: VPV, PSE);
601 if (auto *C = dyn_cast<SCEVConstant>(Val: Expr))
602 return Plan.getOrAddLiveIn(V: C->getValue());
603 return nullptr;
604 };
605
606 for (VPValue *LiveIn : to_vector(Range: Plan.getLiveIns())) {
607 if (VPValue *SimplifiedLiveIn = GetSimplifiedLiveInViaSCEV(LiveIn))
608 LiveIn->replaceAllUsesWith(New: SimplifiedLiveIn);
609 }
610}
611
/// No-op hook whose only purpose is to give RUN_VPLAN_PASS a named pass under
/// which to print the initial VPlan (see buildVPlan0).
static void printAfterInitialConstruction(VPlan &) {}
614
/// Build the initial VPlan ("VPlan0") for \p TheLoop: mirror the IR CFG,
/// add the initial skeleton (preheaders, middle block, canonical IV and trip
/// count), and fold SCEV-constant live-ins.
std::unique_ptr<VPlan>
VPlanTransforms::buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy,
                             DebugLoc IVDL, PredicatedScalarEvolution &PSE,
                             LoopVersioning *LVer) {
  // 1. Mirror the input loop nest's CFG one-to-one into VPlan.
  PlainCFGBuilder Builder(TheLoop, &LI, LVer);
  std::unique_ptr<VPlan> VPlan0 = Builder.buildPlainCFG();
  // 2. Wrap the loop with the initial vectorization skeleton.
  addInitialSkeleton(Plan&: *VPlan0, InductionTy, IVDL, PSE, TheLoop);
  // 3. Replace live-ins that SCEV can prove constant.
  simplifyLiveInsWithSCEV(Plan&: *VPlan0, PSE);

  RUN_VPLAN_PASS_NO_VERIFY(printAfterInitialConstruction, *VPlan0);
  return VPlan0;
}
627
/// Creates a VPWidenIntOrFpInductionRecipe or VPWidenPointerInductionRecipe
/// for \p Phi based on \p IndDesc. \p PhiR is the plain-CFG VPPhi being
/// replaced (operand 0 = start, operand 1 = backedge value) and \p Start its
/// start live-in. Also rewrites live-out extracts of the backedge value into
/// ExitingIVValue recipes.
static VPHeaderPHIRecipe *
createWidenInductionRecipe(PHINode *Phi, VPPhi *PhiR, VPIRValue *Start,
                           const InductionDescriptor &IndDesc, VPlan &Plan,
                           PredicatedScalarEvolution &PSE, Loop &OrigLoop,
                           DebugLoc DL) {
  [[maybe_unused]] ScalarEvolution &SE = *PSE.getSE();
  assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
         "step must be loop invariant");
  assert((Plan.getLiveIn(IndDesc.getStartValue()) == Start ||
          (SE.isSCEVable(IndDesc.getStartValue()->getType()) &&
           SE.getSCEV(IndDesc.getStartValue()) ==
               vputils::getSCEVExprForVPValue(Start, PSE))) &&
         "Start VPValue must match IndDesc's start value");

  VPValue *Step =
      vputils::getOrCreateVPValueForSCEVExpr(Plan, Expr: IndDesc.getStep());

  VPValue *BackedgeVal = PhiR->getOperand(N: 1);
  // Replace live-out extracts of WideIV's backedge value by ExitingIVValue
  // recipes. optimizeInductionLiveOutUsers will later compute the proper
  // DerivedIV.
  auto ReplaceExtractsWithExitingIVValue = [&](VPHeaderPHIRecipe *WideIV) {
    // Take a snapshot of the users: the loop below erases recipes, which
    // would invalidate iteration over the live use-list.
    for (VPUser *U : to_vector(Range: BackedgeVal->users())) {
      if (!match(U, P: m_ExtractLastPart(Op0: m_VPValue())))
        continue;
      auto *ExtractLastPart = cast<VPInstruction>(Val: U);
      VPUser *ExtractLastPartUser = ExtractLastPart->getSingleUser();
      assert(ExtractLastPartUser && "must have a single user");
      if (!match(U: ExtractLastPartUser, P: m_ExtractLastLane(Op0: m_VPValue())))
        continue;
      auto *ExtractLastLane = cast<VPInstruction>(Val: ExtractLastPartUser);
      assert(is_contained(ExtractLastLane->getParent()->successors(),
                          Plan.getScalarPreheader()) &&
             "last lane must be extracted in the middle block");
      VPBuilder Builder(ExtractLastLane);
      ExtractLastLane->replaceAllUsesWith(
          New: Builder.createNaryOp(Opcode: VPInstruction::ExitingIVValue, Operands: {WideIV}));
      ExtractLastLane->eraseFromParent();
      ExtractLastPart->eraseFromParent();
    }
  };

  if (IndDesc.getKind() == InductionDescriptor::IK_PtrInduction) {
    auto *WideIV = new VPWidenPointerInductionRecipe(
        Phi, Start, Step, &Plan.getVFxUF(), IndDesc, DL);
    ReplaceExtractsWithExitingIVValue(WideIV);
    return WideIV;
  }

  assert((IndDesc.getKind() == InductionDescriptor::IK_IntInduction ||
          IndDesc.getKind() == InductionDescriptor::IK_FpInduction) &&
         "must have an integer or float induction at this point");

  // Update wide induction increments to use the same step as the corresponding
  // wide induction. This enables detecting induction increments directly in
  // VPlan and removes redundant splats.
  if (match(V: BackedgeVal, P: m_Add(Op0: m_Specific(VPV: PhiR), Op1: m_VPValue())))
    BackedgeVal->getDefiningRecipe()->setOperand(I: 1, New: Step);

  // It is always safe to copy over the NoWrap and FastMath flags. In
  // particular, when folding tail by masking, the masked-off lanes are never
  // used, so it is safe.
  VPIRFlags Flags = vputils::getFlagsFromIndDesc(ID: IndDesc);

  auto *WideIV = new VPWidenIntOrFpInductionRecipe(
      Phi, Start, Step, &Plan.getVF(), IndDesc, Flags, DL);

  ReplaceExtractsWithExitingIVValue(WideIV);
  return WideIV;
}
700
701void VPlanTransforms::createHeaderPhiRecipes(
702 VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop,
703 const MapVector<PHINode *, InductionDescriptor> &Inductions,
704 const MapVector<PHINode *, RecurrenceDescriptor> &Reductions,
705 const SmallPtrSetImpl<const PHINode *> &FixedOrderRecurrences,
706 const SmallPtrSetImpl<PHINode *> &InLoopReductions, bool AllowReordering) {
  // Retrieve the header manually from the initial plain-CFG VPlan.
708 VPBasicBlock *HeaderVPBB = cast<VPBasicBlock>(
709 Val: Plan.getEntry()->getSuccessors()[1]->getSingleSuccessor());
710 assert(VPDominatorTree(Plan).dominates(HeaderVPBB,
711 HeaderVPBB->getPredecessors()[1]) &&
712 "header must dominate its latch");
713
714 auto CreateHeaderPhiRecipe = [&](VPPhi *PhiR) -> VPHeaderPHIRecipe * {
715 // TODO: Gradually replace uses of underlying instruction by analyses on
716 // VPlan.
717 auto *Phi = cast<PHINode>(Val: PhiR->getUnderlyingInstr());
718 assert(PhiR->getNumOperands() == 2 &&
719 "Must have 2 operands for header phis");
720
721 // Extract common values once.
722 VPIRValue *Start = cast<VPIRValue>(Val: PhiR->getOperand(N: 0));
723 VPValue *BackedgeValue = PhiR->getOperand(N: 1);
724
725 if (FixedOrderRecurrences.contains(Ptr: Phi)) {
726 // TODO: Currently fixed-order recurrences are modeled as chains of
727 // first-order recurrences. If there are no users of the intermediate
728 // recurrences in the chain, the fixed order recurrence should be
729 // modeled directly, enabling more efficient codegen.
730 return new VPFirstOrderRecurrencePHIRecipe(Phi, *Start, *BackedgeValue);
731 }
732
733 auto InductionIt = Inductions.find(Key: Phi);
734 if (InductionIt != Inductions.end())
735 return createWidenInductionRecipe(Phi, PhiR, Start, IndDesc: InductionIt->second,
736 Plan, PSE, OrigLoop,
737 DL: PhiR->getDebugLoc());
738
739 assert(Reductions.contains(Phi) && "only reductions are expected now");
740 const RecurrenceDescriptor &RdxDesc = Reductions.lookup(Key: Phi);
741 assert(RdxDesc.getRecurrenceStartValue() ==
742 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()) &&
743 "incoming value must match start value");
744 // Will be updated later to >1 if reduction is partial.
745 unsigned ScaleFactor = 1;
746 bool UseOrderedReductions = !AllowReordering && RdxDesc.isOrdered();
747 return new VPReductionPHIRecipe(
748 Phi, RdxDesc.getRecurrenceKind(), *Start, *BackedgeValue,
749 getReductionStyle(InLoop: InLoopReductions.contains(Ptr: Phi), Ordered: UseOrderedReductions,
750 ScaleFactor),
751 Phi->getType()->isFloatingPointTy() ? RdxDesc.getFastMathFlags()
752 : VPIRFlags(),
753 RdxDesc.hasUsesOutsideReductionChain());
754 };
755
756 assert(isa<VPCanonicalIVPHIRecipe>(HeaderVPBB->front()) &&
757 "first recipe must be canonical IV phi");
758 for (VPRecipeBase &R : make_early_inc_range(Range: drop_begin(RangeOrContainer: HeaderVPBB->phis()))) {
759 auto *PhiR = cast<VPPhi>(Val: &R);
760 VPHeaderPHIRecipe *HeaderPhiR = CreateHeaderPhiRecipe(PhiR);
761 HeaderPhiR->insertBefore(InsertPos: PhiR);
762 PhiR->replaceAllUsesWith(New: HeaderPhiR);
763 PhiR->eraseFromParent();
764 }
765
766 for (const auto &[HeaderPhiR, ScalarPhiR] :
767 getMatchingPhisForScalarLoop(Header: HeaderVPBB, ScalarHeader: Plan.getScalarPreheader())) {
768 auto *ResumePhiR = cast<VPPhi>(Val: &ScalarPhiR);
769 if (isa<VPFirstOrderRecurrencePHIRecipe>(Val: &HeaderPhiR)) {
770 ResumePhiR->setName("scalar.recur.init");
771 auto *ExtractLastLane = cast<VPInstruction>(Val: ResumePhiR->getOperand(N: 0));
772 ExtractLastLane->setName("vector.recur.extract");
773 continue;
774 }
775 ResumePhiR->setName(isa<VPWidenInductionRecipe>(Val: HeaderPhiR)
776 ? "bc.resume.val"
777 : "bc.merge.rdx");
778 }
779}
780
/// Lower reduction chains whose header phis are marked in-loop to
/// VPReductionRecipes: for each in-loop VPReductionPHIRecipe, walk the chain
/// of recipes feeding its backedge and replace each arithmetic "link" with a
/// VPReductionRecipe, placed so it comes after all its inputs (including any
/// block mask).
void VPlanTransforms::createInLoopReductionRecipes(
    VPlan &Plan, const DenseSet<BasicBlock *> &BlocksNeedingPredication,
    ElementCount MinVF) {
  VPTypeAnalysis TypeInfo(Plan);
  VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  // Replaced links are collected and only erased after all chains have been
  // processed.
  SmallVector<VPRecipeBase *> ToDelete;

  for (VPRecipeBase &R : Header->phis()) {
    // Only in-loop reduction phis are lowered here; for scalar VFs, only
    // ordered reductions are handled.
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(Val: &R);
    if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
      continue;

    RecurKind Kind = PhiR->getRecurrenceKind();
    assert(!RecurrenceDescriptor::isFindLastRecurrenceKind(Kind) &&
           !RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
           !RecurrenceDescriptor::isFindIVRecurrenceKind(Kind) &&
           "AnyOf and Find reductions are not allowed for in-loop reductions");

    // For FP reductions start from fully-fast flags; they are intersected
    // with the flags of every link in the chain below.
    bool IsFPRecurrence =
        RecurrenceDescriptor::isFloatingPointRecurrenceKind(Kind);
    FastMathFlags FMFs =
        IsFPRecurrence ? FastMathFlags::getFast() : FastMathFlags();

    // Collect the chain of "link" recipes for the reduction starting at PhiR.
    SetVector<VPSingleDefRecipe *> Worklist;
    Worklist.insert(X: PhiR);
    for (unsigned I = 0; I != Worklist.size(); ++I) {
      VPSingleDefRecipe *Cur = Worklist[I];
      for (VPUser *U : Cur->users()) {
        auto *UserRecipe = cast<VPSingleDefRecipe>(Val: U);
        // Users outside the loop region (middle block / scalar preheader) are
        // not part of the chain.
        if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
          assert((UserRecipe->getParent() == Plan.getMiddleBlock() ||
                  UserRecipe->getParent() == Plan.getScalarPreheader()) &&
                 "U must be either in the loop region, the middle block or the "
                 "scalar preheader.");
          continue;
        }

        // Stores using instructions will be sunk later.
        if (match(R: UserRecipe, P: m_VPInstruction<Instruction::Store>()))
          continue;
        Worklist.insert(X: UserRecipe);
      }
    }

    // Visit operation "Links" along the reduction chain top-down starting from
    // the phi until LoopExitValue. We keep track of the previous item
    // (PreviousLink) to tell which of the two operands of a Link will remain
    // scalar and which will be reduced. For minmax by select(cmp), Link will be
    // the select instructions. Blend recipes of in-loop reduction phi's will
    // get folded to their non-phi operand, as the reduction recipe handles the
    // condition directly.
    VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
    for (VPSingleDefRecipe *CurrentLink : drop_begin(RangeOrContainer&: Worklist)) {
      // Fold blends of the reduction phi to their non-phi incoming value.
      if (auto *Blend = dyn_cast<VPBlendRecipe>(Val: CurrentLink)) {
        assert(Blend->getNumIncomingValues() == 2 &&
               "Blend must have 2 incoming values");
        unsigned PhiRIdx = Blend->getIncomingValue(Idx: 0) == PhiR ? 0 : 1;
        assert(Blend->getIncomingValue(PhiRIdx) == PhiR &&
               "PhiR must be an operand of the blend");
        Blend->replaceAllUsesWith(New: Blend->getIncomingValue(Idx: 1 - PhiRIdx));
        continue;
      }

      // Intersect the chain's flags with this link's flags (for selects, also
      // with the flags of the recipe feeding the select's condition).
      if (IsFPRecurrence) {
        FastMathFlags CurFMF =
            cast<VPRecipeWithIRFlags>(Val: CurrentLink)->getFastMathFlags();
        if (match(R: CurrentLink, P: m_Select(Op0: m_VPValue(), Op1: m_VPValue(), Op2: m_VPValue())))
          CurFMF |= cast<VPRecipeWithIRFlags>(Val: CurrentLink->getOperand(N: 0))
                        ->getFastMathFlags();
        FMFs &= CurFMF;
      }

      Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();

      // Recognize a call to the llvm.fmuladd intrinsic.
      bool IsFMulAdd = Kind == RecurKind::FMulAdd;
      VPValue *VecOp;
      VPBasicBlock *LinkVPBB = CurrentLink->getParent();
      if (IsFMulAdd) {
        assert(RecurrenceDescriptor::isFMulAddIntrinsic(CurrentLinkI) &&
               "Expected current VPInstruction to be a call to the "
               "llvm.fmuladd intrinsic");
        assert(CurrentLink->getOperand(2) == PreviousLink &&
               "expected a call where the previous link is the added operand");

        // If the instruction is a call to the llvm.fmuladd intrinsic then we
        // need to create an fmul recipe (multiplying the first two operands of
        // the fmuladd together) to use as the vector operand for the fadd
        // reduction.
        auto *FMulRecipe = new VPInstruction(
            Instruction::FMul,
            {CurrentLink->getOperand(N: 0), CurrentLink->getOperand(N: 1)},
            CurrentLinkI->getFastMathFlags());
        LinkVPBB->insert(Recipe: FMulRecipe, InsertPt: CurrentLink->getIterator());
        VecOp = FMulRecipe;
      } else if (Kind == RecurKind::AddChainWithSubs &&
                 match(R: CurrentLink, P: m_Sub(Op0: m_VPValue(), Op1: m_VPValue()))) {
        // Rewrite a subtraction link as an addition of the negated operand,
        // i.e. use (0 - op) as the vector operand of the reduction.
        Type *PhiTy = TypeInfo.inferScalarType(V: PhiR);
        auto *Zero = Plan.getConstantInt(Ty: PhiTy, Val: 0);
        VPBuilder Builder(LinkVPBB, CurrentLink->getIterator());
        auto *Sub = Builder.createSub(LHS: Zero, RHS: CurrentLink->getOperand(N: 1),
                                      DL: CurrentLinkI->getDebugLoc());
        Sub->setUnderlyingValue(CurrentLinkI);
        VecOp = Sub;
      } else {
        // Index of the first operand which holds a non-mask vector operand.
        unsigned IndexOfFirstOperand = 0;
        if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
          // The compare feeding a min/max select is left untouched; only the
          // select itself forms a link.
          if (match(R: CurrentLink, P: m_Cmp(Op0: m_VPValue(), Op1: m_VPValue())))
            continue;
          assert(match(CurrentLink,
                       m_Select(m_VPValue(), m_VPValue(), m_VPValue())) &&
                 "must be a select recipe");
          IndexOfFirstOperand = 1;
        }
        // Note that for non-commutable operands (cmp-selects), the semantics of
        // the cmp-select are captured in the recurrence kind.
        unsigned VecOpId =
            CurrentLink->getOperand(N: IndexOfFirstOperand) == PreviousLink
                ? IndexOfFirstOperand + 1
                : IndexOfFirstOperand;
        VecOp = CurrentLink->getOperand(N: VecOpId);
        assert(
            VecOp != PreviousLink &&
            CurrentLink->getOperand(
                cast<VPInstruction>(CurrentLink)->getNumOperandsWithoutMask() -
                1 - (VecOpId - IndexOfFirstOperand)) == PreviousLink &&
            "PreviousLink must be the operand other than VecOp");
      }

      // Get block mask from CurrentLink, if it needs predication.
      VPValue *CondOp = nullptr;
      if (BlocksNeedingPredication.contains(V: CurrentLinkI->getParent()))
        CondOp = cast<VPInstruction>(Val: CurrentLink)->getMask();

      assert(PhiR->getVFScaleFactor() == 1 &&
             "inloop reductions must be unscaled");
      auto *RedRecipe = new VPReductionRecipe(
          Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp,
          getReductionStyle(/*IsInLoop=*/InLoop: true, Ordered: PhiR->isOrdered(), ScaleFactor: 1),
          CurrentLinkI->getDebugLoc());
      // Append the recipe to the end of the VPBasicBlock because we need to
      // ensure that it comes after all of its inputs, including CondOp.
      // Delete CurrentLink as it will be invalid if its operand is replaced
      // with a reduction defined at the bottom of the block in the next link.
      if (LinkVPBB->getNumSuccessors() == 0)
        RedRecipe->insertBefore(InsertPos: &*std::prev(x: std::prev(x: LinkVPBB->end())));
      else
        LinkVPBB->appendRecipe(Recipe: RedRecipe);

      CurrentLink->replaceAllUsesWith(New: RedRecipe);
      ToDelete.push_back(Elt: CurrentLink);
      PreviousLink = RedRecipe;
    }
  }

  // Erase the replaced links now that no chain references them anymore.
  for (VPRecipeBase *R : ToDelete)
    R->eraseFromParent();
}
941
942/// Check if all loads in the loop are dereferenceable. Iterates over all blocks
943/// reachable from \p HeaderVPBB, skipping \p MiddleVPBB. Returns false if any
944/// non-dereferenceable load is found.
945static bool areAllLoadsDereferenceable(VPBasicBlock *HeaderVPBB,
946 VPBasicBlock *MiddleVPBB, Loop *TheLoop,
947 PredicatedScalarEvolution &PSE,
948 DominatorTree &DT, AssumptionCache *AC) {
949 ScalarEvolution &SE = *PSE.getSE();
950 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
951 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
952 Range: vp_depth_first_shallow(G: HeaderVPBB))) {
953 // Skip blocks outside the loop (exit blocks and their successors).
954 if (VPBB == MiddleVPBB)
955 continue;
956 for (VPRecipeBase &R : *VPBB) {
957 auto *VPI = dyn_cast<VPInstructionWithType>(Val: &R);
958 if (!VPI || VPI->getOpcode() != Instruction::Load)
959 continue;
960
961 // Get the pointer SCEV for dereferenceability checking.
962 VPValue *Ptr = VPI->getOperand(N: 0);
963 const SCEV *PtrSCEV = vputils::getSCEVExprForVPValue(V: Ptr, PSE, L: TheLoop);
964 if (isa<SCEVCouldNotCompute>(Val: PtrSCEV)) {
965 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Found non-dereferenceable "
966 "load with SCEVCouldNotCompute pointer\n");
967 return false;
968 }
969
970 // Check dereferenceability using the SCEV-based version.
971 Type *LoadTy = VPI->getResultType();
972 const SCEV *SizeSCEV =
973 SE.getStoreSizeOfExpr(IntTy: DL.getIndexType(PtrTy: PtrSCEV->getType()), StoreTy: LoadTy);
974 auto *Load = cast<LoadInst>(Val: VPI->getUnderlyingValue());
975 SmallVector<const SCEVPredicate *> Preds;
976 if (isDereferenceableAndAlignedInLoop(PtrSCEV, Alignment: Load->getAlign(), EltSizeSCEV: SizeSCEV,
977 L: TheLoop, SE, DT, AC, Predicates: &Preds))
978 continue;
979
980 LLVM_DEBUG(
981 dbgs() << "LV: Not vectorizing: Auto-vectorization of loops with "
982 "potentially faulting load is not supported.\n");
983 return false;
984 }
985 }
986 return true;
987}
988
/// Prepare \p Plan for loops with early exits. For loops with an uncountable
/// exit, require all loads to be provably dereferenceable and delegate to
/// handleUncountableEarlyExits; otherwise disconnect countable early exits so
/// the vector loop only exits via the latch. Returns false if the plan cannot
/// be handled (non-dereferenceable load with an uncountable exit).
bool VPlanTransforms::handleEarlyExits(VPlan &Plan, UncountableExitStyle Style,
                                       Loop *TheLoop,
                                       PredicatedScalarEvolution &PSE,
                                       DominatorTree &DT, AssumptionCache *AC) {
  // Recover middle block, latch and header positionally from the plan: the
  // scalar header's single predecessor is the scalar preheader, whose first
  // predecessor is the middle block.
  auto *MiddleVPBB = cast<VPBasicBlock>(
      Val: Plan.getScalarHeader()->getSinglePredecessor()->getPredecessors()[0]);
  auto *LatchVPBB = cast<VPBasicBlock>(Val: MiddleVPBB->getSinglePredecessor());
  VPBasicBlock *HeaderVPBB = cast<VPBasicBlock>(Val: LatchVPBB->getSuccessors()[1]);

  // TODO: We would like to detect uncountable exits and stores within loops
  //       with such exits from the VPlan alone. Exit detection can be moved
  //       here from handleUncountableEarlyExits, but we need to improve
  //       detection of recipes which may write to memory.
  if (Style != UncountableExitStyle::NoUncountableExit) {
    // Bail out if any load in the loop may not be dereferenceable.
    if (!areAllLoadsDereferenceable(HeaderVPBB, MiddleVPBB, TheLoop, PSE, DT,
                                    AC))
      return false;
    // TODO: Check target preference for style.
    handleUncountableEarlyExits(Plan, HeaderVPBB, LatchVPBB, MiddleVPBB, Style);
    return true;
  }

  // Disconnect countable early exits from the loop, leaving it with a single
  // exit from the latch. Countable early exits are left for a scalar epilog.
  for (VPIRBasicBlock *EB : Plan.getExitBlocks()) {
    for (VPBlockBase *Pred : to_vector(Range&: EB->getPredecessors())) {
      if (Pred == MiddleVPBB)
        continue;

      // Remove phi operands for the early exiting block.
      for (VPRecipeBase &R : EB->phis())
        cast<VPIRPhi>(Val: &R)->removeIncomingValueFor(IncomingBlock: Pred);
      // Erase the exiting terminator and the edge to the exit block.
      auto *EarlyExitingVPBB = cast<VPBasicBlock>(Val: Pred);
      EarlyExitingVPBB->getTerminator()->eraseFromParent();
      VPBlockUtils::disconnectBlocks(From: Pred, To: EB);
    }
  }
  return true;
}
1028
1029void VPlanTransforms::addMiddleCheck(VPlan &Plan, bool TailFolded) {
1030 auto *MiddleVPBB = cast<VPBasicBlock>(
1031 Val: Plan.getScalarHeader()->getSinglePredecessor()->getPredecessors()[0]);
1032 // If MiddleVPBB has a single successor then the original loop does not exit
1033 // via the latch and the single successor must be the scalar preheader.
1034 // There's no need to add a runtime check to MiddleVPBB.
1035 if (MiddleVPBB->getNumSuccessors() == 1) {
1036 assert(MiddleVPBB->getSingleSuccessor() == Plan.getScalarPreheader() &&
1037 "must have ScalarPH as single successor");
1038 return;
1039 }
1040
1041 assert(MiddleVPBB->getNumSuccessors() == 2 && "must have 2 successors");
1042
1043 // Add a check in the middle block to see if we have completed all of the
1044 // iterations in the first vector loop.
1045 //
1046 // Three cases:
1047 // 1) If we require a scalar epilogue, the scalar ph must execute. Set the
1048 // condition to false.
1049 // 2) If (N - N%VF) == N, then we *don't* need to run the
1050 // remainder. Thus if tail is to be folded, we know we don't need to run
1051 // the remainder and we can set the condition to true.
1052 // 3) Otherwise, construct a runtime check.
1053
1054 // We use the same DebugLoc as the scalar loop latch terminator instead of
1055 // the corresponding compare because they may have ended up with different
1056 // line numbers and we want to avoid awkward line stepping while debugging.
1057 // E.g., if the compare has got a line number inside the loop.
1058 auto *LatchVPBB = cast<VPBasicBlock>(Val: MiddleVPBB->getSinglePredecessor());
1059 DebugLoc LatchDL = LatchVPBB->getTerminator()->getDebugLoc();
1060 VPBuilder Builder(MiddleVPBB);
1061 VPValue *Cmp;
1062 if (TailFolded)
1063 Cmp = Plan.getTrue();
1064 else
1065 Cmp = Builder.createICmp(Pred: CmpInst::ICMP_EQ, A: Plan.getTripCount(),
1066 B: &Plan.getVectorTripCount(), DL: LatchDL, Name: "cmp.n");
1067 Builder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: {Cmp}, DL: LatchDL);
1068}
1069
1070void VPlanTransforms::createLoopRegions(VPlan &Plan) {
1071 VPDominatorTree VPDT(Plan);
1072 for (VPBlockBase *HeaderVPB : vp_post_order_shallow(G: Plan.getEntry()))
1073 if (canonicalHeaderAndLatch(HeaderVPB, VPDT))
1074 createLoopRegion(Plan, HeaderVPB);
1075
1076 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
1077 TopRegion->setName("vector loop");
1078 TopRegion->getEntryBasicBlock()->setName("vector.body");
1079}
1080
/// Fold the scalar tail of the loop into the vector loop by masking: create a
/// header mask comparing a widened canonical IV against the backedge-taken
/// count, branch on it in the header, split the latch at the IV increment,
/// and add phis/extracts so values are only taken from active lanes.
void VPlanTransforms::foldTailByMasking(VPlan &Plan) {
  assert(Plan.getExitBlocks().size() == 1 &&
         "only a single-exit block is supported currently");
  assert(Plan.getExitBlocks().front()->getSinglePredecessor() ==
             Plan.getMiddleBlock() &&
         "the exit block must have middle block as single predecessor");

  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  assert(LoopRegion->getSingleSuccessor() == Plan.getMiddleBlock() &&
         "The vector loop region must have the middle block as its single "
         "successor for now");
  VPBasicBlock *Header = LoopRegion->getEntryBasicBlock();

  // Split the header after its phis, so only phis (plus the mask computed
  // below) remain in the header block itself.
  Header->splitAt(SplitAt: Header->getFirstNonPhi());

  // Create the header mask, insert it in the header and branch on it. A lane
  // is active while the widened canonical IV is ULE the backedge-taken count.
  auto *IV =
      new VPWidenCanonicalIVRecipe(Header->getParent()->getCanonicalIV());
  VPBuilder Builder(Header, Header->getFirstNonPhi());
  Builder.insert(R: IV);
  VPValue *BTC = Plan.getOrCreateBackedgeTakenCount();
  VPValue *HeaderMask = Builder.createICmp(Pred: CmpInst::ICMP_ULE, A: IV, B: BTC);
  Builder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: HeaderMask);

  // The latch is expected to end in BranchOnCount(iv-increment,
  // vector-trip-count) with the increment immediately before the branch.
  VPBasicBlock *OrigLatch = LoopRegion->getExitingBasicBlock();
  VPValue *IVInc;
  [[maybe_unused]] bool TermBranchOnCount =
      match(V: OrigLatch->getTerminator(),
            P: m_BranchOnCount(Op0: m_VPValue(V&: IVInc),
                             Op1: m_Specific(VPV: &Plan.getVectorTripCount())));
  assert(TermBranchOnCount &&
         match(IVInc, m_Add(m_Specific(LoopRegion->getCanonicalIV()),
                            m_Specific(&Plan.getVFxUF()))) &&
         std::next(IVInc->getDefiningRecipe()->getIterator()) ==
             OrigLatch->getTerminator()->getIterator() &&
         "Unexpected canonical iv increment");

  // Split the latch at the IV update, and branch to it from the header mask.
  VPBasicBlock *Latch =
      OrigLatch->splitAt(SplitAt: IVInc->getDefiningRecipe()->getIterator());
  Latch->setName("vector.latch");
  VPBlockUtils::connectBlocks(From: Header, To: Latch);

  // Collect any values defined in the loop that need a phi. Currently this
  // includes header phi backedges and live-outs extracted in the middle block.
  // TODO: Handle early exits via Plan.getExitBlocks()
  MapVector<VPValue *, SmallVector<VPUser *>> NeedsPhi;
  for (VPRecipeBase &R : Header->phis())
    if (!isa<VPCanonicalIVPHIRecipe, VPWidenInductionRecipe>(Val: R))
      NeedsPhi[cast<VPHeaderPHIRecipe>(Val&: R).getBackedgeValue()].push_back(Elt: &R);

  VPValue *V;
  for (VPRecipeBase &R : *Plan.getMiddleBlock())
    if (match(V: &R, P: m_ExtractLastPart(Op0: m_VPValue(V))))
      NeedsPhi[V].push_back(Elt: &R);

  // Insert phis with a poison incoming value for past the end of the tail.
  Builder.setInsertPoint(TheBB: Latch, IP: Latch->begin());
  VPTypeAnalysis TypeInfo(Plan);
  for (const auto &[V, Users] : NeedsPhi) {
    // IR values are defined outside the loop and need no phi.
    if (isa<VPIRValue>(Val: V))
      continue;
    // TODO: For reduction phis, use phi value instead of poison so we can
    // remove the special casing for tail folding in
    // LoopVectorizationPlanner::addReductionResultComputation
    VPValue *Poison =
        Plan.getOrAddLiveIn(V: PoisonValue::get(T: TypeInfo.inferScalarType(V)));
    VPInstruction *Phi = Builder.createScalarPhi(IncomingValues: {V, Poison});
    for (VPUser *U : Users)
      U->replaceUsesOfWith(From: V, To: Phi);
  }

  // Any extract of the last element must be updated to extract from the last
  // active lane of the header mask instead (i.e., the lane corresponding to the
  // last active iteration).
  Builder.setInsertPoint(Plan.getMiddleBlock()->getTerminator());
  for (VPRecipeBase &R : *Plan.getMiddleBlock()) {
    VPValue *Op;
    if (!match(V: &R, P: m_ExtractLastLaneOfLastPart(Op0: m_VPValue(V&: Op))))
      continue;

    // Compute the index of the last active lane.
    VPValue *LastActiveLane =
        Builder.createNaryOp(Opcode: VPInstruction::LastActiveLane, Operands: HeaderMask);
    auto *Ext =
        Builder.createNaryOp(Opcode: VPInstruction::ExtractLane, Operands: {LastActiveLane, Op});
    R.getVPSingleValue()->replaceAllUsesWith(New: Ext);
  }
}
1170
1171/// Insert \p CheckBlockVPBB on the edge leading to the vector preheader,
1172/// connecting it to both vector and scalar preheaders. Updates scalar
1173/// preheader phis to account for the new predecessor.
1174static void insertCheckBlockBeforeVectorLoop(VPlan &Plan,
1175 VPBasicBlock *CheckBlockVPBB) {
1176 VPBlockBase *VectorPH = Plan.getVectorPreheader();
1177 auto *ScalarPH = cast<VPBasicBlock>(Val: Plan.getScalarPreheader());
1178 VPBlockBase *PreVectorPH = VectorPH->getSinglePredecessor();
1179 VPBlockUtils::insertOnEdge(From: PreVectorPH, To: VectorPH, BlockPtr: CheckBlockVPBB);
1180 VPBlockUtils::connectBlocks(From: CheckBlockVPBB, To: ScalarPH);
1181 CheckBlockVPBB->swapSuccessors();
1182 unsigned NumPreds = ScalarPH->getNumPredecessors();
1183 for (VPRecipeBase &R : ScalarPH->phis()) {
1184 auto *Phi = cast<VPPhi>(Val: &R);
1185 assert(Phi->getNumIncoming() == NumPreds - 1 &&
1186 "must have incoming values for all predecessors");
1187 Phi->addOperand(Operand: Phi->getOperand(N: NumPreds - 2));
1188 }
1189}
1190
// Likelihood of bypassing the vectorized loop due to a runtime check block,
// including memory overlap checks block and wrapping/unit-stride checks block.
static constexpr uint32_t CheckBypassWeights[] = {1, 127};
1194
1195/// Create a BranchOnCond terminator in \p CheckBlockVPBB. Optionally adds
1196/// branch weights.
1197static void addBypassBranch(VPlan &Plan, VPBasicBlock *CheckBlockVPBB,
1198 VPValue *Cond, bool AddBranchWeights) {
1199 DebugLoc DL = Plan.getVectorLoopRegion()->getCanonicalIV()->getDebugLoc();
1200 auto *Term = VPBuilder(CheckBlockVPBB)
1201 .createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: {Cond}, DL);
1202 if (AddBranchWeights) {
1203 MDBuilder MDB(Plan.getContext());
1204 MDNode *BranchWeights =
1205 MDB.createBranchWeights(Weights: CheckBypassWeights, /*IsExpected=*/false);
1206 Term->setMetadata(Kind: LLVMContext::MD_prof, Node: BranchWeights);
1207 }
1208}
1209
1210void VPlanTransforms::attachCheckBlock(VPlan &Plan, Value *Cond,
1211 BasicBlock *CheckBlock,
1212 bool AddBranchWeights) {
1213 VPValue *CondVPV = Plan.getOrAddLiveIn(V: Cond);
1214 VPBasicBlock *CheckBlockVPBB = Plan.createVPIRBasicBlock(IRBB: CheckBlock);
1215 insertCheckBlockBeforeVectorLoop(Plan, CheckBlockVPBB);
1216 addBypassBranch(Plan, CheckBlockVPBB, Cond: CondVPV, AddBranchWeights);
1217}
1218
1219/// Return an insert point in \p EntryVPBB after existing VPIRPhi,
1220/// VPIRInstruction and VPExpandSCEVRecipe recipes.
1221static VPBasicBlock::iterator getExpandSCEVInsertPt(VPBasicBlock *EntryVPBB) {
1222 auto InsertPt = EntryVPBB->begin();
1223 while (InsertPt != EntryVPBB->end() &&
1224 isa<VPExpandSCEVRecipe, VPIRPhi, VPIRInstruction>(Val: &*InsertPt))
1225 ++InsertPt;
1226 return InsertPt;
1227}
1228
/// Add a minimum-iteration check: compare the trip count against
/// max(VF * UF, MinProfitableTripCount) and branch to the scalar loop when
/// there are too few iterations. When SCEV can prove the comparison either
/// way, or when tail folding guarantees a positive vector trip count, a
/// constant condition is emitted instead of a runtime compare.
void VPlanTransforms::addMinimumIterationCheck(
    VPlan &Plan, ElementCount VF, unsigned UF,
    ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue,
    bool TailFolded, Loop *OrigLoop, const uint32_t *MinItersBypassWeights,
    DebugLoc DL, PredicatedScalarEvolution &PSE, VPBasicBlock *CheckBlock) {
  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding one
  // to the backedge-taken count overflowed leading to an incorrect trip count
  // of zero. In this case we will also jump to the scalar loop.
  CmpInst::Predicate CmpPred =
      RequiresScalarEpilogue ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
  // If tail is to be folded, vector loop takes care of all iterations.
  VPValue *TripCountVPV = Plan.getTripCount();
  const SCEV *TripCount = vputils::getSCEVExprForVPValue(V: TripCountVPV, PSE);
  Type *TripCountTy = TripCount->getType();
  ScalarEvolution &SE = *PSE.getSE();
  auto GetMinTripCount = [&]() -> const SCEV * {
    // Compute max(MinProfitableTripCount, UF * VF) and return it.
    const SCEV *VFxUF =
        SE.getElementCount(Ty: TripCountTy, EC: (VF * UF), Flags: SCEV::FlagNUW);
    // If VF * UF already dominates the profitability bound, the max is VFxUF.
    if (UF * VF.getKnownMinValue() >=
        MinProfitableTripCount.getKnownMinValue()) {
      // TODO: SCEV should be able to simplify test.
      return VFxUF;
    }
    const SCEV *MinProfitableTripCountSCEV =
        SE.getElementCount(Ty: TripCountTy, EC: MinProfitableTripCount, Flags: SCEV::FlagNUW);
    return SE.getUMaxExpr(LHS: MinProfitableTripCountSCEV, RHS: VFxUF);
  };

  VPBasicBlock *EntryVPBB = Plan.getEntry();
  // Place compare and branch in CheckBlock if given, ExpandSCEVs in Entry.
  VPBasicBlock *CheckVPBB = CheckBlock ? CheckBlock : EntryVPBB;
  VPBuilder Builder(CheckVPBB);
  VPValue *TripCountCheck = Plan.getFalse();
  const SCEV *Step = GetMinTripCount();
  // TripCountCheck = false, folding tail implies positive vector trip
  // count.
  if (!TailFolded) {
    // TODO: Emit unconditional branch to vector preheader instead of
    // conditional branch with known condition.
    TripCount = SE.applyLoopGuards(Expr: TripCount, L: OrigLoop);
    // Check if the trip count is < the step.
    if (SE.isKnownPredicate(Pred: CmpPred, LHS: TripCount, RHS: Step)) {
      // TODO: Ensure step is at most the trip count when determining max VF and
      // UF, w/o tail folding.
      TripCountCheck = Plan.getTrue();
    } else if (!SE.isKnownPredicate(Pred: CmpInst::getInversePredicate(pred: CmpPred),
                                    LHS: TripCount, RHS: Step)) {
      // Generate the minimum iteration check only if we cannot prove the
      // check is known to be true, or known to be false.
      // ExpandSCEV must be placed in Entry.
      VPBuilder SCEVBuilder(EntryVPBB, getExpandSCEVInsertPt(EntryVPBB));
      VPValue *MinTripCountVPV = SCEVBuilder.createExpandSCEV(Expr: Step);
      TripCountCheck = Builder.createICmp(
          Pred: CmpPred, A: TripCountVPV, B: MinTripCountVPV, DL, Name: "min.iters.check");
    } // else step known to be < trip count, use TripCountCheck preset to false.
  }
  // Terminate the check block with a conditional bypass to the scalar loop,
  // optionally annotated with branch weights.
  VPInstruction *Term =
      Builder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: {TripCountCheck}, DL);
  if (MinItersBypassWeights) {
    MDBuilder MDB(Plan.getContext());
    MDNode *BranchWeights = MDB.createBranchWeights(
        Weights: ArrayRef(MinItersBypassWeights, 2), /*IsExpected=*/false);
    Term->setMetadata(Kind: LLVMContext::MD_prof, Node: BranchWeights);
  }
}
1297
1298void VPlanTransforms::addIterationCountCheckBlock(
1299 VPlan &Plan, ElementCount VF, unsigned UF, bool RequiresScalarEpilogue,
1300 Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL,
1301 PredicatedScalarEvolution &PSE) {
1302 auto *CheckBlock = Plan.createVPBasicBlock(Name: "vector.main.loop.iter.check");
1303 insertCheckBlockBeforeVectorLoop(Plan, CheckBlockVPBB: CheckBlock);
1304 addMinimumIterationCheck(Plan, VF, UF, MinProfitableTripCount: ElementCount::getFixed(MinVal: 0),
1305 RequiresScalarEpilogue, /*TailFolded=*/false,
1306 OrigLoop, MinItersBypassWeights, DL, PSE,
1307 CheckBlock);
1308}
1309
1310void VPlanTransforms::addMinimumVectorEpilogueIterationCheck(
1311 VPlan &Plan, Value *VectorTripCount, bool RequiresScalarEpilogue,
1312 ElementCount EpilogueVF, unsigned EpilogueUF, unsigned MainLoopStep,
1313 unsigned EpilogueLoopStep, ScalarEvolution &SE) {
1314 // Add the minimum iteration check for the epilogue vector loop.
1315 VPValue *TC = Plan.getTripCount();
1316 Value *TripCount = TC->getLiveInIRValue();
1317 VPBuilder Builder(cast<VPBasicBlock>(Val: Plan.getEntry()));
1318 VPValue *VFxUF = Builder.createExpandSCEV(Expr: SE.getElementCount(
1319 Ty: TripCount->getType(), EC: (EpilogueVF * EpilogueUF), Flags: SCEV::FlagNUW));
1320 VPValue *Count = Builder.createSub(LHS: TC, RHS: Plan.getOrAddLiveIn(V: VectorTripCount),
1321 DL: DebugLoc::getUnknown(), Name: "n.vec.remaining");
1322
1323 // Generate code to check if the loop's trip count is less than VF * UF of
1324 // the vector epilogue loop.
1325 auto P = RequiresScalarEpilogue ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
1326 auto *CheckMinIters = Builder.createICmp(
1327 Pred: P, A: Count, B: VFxUF, DL: DebugLoc::getUnknown(), Name: "min.epilog.iters.check");
1328 VPInstruction *Branch =
1329 Builder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: CheckMinIters);
1330
1331 // We assume the remaining `Count` is equally distributed in
1332 // [0, MainLoopStep)
1333 // So the probability for `Count < EpilogueLoopStep` should be
1334 // min(MainLoopStep, EpilogueLoopStep) / MainLoopStep
1335 // TODO: Improve the estimate by taking the estimated trip count into
1336 // consideration.
1337 unsigned EstimatedSkipCount = std::min(a: MainLoopStep, b: EpilogueLoopStep);
1338 const uint32_t Weights[] = {EstimatedSkipCount,
1339 MainLoopStep - EstimatedSkipCount};
1340 MDBuilder MDB(Plan.getContext());
1341 MDNode *BranchWeights =
1342 MDB.createBranchWeights(Weights, /*IsExpected=*/false);
1343 Branch->setMetadata(Kind: LLVMContext::MD_prof, Node: BranchWeights);
1344}
1345
1346/// Find and return the final select instruction of the FindIV result pattern
1347/// for the given \p BackedgeVal:
1348/// select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),
1349/// ComputeReductionResult(ReducedIV), Start.
1350static VPInstruction *findFindIVSelect(VPValue *BackedgeVal) {
1351 return cast<VPInstruction>(
1352 Val: vputils::findRecipe(Start: BackedgeVal, Pred: [BackedgeVal](VPRecipeBase *R) {
1353 auto *VPI = dyn_cast<VPInstruction>(Val: R);
1354 return VPI &&
1355 matchFindIVResult(VPI, ReducedIV: m_Specific(VPV: BackedgeVal), Start: m_VPValue());
1356 }));
1357}
1358
1359bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
1360 auto GetMinOrMaxCompareValue =
1361 [](VPReductionPHIRecipe *RedPhiR) -> VPValue * {
1362 auto *MinOrMaxR =
1363 dyn_cast_or_null<VPRecipeWithIRFlags>(Val: RedPhiR->getBackedgeValue());
1364 if (!MinOrMaxR)
1365 return nullptr;
1366
1367 // Check that MinOrMaxR is a VPWidenIntrinsicRecipe or VPReplicateRecipe
1368 // with an intrinsic that matches the reduction kind.
1369 Intrinsic::ID ExpectedIntrinsicID =
1370 getMinMaxReductionIntrinsicOp(RK: RedPhiR->getRecurrenceKind());
1371 if (!match(V: MinOrMaxR, P: m_Intrinsic(IntrID: ExpectedIntrinsicID)))
1372 return nullptr;
1373
1374 if (MinOrMaxR->getOperand(N: 0) == RedPhiR)
1375 return MinOrMaxR->getOperand(N: 1);
1376
1377 assert(MinOrMaxR->getOperand(1) == RedPhiR &&
1378 "Reduction phi operand expected");
1379 return MinOrMaxR->getOperand(N: 0);
1380 };
1381
1382 VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
1383 SmallVector<std::pair<VPReductionPHIRecipe *, VPValue *>>
1384 MinOrMaxNumReductionsToHandle;
1385 bool HasUnsupportedPhi = false;
1386 for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
1387 if (isa<VPCanonicalIVPHIRecipe, VPWidenIntOrFpInductionRecipe>(Val: &R))
1388 continue;
1389 auto *Cur = dyn_cast<VPReductionPHIRecipe>(Val: &R);
1390 if (!Cur) {
1391 // TODO: Also support fixed-order recurrence phis.
1392 HasUnsupportedPhi = true;
1393 continue;
1394 }
1395 if (!RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
1396 Kind: Cur->getRecurrenceKind())) {
1397 HasUnsupportedPhi = true;
1398 continue;
1399 }
1400
1401 VPValue *MinOrMaxOp = GetMinOrMaxCompareValue(Cur);
1402 if (!MinOrMaxOp)
1403 return false;
1404
1405 MinOrMaxNumReductionsToHandle.emplace_back(Args&: Cur, Args&: MinOrMaxOp);
1406 }
1407
1408 if (MinOrMaxNumReductionsToHandle.empty())
1409 return true;
1410
1411 // We won't be able to resume execution in the scalar tail, if there are
1412 // unsupported header phis or there is no scalar tail at all, due to
1413 // tail-folding.
1414 if (HasUnsupportedPhi || !Plan.hasScalarTail())
1415 return false;
1416
1417 /// Check if the vector loop of \p Plan can early exit and restart
1418 /// execution of last vector iteration in the scalar loop. This requires all
1419 /// recipes up to early exit point be side-effect free as they are
1420 /// re-executed. Currently we check that the loop is free of any recipe that
1421 /// may write to memory. Expected to operate on an early VPlan w/o nested
1422 /// regions.
1423 for (VPBlockBase *VPB : vp_depth_first_shallow(
1424 G: Plan.getVectorLoopRegion()->getEntryBasicBlock())) {
1425 auto *VPBB = cast<VPBasicBlock>(Val: VPB);
1426 for (auto &R : *VPBB) {
1427 if (R.mayWriteToMemory() && !match(V: &R, P: m_BranchOnCount()))
1428 return false;
1429 }
1430 }
1431
1432 VPBasicBlock *LatchVPBB = LoopRegion->getExitingBasicBlock();
1433 VPBuilder LatchBuilder(LatchVPBB->getTerminator());
1434 VPValue *AllNaNLanes = nullptr;
1435 SmallPtrSet<VPValue *, 2> RdxResults;
1436 for (const auto &[_, MinOrMaxOp] : MinOrMaxNumReductionsToHandle) {
1437 VPValue *RedNaNLanes =
1438 LatchBuilder.createFCmp(Pred: CmpInst::FCMP_UNO, A: MinOrMaxOp, B: MinOrMaxOp);
1439 AllNaNLanes = AllNaNLanes ? LatchBuilder.createOr(LHS: AllNaNLanes, RHS: RedNaNLanes)
1440 : RedNaNLanes;
1441 }
1442
1443 VPValue *AnyNaNLane =
1444 LatchBuilder.createNaryOp(Opcode: VPInstruction::AnyOf, Operands: {AllNaNLanes});
1445 VPBasicBlock *MiddleVPBB = Plan.getMiddleBlock();
1446 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->begin());
1447 for (const auto &[RedPhiR, _] : MinOrMaxNumReductionsToHandle) {
1448 assert(RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
1449 RedPhiR->getRecurrenceKind()) &&
1450 "unsupported reduction");
1451
1452 // If we exit early due to NaNs, compute the final reduction result based on
1453 // the reduction phi at the beginning of the last vector iteration.
1454 auto *RdxResult = vputils::findComputeReductionResult(PhiR: RedPhiR);
1455 assert(RdxResult && "must find a ComputeReductionResult");
1456
1457 auto *NewSel = MiddleBuilder.createSelect(Cond: AnyNaNLane, TrueVal: RedPhiR,
1458 FalseVal: RdxResult->getOperand(N: 0));
1459 RdxResult->setOperand(I: 0, New: NewSel);
1460 assert(!RdxResults.contains(RdxResult) && "RdxResult already used");
1461 RdxResults.insert(Ptr: RdxResult);
1462 }
1463
1464 auto *LatchExitingBranch = LatchVPBB->getTerminator();
1465 assert(match(LatchExitingBranch, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
1466 "Unexpected terminator");
1467 auto *IsLatchExitTaken = LatchBuilder.createICmp(
1468 Pred: CmpInst::ICMP_EQ, A: LatchExitingBranch->getOperand(N: 0),
1469 B: LatchExitingBranch->getOperand(N: 1));
1470 auto *AnyExitTaken = LatchBuilder.createOr(LHS: AnyNaNLane, RHS: IsLatchExitTaken);
1471 LatchBuilder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: AnyExitTaken);
1472 LatchExitingBranch->eraseFromParent();
1473
1474 // Update resume phis for inductions in the scalar preheader. If AnyNaNLane is
1475 // true, the resume from the start of the last vector iteration via the
1476 // canonical IV, otherwise from the original value.
1477 auto IsTC = [&Plan](VPValue *V) {
1478 return V == &Plan.getVectorTripCount() || V == Plan.getTripCount();
1479 };
1480 for (auto &R : Plan.getScalarPreheader()->phis()) {
1481 auto *ResumeR = cast<VPPhi>(Val: &R);
1482 VPValue *VecV = ResumeR->getOperand(N: 0);
1483 if (RdxResults.contains(Ptr: VecV))
1484 continue;
1485 if (auto *DerivedIV = dyn_cast<VPDerivedIVRecipe>(Val: VecV)) {
1486 VPValue *DIVTC = DerivedIV->getOperand(N: 1);
1487 if (DerivedIV->getNumUsers() == 1 && IsTC(DIVTC)) {
1488 auto *NewSel = MiddleBuilder.createSelect(
1489 Cond: AnyNaNLane, TrueVal: LoopRegion->getCanonicalIV(), FalseVal: DIVTC);
1490 DerivedIV->moveAfter(MovePos: &*MiddleBuilder.getInsertPoint());
1491 DerivedIV->setOperand(I: 1, New: NewSel);
1492 continue;
1493 }
1494 }
1495 // Bail out and abandon the current, partially modified, VPlan if we
1496 // encounter resume phi that cannot be updated yet.
1497 if (!IsTC(VecV)) {
1498 LLVM_DEBUG(dbgs() << "Found resume phi we cannot update for VPlan with "
1499 "FMaxNum/FMinNum reduction.\n");
1500 return false;
1501 }
1502 auto *NewSel = MiddleBuilder.createSelect(
1503 Cond: AnyNaNLane, TrueVal: LoopRegion->getCanonicalIV(), FalseVal: VecV);
1504 ResumeR->setOperand(I: 0, New: NewSel);
1505 }
1506
1507 auto *MiddleTerm = MiddleVPBB->getTerminator();
1508 MiddleBuilder.setInsertPoint(MiddleTerm);
1509 VPValue *MiddleCond = MiddleTerm->getOperand(N: 0);
1510 VPValue *NewCond =
1511 MiddleBuilder.createAnd(LHS: MiddleCond, RHS: MiddleBuilder.createNot(Operand: AnyNaNLane));
1512 MiddleTerm->setOperand(I: 0, New: NewCond);
1513 return true;
1514}
1515
1516bool VPlanTransforms::handleFindLastReductions(VPlan &Plan) {
1517 if (Plan.hasScalarVFOnly())
1518 return false;
1519
1520 // We want to create the following nodes:
1521 // vector.body:
1522 // ...new WidenPHI recipe introduced to keep the mask value for the latest
1523 // iteration where any lane was active.
1524 // mask.phi = phi [ ir<false>, vector.ph ], [ vp<new.mask>, vector.body ]
1525 // ...data.phi (a VPReductionPHIRecipe for a FindLast reduction) already
1526 // exists, but needs updating to use 'new.data' for the backedge value.
1527 // data.phi = phi ir<default.val>, vp<new.data>
1528 //
1529 // ...'data' and 'compare' created by existing nodes...
1530 //
1531 // ...new recipes introduced to determine whether to update the reduction
1532 // values or keep the current one.
1533 // any.active = i1 any-of ir<compare>
1534 // new.mask = select vp<any.active>, ir<compare>, vp<mask.phi>
1535 // new.data = select vp<any.active>, ir<data>, ir<data.phi>
1536 //
1537 // middle.block:
1538 // ...extract-last-active replaces compute-reduction-result.
1539 // result = extract-last-active vp<new.data>, vp<new.mask>, ir<default.val>
1540
1541 SmallVector<VPReductionPHIRecipe *, 4> Phis;
1542 for (VPRecipeBase &Phi :
1543 Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
1544 auto *PhiR = dyn_cast<VPReductionPHIRecipe>(Val: &Phi);
1545 if (PhiR && RecurrenceDescriptor::isFindLastRecurrenceKind(
1546 Kind: PhiR->getRecurrenceKind()))
1547 Phis.push_back(Elt: PhiR);
1548 }
1549
1550 if (Phis.empty())
1551 return true;
1552
1553 VPValue *HeaderMask = vputils::findHeaderMask(Plan);
1554 for (VPReductionPHIRecipe *PhiR : Phis) {
1555 // Find the condition for the select/blend.
1556 VPValue *BackedgeSelect = PhiR->getBackedgeValue();
1557 VPValue *CondSelect = BackedgeSelect;
1558
1559 // If there's a header mask, the backedge select will not be the find-last
1560 // select.
1561 if (HeaderMask && !match(V: BackedgeSelect,
1562 P: m_Select(Op0: m_Specific(VPV: HeaderMask),
1563 Op1: m_VPValue(V&: CondSelect), Op2: m_Specific(VPV: PhiR))))
1564 llvm_unreachable("expected header mask select");
1565
1566 VPValue *Cond = nullptr, *Op1 = nullptr, *Op2 = nullptr;
1567
1568 // If we're matching a blend rather than a select, there should be one
1569 // incoming value which is the data, then all other incoming values should
1570 // be the phi.
1571 auto MatchBlend = [&](VPRecipeBase *R) {
1572 auto *Blend = dyn_cast<VPBlendRecipe>(Val: R);
1573 if (!Blend)
1574 return false;
1575 assert(!Blend->isNormalized() && "must run before blend normalizaion");
1576 unsigned NumIncomingDataValues = 0;
1577 for (unsigned I = 0; I < Blend->getNumIncomingValues(); ++I) {
1578 VPValue *Incoming = Blend->getIncomingValue(Idx: I);
1579 if (Incoming != PhiR) {
1580 ++NumIncomingDataValues;
1581 Cond = Blend->getMask(Idx: I);
1582 Op1 = Incoming;
1583 Op2 = PhiR;
1584 }
1585 }
1586 return NumIncomingDataValues == 1;
1587 };
1588
1589 VPSingleDefRecipe *SelectR =
1590 cast<VPSingleDefRecipe>(Val: CondSelect->getDefiningRecipe());
1591 if (!match(R: SelectR,
1592 P: m_Select(Op0: m_VPValue(V&: Cond), Op1: m_VPValue(V&: Op1), Op2: m_VPValue(V&: Op2))) &&
1593 !MatchBlend(SelectR))
1594 return false;
1595
1596 assert(Cond != HeaderMask && "Cond must not be HeaderMask");
1597
1598 // Find final reduction computation and replace it with an
1599 // extract.last.active intrinsic.
1600 auto *RdxResult =
1601 vputils::findUserOf<VPInstruction::ComputeReductionResult>(
1602 V: BackedgeSelect);
1603 assert(RdxResult && "Could not find reduction result");
1604
1605 // Add mask phi.
1606 VPBuilder Builder = VPBuilder::getToInsertAfter(R: PhiR);
1607 auto *MaskPHI = new VPWidenPHIRecipe(nullptr, /*Start=*/Plan.getFalse());
1608 Builder.insert(R: MaskPHI);
1609
1610 // Add select for mask.
1611 Builder.setInsertPoint(SelectR);
1612
1613 if (Op1 == PhiR) {
1614 // Normalize to selecting the data operand when the condition is true by
1615 // swapping operands and negating the condition.
1616 std::swap(a&: Op1, b&: Op2);
1617 Cond = Builder.createNot(Operand: Cond);
1618 }
1619 assert(Op2 == PhiR && "data value must be selected if Cond is true");
1620
1621 if (HeaderMask)
1622 Cond = Builder.createLogicalAnd(LHS: HeaderMask, RHS: Cond);
1623
1624 VPValue *AnyOf = Builder.createNaryOp(Opcode: VPInstruction::AnyOf, Operands: {Cond});
1625 VPValue *MaskSelect = Builder.createSelect(Cond: AnyOf, TrueVal: Cond, FalseVal: MaskPHI);
1626 MaskPHI->addOperand(Operand: MaskSelect);
1627
1628 // Replace select for data.
1629 VPValue *DataSelect =
1630 Builder.createSelect(Cond: AnyOf, TrueVal: Op1, FalseVal: Op2, DL: SelectR->getDebugLoc());
1631 SelectR->replaceAllUsesWith(New: DataSelect);
1632 PhiR->setBackedgeValue(DataSelect);
1633 SelectR->eraseFromParent();
1634
1635 Builder.setInsertPoint(RdxResult);
1636 auto *ExtractLastActive =
1637 Builder.createNaryOp(Opcode: VPInstruction::ExtractLastActive,
1638 Operands: {PhiR->getStartValue(), DataSelect, MaskSelect},
1639 DL: RdxResult->getDebugLoc());
1640 RdxResult->replaceAllUsesWith(New: ExtractLastActive);
1641 RdxResult->eraseFromParent();
1642 }
1643
1644 return true;
1645}
1646
1647/// Given a first argmin/argmax pattern with strict predicate consisting of
1648/// 1) a MinOrMax reduction \p MinOrMaxPhiR producing \p MinOrMaxResult,
1649/// 2) a wide induction \p WideIV,
1650/// 3) a FindLastIV reduction \p FindLastIVPhiR using \p WideIV,
1651/// return the smallest index of the FindLastIV reduction result using UMin,
1652/// unless \p MinOrMaxResult equals the start value of its MinOrMax reduction.
1653/// In that case, return the start value of the FindLastIV reduction instead.
1654/// If \p WideIV is not canonical, a new canonical wide IV is added, and the
1655/// final result is scaled back to the non-canonical \p WideIV.
1656/// The final value of the FindLastIV reduction is originally computed using
1657/// \p FindIVSelect, \p FindIVCmp, and \p FindIVRdxResult, which are replaced
1658/// and removed.
1659/// Returns true if the pattern was handled successfully, false otherwise.
1660static bool handleFirstArgMinOrMax(
1661 VPlan &Plan, VPReductionPHIRecipe *MinOrMaxPhiR,
1662 VPReductionPHIRecipe *FindLastIVPhiR, VPWidenIntOrFpInductionRecipe *WideIV,
1663 VPInstruction *MinOrMaxResult, VPInstruction *FindIVSelect,
1664 VPRecipeBase *FindIVCmp, VPInstruction *FindIVRdxResult) {
1665 assert(!FindLastIVPhiR->isInLoop() && !FindLastIVPhiR->isOrdered() &&
1666 "inloop and ordered reductions not supported");
1667 assert(FindLastIVPhiR->getVFScaleFactor() == 1 &&
1668 "FindIV reduction must not be scaled");
1669
1670 Type *Ty = Plan.getVectorLoopRegion()->getCanonicalIVType();
1671 // TODO: Support non (i.e., narrower than) canonical IV types.
1672 // TODO: Emit remarks for failed transformations.
1673 if (Ty != VPTypeAnalysis(Plan).inferScalarType(V: WideIV))
1674 return false;
1675
1676 auto *FindIVSelectR = cast<VPSingleDefRecipe>(
1677 Val: FindLastIVPhiR->getBackedgeValue()->getDefiningRecipe());
1678 assert(
1679 match(FindIVSelectR, m_Select(m_VPValue(), m_VPValue(), m_VPValue())) &&
1680 "backedge value must be a select");
1681 if (FindIVSelectR->getOperand(N: 1) != WideIV &&
1682 FindIVSelectR->getOperand(N: 2) != WideIV)
1683 return false;
1684
1685 // If the original wide IV is not canonical, create a new one. The canonical
1686 // wide IV is guaranteed to not wrap for all lanes that are active in the
1687 // vector loop.
1688 if (!WideIV->isCanonical()) {
1689 VPIRValue *Zero = Plan.getConstantInt(Ty, Val: 0);
1690 VPIRValue *One = Plan.getConstantInt(Ty, Val: 1);
1691 auto *WidenCanIV = new VPWidenIntOrFpInductionRecipe(
1692 nullptr, Zero, One, WideIV->getVFValue(),
1693 WideIV->getInductionDescriptor(),
1694 VPIRFlags::WrapFlagsTy(/*HasNUW=*/true, /*HasNSW=*/false),
1695 WideIV->getDebugLoc());
1696 WidenCanIV->insertBefore(InsertPos: WideIV);
1697
1698 // Update the select to use the wide canonical IV.
1699 FindIVSelectR->setOperand(I: FindIVSelectR->getOperand(N: 1) == WideIV ? 1 : 2,
1700 New: WidenCanIV);
1701 }
1702 FindLastIVPhiR->setOperand(I: 0, New: Plan.getOrAddLiveIn(V: PoisonValue::get(T: Ty)));
1703
1704 // The reduction using MinOrMaxPhiR needs adjusting to compute the correct
1705 // result:
1706 // 1. Find the first canonical indices corresponding to partial min/max
1707 // values, using loop reductions.
1708 // 2. Find which of the partial min/max values are equal to the overall
1709 // min/max value.
1710 // 3. Select among the canonical indices those corresponding to the overall
1711 // min/max value.
1712 // 4. Find the first canonical index of overall min/max and scale it back to
1713 // the original IV using VPDerivedIVRecipe.
1714 // 5. If the overall min/max equals the starting min/max, the condition in
1715 // the loop was always false, due to being strict; return the start value
1716 // of FindLastIVPhiR in that case.
1717 //
1718 // For example, we transforms two independent reduction result computations
1719 // for
1720 //
1721 // <x1> vector loop: {
1722 // vector.body:
1723 // ...
1724 // ir<%iv> = WIDEN-INDUCTION nuw nsw ir<10>, ir<1>, vp<%0>
1725 // WIDEN-REDUCTION-PHI ir<%min.idx> = phi ir<sentinel.min.start>,
1726 // ir<%min.idx.next>
1727 // WIDEN-REDUCTION-PHI ir<%min.val> = phi ir<100>, ir<%min.val.next>
1728 // ....
1729 // WIDEN-INTRINSIC ir<%min.val.next> = call llvm.umin(ir<%min.val>, ir<%l>)
1730 // WIDEN ir<%min.idx.next> = select ir<%cmp>, ir<%iv>, ir<%min.idx>
1731 // ...
1732 // }
1733 // Successor(s): middle.block
1734 //
1735 // middle.block:
1736 // vp<%iv.rdx> = compute-reduction-result (smax) vp<%min.idx.next>
1737 // vp<%min.result> = compute-reduction-result (umin) ir<%min.val.next>
1738 // vp<%cmp> = icmp ne vp<%iv.rdx>, ir<sentinel.min.start>
1739 // vp<%find.iv.result> = select vp<%cmp>, vp<%iv.rdx>, ir<10>
1740 //
1741 //
1742 // Into:
1743 //
1744 // vp<%reduced.min> = compute-reduction-result (umin) ir<%min.val.next>
1745 // vp<%reduced.mins.mask> = icmp eq ir<%min.val.next>, vp<%reduced.min>
1746 // vp<%idxs2reduce> = select vp<%reduced.mins.mask>, ir<%min.idx.next>,
1747 // ir<MaxUInt>
1748 // vp<%reduced.idx> = compute-reduction-result (umin) vp<%idxs2reduce>
1749 // vp<%scaled.idx> = DERIVED-IV ir<20> + vp<%reduced.idx> * ir<1>
1750 // vp<%always.false> = icmp eq vp<%reduced.min>, ir<100>
1751 // vp<%final.idx> = select vp<%always.false>, ir<10>,
1752 // vp<%scaled.idx>
1753
1754 VPBuilder Builder(FindIVRdxResult);
1755 VPValue *MinOrMaxExiting = MinOrMaxResult->getOperand(N: 0);
1756 auto *FinalMinOrMaxCmp =
1757 Builder.createICmp(Pred: CmpInst::ICMP_EQ, A: MinOrMaxExiting, B: MinOrMaxResult);
1758 VPValue *LastIVExiting = FindIVRdxResult->getOperand(N: 0);
1759 VPValue *MaxIV =
1760 Plan.getConstantInt(Val: APInt::getMaxValue(numBits: Ty->getIntegerBitWidth()));
1761 auto *FinalIVSelect =
1762 Builder.createSelect(Cond: FinalMinOrMaxCmp, TrueVal: LastIVExiting, FalseVal: MaxIV);
1763 VPIRFlags RdxFlags(RecurKind::UMin, false, false, FastMathFlags());
1764 VPSingleDefRecipe *FinalCanIV = Builder.createNaryOp(
1765 Opcode: VPInstruction::ComputeReductionResult, Operands: {FinalIVSelect}, Flags: RdxFlags,
1766 DL: FindIVRdxResult->getDebugLoc());
1767
1768 // If we used a new wide canonical IV convert the reduction result back to the
1769 // original IV scale before the final select.
1770 if (!WideIV->isCanonical()) {
1771 auto *DerivedIVRecipe =
1772 new VPDerivedIVRecipe(InductionDescriptor::IK_IntInduction,
1773 nullptr, // No FPBinOp for integer induction
1774 WideIV->getStartValue(), FinalCanIV,
1775 WideIV->getStepValue(), "derived.iv.result");
1776 DerivedIVRecipe->insertBefore(InsertPos: &*Builder.getInsertPoint());
1777 FinalCanIV = DerivedIVRecipe;
1778 }
1779
1780 // If the final min/max value matches its start value, the condition in the
1781 // loop was always false, i.e. no induction value has been selected. If that's
1782 // the case, set the result of the IV reduction to its start value.
1783 VPValue *AlwaysFalse = Builder.createICmp(Pred: CmpInst::ICMP_EQ, A: MinOrMaxResult,
1784 B: MinOrMaxPhiR->getStartValue());
1785 VPValue *FinalIV = Builder.createSelect(
1786 Cond: AlwaysFalse, TrueVal: FindIVSelect->getOperand(N: 2), FalseVal: FinalCanIV);
1787 FindIVSelect->replaceAllUsesWith(New: FinalIV);
1788
1789 // Erase the old FindIV result pattern which is now dead.
1790 FindIVSelect->eraseFromParent();
1791 FindIVCmp->eraseFromParent();
1792 FindIVRdxResult->eraseFromParent();
1793 return true;
1794}
1795
1796bool VPlanTransforms::handleMultiUseReductions(VPlan &Plan,
1797 OptimizationRemarkEmitter *ORE,
1798 Loop *TheLoop) {
1799 for (auto &PhiR : make_early_inc_range(
1800 Range: Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis())) {
1801 auto *MinOrMaxPhiR = dyn_cast<VPReductionPHIRecipe>(Val: &PhiR);
1802 // TODO: check for multi-uses in VPlan directly.
1803 if (!MinOrMaxPhiR || !MinOrMaxPhiR->hasUsesOutsideReductionChain())
1804 continue;
1805
1806 // MinOrMaxPhiR has users outside the reduction cycle in the loop. Check if
1807 // the only other user is a FindLastIV reduction. MinOrMaxPhiR must have
1808 // exactly 2 users:
1809 // 1) the min/max operation of the reduction cycle, and
1810 // 2) the compare of a FindLastIV reduction cycle. This compare must match
1811 // the min/max operation - comparing MinOrMaxPhiR with the operand of the
1812 // min/max operation, and be used only by the select of the FindLastIV
1813 // reduction cycle.
1814 RecurKind RdxKind = MinOrMaxPhiR->getRecurrenceKind();
1815 assert(
1816 RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind) &&
1817 "only min/max recurrences support users outside the reduction chain");
1818
1819 auto *MinOrMaxOp =
1820 dyn_cast<VPRecipeWithIRFlags>(Val: MinOrMaxPhiR->getBackedgeValue());
1821 if (!MinOrMaxOp)
1822 return false;
1823
1824 // Check that MinOrMaxOp is a VPWidenIntrinsicRecipe or VPReplicateRecipe
1825 // with an intrinsic that matches the reduction kind.
1826 Intrinsic::ID ExpectedIntrinsicID = getMinMaxReductionIntrinsicOp(RK: RdxKind);
1827 if (!match(V: MinOrMaxOp, P: m_Intrinsic(IntrID: ExpectedIntrinsicID)))
1828 return false;
1829
1830 // MinOrMaxOp must have 2 users: 1) MinOrMaxPhiR and 2)
1831 // ComputeReductionResult.
1832 assert(MinOrMaxOp->getNumUsers() == 2 &&
1833 "MinOrMaxOp must have exactly 2 users");
1834 VPValue *MinOrMaxOpValue = MinOrMaxOp->getOperand(N: 0);
1835 if (MinOrMaxOpValue == MinOrMaxPhiR)
1836 MinOrMaxOpValue = MinOrMaxOp->getOperand(N: 1);
1837
1838 VPValue *CmpOpA;
1839 VPValue *CmpOpB;
1840 CmpPredicate Pred;
1841 auto *Cmp = dyn_cast_or_null<VPRecipeWithIRFlags>(Val: vputils::findUserOf(
1842 V: MinOrMaxPhiR, P: m_Cmp(Pred, Op0: m_VPValue(V&: CmpOpA), Op1: m_VPValue(V&: CmpOpB))));
1843 if (!Cmp || Cmp->getNumUsers() != 1 ||
1844 (CmpOpA != MinOrMaxOpValue && CmpOpB != MinOrMaxOpValue))
1845 return false;
1846
1847 if (MinOrMaxOpValue != CmpOpB)
1848 Pred = CmpInst::getSwappedPredicate(pred: Pred);
1849
1850 // MinOrMaxPhiR must have exactly 2 users:
1851 // * MinOrMaxOp,
1852 // * Cmp (that's part of a FindLastIV chain).
1853 if (MinOrMaxPhiR->getNumUsers() != 2)
1854 return false;
1855
1856 VPInstruction *MinOrMaxResult =
1857 vputils::findUserOf<VPInstruction::ComputeReductionResult>(V: MinOrMaxOp);
1858 assert(is_contained(MinOrMaxPhiR->users(), MinOrMaxOp) &&
1859 "one user must be MinOrMaxOp");
1860 assert(MinOrMaxResult && "MinOrMaxResult must be a user of MinOrMaxOp");
1861
1862 // Cmp must be used by the select of a FindLastIV chain.
1863 VPValue *Sel = dyn_cast<VPSingleDefRecipe>(Val: Cmp->getSingleUser());
1864 VPValue *IVOp, *FindIV;
1865 if (!Sel || Sel->getNumUsers() != 2 ||
1866 !match(V: Sel,
1867 P: m_Select(Op0: m_Specific(VPV: Cmp), Op1: m_VPValue(V&: IVOp), Op2: m_VPValue(V&: FindIV))))
1868 return false;
1869
1870 if (!isa<VPReductionPHIRecipe>(Val: FindIV)) {
1871 std::swap(a&: FindIV, b&: IVOp);
1872 Pred = CmpInst::getInversePredicate(pred: Pred);
1873 }
1874
1875 auto *FindIVPhiR = dyn_cast<VPReductionPHIRecipe>(Val: FindIV);
1876 if (!FindIVPhiR || !RecurrenceDescriptor::isFindIVRecurrenceKind(
1877 Kind: FindIVPhiR->getRecurrenceKind()))
1878 return false;
1879
1880 assert(!FindIVPhiR->isInLoop() && !FindIVPhiR->isOrdered() &&
1881 "cannot handle inloop/ordered reductions yet");
1882
1883 // Check if FindIVPhiR is a FindLast pattern by checking the MinMaxKind
1884 // on its ComputeReductionResult. SMax/UMax indicates FindLast.
1885 VPInstruction *FindIVResult =
1886 vputils::findUserOf<VPInstruction::ComputeReductionResult>(
1887 V: FindIVPhiR->getBackedgeValue());
1888 assert(FindIVResult &&
1889 "must be able to retrieve the FindIVResult VPInstruction");
1890 RecurKind FindIVMinMaxKind = FindIVResult->getRecurKind();
1891 if (FindIVMinMaxKind != RecurKind::SMax &&
1892 FindIVMinMaxKind != RecurKind::UMax)
1893 return false;
1894
1895 // TODO: Support cases where IVOp is the IV increment.
1896 if (!match(V: IVOp, P: m_TruncOrSelf(Op0: m_VPValue(V&: IVOp))) ||
1897 !isa<VPWidenIntOrFpInductionRecipe>(Val: IVOp))
1898 return false;
1899
1900 // Check if the predicate is compatible with the reduction kind.
1901 bool IsValidKindPred = [RdxKind, Pred]() {
1902 switch (RdxKind) {
1903 case RecurKind::UMin:
1904 return Pred == CmpInst::ICMP_UGE || Pred == CmpInst::ICMP_UGT;
1905 case RecurKind::UMax:
1906 return Pred == CmpInst::ICMP_ULE || Pred == CmpInst::ICMP_ULT;
1907 case RecurKind::SMax:
1908 return Pred == CmpInst::ICMP_SLE || Pred == CmpInst::ICMP_SLT;
1909 case RecurKind::SMin:
1910 return Pred == CmpInst::ICMP_SGE || Pred == CmpInst::ICMP_SGT;
1911 default:
1912 llvm_unreachable("unhandled recurrence kind");
1913 }
1914 }();
1915 if (!IsValidKindPred) {
1916 ORE->emit(RemarkBuilder: [&]() {
1917 return OptimizationRemarkMissed(
1918 DEBUG_TYPE, "VectorizationMultiUseReductionPredicate",
1919 TheLoop->getStartLoc(), TheLoop->getHeader())
1920 << "Multi-use reduction with predicate "
1921 << CmpInst::getPredicateName(P: Pred)
1922 << " incompatible with reduction kind";
1923 });
1924 return false;
1925 }
1926
1927 auto *FindIVSelect = findFindIVSelect(BackedgeVal: FindIVPhiR->getBackedgeValue());
1928 auto *FindIVCmp = FindIVSelect->getOperand(N: 0)->getDefiningRecipe();
1929 auto *FindIVRdxResult = cast<VPInstruction>(Val: FindIVCmp->getOperand(N: 0));
1930 assert(FindIVSelect->getParent() == MinOrMaxResult->getParent() &&
1931 "both results must be computed in the same block");
1932 // Reducing to a scalar min or max value is placed right before reducing to
1933 // its scalar iteration, in order to generate instructions that use both
1934 // their operands.
1935 MinOrMaxResult->moveBefore(BB&: *FindIVRdxResult->getParent(),
1936 I: FindIVRdxResult->getIterator());
1937
1938 bool IsStrictPredicate = ICmpInst::isLT(P: Pred) || ICmpInst::isGT(P: Pred);
1939 if (IsStrictPredicate) {
1940 if (!handleFirstArgMinOrMax(Plan, MinOrMaxPhiR, FindLastIVPhiR: FindIVPhiR,
1941 WideIV: cast<VPWidenIntOrFpInductionRecipe>(Val: IVOp),
1942 MinOrMaxResult, FindIVSelect, FindIVCmp,
1943 FindIVRdxResult))
1944 return false;
1945 continue;
1946 }
1947
1948 // The reduction using MinOrMaxPhiR needs adjusting to compute the correct
1949 // result:
1950 // 1. We need to find the last IV for which the condition based on the
1951 // min/max recurrence is true,
1952 // 2. Compare the partial min/max reduction result to its final value and,
1953 // 3. Select the lanes of the partial FindLastIV reductions which
1954 // correspond to the lanes matching the min/max reduction result.
1955 //
1956 // For example, this transforms
1957 // vp<%min.result> = compute-reduction-result ir<%min.val.next>
1958 // vp<%iv.rdx> = compute-reduction-result (smax) vp<%min.idx.next>
1959 // vp<%cmp> = icmp ne vp<%iv.rdx>, SENTINEL
1960 // vp<%find.iv.result> = select vp<%cmp>, vp<%iv.rdx>, ir<0>
1961 //
1962 // into:
1963 //
1964 // vp<min.result> = compute-reduction-result ir<%min.val.next>
1965 // vp<%final.min.cmp> = icmp eq ir<%min.val.next>, vp<min.result>
1966 // vp<%final.iv> = select vp<%final.min.cmp>, vp<%min.idx.next>, SENTINEL
1967 // vp<%iv.rdx> = compute-reduction-result (smax) vp<%final.iv>
1968 // vp<%cmp> = icmp ne vp<%iv.rdx>, SENTINEL
1969 // vp<%find.iv.result> = select vp<%cmp>, vp<%iv.rdx>, ir<0>
1970 //
1971 VPBuilder B(FindIVRdxResult);
1972 VPValue *MinOrMaxExiting = MinOrMaxResult->getOperand(N: 0);
1973 auto *FinalMinOrMaxCmp =
1974 B.createICmp(Pred: CmpInst::ICMP_EQ, A: MinOrMaxExiting, B: MinOrMaxResult);
1975 VPValue *Sentinel = FindIVCmp->getOperand(N: 1);
1976 VPValue *LastIVExiting = FindIVRdxResult->getOperand(N: 0);
1977 auto *FinalIVSelect =
1978 B.createSelect(Cond: FinalMinOrMaxCmp, TrueVal: LastIVExiting, FalseVal: Sentinel);
1979 FindIVRdxResult->setOperand(I: 0, New: FinalIVSelect);
1980 }
1981 return true;
1982}
1983