//===-- VPlanConstruction.cpp - Transforms for initial VPlan construction -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements transforms for initial VPlan construction.
///
//===----------------------------------------------------------------------===//

#include "LoopVectorizationPlanner.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanHelpers.h"
#include "VPlanPatternMatch.h"
#include "VPlanTransforms.h"
#include "VPlanUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"

#define DEBUG_TYPE "vplan"

using namespace llvm;
using namespace VPlanPatternMatch;

namespace {
// Class that is used to build the plain CFG for the incoming IR.
class PlainCFGBuilder {
  // The outermost loop of the input loop nest considered for vectorization.
  Loop *TheLoop;

  // Loop Info analysis.
  LoopInfo *LI;

  // Loop versioning for alias metadata.
  LoopVersioning *LVer;

  // Vectorization plan that we are working on.
  std::unique_ptr<VPlan> Plan;

  // Builder of the VPlan instruction-level representation.
  VPBuilder VPIRBuilder;

  // NOTE: The following maps are intentionally destroyed after the plain CFG
  // construction because subsequent VPlan-to-VPlan transformations may
  // invalidate them.
  // Map incoming BasicBlocks to their newly-created VPBasicBlocks.
  DenseMap<BasicBlock *, VPBasicBlock *> BB2VPBB;
  // Map incoming Value definitions to their newly-created VPValues.
  DenseMap<Value *, VPValue *> IRDef2VPValue;

  // Holds phi nodes that need to be fixed once the plain CFG has been built.
  SmallVector<PHINode *, 8> PhisToFix;

  // Utility functions.
  void setVPBBPredsFromBB(VPBasicBlock *VPBB, BasicBlock *BB);
  void fixHeaderPhis();
  VPBasicBlock *getOrCreateVPBB(BasicBlock *BB);
#ifndef NDEBUG
  bool isExternalDef(Value *Val);
#endif
  VPValue *getOrCreateVPOperand(Value *IRVal);
  void createVPInstructionsForVPBB(VPBasicBlock *VPBB, BasicBlock *BB);

public:
  PlainCFGBuilder(Loop *Lp, LoopInfo *LI, LoopVersioning *LVer)
      : TheLoop(Lp), LI(LI), LVer(LVer), Plan(std::make_unique<VPlan>(Lp)) {}

  /// Build plain CFG for TheLoop and connect it to Plan's entry.
  std::unique_ptr<VPlan> buildPlainCFG();
};
} // anonymous namespace

// Set predecessors of \p VPBB in the same order as they are in \p BB. \p VPBB
// must have no predecessors.
void PlainCFGBuilder::setVPBBPredsFromBB(VPBasicBlock *VPBB, BasicBlock *BB) {
  // Collect VPBB predecessors.
  SmallVector<VPBlockBase *, 2> VPBBPreds;
  for (BasicBlock *Pred : predecessors(BB))
    VPBBPreds.push_back(getOrCreateVPBB(Pred));
  VPBB->setPredecessors(VPBBPreds);
}

static bool isHeaderBB(BasicBlock *BB, Loop *L) {
  return L && BB == L->getHeader();
}

// Add operands to VPInstructions representing phi nodes from the input IR.
void PlainCFGBuilder::fixHeaderPhis() {
  for (auto *Phi : PhisToFix) {
    assert(IRDef2VPValue.count(Phi) && "Missing VPInstruction for PHINode.");
    VPValue *VPVal = IRDef2VPValue[Phi];
    assert(isa<VPPhi>(VPVal) && "Expected VPPhi for phi node.");
    auto *PhiR = cast<VPPhi>(VPVal);
    assert(PhiR->getNumOperands() == 0 && "Expected VPPhi with no operands.");
    assert(isHeaderBB(Phi->getParent(), LI->getLoopFor(Phi->getParent())) &&
           "Expected Phi in header block.");
    assert(Phi->getNumOperands() == 2 &&
           "header phi must have exactly 2 operands");
    for (BasicBlock *Pred : predecessors(Phi->getParent()))
      PhiR->addOperand(
          getOrCreateVPOperand(Phi->getIncomingValueForBlock(Pred)));
  }
}

// Create a new empty VPBasicBlock for an incoming BasicBlock or retrieve an
// existing one if it was already created.
VPBasicBlock *PlainCFGBuilder::getOrCreateVPBB(BasicBlock *BB) {
  if (auto *VPBB = BB2VPBB.lookup(BB)) {
    // Retrieve existing VPBB.
    return VPBB;
  }

  // Create new VPBB.
  StringRef Name = BB->getName();
  LLVM_DEBUG(dbgs() << "Creating VPBasicBlock for " << Name << "\n");
  VPBasicBlock *VPBB = Plan->createVPBasicBlock(Name);
  BB2VPBB[BB] = VPBB;
  return VPBB;
}

#ifndef NDEBUG
// Return true if \p Val is considered an external definition. An external
// definition is either:
// 1. A Value that is not an Instruction. This will be refined in the future.
// 2. An Instruction that is outside of the IR region represented in VPlan,
//    i.e., is not part of the loop nest.
bool PlainCFGBuilder::isExternalDef(Value *Val) {
  // All the Values that are not Instructions are considered external
  // definitions for now.
  Instruction *Inst = dyn_cast<Instruction>(Val);
  if (!Inst)
    return true;

  // Check whether the Instruction's definition is in the loop body.
  return !TheLoop->contains(Inst);
}
#endif

// Create a new VPValue or retrieve an existing one for the Instruction's
// operand \p IRVal. This function must only be used to create/retrieve
// VPValues for an *Instruction's operands* and not to create regular
// VPInstructions. For the latter, please, look at
// 'createVPInstructionsForVPBB'.
VPValue *PlainCFGBuilder::getOrCreateVPOperand(Value *IRVal) {
  auto VPValIt = IRDef2VPValue.find(IRVal);
  if (VPValIt != IRDef2VPValue.end())
    // Operand has an associated VPInstruction or VPValue that was previously
    // created.
    return VPValIt->second;

  // Operand doesn't have a previously created VPInstruction/VPValue. This
  // means that the operand is:
  // A) a definition external to VPlan,
  // B) any other Value without specific representation in VPlan.
  // For now, we use VPValue to represent A and B and classify both as external
  // definitions. We may introduce specific VPValue subclasses for them in the
  // future.
  assert(isExternalDef(IRVal) && "Expected external definition as operand.");

  // A and B: Create VPValue and add it to the pool of external definitions and
  // to the Value->VPValue map.
  VPValue *NewVPVal = Plan->getOrAddLiveIn(IRVal);
  IRDef2VPValue[IRVal] = NewVPVal;
  return NewVPVal;
}

// Create new VPInstructions in a VPBasicBlock, given its BasicBlock
// counterpart. This function must be invoked in RPO so that the operands of a
// VPInstruction in \p BB have been visited before (except for Phi nodes).
void PlainCFGBuilder::createVPInstructionsForVPBB(VPBasicBlock *VPBB,
                                                  BasicBlock *BB) {
  VPIRBuilder.setInsertPoint(VPBB);
  // TODO: Model and preserve debug intrinsics in VPlan.
  for (Instruction &InstRef : BB->instructionsWithoutDebug(false)) {
    Instruction *Inst = &InstRef;

    // There shouldn't be any VPValue for Inst at this point. Otherwise, we
    // visited Inst when we shouldn't, breaking the RPO traversal order.
    assert(!IRDef2VPValue.count(Inst) &&
           "Instruction shouldn't have been visited.");

    if (auto *Br = dyn_cast<BranchInst>(Inst)) {
      // Conditional branch instructions are represented using BranchOnCond
      // recipes.
      if (Br->isConditional()) {
        VPValue *Cond = getOrCreateVPOperand(Br->getCondition());
        VPIRBuilder.createNaryOp(VPInstruction::BranchOnCond, {Cond}, Inst, {},
                                 VPIRMetadata(*Inst), Inst->getDebugLoc());
      }

      // Skip the rest of the Instruction processing for Branch instructions.
      continue;
    }

    if (auto *SI = dyn_cast<SwitchInst>(Inst)) {
      // Don't emit recipes for switch instructions without cases; they only
      // branch to their default destination.
      if (SI->getNumCases() == 0)
        continue;
      SmallVector<VPValue *> Ops = {getOrCreateVPOperand(SI->getCondition())};
      for (auto Case : SI->cases())
        Ops.push_back(getOrCreateVPOperand(Case.getCaseValue()));
      VPIRBuilder.createNaryOp(Instruction::Switch, Ops, Inst, {},
                               VPIRMetadata(*Inst), Inst->getDebugLoc());
      continue;
    }

    VPSingleDefRecipe *NewR;
    if (auto *Phi = dyn_cast<PHINode>(Inst)) {
      // The phi node's operands may not have been visited at this point. We
      // create an empty VPInstruction that we will fix once the whole plain
      // CFG has been built.
      NewR = VPIRBuilder.createScalarPhi({}, Phi->getDebugLoc(), "vec.phi");
      NewR->setUnderlyingValue(Phi);
      if (isHeaderBB(Phi->getParent(), LI->getLoopFor(Phi->getParent()))) {
        // Header phis need to be fixed after the VPBB for the latch has been
        // created.
        PhisToFix.push_back(Phi);
      } else {
        // Add operands for VPPhi in the order matching its predecessors in
        // VPlan.
        DenseMap<const VPBasicBlock *, VPValue *> VPPredToIncomingValue;
        for (unsigned I = 0; I != Phi->getNumOperands(); ++I) {
          VPPredToIncomingValue[BB2VPBB[Phi->getIncomingBlock(I)]] =
              getOrCreateVPOperand(Phi->getIncomingValue(I));
        }
        for (VPBlockBase *Pred : VPBB->getPredecessors())
          NewR->addOperand(
              VPPredToIncomingValue.lookup(Pred->getExitingBasicBlock()));
      }
    } else {
      // Build VPIRMetadata from the instruction and add loop versioning
      // metadata for loads and stores.
      VPIRMetadata MD(*Inst);
      if (isa<LoadInst, StoreInst>(Inst) && LVer) {
        const auto &[AliasScopeMD, NoAliasMD] =
            LVer->getNoAliasMetadataFor(Inst);
        if (AliasScopeMD)
          MD.setMetadata(LLVMContext::MD_alias_scope, AliasScopeMD);
        if (NoAliasMD)
          MD.setMetadata(LLVMContext::MD_noalias, NoAliasMD);
      }

      // Translate LLVM-IR operands into VPValue operands and set them in the
      // new VPInstruction.
      SmallVector<VPValue *, 4> VPOperands;
      for (Value *Op : Inst->operands())
        VPOperands.push_back(getOrCreateVPOperand(Op));

      if (auto *CI = dyn_cast<CastInst>(Inst)) {
        NewR = VPIRBuilder.createScalarCast(CI->getOpcode(), VPOperands[0],
                                            CI->getType(), CI->getDebugLoc(),
                                            VPIRFlags(*CI), MD);
        NewR->setUnderlyingValue(CI);
      } else {
        // Build VPInstruction for any arbitrary Instruction without specific
        // representation in VPlan.
        NewR =
            VPIRBuilder.createNaryOp(Inst->getOpcode(), VPOperands, Inst,
                                     VPIRFlags(*Inst), MD, Inst->getDebugLoc());
      }
    }

    IRDef2VPValue[Inst] = NewR;
  }
}

// Main interface to build the plain CFG.
std::unique_ptr<VPlan> PlainCFGBuilder::buildPlainCFG() {
  VPIRBasicBlock *Entry = cast<VPIRBasicBlock>(Plan->getEntry());
  BB2VPBB[Entry->getIRBasicBlock()] = Entry;
  for (VPIRBasicBlock *ExitVPBB : Plan->getExitBlocks())
    BB2VPBB[ExitVPBB->getIRBasicBlock()] = ExitVPBB;

  // 1. Scan the body of the loop in a topological order to visit each basic
  // block after having visited its predecessor basic blocks. Create a VPBB for
  // each BB and link it to its successor and predecessor VPBBs. Note that
  // predecessors must be set in the same order as they are in the incoming IR.
  // Otherwise, there might be problems with existing phi nodes and algorithms
  // based on predecessor traversal.

  // The loop preheader needs to be explicitly visited since it's not taken
  // into account by LoopBlocksDFS.
  BasicBlock *ThePreheaderBB = TheLoop->getLoopPreheader();
  assert((ThePreheaderBB->getTerminator()->getNumSuccessors() == 1) &&
         "Unexpected loop preheader");
  for (auto &I : *ThePreheaderBB) {
    if (I.getType()->isVoidTy())
      continue;
    IRDef2VPValue[&I] = Plan->getOrAddLiveIn(&I);
  }

  LoopBlocksRPO RPO(TheLoop);
  RPO.perform(LI);

  for (BasicBlock *BB : RPO) {
    // Create or retrieve the VPBasicBlock for this BB.
    VPBasicBlock *VPBB = getOrCreateVPBB(BB);
    // Set VPBB predecessors in the same order as they are in the incoming BB.
    setVPBBPredsFromBB(VPBB, BB);

    // Create VPInstructions for BB.
    createVPInstructionsForVPBB(VPBB, BB);

    // Set VPBB successors. We create empty VPBBs for successors if they don't
    // exist already. Recipes will be created when the successor is visited
    // during the RPO traversal.
    if (auto *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
      SmallVector<VPBlockBase *> Succs = {
          getOrCreateVPBB(SI->getDefaultDest())};
      for (auto Case : SI->cases())
        Succs.push_back(getOrCreateVPBB(Case.getCaseSuccessor()));
      VPBB->setSuccessors(Succs);
      continue;
    }
    auto *BI = cast<BranchInst>(BB->getTerminator());
    unsigned NumSuccs = succ_size(BB);
    if (NumSuccs == 1) {
      VPBB->setOneSuccessor(getOrCreateVPBB(BB->getSingleSuccessor()));
      continue;
    }
    assert(BI->isConditional() && NumSuccs == 2 &&
           "block must have conditional branch with 2 successors");

    BasicBlock *IRSucc0 = BI->getSuccessor(0);
    BasicBlock *IRSucc1 = BI->getSuccessor(1);
    VPBasicBlock *Successor0 = getOrCreateVPBB(IRSucc0);
    VPBasicBlock *Successor1 = getOrCreateVPBB(IRSucc1);
    VPBB->setTwoSuccessors(Successor0, Successor1);
  }

  for (auto *EB : Plan->getExitBlocks())
    setVPBBPredsFromBB(EB, EB->getIRBasicBlock());

  // 2. The whole CFG has been built at this point so all the input Values must
  // have a VPlan counterpart. Fix VPlan header phis by adding their
  // corresponding VPlan operands.
  fixHeaderPhis();

  Plan->getEntry()->setOneSuccessor(getOrCreateVPBB(TheLoop->getHeader()));
  Plan->getEntry()->setPlan(&*Plan);

  // Fix VPlan loop-closed-ssa exit phis by adding incoming operands to the
  // VPIRInstructions wrapping them.
  // Note that the operand order corresponds to IR predecessor order, and may
  // need adjusting when VPlan predecessors are added, if an exit block has
  // multiple predecessors.
  for (auto *EB : Plan->getExitBlocks()) {
    for (VPRecipeBase &R : EB->phis()) {
      auto *PhiR = cast<VPIRPhi>(&R);
      PHINode &Phi = PhiR->getIRPhi();
      assert(PhiR->getNumOperands() == 0 &&
             "no phi operands should be added yet");
      for (BasicBlock *Pred : predecessors(EB->getIRBasicBlock()))
        PhiR->addOperand(
            getOrCreateVPOperand(Phi.getIncomingValueForBlock(Pred)));
    }
  }

  LLVM_DEBUG(Plan->setName("Plain CFG\n"); dbgs() << *Plan);
  return std::move(Plan);
}

/// Checks if \p HeaderVPB is a loop header block in the plain CFG; that is, it
/// has exactly 2 predecessors (preheader and latch), where the block
/// dominates the latch and the preheader dominates the block. If it is a
/// header block, return true and canonicalize the predecessors of the header
/// (making sure the preheader appears first and the latch second) and the
/// successors of the latch (making sure the loop exit comes first). Otherwise
/// return false.
static bool canonicalHeaderAndLatch(VPBlockBase *HeaderVPB,
                                    const VPDominatorTree &VPDT) {
  ArrayRef<VPBlockBase *> Preds = HeaderVPB->getPredecessors();
  if (Preds.size() != 2)
    return false;

  auto *PreheaderVPBB = Preds[0];
  auto *LatchVPBB = Preds[1];
  if (!VPDT.dominates(PreheaderVPBB, HeaderVPB) ||
      !VPDT.dominates(HeaderVPB, LatchVPBB)) {
    std::swap(PreheaderVPBB, LatchVPBB);

    if (!VPDT.dominates(PreheaderVPBB, HeaderVPB) ||
        !VPDT.dominates(HeaderVPB, LatchVPBB))
      return false;

    // Canonicalize predecessors of header so that preheader is first and
    // latch second.
    HeaderVPB->swapPredecessors();
    for (VPRecipeBase &R : cast<VPBasicBlock>(HeaderVPB)->phis())
      R.swapOperands();
  }

  // The two successors of a conditional branch match the condition, with the
  // first successor corresponding to true and the second to false. We
  // canonicalize the successors of the latch when introducing the region, such
  // that the latch exits the region when its condition is true; invert the
  // original condition if the original CFG branches to the header on true.
  // Note that the exit edge is not yet connected for top-level loops.
  if (LatchVPBB->getSingleSuccessor() ||
      LatchVPBB->getSuccessors()[0] != HeaderVPB)
    return true;

  assert(LatchVPBB->getNumSuccessors() == 2 && "Must have 2 successors");
  auto *Term = cast<VPBasicBlock>(LatchVPBB)->getTerminator();
  assert(cast<VPInstruction>(Term)->getOpcode() ==
             VPInstruction::BranchOnCond &&
         "terminator must be a BranchOnCond");
  auto *Not = new VPInstruction(VPInstruction::Not, {Term->getOperand(0)});
  Not->insertBefore(Term);
  Term->setOperand(0, Not);
  LatchVPBB->swapSuccessors();

  return true;
}

/// Create a new VPRegionBlock for the loop starting at \p HeaderVPB.
static void createLoopRegion(VPlan &Plan, VPBlockBase *HeaderVPB) {
  auto *PreheaderVPBB = HeaderVPB->getPredecessors()[0];
  auto *LatchVPBB = HeaderVPB->getPredecessors()[1];

  VPBlockUtils::disconnectBlocks(PreheaderVPBB, HeaderVPB);
  VPBlockUtils::disconnectBlocks(LatchVPBB, HeaderVPB);

  // Create an empty region first and insert it between PreheaderVPBB and
  // the exit blocks, taking care to preserve the original predecessor &
  // successor order of blocks. Set region entry and exiting after both
  // HeaderVPB and LatchVPBB have been disconnected from their
  // predecessors/successors.
  auto *R = Plan.createLoopRegion();

  // Transfer the latch's successors to the region.
  VPBlockUtils::transferSuccessors(LatchVPBB, R);

  VPBlockUtils::connectBlocks(PreheaderVPBB, R);
  R->setEntry(HeaderVPB);
  R->setExiting(LatchVPBB);

  // All VPBBs reachable shallowly from HeaderVPB belong to the current region.
  for (VPBlockBase *VPBB : vp_depth_first_shallow(HeaderVPB))
    VPBB->setParent(R);
}

// Add the necessary canonical IV and branch recipes required to control the
// loop.
static void addCanonicalIVRecipes(VPlan &Plan, VPBasicBlock *HeaderVPBB,
                                  VPBasicBlock *LatchVPBB, Type *IdxTy,
                                  DebugLoc DL) {
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  auto *StartV = Plan.getOrAddLiveIn(StartIdx);

  // Add a VPCanonicalIVPHIRecipe starting at 0 to the header.
  auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL);
  HeaderVPBB->insert(CanonicalIVPHI, HeaderVPBB->begin());

  // We are about to replace the branch to exit the region. Remove the original
  // BranchOnCond, if there is any.
  DebugLoc LatchDL = DL;
  if (!LatchVPBB->empty() && match(&LatchVPBB->back(), m_BranchOnCond())) {
    LatchDL = LatchVPBB->getTerminator()->getDebugLoc();
    LatchVPBB->getTerminator()->eraseFromParent();
  }

  VPBuilder Builder(LatchVPBB);
  // Add a VPInstruction to increment the scalar canonical IV by VF * UF.
  // Initially the induction increment is guaranteed not to wrap, but that may
  // change later, e.g. when tail-folding, in which case the flags need to be
  // dropped.
  auto *CanonicalIVIncrement = Builder.createAdd(
      CanonicalIVPHI, &Plan.getVFxUF(), DL, "index.next", {true, false});
  CanonicalIVPHI->addOperand(CanonicalIVIncrement);

  // Add the BranchOnCount VPInstruction to the latch.
  Builder.createNaryOp(VPInstruction::BranchOnCount,
                       {CanonicalIVIncrement, &Plan.getVectorTripCount()},
                       LatchDL);
}

/// Creates extracts for values in \p Plan defined in a loop region and used
/// outside a loop region.
static void createExtractsForLiveOuts(VPlan &Plan, VPBasicBlock *MiddleVPBB) {
  VPBuilder B(MiddleVPBB, MiddleVPBB->getFirstNonPhi());
  for (VPBasicBlock *EB : Plan.getExitBlocks()) {
    if (EB->getSinglePredecessor() != MiddleVPBB)
      continue;

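    // Exit-phi incoming values defined by a recipe inside a loop region must
    // be extracted from the last lane of the last part in the middle block
    // before they can feed the exit phi.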
    for (VPRecipeBase &R : EB->phis()) {
      auto *ExitIRI = cast<VPIRPhi>(&R);
      for (unsigned Idx = 0; Idx != ExitIRI->getNumIncoming(); ++Idx) {
        VPRecipeBase *Inc = ExitIRI->getIncomingValue(Idx)->getDefiningRecipe();
        if (!Inc)
          continue;
        assert(ExitIRI->getNumOperands() == 1 &&
               ExitIRI->getParent()->getSinglePredecessor() == MiddleVPBB &&
               "exit values from early exits must be fixed when branch to "
               "early-exit is added");
        ExitIRI->extractLastLaneOfLastPartOfFirstOperand(B);
      }
    }
  }
}

static void addInitialSkeleton(VPlan &Plan, Type *InductionTy, DebugLoc IVDL,
                               PredicatedScalarEvolution &PSE, Loop *TheLoop) {
  VPDominatorTree VPDT(Plan);

  auto *HeaderVPBB = cast<VPBasicBlock>(Plan.getEntry()->getSingleSuccessor());
  canonicalHeaderAndLatch(HeaderVPBB, VPDT);
  auto *LatchVPBB = cast<VPBasicBlock>(HeaderVPBB->getPredecessors()[1]);

  VPBasicBlock *VecPreheader = Plan.createVPBasicBlock("vector.ph");
  VPBlockUtils::insertBlockAfter(VecPreheader, Plan.getEntry());

  VPBasicBlock *MiddleVPBB = Plan.createVPBasicBlock("middle.block");
  // The canonical LatchVPBB has the header block as its last successor. If it
  // has another successor, this successor is an exit block - insert the middle
  // block on its edge. Otherwise, add the middle block as another successor,
  // retaining the header as last.
  if (LatchVPBB->getNumSuccessors() == 2) {
    VPBlockBase *LatchExitVPB = LatchVPBB->getSuccessors()[0];
    VPBlockUtils::insertOnEdge(LatchVPBB, LatchExitVPB, MiddleVPBB);
  } else {
    VPBlockUtils::connectBlocks(LatchVPBB, MiddleVPBB);
    LatchVPBB->swapSuccessors();
  }

  addCanonicalIVRecipes(Plan, HeaderVPBB, LatchVPBB, InductionTy, IVDL);

  // Create SCEV and VPValue for the trip count.
  // We use the symbolic max backedge-taken-count, which also works when
  // vectorizing loops with uncountable early exits.
  const SCEV *BackedgeTakenCountSCEV = PSE.getSymbolicMaxBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCountSCEV) &&
         "Invalid backedge-taken count");
  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *TripCount = SE.getTripCountFromExitCount(BackedgeTakenCountSCEV,
                                                       InductionTy, TheLoop);
  Plan.setTripCount(vputils::getOrCreateVPValueForSCEVExpr(Plan, TripCount));

  VPBasicBlock *ScalarPH = Plan.createVPBasicBlock("scalar.ph");
  VPBlockUtils::connectBlocks(ScalarPH, Plan.getScalarHeader());

  // The connection order corresponds to the operands of the conditional
  // branch, with the middle block already connected to the exit block.
  VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
  // Also connect the entry block to the scalar preheader.
  // TODO: Also introduce a branch recipe together with the minimum trip count
  // check.
  VPBlockUtils::connectBlocks(Plan.getEntry(), ScalarPH);
  Plan.getEntry()->swapSuccessors();

  createExtractsForLiveOuts(Plan, MiddleVPBB);

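  // Create resume phis in the scalar preheader for all header phis except the
  // canonical IV (skipped via drop_begin). Each resume phi takes the vector
  // header phi and its start value as incoming values and feeds the matching
  // phi in the scalar header.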
  VPBuilder ScalarPHBuilder(ScalarPH);
  for (const auto &[PhiR, ScalarPhiR] : zip_equal(
           drop_begin(HeaderVPBB->phis()), Plan.getScalarHeader()->phis())) {
    auto *VectorPhiR = cast<VPPhi>(&PhiR);
    auto *ResumePhiR = ScalarPHBuilder.createScalarPhi(
        {VectorPhiR, VectorPhiR->getOperand(0)}, VectorPhiR->getDebugLoc());
    cast<VPIRPhi>(&ScalarPhiR)->addOperand(ResumePhiR);
  }
}

/// Check \p Plan's live-ins and replace them with constants, if they can be
/// simplified via SCEV.
static void simplifyLiveInsWithSCEV(VPlan &Plan,
                                    PredicatedScalarEvolution &PSE) {
  auto GetSimplifiedLiveInViaSCEV = [&](VPValue *VPV) -> VPValue * {
    const SCEV *Expr = vputils::getSCEVExprForVPValue(VPV, PSE);
    if (auto *C = dyn_cast<SCEVConstant>(Expr))
      return Plan.getOrAddLiveIn(C->getValue());
    return nullptr;
  };

  for (VPValue *LiveIn : to_vector(Plan.getLiveIns())) {
    if (VPValue *SimplifiedLiveIn = GetSimplifiedLiveInViaSCEV(LiveIn))
      LiveIn->replaceAllUsesWith(SimplifiedLiveIn);
  }
}

/// Used to make RUN_VPLAN_PASS print the initial VPlan.
static void printAfterInitialConstruction(VPlan &) {}

std::unique_ptr<VPlan>
VPlanTransforms::buildVPlan0(Loop *TheLoop, LoopInfo &LI, Type *InductionTy,
                             DebugLoc IVDL, PredicatedScalarEvolution &PSE,
                             LoopVersioning *LVer) {
  PlainCFGBuilder Builder(TheLoop, &LI, LVer);
  std::unique_ptr<VPlan> VPlan0 = Builder.buildPlainCFG();
  addInitialSkeleton(*VPlan0, InductionTy, IVDL, PSE, TheLoop);
  simplifyLiveInsWithSCEV(*VPlan0, PSE);

  RUN_VPLAN_PASS_NO_VERIFY(printAfterInitialConstruction, *VPlan0);
  return VPlan0;
}

/// Creates a VPWidenIntOrFpInductionRecipe or VPWidenPointerInductionRecipe
/// for \p Phi based on \p IndDesc.
static VPHeaderPHIRecipe *
createWidenInductionRecipe(PHINode *Phi, VPPhi *PhiR, VPIRValue *Start,
                           const InductionDescriptor &IndDesc, VPlan &Plan,
                           PredicatedScalarEvolution &PSE, Loop &OrigLoop,
                           DebugLoc DL) {
  [[maybe_unused]] ScalarEvolution &SE = *PSE.getSE();
  assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) &&
         "step must be loop invariant");
  assert((Plan.getLiveIn(IndDesc.getStartValue()) == Start ||
          (SE.isSCEVable(IndDesc.getStartValue()->getType()) &&
           SE.getSCEV(IndDesc.getStartValue()) ==
               vputils::getSCEVExprForVPValue(Start, PSE))) &&
         "Start VPValue must match IndDesc's start value");

  VPValue *Step =
      vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep());

  if (IndDesc.getKind() == InductionDescriptor::IK_PtrInduction)
    return new VPWidenPointerInductionRecipe(Phi, Start, Step, &Plan.getVFxUF(),
                                             IndDesc, DL);

  assert((IndDesc.getKind() == InductionDescriptor::IK_IntInduction ||
          IndDesc.getKind() == InductionDescriptor::IK_FpInduction) &&
         "must have an integer or float induction at this point");

  // Update wide induction increments to use the same step as the corresponding
  // wide induction. This enables detecting induction increments directly in
  // VPlan and removes redundant splats.
  using namespace llvm::VPlanPatternMatch;
  if (match(PhiR->getOperand(1), m_Add(m_Specific(PhiR), m_VPValue())))
    PhiR->getOperand(1)->getDefiningRecipe()->setOperand(1, Step);

  // It is always safe to copy over the NoWrap and FastMath flags. In
  // particular, when folding tail by masking, the masked-off lanes are never
  // used, so it is safe.
  VPIRFlags Flags = vputils::getFlagsFromIndDesc(IndDesc);

  return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, &Plan.getVF(),
                                           IndDesc, Flags, DL);
}

void VPlanTransforms::createHeaderPhiRecipes(
    VPlan &Plan, PredicatedScalarEvolution &PSE, Loop &OrigLoop,
    const MapVector<PHINode *, InductionDescriptor> &Inductions,
    const MapVector<PHINode *, RecurrenceDescriptor> &Reductions,
    const SmallPtrSetImpl<const PHINode *> &FixedOrderRecurrences,
    const SmallPtrSetImpl<PHINode *> &InLoopReductions, bool AllowReordering) {
  // Retrieve the header manually from the initial plain-CFG VPlan.
  VPBasicBlock *HeaderVPBB = cast<VPBasicBlock>(
      Plan.getEntry()->getSuccessors()[1]->getSingleSuccessor());
  assert(VPDominatorTree(Plan).dominates(HeaderVPBB,
                                         HeaderVPBB->getPredecessors()[1]) &&
         "header must dominate its latch");

  auto CreateHeaderPhiRecipe = [&](VPPhi *PhiR) -> VPHeaderPHIRecipe * {
    // TODO: Gradually replace uses of underlying instruction by analyses on
    // VPlan.
    auto *Phi = cast<PHINode>(PhiR->getUnderlyingInstr());
    assert(PhiR->getNumOperands() == 2 &&
           "Must have 2 operands for header phis");

    // Extract common values once.
    VPIRValue *Start = cast<VPIRValue>(PhiR->getOperand(0));
    VPValue *BackedgeValue = PhiR->getOperand(1);

    if (FixedOrderRecurrences.contains(Phi)) {
      // TODO: Currently fixed-order recurrences are modeled as chains of
      // first-order recurrences. If there are no users of the intermediate
      // recurrences in the chain, the fixed-order recurrence should be
      // modeled directly, enabling more efficient codegen.
      return new VPFirstOrderRecurrencePHIRecipe(Phi, *Start, *BackedgeValue);
    }

    auto InductionIt = Inductions.find(Phi);
    if (InductionIt != Inductions.end())
      return createWidenInductionRecipe(Phi, PhiR, Start, InductionIt->second,
                                        Plan, PSE, OrigLoop,
                                        PhiR->getDebugLoc());

    assert(Reductions.contains(Phi) && "only reductions are expected now");
    const RecurrenceDescriptor &RdxDesc = Reductions.lookup(Phi);
    assert(RdxDesc.getRecurrenceStartValue() ==
               Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader()) &&
           "incoming value must match start value");
    // Will be updated later to >1 if the reduction is partial.
    unsigned ScaleFactor = 1;
    bool UseOrderedReductions = !AllowReordering && RdxDesc.isOrdered();
    return new VPReductionPHIRecipe(
        Phi, RdxDesc.getRecurrenceKind(), *Start, *BackedgeValue,
        getReductionStyle(InLoopReductions.contains(Phi), UseOrderedReductions,
                          ScaleFactor),
        RdxDesc.hasUsesOutsideReductionChain());
  };

  for (VPRecipeBase &R : make_early_inc_range(HeaderVPBB->phis())) {
    if (isa<VPCanonicalIVPHIRecipe>(&R))
      continue;
    auto *PhiR = cast<VPPhi>(&R);
    VPHeaderPHIRecipe *HeaderPhiR = CreateHeaderPhiRecipe(PhiR);
    HeaderPhiR->insertBefore(PhiR);
    PhiR->replaceAllUsesWith(HeaderPhiR);
    PhiR->eraseFromParent();
  }
}

void VPlanTransforms::createInLoopReductionRecipes(
    VPlan &Plan, const DenseMap<VPBasicBlock *, VPValue *> &BlockMaskCache,
    const DenseSet<BasicBlock *> &BlocksNeedingPredication,
    ElementCount MinVF) {
  VPTypeAnalysis TypeInfo(Plan);
  VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  SmallVector<VPRecipeBase *> ToDelete;

  for (VPRecipeBase &R : Header->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR || !PhiR->isInLoop() || (MinVF.isScalar() && !PhiR->isOrdered()))
      continue;

    RecurKind Kind = PhiR->getRecurrenceKind();
    assert(!RecurrenceDescriptor::isFindLastRecurrenceKind(Kind) &&
           !RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind) &&
           !RecurrenceDescriptor::isFindIVRecurrenceKind(Kind) &&
           "AnyOf and Find reductions are not allowed for in-loop reductions");

    bool IsFPRecurrence =
        RecurrenceDescriptor::isFloatingPointRecurrenceKind(Kind);
    FastMathFlags FMFs =
        IsFPRecurrence ? FastMathFlags::getFast() : FastMathFlags();

    // Collect the chain of "link" recipes for the reduction starting at PhiR.
    SetVector<VPSingleDefRecipe *> Worklist;
    Worklist.insert(PhiR);
    for (unsigned I = 0; I != Worklist.size(); ++I) {
      VPSingleDefRecipe *Cur = Worklist[I];
      for (VPUser *U : Cur->users()) {
        auto *UserRecipe = cast<VPSingleDefRecipe>(U);
        if (!UserRecipe->getParent()->getEnclosingLoopRegion()) {
          assert((UserRecipe->getParent() == Plan.getMiddleBlock() ||
                  UserRecipe->getParent() == Plan.getScalarPreheader()) &&
                 "U must be either in the loop region, the middle block or the "
                 "scalar preheader.");
          continue;
        }

        // Stores using instructions will be sunk later.
        if (match(UserRecipe, m_VPInstruction<Instruction::Store>()))
          continue;
        Worklist.insert(UserRecipe);
      }
    }

    // Visit operation "Links" along the reduction chain top-down starting from
    // the phi until LoopExitValue. We keep track of the previous item
    // (PreviousLink) to tell which of the two operands of a Link will remain
    // scalar and which will be reduced. For minmax by select(cmp), Link will be
    // the select instructions. Blend recipes of in-loop reduction phis will
    // get folded to their non-phi operand, as the reduction recipe handles the
    // condition directly.
    VPSingleDefRecipe *PreviousLink = PhiR; // Aka Worklist[0].
    for (VPSingleDefRecipe *CurrentLink : drop_begin(Worklist)) {
      if (auto *Blend = dyn_cast<VPBlendRecipe>(CurrentLink)) {
        assert(Blend->getNumIncomingValues() == 2 &&
               "Blend must have 2 incoming values");
        unsigned PhiRIdx = Blend->getIncomingValue(0) == PhiR ? 0 : 1;
        assert(Blend->getIncomingValue(PhiRIdx) == PhiR &&
               "PhiR must be an operand of the blend");
        Blend->replaceAllUsesWith(Blend->getIncomingValue(1 - PhiRIdx));
        continue;
      }

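      // Intersect the fast-math flags of all links in the chain; for selects,
      // also include the flags of the compare feeding the select condition.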
      if (IsFPRecurrence) {
        FastMathFlags CurFMF =
            cast<VPRecipeWithIRFlags>(CurrentLink)->getFastMathFlags();
        if (match(CurrentLink, m_Select(m_VPValue(), m_VPValue(), m_VPValue())))
          CurFMF |= cast<VPRecipeWithIRFlags>(CurrentLink->getOperand(0))
                        ->getFastMathFlags();
        FMFs &= CurFMF;
      }

      Instruction *CurrentLinkI = CurrentLink->getUnderlyingInstr();

      // Recognize a call to the llvm.fmuladd intrinsic.
      bool IsFMulAdd = Kind == RecurKind::FMulAdd;
      VPValue *VecOp;
      VPBasicBlock *LinkVPBB = CurrentLink->getParent();
      if (IsFMulAdd) {
        assert(RecurrenceDescriptor::isFMulAddIntrinsic(CurrentLinkI) &&
               "Expected current VPInstruction to be a call to the "
               "llvm.fmuladd intrinsic");
        assert(CurrentLink->getOperand(2) == PreviousLink &&
               "expected a call where the previous link is the added operand");

        // If the instruction is a call to the llvm.fmuladd intrinsic then we
        // need to create an fmul recipe (multiplying the first two operands of
        // the fmuladd together) to use as the vector operand for the fadd
        // reduction.
        auto *FMulRecipe = new VPInstruction(
            Instruction::FMul,
            {CurrentLink->getOperand(0), CurrentLink->getOperand(1)},
            CurrentLinkI->getFastMathFlags());
        LinkVPBB->insert(FMulRecipe, CurrentLink->getIterator());
        VecOp = FMulRecipe;
      } else if (Kind == RecurKind::AddChainWithSubs &&
                 match(CurrentLink, m_Sub(m_VPValue(), m_VPValue()))) {
        Type *PhiTy = TypeInfo.inferScalarType(PhiR);
        auto *Zero = Plan.getConstantInt(PhiTy, 0);
        VPBuilder Builder(LinkVPBB, CurrentLink->getIterator());
        auto *Sub = Builder.createSub(Zero, CurrentLink->getOperand(1),
                                      CurrentLinkI->getDebugLoc());
        Sub->setUnderlyingValue(CurrentLinkI);
        VecOp = Sub;
      } else {
        // Index of the first operand which holds a non-mask vector operand.
        unsigned IndexOfFirstOperand = 0;
        if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
          if (match(CurrentLink, m_Cmp(m_VPValue(), m_VPValue())))
            continue;
          assert(match(CurrentLink,
                       m_Select(m_VPValue(), m_VPValue(), m_VPValue())) &&
                 "must be a select recipe");
          IndexOfFirstOperand = 1;
        }
        // Note that for non-commutable operands (cmp-selects), the semantics
        // of the cmp-select are captured in the recurrence kind.
        unsigned VecOpId =
            CurrentLink->getOperand(IndexOfFirstOperand) == PreviousLink
                ? IndexOfFirstOperand + 1
                : IndexOfFirstOperand;
        VecOp = CurrentLink->getOperand(VecOpId);
        assert(VecOp != PreviousLink &&
               CurrentLink->getOperand(CurrentLink->getNumOperands() - 1 -
                                       (VecOpId - IndexOfFirstOperand)) ==
                   PreviousLink &&
               "PreviousLink must be the operand other than VecOp");
      }

      // Get the block mask from BlockMaskCache if the block needs predication.
      VPValue *CondOp = nullptr;
      if (BlocksNeedingPredication.contains(CurrentLinkI->getParent()))
        CondOp = BlockMaskCache.lookup(LinkVPBB);

      assert(PhiR->getVFScaleFactor() == 1 &&
             "in-loop reductions must be unscaled");
      auto *RedRecipe = new VPReductionRecipe(
          Kind, FMFs, CurrentLinkI, PreviousLink, VecOp, CondOp,
          getReductionStyle(/*IsInLoop=*/true, PhiR->isOrdered(), 1),
          CurrentLinkI->getDebugLoc());
      // Append the recipe to the end of the VPBasicBlock because we need to
      // ensure that it comes after all of its inputs, including CondOp.
      // Delete CurrentLink as it will be invalid if its operand is replaced
      // with a reduction defined at the bottom of the block in the next link.
      if (LinkVPBB->getNumSuccessors() == 0)
        RedRecipe->insertBefore(&*std::prev(std::prev(LinkVPBB->end())));
      else
        LinkVPBB->appendRecipe(RedRecipe);

      CurrentLink->replaceAllUsesWith(RedRecipe);
      ToDelete.push_back(CurrentLink);
      PreviousLink = RedRecipe;
    }
  }

  for (VPRecipeBase *R : ToDelete)
    R->eraseFromParent();
}

void VPlanTransforms::handleEarlyExits(VPlan &Plan,
                                       bool HasUncountableEarlyExit) {
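  // Walk up from the scalar header to locate the relevant blocks: the scalar
  // preheader is the scalar header's single predecessor, its first predecessor
  // is the middle block, the middle block's single predecessor is the latch,
  // and the header is the latch's second successor.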
  auto *MiddleVPBB = cast<VPBasicBlock>(
      Plan.getScalarHeader()->getSinglePredecessor()->getPredecessors()[0]);
  auto *LatchVPBB = cast<VPBasicBlock>(MiddleVPBB->getSinglePredecessor());
  VPBlockBase *HeaderVPB = cast<VPBasicBlock>(LatchVPBB->getSuccessors()[1]);

  // Disconnect all early exits from the loop, leaving it with a single exit
  // from the latch. Early exits that are countable are left for a scalar
  // epilogue. The condition of uncountable early exits (currently at most one
  // is supported) is fused into the latch exit, and used to branch from the
  // middle block to the early exit destination.
  [[maybe_unused]] bool HandledUncountableEarlyExit = false;
  for (VPIRBasicBlock *EB : Plan.getExitBlocks()) {
    for (VPBlockBase *Pred : to_vector(EB->getPredecessors())) {
      if (Pred == MiddleVPBB)
        continue;
      if (HasUncountableEarlyExit) {
        assert(!HandledUncountableEarlyExit &&
               "can handle exactly one uncountable early exit");
        handleUncountableEarlyExit(cast<VPBasicBlock>(Pred), EB, Plan,
                                   cast<VPBasicBlock>(HeaderVPB), LatchVPBB);
        HandledUncountableEarlyExit = true;
      } else {
        for (VPRecipeBase &R : EB->phis())
          cast<VPIRPhi>(&R)->removeIncomingValueFor(Pred);
      }
      cast<VPBasicBlock>(Pred)->getTerminator()->eraseFromParent();
      VPBlockUtils::disconnectBlocks(Pred, EB);
    }
  }

  assert((!HasUncountableEarlyExit || HandledUncountableEarlyExit) &&
         "missed an uncountable exit that must be handled");
}

void VPlanTransforms::addMiddleCheck(VPlan &Plan,
                                     bool RequiresScalarEpilogueCheck,
                                     bool TailFolded) {
  auto *MiddleVPBB = cast<VPBasicBlock>(
      Plan.getScalarHeader()->getSinglePredecessor()->getPredecessors()[0]);
  // If MiddleVPBB has a single successor then the original loop does not exit
  // via the latch and the single successor must be the scalar preheader.
  // There's no need to add a runtime check to MiddleVPBB.
  if (MiddleVPBB->getNumSuccessors() == 1) {
    assert(MiddleVPBB->getSingleSuccessor() == Plan.getScalarPreheader() &&
           "must have ScalarPH as single successor");
    return;
  }

  assert(MiddleVPBB->getNumSuccessors() == 2 && "must have 2 successors");

  // Add a check in the middle block to see if we have completed all of the
  // iterations in the first vector loop.
  //
  // Three cases:
  // 1) If we require a scalar epilogue, the scalar preheader must execute. Set
  //    the condition to false.
  // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. Thus if
  //    the tail is to be folded, we know we don't need to run the remainder
  //    and we can set the condition to true.
  // 3) Otherwise, construct a runtime check.

  // We use the same DebugLoc as the scalar loop latch terminator instead of
  // the corresponding compare because they may have ended up with different
  // line numbers and we want to avoid awkward line stepping while debugging.
  // E.g., if the compare has got a line number inside the loop.
  auto *LatchVPBB = cast<VPBasicBlock>(MiddleVPBB->getSinglePredecessor());
  DebugLoc LatchDL = LatchVPBB->getTerminator()->getDebugLoc();
  VPBuilder Builder(MiddleVPBB);
  VPValue *Cmp;
  if (!RequiresScalarEpilogueCheck)
    Cmp = Plan.getFalse();
  else if (TailFolded)
    Cmp = Plan.getTrue();
  else
    Cmp = Builder.createICmp(CmpInst::ICMP_EQ, Plan.getTripCount(),
                             &Plan.getVectorTripCount(), LatchDL, "cmp.n");
  Builder.createNaryOp(VPInstruction::BranchOnCond, {Cmp}, LatchDL);
}

void VPlanTransforms::createLoopRegions(VPlan &Plan) {
  VPDominatorTree VPDT(Plan);
  for (VPBlockBase *HeaderVPB : vp_post_order_shallow(Plan.getEntry()))
    if (canonicalHeaderAndLatch(HeaderVPB, VPDT))
      createLoopRegion(Plan, HeaderVPB);

  VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
  TopRegion->setName("vector loop");
  TopRegion->getEntryBasicBlock()->setName("vector.body");
}

/// Insert \p CheckBlockVPBB on the edge leading to the vector preheader,
/// connecting it to both vector and scalar preheaders. Updates scalar
/// preheader phis to account for the new predecessor.
static void insertCheckBlockBeforeVectorLoop(VPlan &Plan,
                                             VPBasicBlock *CheckBlockVPBB) {
  VPBlockBase *VectorPH = Plan.getVectorPreheader();
  auto *ScalarPH = cast<VPBasicBlock>(Plan.getScalarPreheader());
  VPBlockBase *PreVectorPH = VectorPH->getSinglePredecessor();
  VPBlockUtils::insertOnEdge(PreVectorPH, VectorPH, CheckBlockVPBB);
  VPBlockUtils::connectBlocks(CheckBlockVPBB, ScalarPH);
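  // Swap successors so the check block branches to the scalar preheader when
  // the bypass condition is true (the first successor corresponds to true).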
  CheckBlockVPBB->swapSuccessors();
  unsigned NumPreds = ScalarPH->getNumPredecessors();
  for (VPRecipeBase &R : ScalarPH->phis()) {
    auto *Phi = cast<VPPhi>(&R);
    assert(Phi->getNumIncoming() == NumPreds - 1 &&
           "must have incoming values for all predecessors");
    Phi->addOperand(Phi->getOperand(NumPreds - 2));
  }
}

// Likelihood of bypassing the vectorized loop due to a runtime check block,
// including the memory overlap checks block and the wrapping/unit-stride
// checks block.
static constexpr uint32_t CheckBypassWeights[] = {1, 127};

/// Create a BranchOnCond terminator in \p CheckBlockVPBB. Optionally adds
/// branch weights.
static void addBypassBranch(VPlan &Plan, VPBasicBlock *CheckBlockVPBB,
                            VPValue *Cond, bool AddBranchWeights) {
  DebugLoc DL = Plan.getVectorLoopRegion()->getCanonicalIV()->getDebugLoc();
  auto *Term = VPBuilder(CheckBlockVPBB)
                   .createNaryOp(VPInstruction::BranchOnCond, {Cond}, DL);
  if (AddBranchWeights) {
    MDBuilder MDB(Plan.getContext());
    MDNode *BranchWeights =
        MDB.createBranchWeights(CheckBypassWeights, /*IsExpected=*/false);
    Term->setMetadata(LLVMContext::MD_prof, BranchWeights);
  }
}

void VPlanTransforms::attachCheckBlock(VPlan &Plan, Value *Cond,
                                       BasicBlock *CheckBlock,
                                       bool AddBranchWeights) {
  VPValue *CondVPV = Plan.getOrAddLiveIn(Cond);
  VPBasicBlock *CheckBlockVPBB = Plan.createVPIRBasicBlock(CheckBlock);
  insertCheckBlockBeforeVectorLoop(Plan, CheckBlockVPBB);
  addBypassBranch(Plan, CheckBlockVPBB, CondVPV, AddBranchWeights);
}

void VPlanTransforms::addMinimumIterationCheck(
    VPlan &Plan, ElementCount VF, unsigned UF,
    ElementCount MinProfitableTripCount, bool RequiresScalarEpilogue,
    bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop,
    const uint32_t *MinItersBypassWeights, DebugLoc DL,
    PredicatedScalarEvolution &PSE) {
  // Generate code to check if the loop's trip count is less than VF * UF, or
  // equal to it in case a scalar epilogue is required; this implies that the
  // vector trip count is zero. This check also covers the case where adding
  // one to the backedge-taken count overflowed, leading to an incorrect trip
  // count of zero. In this case we will also jump to the scalar loop.
  CmpInst::Predicate CmpPred =
      RequiresScalarEpilogue ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
  // If the tail is to be folded, the vector loop takes care of all iterations.
  VPValue *TripCountVPV = Plan.getTripCount();
  const SCEV *TripCount = vputils::getSCEVExprForVPValue(TripCountVPV, PSE);
  Type *TripCountTy = TripCount->getType();
  ScalarEvolution &SE = *PSE.getSE();
  auto GetMinTripCount = [&]() -> const SCEV * {
    // Compute max(MinProfitableTripCount, UF * VF) and return it.
    const SCEV *VFxUF =
        SE.getElementCount(TripCountTy, (VF * UF), SCEV::FlagNUW);
    if (UF * VF.getKnownMinValue() >=
        MinProfitableTripCount.getKnownMinValue()) {
      // TODO: SCEV should be able to simplify the test.
      return VFxUF;
    }
    const SCEV *MinProfitableTripCountSCEV =
        SE.getElementCount(TripCountTy, MinProfitableTripCount, SCEV::FlagNUW);
    return SE.getUMaxExpr(MinProfitableTripCountSCEV, VFxUF);
  };

  VPBasicBlock *EntryVPBB = Plan.getEntry();
  VPBuilder Builder(EntryVPBB);
  VPValue *TripCountCheck = Plan.getFalse();
  const SCEV *Step = GetMinTripCount();
  if (TailFolded) {
    if (CheckNeededWithTailFolding) {
      // vscale is not necessarily a power-of-2, which means we cannot
      // guarantee an overflow to zero when updating induction variables and so
      // an additional overflow check is required before entering the vector
      // loop.

      VPValue *StepVPV = Builder.createExpandSCEV(Step);

      // Get the maximum unsigned value for the type.
      VPValue *MaxUIntTripCount =
          Plan.getConstantInt(cast<IntegerType>(TripCountTy)->getMask());
      VPValue *DistanceToMax =
          Builder.createSub(MaxUIntTripCount, TripCountVPV);

      // Don't execute the vector loop if (UMax - n) < (VF * UF).
      // FIXME: Should only check VF * UF, but currently checks Step=max(VF*UF,
      // minProfitableTripCount).
      TripCountCheck =
          Builder.createICmp(ICmpInst::ICMP_ULT, DistanceToMax, StepVPV, DL);
    } else {
      // TripCountCheck = false; folding the tail implies a positive vector
      // trip count.
    }
  } else {
    // TODO: Emit unconditional branch to vector preheader instead of
    // conditional branch with known condition.
    TripCount = SE.applyLoopGuards(TripCount, OrigLoop);
    // Check if the trip count is < the step.
    if (SE.isKnownPredicate(CmpPred, TripCount, Step)) {
      // TODO: Ensure step is at most the trip count when determining max VF
      // and UF, w/o tail folding.
      TripCountCheck = Plan.getTrue();
    } else if (!SE.isKnownPredicate(CmpInst::getInversePredicate(CmpPred),
                                    TripCount, Step)) {
      // Generate the minimum iteration check only if we cannot prove the
      // check is known to be true, or known to be false.
      VPValue *MinTripCountVPV = Builder.createExpandSCEV(Step);
      TripCountCheck = Builder.createICmp(
          CmpPred, TripCountVPV, MinTripCountVPV, DL, "min.iters.check");
    } // else step known to be < trip count, use TripCountCheck preset to false.
  }
  VPInstruction *Term =
      Builder.createNaryOp(VPInstruction::BranchOnCond, {TripCountCheck}, DL);
  if (MinItersBypassWeights) {
    MDBuilder MDB(Plan.getContext());
    MDNode *BranchWeights = MDB.createBranchWeights(
        ArrayRef(MinItersBypassWeights, 2), /*IsExpected=*/false);
    Term->setMetadata(LLVMContext::MD_prof, BranchWeights);
  }
}

void VPlanTransforms::addMinimumVectorEpilogueIterationCheck(
    VPlan &Plan, Value *TripCount, Value *VectorTripCount,
    bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF,
    unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE) {
  // Add the minimum iteration check for the epilogue vector loop.
  VPValue *TC = Plan.getOrAddLiveIn(TripCount);
  VPBuilder Builder(cast<VPBasicBlock>(Plan.getEntry()));
  VPValue *VFxUF = Builder.createExpandSCEV(SE.getElementCount(
      TripCount->getType(), (EpilogueVF * EpilogueUF), SCEV::FlagNUW));
  VPValue *Count = Builder.createSub(TC, Plan.getOrAddLiveIn(VectorTripCount),
                                     DebugLoc::getUnknown(), "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = RequiresScalarEpilogue ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
  auto *CheckMinIters = Builder.createICmp(
      P, Count, VFxUF, DebugLoc::getUnknown(), "min.epilog.iters.check");
  VPInstruction *Branch =
      Builder.createNaryOp(VPInstruction::BranchOnCond, CheckMinIters);

  // We assume the remaining `Count` is equally distributed in
  // [0, MainLoopStep), so the probability for `Count < EpilogueLoopStep`
  // should be
  //   min(MainLoopStep, EpilogueLoopStep) / MainLoopStep
  // TODO: Improve the estimate by taking the estimated trip count into
  // consideration.
  unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
  const uint32_t Weights[] = {EstimatedSkipCount,
                              MainLoopStep - EstimatedSkipCount};
  MDBuilder MDB(Plan.getContext());
  MDNode *BranchWeights =
      MDB.createBranchWeights(Weights, /*IsExpected=*/false);
  Branch->setMetadata(LLVMContext::MD_prof, BranchWeights);
}

/// Find and return the final select instruction of the FindIV result pattern
/// for the given \p BackedgeVal:
///   select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),
///          ComputeReductionResult(ReducedIV), Start.
static VPInstruction *findFindIVSelect(VPValue *BackedgeVal) {
  return cast<VPInstruction>(
      vputils::findRecipe(BackedgeVal, [BackedgeVal](VPRecipeBase *R) {
        auto *VPI = dyn_cast<VPInstruction>(R);
        return VPI &&
               matchFindIVResult(VPI, m_Specific(BackedgeVal), m_VPValue());
      }));
}

bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
  auto GetMinOrMaxCompareValue =
      [](VPReductionPHIRecipe *RedPhiR) -> VPValue * {
    auto *MinOrMaxR =
        dyn_cast_or_null<VPRecipeWithIRFlags>(RedPhiR->getBackedgeValue());
    if (!MinOrMaxR)
      return nullptr;

    // Check that MinOrMaxR is a VPWidenIntrinsicRecipe or VPReplicateRecipe
    // with an intrinsic that matches the reduction kind.
    Intrinsic::ID ExpectedIntrinsicID =
        getMinMaxReductionIntrinsicOp(RedPhiR->getRecurrenceKind());
    if (!match(MinOrMaxR, m_Intrinsic(ExpectedIntrinsicID)))
      return nullptr;

    if (MinOrMaxR->getOperand(0) == RedPhiR)
      return MinOrMaxR->getOperand(1);

    assert(MinOrMaxR->getOperand(1) == RedPhiR &&
           "Reduction phi operand expected");
    return MinOrMaxR->getOperand(0);
  };

  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  SmallVector<std::pair<VPReductionPHIRecipe *, VPValue *>>
      MinOrMaxNumReductionsToHandle;
  bool HasUnsupportedPhi = false;
  for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
    if (isa<VPCanonicalIVPHIRecipe, VPWidenIntOrFpInductionRecipe>(&R))
      continue;
    auto *Cur = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!Cur) {
      // TODO: Also support fixed-order recurrence phis.
      HasUnsupportedPhi = true;
      continue;
    }
    if (!RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
            Cur->getRecurrenceKind())) {
      HasUnsupportedPhi = true;
      continue;
    }

    VPValue *MinOrMaxOp = GetMinOrMaxCompareValue(Cur);
    if (!MinOrMaxOp)
      return false;

    MinOrMaxNumReductionsToHandle.emplace_back(Cur, MinOrMaxOp);
  }

  if (MinOrMaxNumReductionsToHandle.empty())
    return true;

  // We won't be able to resume execution in the scalar tail if there are
  // unsupported header phis or there is no scalar tail at all, due to
  // tail-folding.
  if (HasUnsupportedPhi || !Plan.hasScalarTail())
    return false;

  /// Check if the vector loop of \p Plan can early exit and restart execution
  /// of the last vector iteration in the scalar loop. This requires all
  /// recipes up to the early exit point to be side-effect free as they are
  /// re-executed. Currently we check that the loop is free of any recipe that
  /// may write to memory. Expected to operate on an early VPlan w/o nested
  /// regions.
  for (VPBlockBase *VPB : vp_depth_first_shallow(
           Plan.getVectorLoopRegion()->getEntryBasicBlock())) {
    auto *VPBB = cast<VPBasicBlock>(VPB);
    for (auto &R : *VPBB) {
      if (R.mayWriteToMemory() && !match(&R, m_BranchOnCount()))
        return false;
    }
  }

  VPBasicBlock *LatchVPBB = LoopRegion->getExitingBasicBlock();
  VPBuilder LatchBuilder(LatchVPBB->getTerminator());
  VPValue *AllNaNLanes = nullptr;
  SmallPtrSet<VPValue *, 2> RdxResults;
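  // Build a per-lane mask that is true if the compare operand of any handled
  // min/max reduction is NaN in the current iteration; any such lane forces an
  // early exit from the vector loop to the scalar loop.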
1227 for (const auto &[_, MinOrMaxOp] : MinOrMaxNumReductionsToHandle) {
1228 VPValue *RedNaNLanes =
1229 LatchBuilder.createFCmp(Pred: CmpInst::FCMP_UNO, A: MinOrMaxOp, B: MinOrMaxOp);
1230 AllNaNLanes = AllNaNLanes ? LatchBuilder.createOr(LHS: AllNaNLanes, RHS: RedNaNLanes)
1231 : RedNaNLanes;
1232 }
1233
1234 VPValue *AnyNaNLane =
1235 LatchBuilder.createNaryOp(Opcode: VPInstruction::AnyOf, Operands: {AllNaNLanes});
1236 VPBasicBlock *MiddleVPBB = Plan.getMiddleBlock();
1237 VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->begin());
1238 for (const auto &[RedPhiR, _] : MinOrMaxNumReductionsToHandle) {
1239 assert(RecurrenceDescriptor::isFPMinMaxNumRecurrenceKind(
1240 RedPhiR->getRecurrenceKind()) &&
1241 "unsupported reduction");
1242
1243 // If we exit early due to NaNs, compute the final reduction result based on
1244 // the reduction phi at the beginning of the last vector iteration.
1245 auto *RdxResult = vputils::findComputeReductionResult(PhiR: RedPhiR);
1246 assert(RdxResult && "must find a ComputeReductionResult");
1247
1248 auto *NewSel = MiddleBuilder.createSelect(Cond: AnyNaNLane, TrueVal: RedPhiR,
1249 FalseVal: RdxResult->getOperand(N: 0));
1250 RdxResult->setOperand(I: 0, New: NewSel);
1251 assert(!RdxResults.contains(RdxResult) && "RdxResult already used");
1252 RdxResults.insert(Ptr: RdxResult);
1253 }
1254
1255 auto *LatchExitingBranch = LatchVPBB->getTerminator();
1256 assert(match(LatchExitingBranch, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
1257 "Unexpected terminator");
1258 auto *IsLatchExitTaken = LatchBuilder.createICmp(
1259 Pred: CmpInst::ICMP_EQ, A: LatchExitingBranch->getOperand(N: 0),
1260 B: LatchExitingBranch->getOperand(N: 1));
1261 auto *AnyExitTaken = LatchBuilder.createOr(LHS: AnyNaNLane, RHS: IsLatchExitTaken);
1262 LatchBuilder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: AnyExitTaken);
1263 LatchExitingBranch->eraseFromParent();
1264
1265 // Update resume phis for inductions in the scalar preheader. If AnyNaNLane is
1266 // true, the resume from the start of the last vector iteration via the
1267 // canonical IV, otherwise from the original value.
1268 for (auto &R : Plan.getScalarPreheader()->phis()) {
1269 auto *ResumeR = cast<VPPhi>(Val: &R);
1270 VPValue *VecV = ResumeR->getOperand(N: 0);
1271 if (RdxResults.contains(Ptr: VecV))
1272 continue;
    if (auto *DerivedIV = dyn_cast<VPDerivedIVRecipe>(VecV)) {
      if (DerivedIV->getNumUsers() == 1 &&
          DerivedIV->getOperand(1) == &Plan.getVectorTripCount()) {
        auto *NewSel = MiddleBuilder.createSelect(
            AnyNaNLane, LoopRegion->getCanonicalIV(),
            &Plan.getVectorTripCount());
        DerivedIV->moveAfter(&*MiddleBuilder.getInsertPoint());
        DerivedIV->setOperand(1, NewSel);
        continue;
      }
    }
    // Bail out and abandon the current, partially modified, VPlan if we
    // encounter a resume phi that cannot be updated yet.
    if (VecV != &Plan.getVectorTripCount()) {
      LLVM_DEBUG(dbgs() << "Found resume phi we cannot update for VPlan with "
                           "FMaxNum/FMinNum reduction.\n");
      return false;
    }
    auto *NewSel = MiddleBuilder.createSelect(
        AnyNaNLane, LoopRegion->getCanonicalIV(), VecV);
    ResumeR->setOperand(0, NewSel);
  }

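  // When exiting early due to NaNs, the last vector iteration is re-executed
  // in the scalar loop, so the middle block must branch to the scalar
  // preheader rather than to the exit block.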
  auto *MiddleTerm = MiddleVPBB->getTerminator();
  MiddleBuilder.setInsertPoint(MiddleTerm);
  VPValue *MiddleCond = MiddleTerm->getOperand(0);
  VPValue *NewCond =
      MiddleBuilder.createAnd(MiddleCond, MiddleBuilder.createNot(AnyNaNLane));
  MiddleTerm->setOperand(0, NewCond);
  return true;
}

bool VPlanTransforms::handleFindLastReductions(VPlan &Plan) {
  if (Plan.hasScalarVFOnly())
    return false;

  // We want to create the following nodes:
  // vector.body:
  //   ...new WidenPHI recipe introduced to keep the mask value for the latest
  //   iteration where any lane was active.
  //   mask.phi = phi [ ir<false>, vector.ph ], [ vp<new.mask>, vector.body ]
  //   ...data.phi (a VPReductionPHIRecipe for a FindLast reduction) already
  //   exists, but needs updating to use 'new.data' for the backedge value.
  //   data.phi = phi ir<default.val>, vp<new.data>
  //
  //   ...'data' and 'compare' created by existing nodes...
  //
  //   ...new recipes introduced to determine whether to update the reduction
  //   values or keep the current one.
  //   any.active = i1 any-of ir<compare>
  //   new.mask = select vp<any.active>, ir<compare>, vp<mask.phi>
  //   new.data = select vp<any.active>, ir<data>, ir<data.phi>
  //
  // middle.block:
  //   ...extract-last-active replaces compute-reduction-result.
  //   result = extract-last-active vp<new.data>, vp<new.mask>, ir<default.val>

  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&Phi);
    if (!PhiR || !RecurrenceDescriptor::isFindLastRecurrenceKind(
                     PhiR->getRecurrenceKind()))
      continue;

    // Find the condition for the select.
    auto *SelectR = cast<VPSingleDefRecipe>(&PhiR->getBackedgeRecipe());
    VPValue *Cond = nullptr, *Op1 = nullptr, *Op2 = nullptr;
    if (!match(SelectR,
               m_Select(m_VPValue(Cond), m_VPValue(Op1), m_VPValue(Op2))))
      return false;

    // Add mask phi.
    VPBuilder Builder = VPBuilder::getToInsertAfter(PhiR);
    auto *MaskPHI = new VPWidenPHIRecipe(nullptr, /*Start=*/Plan.getFalse());
    Builder.insert(MaskPHI);

    // Add select for mask.
    Builder.setInsertPoint(SelectR);

    if (Op1 == PhiR) {
      // Normalize to selecting the data operand when the condition is true by
      // swapping operands and negating the condition.
      std::swap(Op1, Op2);
      Cond = Builder.createNot(Cond);
    }
    assert(Op2 == PhiR && "data value must be selected if Cond is true");

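    // AnyOf is true iff at least one lane's condition is active in this
    // iteration; only then are the carried mask and data values updated.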
    VPValue *AnyOf = Builder.createNaryOp(VPInstruction::AnyOf, {Cond});
    VPValue *MaskSelect = Builder.createSelect(AnyOf, Cond, MaskPHI);
    MaskPHI->addOperand(MaskSelect);

    // Replace select for data.
    VPValue *DataSelect =
        Builder.createSelect(AnyOf, Op1, Op2, SelectR->getDebugLoc());
    SelectR->replaceAllUsesWith(DataSelect);
    SelectR->eraseFromParent();

    // Find the final reduction computation and replace it with an
    // extract.last.active intrinsic.
    auto *RdxResult =
        vputils::findUserOf<VPInstruction::ComputeReductionResult>(DataSelect);
    // TODO: Handle tail-folding.
    if (!RdxResult)
      return false;
    Builder.setInsertPoint(RdxResult);
    auto *ExtractLastActive =
        Builder.createNaryOp(VPInstruction::ExtractLastActive,
                             {DataSelect, MaskSelect, PhiR->getStartValue()},
                             RdxResult->getDebugLoc());
    RdxResult->replaceAllUsesWith(ExtractLastActive);
    RdxResult->eraseFromParent();
  }

  return true;
}

bool VPlanTransforms::handleMultiUseReductions(VPlan &Plan) {
  for (auto &PhiR : make_early_inc_range(
           Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis())) {
    auto *MinOrMaxPhiR = dyn_cast<VPReductionPHIRecipe>(&PhiR);
    // TODO: check for multi-uses in VPlan directly.
    if (!MinOrMaxPhiR || !MinOrMaxPhiR->hasUsesOutsideReductionChain())
      continue;

    // MinOrMaxPhiR has users outside the reduction cycle in the loop. Check
    // if the only other user is a FindLastIV reduction. MinOrMaxPhiR must
    // have exactly 2 users:
    //  1) the min/max operation of the reduction cycle, and
    //  2) the compare of a FindLastIV reduction cycle. This compare must
    //     match the min/max operation, comparing MinOrMaxPhiR with the
    //     operand of the min/max operation, and must be used only by the
    //     select of the FindLastIV reduction cycle.
    RecurKind RdxKind = MinOrMaxPhiR->getRecurrenceKind();
    assert(
        RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind) &&
        "only min/max recurrences support users outside the reduction chain");

    auto *MinOrMaxOp =
        dyn_cast<VPRecipeWithIRFlags>(MinOrMaxPhiR->getBackedgeValue());
    if (!MinOrMaxOp)
      return false;

    // Check that MinOrMaxOp is a VPWidenIntrinsicRecipe or VPReplicateRecipe
    // with an intrinsic that matches the reduction kind.
    Intrinsic::ID ExpectedIntrinsicID = getMinMaxReductionIntrinsicOp(RdxKind);
    if (!match(MinOrMaxOp, m_Intrinsic(ExpectedIntrinsicID)))
      return false;

    // MinOrMaxOp must have 2 users: 1) MinOrMaxPhiR and 2)
    // ComputeReductionResult.
    assert(MinOrMaxOp->getNumUsers() == 2 &&
           "MinOrMaxOp must have exactly 2 users");
    VPValue *MinOrMaxOpValue = MinOrMaxOp->getOperand(0);
    if (MinOrMaxOpValue == MinOrMaxPhiR)
      MinOrMaxOpValue = MinOrMaxOp->getOperand(1);

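    // Find the compare of the FindLastIV chain that uses MinOrMaxPhiR. It
    // must compare the phi against the other operand of the min/max
    // operation and be used only once (by the FindLastIV select, checked
    // below).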
    VPValue *CmpOpA;
    VPValue *CmpOpB;
    CmpPredicate Pred;
    auto *Cmp = dyn_cast_or_null<VPRecipeWithIRFlags>(vputils::findUserOf(
        MinOrMaxPhiR, m_Cmp(Pred, m_VPValue(CmpOpA), m_VPValue(CmpOpB))));
    if (!Cmp || Cmp->getNumUsers() != 1 ||
        (CmpOpA != MinOrMaxOpValue && CmpOpB != MinOrMaxOpValue))
      return false;

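    // Canonicalize the predicate so it reads as
    // (MinOrMaxPhiR Pred MinOrMaxOpValue).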
    if (MinOrMaxOpValue != CmpOpB)
      Pred = CmpInst::getSwappedPredicate(Pred);

    // MinOrMaxPhiR must have exactly 2 users:
    //  * MinOrMaxOp,
    //  * Cmp (that's part of a FindLastIV chain).
    if (MinOrMaxPhiR->getNumUsers() != 2)
      return false;

    VPInstruction *MinOrMaxResult =
        vputils::findUserOf<VPInstruction::ComputeReductionResult>(MinOrMaxOp);
    assert(is_contained(MinOrMaxPhiR->users(), MinOrMaxOp) &&
           "one user must be MinOrMaxOp");
    assert(MinOrMaxResult &&
           "MinOrMaxOp must have a ComputeReductionResult user");

    // Cmp must be used by the select of a FindLastIV chain.
    VPValue *Sel = dyn_cast<VPSingleDefRecipe>(Cmp->getSingleUser());
    VPValue *IVOp, *FindIV;
    if (!Sel || Sel->getNumUsers() != 2 ||
        !match(Sel,
               m_Select(m_Specific(Cmp), m_VPValue(IVOp), m_VPValue(FindIV))))
      return false;

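    // If the FindLastIV phi is in the true arm of the select, normalize by
    // swapping the arms and inverting the predicate.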
    if (!isa<VPReductionPHIRecipe>(FindIV)) {
      std::swap(FindIV, IVOp);
      Pred = CmpInst::getInversePredicate(Pred);
    }

    auto *FindIVPhiR = dyn_cast<VPReductionPHIRecipe>(FindIV);
    if (!FindIVPhiR || !RecurrenceDescriptor::isFindLastIVRecurrenceKind(
                           FindIVPhiR->getRecurrenceKind()))
      return false;

    // TODO: Support cases where IVOp is the IV increment.
    if (!match(IVOp, m_TruncOrSelf(m_VPValue(IVOp))) ||
        !isa<VPWidenIntOrFpInductionRecipe>(IVOp))
      return false;

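    // The compare feeding the FindLastIV select must use the non-strict
    // predicate under which the running min/max is updated (or tied), so the
    // recorded IV is the last one at which the final min/max value occurs.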
    CmpInst::Predicate RdxPredicate = [RdxKind]() {
      switch (RdxKind) {
      case RecurKind::UMin:
        return CmpInst::ICMP_UGE;
      case RecurKind::UMax:
        return CmpInst::ICMP_ULE;
      case RecurKind::SMax:
        return CmpInst::ICMP_SLE;
      case RecurKind::SMin:
        return CmpInst::ICMP_SGE;
      default:
        llvm_unreachable("unhandled recurrence kind");
      }
    }();

    // TODO: Strict predicates need to find the first IV value for which the
    // predicate holds, not the last.
    if (Pred != RdxPredicate)
      return false;

    assert(!FindIVPhiR->isInLoop() && !FindIVPhiR->isOrdered() &&
           "cannot handle inloop/ordered reductions yet");

    // The reduction using MinOrMaxPhiR needs adjusting to compute the correct
    // result:
    //  1. We need to find the last IV for which the condition based on the
    //     min/max recurrence is true,
    //  2. Compare the partial min/max reduction result to its final value,
    //     and
    //  3. Select the lanes of the partial FindLastIV reductions which
    //     correspond to the lanes matching the min/max reduction result.
    //
    // For example, this transforms
    //   vp<%min.result> = compute-reduction-result ir<%min.val.next>
    //   vp<%iv.rdx> = compute-reduction-result (smax) vp<%min.idx.next>
    //   vp<%cmp> = icmp ne vp<%iv.rdx>, SENTINEL
    //   vp<%find.iv.result> = select vp<%cmp>, vp<%iv.rdx>, ir<0>
    //
    // into:
    //
    //   vp<%min.result> = compute-reduction-result ir<%min.val.next>
    //   vp<%final.min.cmp> = icmp eq ir<%min.val.next>, vp<%min.result>
    //   vp<%final.iv> = select vp<%final.min.cmp>, vp<%min.idx.next>, SENTINEL
    //   vp<%iv.rdx> = compute-reduction-result (smax) vp<%final.iv>
    //   vp<%cmp> = icmp ne vp<%iv.rdx>, SENTINEL
    //   vp<%find.iv.result> = select vp<%cmp>, vp<%iv.rdx>, ir<0>

    // Find the FindIV result pattern.
    auto *FindIVSelect = findFindIVSelect(FindIVPhiR->getBackedgeValue());
    auto *FindIVCmp = FindIVSelect->getOperand(0)->getDefiningRecipe();
    auto *FindIVRdxResult = cast<VPInstruction>(FindIVCmp->getOperand(0));
    assert(FindIVSelect->getParent() == MinOrMaxResult->getParent() &&
           "both results must be computed in the same block");
    MinOrMaxResult->moveBefore(*FindIVRdxResult->getParent(),
                               FindIVRdxResult->getIterator());

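    // Keep only the IV lanes whose min/max value equals the final reduction
    // result; all other lanes are replaced by the sentinel before computing
    // the FindLastIV result.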
    VPBuilder B(FindIVRdxResult);
    VPValue *MinOrMaxExiting = MinOrMaxResult->getOperand(0);
    auto *FinalMinOrMaxCmp =
        B.createICmp(CmpInst::ICMP_EQ, MinOrMaxExiting, MinOrMaxResult);
    VPValue *Sentinel = FindIVCmp->getOperand(1);
    VPValue *LastIVExiting = FindIVRdxResult->getOperand(0);
    auto *FinalIVSelect =
        B.createSelect(FinalMinOrMaxCmp, LastIVExiting, Sentinel);
    FindIVRdxResult->setOperand(0, FinalIVSelect);
  }
  return true;
}
