1//===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
/// This is the LLVM vectorization plan. It represents a candidate for
/// vectorization, allowing the planning and optimization of how to vectorize a
/// given loop before generating LLVM-IR.
/// The vectorizer uses vectorization plans to estimate the costs of potential
/// candidates and, if deemed profitable, to execute the chosen plan, generating
/// vector LLVM-IR code.
16///
17//===----------------------------------------------------------------------===//
18
19#include "VPlan.h"
20#include "LoopVectorizationPlanner.h"
21#include "VPlanCFG.h"
22#include "VPlanDominatorTree.h"
23#include "VPlanPatternMatch.h"
24#include "llvm/ADT/PostOrderIterator.h"
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/ADT/StringExtras.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/Analysis/DomTreeUpdater.h"
30#include "llvm/Analysis/LoopInfo.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/IRBuilder.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Value.h"
38#include "llvm/Support/Casting.h"
39#include "llvm/Support/CommandLine.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Support/GenericDomTreeConstruction.h"
42#include "llvm/Support/GraphWriter.h"
43#include "llvm/Support/raw_ostream.h"
44#include "llvm/Transforms/Utils/BasicBlockUtils.h"
45#include "llvm/Transforms/Utils/LoopVersioning.h"
46#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
47#include <cassert>
48#include <string>
49#include <vector>
50
51using namespace llvm;
52using namespace llvm::VPlanPatternMatch;
53
54namespace llvm {
55extern cl::opt<bool> EnableVPlanNativePath;
56}
57
58#define DEBUG_TYPE "vplan"
59
60#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
61raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
62 const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
63 VPSlotTracker SlotTracker(
64 (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
65 V.print(OS, SlotTracker);
66 return OS;
67}
68#endif
69
70Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
71 const ElementCount &VF) const {
72 switch (LaneKind) {
73 case VPLane::Kind::ScalableLast:
74 // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
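    // For example (illustrative, assuming the lane was created from the end
    // of the vector, e.g. via VPLane::getLaneFromEnd): with VF = <vscale x 4>
    // and an offset of 1 from the end, Lane is 3 and the emitted index is
    // RuntimeVF - 4 + 3, i.e. the last lane of the runtime vector length.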
75 return Builder.CreateSub(LHS: getRuntimeVF(B&: Builder, Ty: Builder.getInt32Ty(), VF),
76 RHS: Builder.getInt32(C: VF.getKnownMinValue() - Lane));
77 case VPLane::Kind::First:
78 return Builder.getInt32(C: Lane);
79 }
80 llvm_unreachable("Unknown lane kind");
81}
82
83VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
84 : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
85 if (Def)
86 Def->addDefinedValue(V: this);
87}
88
89VPValue::~VPValue() {
90 assert(Users.empty() && "trying to delete a VPValue with remaining users");
91 if (Def)
92 Def->removeDefinedValue(V: this);
93}
94
95#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
96void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
97 if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
98 R->print(OS, "", SlotTracker);
99 else
100 printAsOperand(OS, SlotTracker);
101}
102
103void VPValue::dump() const {
104 const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
105 VPSlotTracker SlotTracker(
106 (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
107 print(dbgs(), SlotTracker);
108 dbgs() << "\n";
109}
110
111void VPDef::dump() const {
112 const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
113 VPSlotTracker SlotTracker(
114 (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
115 print(dbgs(), "", SlotTracker);
116 dbgs() << "\n";
117}
118#endif
119
120VPRecipeBase *VPValue::getDefiningRecipe() {
121 return cast_or_null<VPRecipeBase>(Val: Def);
122}
123
124const VPRecipeBase *VPValue::getDefiningRecipe() const {
125 return cast_or_null<VPRecipeBase>(Val: Def);
126}
127
// Get the top-most entry block of \p Start. This is the entry block of the
// containing VPlan. This function is templated to support both const and
// non-const blocks.
130template <typename T> static T *getPlanEntry(T *Start) {
131 T *Next = Start;
132 T *Current = Start;
133 while ((Next = Next->getParent()))
134 Current = Next;
135
136 SmallSetVector<T *, 8> WorkList;
137 WorkList.insert(Current);
138
139 for (unsigned i = 0; i < WorkList.size(); i++) {
140 T *Current = WorkList[i];
141 if (Current->getNumPredecessors() == 0)
142 return Current;
143 auto &Predecessors = Current->getPredecessors();
144 WorkList.insert(Predecessors.begin(), Predecessors.end());
145 }
146
147 llvm_unreachable("VPlan without any entry node without predecessors");
148}
149
150VPlan *VPBlockBase::getPlan() { return getPlanEntry(Start: this)->Plan; }
151
152const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(Start: this)->Plan; }
153
154/// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
155const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
156 const VPBlockBase *Block = this;
157 while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Val: Block))
158 Block = Region->getEntry();
159 return cast<VPBasicBlock>(Val: Block);
160}
161
162VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
163 VPBlockBase *Block = this;
164 while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Val: Block))
165 Block = Region->getEntry();
166 return cast<VPBasicBlock>(Val: Block);
167}
168
169void VPBlockBase::setPlan(VPlan *ParentPlan) {
170 assert(
171 (ParentPlan->getEntry() == this || ParentPlan->getPreheader() == this) &&
172 "Can only set plan on its entry or preheader block.");
173 Plan = ParentPlan;
174}
175
176/// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
177const VPBasicBlock *VPBlockBase::getExitingBasicBlock() const {
178 const VPBlockBase *Block = this;
179 while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Val: Block))
180 Block = Region->getExiting();
181 return cast<VPBasicBlock>(Val: Block);
182}
183
184VPBasicBlock *VPBlockBase::getExitingBasicBlock() {
185 VPBlockBase *Block = this;
186 while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Val: Block))
187 Block = Region->getExiting();
188 return cast<VPBasicBlock>(Val: Block);
189}
190
191VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
192 if (!Successors.empty() || !Parent)
193 return this;
194 assert(Parent->getExiting() == this &&
195 "Block w/o successors not the exiting block of its parent.");
196 return Parent->getEnclosingBlockWithSuccessors();
197}
198
199VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
200 if (!Predecessors.empty() || !Parent)
201 return this;
202 assert(Parent->getEntry() == this &&
203 "Block w/o predecessors not the entry of its parent.");
204 return Parent->getEnclosingBlockWithPredecessors();
205}
206
207void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
208 for (VPBlockBase *Block : to_vector(Range: vp_depth_first_shallow(G: Entry)))
209 delete Block;
210}
211
212VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
213 iterator It = begin();
214 while (It != end() && It->isPhi())
215 It++;
216 return It;
217}
218
219VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
220 DominatorTree *DT, IRBuilderBase &Builder,
221 InnerLoopVectorizer *ILV, VPlan *Plan,
222 LLVMContext &Ctx)
223 : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
224 LVer(nullptr),
225 TypeAnalysis(Plan->getCanonicalIV()->getScalarType(), Ctx) {}
226
227Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
228 if (Def->isLiveIn())
229 return Def->getLiveInIRValue();
230
231 if (hasScalarValue(Def, Instance)) {
232 return Data
233 .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
234 }
235 if (!Instance.Lane.isFirstLane() &&
236 vputils::isUniformAfterVectorization(VPV: Def) &&
237 hasScalarValue(Def, Instance: {Instance.Part, VPLane::getFirstLane()})) {
238 return Data.PerPartScalars[Def][Instance.Part][0];
239 }
240
241 assert(hasVectorValue(Def, Instance.Part));
242 auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
243 if (!VecPart->getType()->isVectorTy()) {
244 assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
245 return VecPart;
246 }
247 // TODO: Cache created scalar values.
248 Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
249 auto *Extract = Builder.CreateExtractElement(Vec: VecPart, Idx: Lane);
250 // set(Def, Extract, Instance);
251 return Extract;
252}
253
254Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
255 if (NeedsScalar) {
256 assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def, Part) ||
257 !vputils::onlyFirstLaneUsed(Def) ||
258 (hasScalarValue(Def, VPIteration(Part, 0)) &&
259 Data.PerPartScalars[Def][Part].size() == 1)) &&
260 "Trying to access a single scalar per part but has multiple scalars "
261 "per part.");
262 return get(Def, Instance: VPIteration(Part, 0));
263 }
264
  // If values have been set for this Def, return the one relevant for \p Part.
266 if (hasVectorValue(Def, Part))
267 return Data.PerPartOutput[Def][Part];
268
269 auto GetBroadcastInstrs = [this, Def](Value *V) {
270 bool SafeToHoist = Def->isDefinedOutsideVectorRegions();
271 if (VF.isScalar())
272 return V;
273 // Place the code for broadcasting invariant variables in the new preheader.
274 IRBuilder<>::InsertPointGuard Guard(Builder);
275 if (SafeToHoist) {
276 BasicBlock *LoopVectorPreHeader = CFG.VPBB2IRBB[cast<VPBasicBlock>(
277 Val: Plan->getVectorLoopRegion()->getSinglePredecessor())];
278 if (LoopVectorPreHeader)
279 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
280 }
281
    // Broadcast the scalar into all locations in the vector.
284 Value *Shuf = Builder.CreateVectorSplat(EC: VF, V, Name: "broadcast");
285
286 return Shuf;
287 };
288
289 if (!hasScalarValue(Def, Instance: {Part, 0})) {
290 assert(Def->isLiveIn() && "expected a live-in");
291 if (Part != 0)
292 return get(Def, Part: 0);
293 Value *IRV = Def->getLiveInIRValue();
294 Value *B = GetBroadcastInstrs(IRV);
295 set(Def, V: B, Part);
296 return B;
297 }
298
299 Value *ScalarValue = get(Def, Instance: {Part, 0});
300 // If we aren't vectorizing, we can just copy the scalar map values over
301 // to the vector map.
302 if (VF.isScalar()) {
303 set(Def, V: ScalarValue, Part);
304 return ScalarValue;
305 }
306
307 bool IsUniform = vputils::isUniformAfterVectorization(VPV: Def);
308
309 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
310 // Check if there is a scalar value for the selected lane.
311 if (!hasScalarValue(Def, Instance: {Part, LastLane})) {
312 // At the moment, VPWidenIntOrFpInductionRecipes, VPScalarIVStepsRecipes and
313 // VPExpandSCEVRecipes can also be uniform.
314 assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
315 isa<VPScalarIVStepsRecipe>(Def->getDefiningRecipe()) ||
316 isa<VPExpandSCEVRecipe>(Def->getDefiningRecipe())) &&
317 "unexpected recipe found to be invariant");
318 IsUniform = true;
319 LastLane = 0;
320 }
321
322 auto *LastInst = cast<Instruction>(Val: get(Def, Instance: {Part, LastLane}));
323 // Set the insert point after the last scalarized instruction or after the
324 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
325 // will directly follow the scalar definitions.
326 auto OldIP = Builder.saveIP();
327 auto NewIP =
328 isa<PHINode>(Val: LastInst)
329 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
330 : std::next(x: BasicBlock::iterator(LastInst));
331 Builder.SetInsertPoint(&*NewIP);
332
333 // However, if we are vectorizing, we need to construct the vector values.
334 // If the value is known to be uniform after vectorization, we can just
335 // broadcast the scalar value corresponding to lane zero for each unroll
336 // iteration. Otherwise, we construct the vector values using
337 // insertelement instructions. Since the resulting vectors are stored in
338 // State, we will only generate the insertelements once.
339 Value *VectorValue = nullptr;
340 if (IsUniform) {
341 VectorValue = GetBroadcastInstrs(ScalarValue);
342 set(Def, V: VectorValue, Part);
343 } else {
344 // Initialize packing with insertelements to start from undef.
345 assert(!VF.isScalable() && "VF is assumed to be non scalable.");
346 Value *Undef = PoisonValue::get(T: VectorType::get(ElementType: LastInst->getType(), EC: VF));
347 set(Def, V: Undef, Part);
348 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
349 packScalarIntoVectorValue(Def, Instance: {Part, Lane});
350 VectorValue = get(Def, Part);
351 }
352 Builder.restoreIP(IP: OldIP);
353 return VectorValue;
354}
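// Illustrative summary of the per-part lookup above: a cached vector value is
// returned directly; a live-in without scalar values is broadcast once for
// part 0 and reused; a uniform-after-vectorization scalar is splatted; and
// otherwise the per-lane scalars are packed into a vector with insertelement
// instructions, e.g. for VF = 4 (types are for illustration only):
//   %v0 = insertelement <4 x i32> poison, i32 %s0, i32 0
//   %v1 = insertelement <4 x i32> %v0, i32 %s1, i32 1
//   ...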
355
356BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
357 VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
358 return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
359}
360
361void VPTransformState::addNewMetadata(Instruction *To,
362 const Instruction *Orig) {
363 // If the loop was versioned with memchecks, add the corresponding no-alias
364 // metadata.
365 if (LVer && (isa<LoadInst>(Val: Orig) || isa<StoreInst>(Val: Orig)))
366 LVer->annotateInstWithNoAlias(VersionedInst: To, OrigInst: Orig);
367}
368
369void VPTransformState::addMetadata(Value *To, Instruction *From) {
370 // No source instruction to transfer metadata from?
371 if (!From)
372 return;
373
374 if (Instruction *ToI = dyn_cast<Instruction>(Val: To)) {
375 propagateMetadata(I: ToI, VL: From);
376 addNewMetadata(To: ToI, Orig: From);
377 }
378}
379
380void VPTransformState::setDebugLocFrom(DebugLoc DL) {
381 const DILocation *DIL = DL;
382 // When a FSDiscriminator is enabled, we don't need to add the multiply
383 // factors to the discriminators.
384 if (DIL &&
385 Builder.GetInsertBlock()
386 ->getParent()
387 ->shouldEmitDebugInfoForProfiling() &&
388 !EnableFSDiscriminator) {
389 // FIXME: For scalable vectors, assume vscale=1.
390 auto NewDIL =
391 DIL->cloneByMultiplyingDuplicationFactor(DF: UF * VF.getKnownMinValue());
392 if (NewDIL)
393 Builder.SetCurrentDebugLocation(*NewDIL);
394 else
395 LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
396 << DIL->getFilename() << " Line: " << DIL->getLine());
397 } else
398 Builder.SetCurrentDebugLocation(DIL);
399}
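// Note: the duplication factor used above is UF * VF.getKnownMinValue(), e.g.
// with UF = 2 and VF = 4 the discriminator is scaled by 8; for scalable VFs
// this assumes vscale = 1, as noted in the FIXME above.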
400
401void VPTransformState::packScalarIntoVectorValue(VPValue *Def,
402 const VPIteration &Instance) {
403 Value *ScalarInst = get(Def, Instance);
404 Value *VectorValue = get(Def, Part: Instance.Part);
405 VectorValue = Builder.CreateInsertElement(
406 Vec: VectorValue, NewElt: ScalarInst, Idx: Instance.Lane.getAsRuntimeExpr(Builder, VF));
407 set(Def, V: VectorValue, Part: Instance.Part);
408}
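// The net effect of packScalarIntoVectorValue is a single insertelement, e.g.
// (types purely illustrative):
//   %vec.next = insertelement <4 x i32> %vec, i32 %scalar, i32 <lane index>
// where the lane index is materialized by VPLane::getAsRuntimeExpr.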
409
410BasicBlock *
411VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
412 // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last visited/created.
414 BasicBlock *PrevBB = CFG.PrevBB;
415 BasicBlock *NewBB = BasicBlock::Create(Context&: PrevBB->getContext(), Name: getName(),
416 Parent: PrevBB->getParent(), InsertBefore: CFG.ExitBB);
417 LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
418
419 // Hook up the new basic block to its predecessors.
420 for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
421 VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
422 auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
423 BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];
424
425 assert(PredBB && "Predecessor basic-block not found building successor.");
426 auto *PredBBTerminator = PredBB->getTerminator();
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
428
429 auto *TermBr = dyn_cast<BranchInst>(Val: PredBBTerminator);
430 if (isa<UnreachableInst>(Val: PredBBTerminator)) {
431 assert(PredVPSuccessors.size() == 1 &&
432 "Predecessor ending w/o branch must have single successor.");
433 DebugLoc DL = PredBBTerminator->getDebugLoc();
434 PredBBTerminator->eraseFromParent();
435 auto *Br = BranchInst::Create(IfTrue: NewBB, InsertBefore: PredBB);
436 Br->setDebugLoc(DL);
437 } else if (TermBr && !TermBr->isConditional()) {
438 TermBr->setSuccessor(idx: 0, NewSucc: NewBB);
439 } else {
440 // Set each forward successor here when it is created, excluding
441 // backedges. A backward successor is set when the branch is created.
442 unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
443 assert(!TermBr->getSuccessor(idx) &&
444 "Trying to reset an existing successor block.");
445 TermBr->setSuccessor(idx, NewSucc: NewBB);
446 }
447 CFG.DTU.applyUpdates(Updates: {{DominatorTree::Insert, PredBB, NewBB}});
448 }
449 return NewBB;
450}
451
452void VPIRBasicBlock::execute(VPTransformState *State) {
453 assert(getHierarchicalSuccessors().size() <= 2 &&
454 "VPIRBasicBlock can have at most two successors at the moment!");
455 State->Builder.SetInsertPoint(getIRBasicBlock()->getTerminator());
456 executeRecipes(State, BB: getIRBasicBlock());
457 if (getSingleSuccessor()) {
458 assert(isa<UnreachableInst>(getIRBasicBlock()->getTerminator()));
459 auto *Br = State->Builder.CreateBr(Dest: getIRBasicBlock());
460 Br->setOperand(i_nocapture: 0, Val_nocapture: nullptr);
461 getIRBasicBlock()->getTerminator()->eraseFromParent();
462 }
463
464 for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
465 VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
466 BasicBlock *PredBB = State->CFG.VPBB2IRBB[PredVPBB];
467 assert(PredBB && "Predecessor basic-block not found building successor.");
    LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
469
470 auto *PredBBTerminator = PredBB->getTerminator();
471 auto *TermBr = cast<BranchInst>(Val: PredBBTerminator);
472 // Set each forward successor here when it is created, excluding
473 // backedges. A backward successor is set when the branch is created.
474 const auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
475 unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
476 assert(!TermBr->getSuccessor(idx) &&
477 "Trying to reset an existing successor block.");
478 TermBr->setSuccessor(idx, NewSucc: IRBB);
479 State->CFG.DTU.applyUpdates(Updates: {{DominatorTree::Insert, PredBB, IRBB}});
480 }
481}
482
483void VPBasicBlock::execute(VPTransformState *State) {
484 bool Replica = State->Instance && !State->Instance->isFirstIteration();
485 VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
486 VPBlockBase *SingleHPred = nullptr;
487 BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.
488
489 auto IsLoopRegion = [](VPBlockBase *BB) {
490 auto *R = dyn_cast<VPRegionBlock>(Val: BB);
491 return R && !R->isReplicator();
492 };
493
494 // 1. Create an IR basic block.
495 if (PrevVPBB && /* A */
496 !((SingleHPred = getSingleHierarchicalPredecessor()) &&
497 SingleHPred->getExitingBasicBlock() == PrevVPBB &&
498 PrevVPBB->getSingleHierarchicalSuccessor() &&
499 (SingleHPred->getParent() == getEnclosingLoopRegion() &&
500 !IsLoopRegion(SingleHPred))) && /* B */
501 !(Replica && getPredecessors().empty())) { /* C */
502 // The last IR basic block is reused, as an optimization, in three cases:
503 // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
504 // B. when the current VPBB has a single (hierarchical) predecessor which
505 // is PrevVPBB and the latter has a single (hierarchical) successor which
506 // both are in the same non-replicator region; and
507 // C. when the current VPBB is an entry of a region replica - where PrevVPBB
508 // is the exiting VPBB of this region from a previous instance, or the
509 // predecessor of this region.
510
511 NewBB = createEmptyBasicBlock(CFG&: State->CFG);
512 State->Builder.SetInsertPoint(NewBB);
513 // Temporarily terminate with unreachable until CFG is rewired.
514 UnreachableInst *Terminator = State->Builder.CreateUnreachable();
    // Register NewBB in its loop. In innermost loops it's the same for all
    // BBs.
517 if (State->CurrentVectorLoop)
518 State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, LI&: *State->LI);
519 State->Builder.SetInsertPoint(Terminator);
520 State->CFG.PrevBB = NewBB;
521 }
522
523 // 2. Fill the IR basic block with IR instructions.
524 executeRecipes(State, BB: NewBB);
525}
526
527void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
528 for (VPRecipeBase &R : Recipes) {
529 for (auto *Def : R.definedValues())
530 Def->replaceAllUsesWith(New: NewValue);
531
532 for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
533 R.setOperand(I, New: NewValue);
534 }
535}
536
537void VPBasicBlock::executeRecipes(VPTransformState *State, BasicBlock *BB) {
538 LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
539 << " in BB:" << BB->getName() << '\n');
540
541 State->CFG.VPBB2IRBB[this] = BB;
542 State->CFG.PrevVPBB = this;
543
544 for (VPRecipeBase &Recipe : Recipes)
545 Recipe.execute(State&: *State);
546
547 LLVM_DEBUG(dbgs() << "LV: filled BB:" << *BB);
548}
549
550VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
551 assert((SplitAt == end() || SplitAt->getParent() == this) &&
552 "can only split at a position in the same block");
553
554 SmallVector<VPBlockBase *, 2> Succs(successors());
555 // First, disconnect the current block from its successors.
556 for (VPBlockBase *Succ : Succs)
557 VPBlockUtils::disconnectBlocks(From: this, To: Succ);
558
559 // Create new empty block after the block to split.
560 auto *SplitBlock = new VPBasicBlock(getName() + ".split");
561 VPBlockUtils::insertBlockAfter(NewBlock: SplitBlock, BlockPtr: this);
562
563 // Add successors for block to split to new block.
564 for (VPBlockBase *Succ : Succs)
565 VPBlockUtils::connectBlocks(From: SplitBlock, To: Succ);
566
567 // Finally, move the recipes starting at SplitAt to new block.
568 for (VPRecipeBase &ToMove :
569 make_early_inc_range(Range: make_range(x: SplitAt, y: this->end())))
570 ToMove.moveBefore(BB&: *SplitBlock, I: SplitBlock->end());
571
572 return SplitBlock;
573}
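// For example, splitting a block "loop.body" containing [A, B, R, C] at
// recipe R (block, recipe and successor names are illustrative only) yields:
//   loop.body:       [A, B] -> loop.body.split
//   loop.body.split: [R, C] -> succ0, succ1
// with the original successors succ0 and succ1 now attached to the new block.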
574
575VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() {
576 VPRegionBlock *P = getParent();
577 if (P && P->isReplicator()) {
578 P = P->getParent();
579 assert(!cast<VPRegionBlock>(P)->isReplicator() &&
580 "unexpected nested replicate regions");
581 }
582 return P;
583}
584
585static bool hasConditionalTerminator(const VPBasicBlock *VPBB) {
586 if (VPBB->empty()) {
587 assert(
588 VPBB->getNumSuccessors() < 2 &&
589 "block with multiple successors doesn't have a recipe as terminator");
590 return false;
591 }
592
593 const VPRecipeBase *R = &VPBB->back();
594 bool IsCondBranch = isa<VPBranchOnMaskRecipe>(Val: R) ||
595 match(V: R, P: m_BranchOnCond(Op0: m_VPValue())) ||
596 match(V: R, P: m_BranchOnCount(Op0: m_VPValue(), Op1: m_VPValue()));
597 (void)IsCondBranch;
598
599 if (VPBB->getNumSuccessors() >= 2 ||
600 (VPBB->isExiting() && !VPBB->getParent()->isReplicator())) {
601 assert(IsCondBranch && "block with multiple successors not terminated by "
602 "conditional branch recipe");
603
604 return true;
605 }
606
607 assert(
608 !IsCondBranch &&
609 "block with 0 or 1 successors terminated by conditional branch recipe");
610 return false;
611}
612
613VPRecipeBase *VPBasicBlock::getTerminator() {
614 if (hasConditionalTerminator(VPBB: this))
615 return &back();
616 return nullptr;
617}
618
619const VPRecipeBase *VPBasicBlock::getTerminator() const {
620 if (hasConditionalTerminator(VPBB: this))
621 return &back();
622 return nullptr;
623}
624
625bool VPBasicBlock::isExiting() const {
626 return getParent() && getParent()->getExitingBasicBlock() == this;
627}
628
629#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
630void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
631 if (getSuccessors().empty()) {
632 O << Indent << "No successors\n";
633 } else {
634 O << Indent << "Successor(s): ";
635 ListSeparator LS;
636 for (auto *Succ : getSuccessors())
637 O << LS << Succ->getName();
638 O << '\n';
639 }
640}
641
642void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
643 VPSlotTracker &SlotTracker) const {
644 O << Indent << getName() << ":\n";
645
646 auto RecipeIndent = Indent + " ";
647 for (const VPRecipeBase &Recipe : *this) {
648 Recipe.print(O, RecipeIndent, SlotTracker);
649 O << '\n';
650 }
651
652 printSuccessors(O, Indent);
653}
654#endif
655
656static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry);
657
// Clone the CFG for all nodes reachable from \p Entry; this includes cloning
659// the blocks and their recipes. Operands of cloned recipes will NOT be updated.
660// Remapping of operands must be done separately. Returns a pair with the new
661// entry and exiting blocks of the cloned region. If \p Entry isn't part of a
662// region, return nullptr for the exiting block.
663static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry) {
664 DenseMap<VPBlockBase *, VPBlockBase *> Old2NewVPBlocks;
665 VPBlockBase *Exiting = nullptr;
666 bool InRegion = Entry->getParent();
667 // First, clone blocks reachable from Entry.
668 for (VPBlockBase *BB : vp_depth_first_shallow(G: Entry)) {
669 VPBlockBase *NewBB = BB->clone();
670 Old2NewVPBlocks[BB] = NewBB;
671 if (InRegion && BB->getNumSuccessors() == 0) {
672 assert(!Exiting && "Multiple exiting blocks?");
673 Exiting = BB;
674 }
675 }
676 assert((!InRegion || Exiting) && "regions must have a single exiting block");
677
678 // Second, update the predecessors & successors of the cloned blocks.
679 for (VPBlockBase *BB : vp_depth_first_shallow(G: Entry)) {
680 VPBlockBase *NewBB = Old2NewVPBlocks[BB];
681 SmallVector<VPBlockBase *> NewPreds;
682 for (VPBlockBase *Pred : BB->getPredecessors()) {
683 NewPreds.push_back(Elt: Old2NewVPBlocks[Pred]);
684 }
685 NewBB->setPredecessors(NewPreds);
686 SmallVector<VPBlockBase *> NewSuccs;
687 for (VPBlockBase *Succ : BB->successors()) {
688 NewSuccs.push_back(Elt: Old2NewVPBlocks[Succ]);
689 }
690 NewBB->setSuccessors(NewSuccs);
691 }
692
693#if !defined(NDEBUG)
694 // Verify that the order of predecessors and successors matches in the cloned
695 // version.
696 for (const auto &[OldBB, NewBB] :
697 zip(vp_depth_first_shallow(Entry),
698 vp_depth_first_shallow(Old2NewVPBlocks[Entry]))) {
699 for (const auto &[OldPred, NewPred] :
700 zip(OldBB->getPredecessors(), NewBB->getPredecessors()))
701 assert(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors");
702
703 for (const auto &[OldSucc, NewSucc] :
704 zip(OldBB->successors(), NewBB->successors()))
705 assert(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors");
706 }
707#endif
708
709 return std::make_pair(x&: Old2NewVPBlocks[Entry],
710 y: Exiting ? Old2NewVPBlocks[Exiting] : nullptr);
711}
712
713VPRegionBlock *VPRegionBlock::clone() {
714 const auto &[NewEntry, NewExiting] = cloneFrom(Entry: getEntry());
715 auto *NewRegion =
716 new VPRegionBlock(NewEntry, NewExiting, getName(), isReplicator());
717 for (VPBlockBase *Block : vp_depth_first_shallow(G: NewEntry))
718 Block->setParent(NewRegion);
719 return NewRegion;
720}
721
722void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
723 for (VPBlockBase *Block : vp_depth_first_shallow(G: Entry))
724 // Drop all references in VPBasicBlocks and replace all uses with
725 // DummyValue.
726 Block->dropAllReferences(NewValue);
727}
728
729void VPRegionBlock::execute(VPTransformState *State) {
730 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
731 RPOT(Entry);
732
733 if (!isReplicator()) {
734 // Create and register the new vector loop.
735 Loop *PrevLoop = State->CurrentVectorLoop;
736 State->CurrentVectorLoop = State->LI->AllocateLoop();
737 BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()];
738 Loop *ParentLoop = State->LI->getLoopFor(BB: VectorPH);
739
740 // Insert the new loop into the loop nest and register the new basic blocks
741 // before calling any utilities such as SCEV that require valid LoopInfo.
742 if (ParentLoop)
743 ParentLoop->addChildLoop(NewChild: State->CurrentVectorLoop);
744 else
745 State->LI->addTopLevelLoop(New: State->CurrentVectorLoop);
746
747 // Visit the VPBlocks connected to "this", starting from it.
748 for (VPBlockBase *Block : RPOT) {
749 LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
750 Block->execute(State);
751 }
752
753 State->CurrentVectorLoop = PrevLoop;
754 return;
755 }
756
757 assert(!State->Instance && "Replicating a Region with non-null instance.");
758
759 // Enter replicating mode.
760 State->Instance = VPIteration(0, 0);
761
762 for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
763 State->Instance->Part = Part;
764 assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
765 for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
766 ++Lane) {
767 State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
768 // Visit the VPBlocks connected to \p this, starting from it.
769 for (VPBlockBase *Block : RPOT) {
770 LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
771 Block->execute(State);
772 }
773 }
774 }
775
776 // Exit replicating mode.
777 State->Instance.reset();
778}
779
780InstructionCost VPBasicBlock::cost(ElementCount VF, VPCostContext &Ctx) {
781 InstructionCost Cost = 0;
782 for (VPRecipeBase &R : Recipes)
783 Cost += R.cost(VF, Ctx);
784 return Cost;
785}
786
787InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) {
788 if (!isReplicator()) {
789 InstructionCost Cost = 0;
790 for (VPBlockBase *Block : vp_depth_first_shallow(G: getEntry()))
791 Cost += Block->cost(VF, Ctx);
792 InstructionCost BackedgeCost =
793 Ctx.TTI.getCFInstrCost(Opcode: Instruction::Br, CostKind: TTI::TCK_RecipThroughput);
794 LLVM_DEBUG(dbgs() << "Cost of " << BackedgeCost << " for VF " << VF
795 << ": vector loop backedge\n");
796 Cost += BackedgeCost;
797 return Cost;
798 }
799
800 // Compute the cost of a replicate region. Replicating isn't supported for
801 // scalable vectors, return an invalid cost for them.
802 // TODO: Discard scalable VPlans with replicate recipes earlier after
803 // construction.
804 if (VF.isScalable())
805 return InstructionCost::getInvalid();
806
  // First compute the cost of the conditionally executed recipes, then account
  // for the branching cost, except when the mask is a header mask or a uniform
  // condition.
810 using namespace llvm::VPlanPatternMatch;
811 VPBasicBlock *Then = cast<VPBasicBlock>(Val: getEntry()->getSuccessors()[0]);
812 InstructionCost ThenCost = Then->cost(VF, Ctx);
813
814 // For the scalar case, we may not always execute the original predicated
  // block. Thus, scale the block's cost by the probability of executing it.
816 if (VF.isScalar())
817 return ThenCost / getReciprocalPredBlockProb();
818
819 return ThenCost;
820}
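// Illustrative example for the scalar-VF scaling above, assuming
// getReciprocalPredBlockProb() models a 50% block-execution probability
// (i.e. returns 2): a predicated block whose recipes cost 8 contributes
// 8 / 2 = 4 to the region's cost at VF = 1.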
821
822#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
823void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
824 VPSlotTracker &SlotTracker) const {
825 O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
826 auto NewIndent = Indent + " ";
827 for (auto *BlockBase : vp_depth_first_shallow(Entry)) {
828 O << '\n';
829 BlockBase->print(O, NewIndent, SlotTracker);
830 }
831 O << Indent << "}\n";
832
833 printSuccessors(O, Indent);
834}
835#endif
836
837VPlan::~VPlan() {
838 for (auto &KV : LiveOuts)
839 delete KV.second;
840 LiveOuts.clear();
841
842 if (Entry) {
843 VPValue DummyValue;
844 for (VPBlockBase *Block : vp_depth_first_shallow(G: Entry))
845 Block->dropAllReferences(NewValue: &DummyValue);
846
847 VPBlockBase::deleteCFG(Entry);
848
849 Preheader->dropAllReferences(NewValue: &DummyValue);
850 delete Preheader;
851 }
852 for (VPValue *VPV : VPLiveInsToFree)
853 delete VPV;
854 if (BackedgeTakenCount)
855 delete BackedgeTakenCount;
856}
857
858VPlanPtr VPlan::createInitialVPlan(const SCEV *TripCount, ScalarEvolution &SE,
859 bool RequiresScalarEpilogueCheck,
860 bool TailFolded, Loop *TheLoop) {
861 VPIRBasicBlock *Entry = new VPIRBasicBlock(TheLoop->getLoopPreheader());
862 VPBasicBlock *VecPreheader = new VPBasicBlock("vector.ph");
863 auto Plan = std::make_unique<VPlan>(args&: Entry, args&: VecPreheader);
864 Plan->TripCount =
865 vputils::getOrCreateVPValueForSCEVExpr(Plan&: *Plan, Expr: TripCount, SE);
866 // Create VPRegionBlock, with empty header and latch blocks, to be filled
867 // during processing later.
868 VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body");
869 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
870 VPBlockUtils::insertBlockAfter(NewBlock: LatchVPBB, BlockPtr: HeaderVPBB);
871 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop",
872 false /*isReplicator*/);
873
874 VPBlockUtils::insertBlockAfter(NewBlock: TopRegion, BlockPtr: VecPreheader);
875 VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
876 VPBlockUtils::insertBlockAfter(NewBlock: MiddleVPBB, BlockPtr: TopRegion);
877
878 VPBasicBlock *ScalarPH = new VPBasicBlock("scalar.ph");
879 if (!RequiresScalarEpilogueCheck) {
880 VPBlockUtils::connectBlocks(From: MiddleVPBB, To: ScalarPH);
881 return Plan;
882 }
883
884 // If needed, add a check in the middle block to see if we have completed
885 // all of the iterations in the first vector loop. Three cases:
886 // 1) If (N - N%VF) == N, then we *don't* need to run the remainder.
887 // Thus if tail is to be folded, we know we don't need to run the
888 // remainder and we can set the condition to true.
889 // 2) If we require a scalar epilogue, there is no conditional branch as
890 // we unconditionally branch to the scalar preheader. Do nothing.
891 // 3) Otherwise, construct a runtime check.
892 BasicBlock *IRExitBlock = TheLoop->getUniqueExitBlock();
893 auto *VPExitBlock = new VPIRBasicBlock(IRExitBlock);
894 // The connection order corresponds to the operands of the conditional branch.
895 VPBlockUtils::insertBlockAfter(NewBlock: VPExitBlock, BlockPtr: MiddleVPBB);
896 VPBlockUtils::connectBlocks(From: MiddleVPBB, To: ScalarPH);
897
898 auto *ScalarLatchTerm = TheLoop->getLoopLatch()->getTerminator();
899 // Here we use the same DebugLoc as the scalar loop latch terminator instead
900 // of the corresponding compare because they may have ended up with
901 // different line numbers and we want to avoid awkward line stepping while
  // debugging, e.g. if the compare has a line number inside the loop.
903 VPBuilder Builder(MiddleVPBB);
904 VPValue *Cmp =
905 TailFolded
906 ? Plan->getOrAddLiveIn(V: ConstantInt::getTrue(
907 Ty: IntegerType::getInt1Ty(C&: TripCount->getType()->getContext())))
908 : Builder.createICmp(Pred: CmpInst::ICMP_EQ, A: Plan->getTripCount(),
909 B: &Plan->getVectorTripCount(),
910 DL: ScalarLatchTerm->getDebugLoc(), Name: "cmp.n");
911 Builder.createNaryOp(Opcode: VPInstruction::BranchOnCond, Operands: {Cmp},
912 DL: ScalarLatchTerm->getDebugLoc());
913 return Plan;
914}
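// Sketch of the skeleton created above (the VPIRBasicBlock wrapping the
// original loop preheader is omitted):
//
//   vector.ph
//       |
//   vector loop region { vector.body -> vector.latch }
//       |
//   middle.block
//     /        \
//  exit block   scalar.ph
//
// When no scalar epilogue check is required, middle.block only falls through
// to scalar.ph; otherwise it ends with a BranchOnCond on "cmp.n" (or on a
// true constant when tail folding).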
915
916void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
917 Value *CanonicalIVStartValue,
918 VPTransformState &State) {
919 // Check if the backedge taken count is needed, and if so build it.
920 if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
921 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
922 auto *TCMO = Builder.CreateSub(LHS: TripCountV,
923 RHS: ConstantInt::get(Ty: TripCountV->getType(), V: 1),
924 Name: "trip.count.minus.1");
925 BackedgeTakenCount->setUnderlyingValue(TCMO);
926 }
927
928 VectorTripCount.setUnderlyingValue(VectorTripCountV);
929
930 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
931 // FIXME: Model VF * UF computation completely in VPlan.
932 VFxUF.setUnderlyingValue(
933 createStepForVF(B&: Builder, Ty: TripCountV->getType(), VF: State.VF, Step: State.UF));
934
935 // When vectorizing the epilogue loop, the canonical induction start value
936 // needs to be changed from zero to the value after the main vector loop.
937 // FIXME: Improve modeling for canonical IV start values in the epilogue loop.
938 if (CanonicalIVStartValue) {
939 VPValue *VPV = getOrAddLiveIn(V: CanonicalIVStartValue);
940 auto *IV = getCanonicalIV();
941 assert(all_of(IV->users(),
942 [](const VPUser *U) {
943 return isa<VPScalarIVStepsRecipe>(U) ||
944 isa<VPScalarCastRecipe>(U) ||
945 isa<VPDerivedIVRecipe>(U) ||
946 cast<VPInstruction>(U)->getOpcode() ==
947 Instruction::Add;
948 }) &&
949 "the canonical IV should only be used by its increment or "
950 "ScalarIVSteps when resetting the start value");
951 IV->setOperand(I: 0, New: VPV);
952 }
953}
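// For example, with a fixed VF of 4 and UF of 2, VFxUF is materialized by
// createStepForVF as the constant 8 (of the trip-count type); for scalable
// VFs an equivalent vscale-based expression is emitted instead.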
954
955/// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
956/// VPBB are moved to the newly created VPIRBasicBlock. VPBB must have a single
957/// predecessor, which is rewired to the new VPIRBasicBlock. All successors of
958/// VPBB, if any, are rewired to the new VPIRBasicBlock.
959static void replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB) {
960 VPIRBasicBlock *IRMiddleVPBB = new VPIRBasicBlock(IRBB);
961 for (auto &R : make_early_inc_range(Range&: *VPBB))
962 R.moveBefore(BB&: *IRMiddleVPBB, I: IRMiddleVPBB->end());
963 VPBlockBase *PredVPBB = VPBB->getSinglePredecessor();
964 VPBlockUtils::disconnectBlocks(From: PredVPBB, To: VPBB);
965 VPBlockUtils::connectBlocks(From: PredVPBB, To: IRMiddleVPBB);
966 for (auto *Succ : to_vector(Range&: VPBB->getSuccessors())) {
967 VPBlockUtils::connectBlocks(From: IRMiddleVPBB, To: Succ);
968 VPBlockUtils::disconnectBlocks(From: VPBB, To: Succ);
969 }
970 delete VPBB;
971}
972
973/// Generate the code inside the preheader and body of the vectorized loop.
974/// Assumes a single pre-header basic-block was created for this. Introduce
975/// additional basic-blocks as needed, and fill them all.
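/// At a high level this (1) disconnects the vector preheader from the exit
/// block in both CFG and DT, (2) replaces the middle-block and
/// scalar-preheader VPBasicBlocks with VPIRBasicBlocks wrapping their
/// already-created IR blocks, (3) executes all blocks reachable from the
/// entry, (4) hooks up the latch (backedge) values of header phis in the
/// vector loop, and (5) flushes the accumulated DominatorTree updates.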
976void VPlan::execute(VPTransformState *State) {
977 // Initialize CFG state.
978 State->CFG.PrevVPBB = nullptr;
979 State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
980 BasicBlock *VectorPreHeader = State->CFG.PrevBB;
981 State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());
982
983 // Disconnect VectorPreHeader from ExitBB in both the CFG and DT.
984 cast<BranchInst>(Val: VectorPreHeader->getTerminator())->setSuccessor(idx: 0, NewSucc: nullptr);
985 State->CFG.DTU.applyUpdates(
986 Updates: {{DominatorTree::Delete, VectorPreHeader, State->CFG.ExitBB}});
987
988 // Replace regular VPBB's for the middle and scalar preheader blocks with
989 // VPIRBasicBlocks wrapping their IR blocks. The IR blocks are created during
990 // skeleton creation, so we can only create the VPIRBasicBlocks now during
991 // VPlan execution rather than earlier during VPlan construction.
992 BasicBlock *MiddleBB = State->CFG.ExitBB;
993 VPBasicBlock *MiddleVPBB =
994 cast<VPBasicBlock>(Val: getVectorLoopRegion()->getSingleSuccessor());
995 // Find the VPBB for the scalar preheader, relying on the current structure
  // when creating the middle block and its successors: if there's a single
  // successor, it must be the scalar preheader. Otherwise, the second
998 // successor is the scalar preheader.
999 BasicBlock *ScalarPh = MiddleBB->getSingleSuccessor();
1000 auto &MiddleSuccs = MiddleVPBB->getSuccessors();
1001 assert((MiddleSuccs.size() == 1 || MiddleSuccs.size() == 2) &&
1002 "middle block has unexpected successors");
1003 VPBasicBlock *ScalarPhVPBB = cast<VPBasicBlock>(
1004 Val: MiddleSuccs.size() == 1 ? MiddleSuccs[0] : MiddleSuccs[1]);
1005 assert(!isa<VPIRBasicBlock>(ScalarPhVPBB) &&
1006 "scalar preheader cannot be wrapped already");
1007 replaceVPBBWithIRVPBB(VPBB: ScalarPhVPBB, IRBB: ScalarPh);
1008 replaceVPBBWithIRVPBB(VPBB: MiddleVPBB, IRBB: MiddleBB);
1009
1010 // Disconnect the middle block from its single successor (the scalar loop
1011 // header) in both the CFG and DT. The branch will be recreated during VPlan
1012 // execution.
1013 auto *BrInst = new UnreachableInst(MiddleBB->getContext());
1014 BrInst->insertBefore(InsertPos: MiddleBB->getTerminator());
1015 MiddleBB->getTerminator()->eraseFromParent();
1016 State->CFG.DTU.applyUpdates(Updates: {{DominatorTree::Delete, MiddleBB, ScalarPh}});
1017
1018 // Generate code in the loop pre-header and body.
1019 for (VPBlockBase *Block : vp_depth_first_shallow(G: Entry))
1020 Block->execute(State);
1021
1022 VPBasicBlock *LatchVPBB = getVectorLoopRegion()->getExitingBasicBlock();
1023 BasicBlock *VectorLatchBB = State->CFG.VPBB2IRBB[LatchVPBB];
1024
1025 // Fix the latch value of canonical, reduction and first-order recurrences
1026 // phis in the vector loop.
1027 VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
1028 for (VPRecipeBase &R : Header->phis()) {
    // Skip phi-like recipes that generate their backedge values themselves.
1030 if (isa<VPWidenPHIRecipe>(Val: &R))
1031 continue;
1032
1033 if (isa<VPWidenPointerInductionRecipe>(Val: &R) ||
1034 isa<VPWidenIntOrFpInductionRecipe>(Val: &R)) {
1035 PHINode *Phi = nullptr;
1036 if (isa<VPWidenIntOrFpInductionRecipe>(Val: &R)) {
1037 Phi = cast<PHINode>(Val: State->get(Def: R.getVPSingleValue(), Part: 0));
1038 } else {
1039 auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(Val: &R);
1040 assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
1041 "recipe generating only scalars should have been replaced");
1042 auto *GEP = cast<GetElementPtrInst>(Val: State->get(Def: WidenPhi, Part: 0));
1043 Phi = cast<PHINode>(Val: GEP->getPointerOperand());
1044 }
1045
1046 Phi->setIncomingBlock(i: 1, BB: VectorLatchBB);
1047
1048 // Move the last step to the end of the latch block. This ensures
1049 // consistent placement of all induction updates.
1050 Instruction *Inc = cast<Instruction>(Val: Phi->getIncomingValue(i: 1));
1051 Inc->moveBefore(MovePos: VectorLatchBB->getTerminator()->getPrevNode());
1052 continue;
1053 }
1054
1055 auto *PhiR = cast<VPHeaderPHIRecipe>(Val: &R);
1056 // For canonical IV, first-order recurrences and in-order reduction phis,
1057 // only a single part is generated, which provides the last part from the
1058 // previous iteration. For non-ordered reductions all UF parts are
1059 // generated.
1060 bool SinglePartNeeded =
1061 isa<VPCanonicalIVPHIRecipe>(Val: PhiR) ||
1062 isa<VPFirstOrderRecurrencePHIRecipe, VPEVLBasedIVPHIRecipe>(Val: PhiR) ||
1063 (isa<VPReductionPHIRecipe>(Val: PhiR) &&
1064 cast<VPReductionPHIRecipe>(Val: PhiR)->isOrdered());
1065 bool NeedsScalar =
1066 isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(Val: PhiR) ||
1067 (isa<VPReductionPHIRecipe>(Val: PhiR) &&
1068 cast<VPReductionPHIRecipe>(Val: PhiR)->isInLoop());
1069 unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;
1070
1071 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
1072 Value *Phi = State->get(Def: PhiR, Part, NeedsScalar);
1073 Value *Val =
1074 State->get(Def: PhiR->getBackedgeValue(),
1075 Part: SinglePartNeeded ? State->UF - 1 : Part, NeedsScalar);
1076 cast<PHINode>(Val: Phi)->addIncoming(V: Val, BB: VectorLatchBB);
1077 }
1078 }
1079
1080 State->CFG.DTU.flush();
1081 assert(State->CFG.DTU.getDomTree().verify(
1082 DominatorTree::VerificationLevel::Fast) &&
1083 "DT not preserved correctly");
1084}
1085
1086InstructionCost VPlan::cost(ElementCount VF, VPCostContext &Ctx) {
1087 // For now only return the cost of the vector loop region, ignoring any other
1088 // blocks, like the preheader or middle blocks.
1089 return getVectorLoopRegion()->cost(VF, Ctx);
1090}
1091
1092#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1093void VPlan::printLiveIns(raw_ostream &O) const {
1094 VPSlotTracker SlotTracker(this);
1095
1096 if (VFxUF.getNumUsers() > 0) {
1097 O << "\nLive-in ";
1098 VFxUF.printAsOperand(O, SlotTracker);
1099 O << " = VF * UF";
1100 }
1101
1102 if (VectorTripCount.getNumUsers() > 0) {
1103 O << "\nLive-in ";
1104 VectorTripCount.printAsOperand(O, SlotTracker);
1105 O << " = vector-trip-count";
1106 }
1107
1108 if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
1109 O << "\nLive-in ";
1110 BackedgeTakenCount->printAsOperand(O, SlotTracker);
1111 O << " = backedge-taken count";
1112 }
1113
1114 O << "\n";
1115 if (TripCount->isLiveIn())
1116 O << "Live-in ";
1117 TripCount->printAsOperand(O, SlotTracker);
1118 O << " = original trip-count";
1119 O << "\n";
1120}
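// Illustrative output of the live-in preamble above (names are assigned by
// VPSlotTracker and depend on the plan):
//   Live-in vp<%0> = VF * UF
//   Live-in vp<%1> = vector-trip-count
//   Live-in ir<%n> = original trip-count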
1121
1122LLVM_DUMP_METHOD
1123void VPlan::print(raw_ostream &O) const {
1124 VPSlotTracker SlotTracker(this);
1125
1126 O << "VPlan '" << getName() << "' {";
1127
1128 printLiveIns(O);
1129
1130 if (!getPreheader()->empty()) {
1131 O << "\n";
1132 getPreheader()->print(O, "", SlotTracker);
1133 }
1134
1135 for (const VPBlockBase *Block : vp_depth_first_shallow(getEntry())) {
1136 O << '\n';
1137 Block->print(O, "", SlotTracker);
1138 }
1139
1140 if (!LiveOuts.empty())
1141 O << "\n";
1142 for (const auto &KV : LiveOuts) {
1143 KV.second->print(O, SlotTracker);
1144 }
1145
1146 O << "}\n";
1147}
1148
1149std::string VPlan::getName() const {
1150 std::string Out;
1151 raw_string_ostream RSO(Out);
1152 RSO << Name << " for ";
1153 if (!VFs.empty()) {
1154 RSO << "VF={" << VFs[0];
1155 for (ElementCount VF : drop_begin(VFs))
1156 RSO << "," << VF;
1157 RSO << "},";
1158 }
1159
1160 if (UFs.empty()) {
1161 RSO << "UF>=1";
1162 } else {
1163 RSO << "UF={" << UFs[0];
1164 for (unsigned UF : drop_begin(UFs))
1165 RSO << "," << UF;
1166 RSO << "}";
1167 }
1168
1169 return Out;
1170}
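// For example, a plan for VFs {4, 8} with no UFs chosen yet is named
// "<Name> for VF={4,8},UF>=1".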
1171
1172LLVM_DUMP_METHOD
1173void VPlan::printDOT(raw_ostream &O) const {
1174 VPlanPrinter Printer(O, *this);
1175 Printer.dump();
1176}
1177
1178LLVM_DUMP_METHOD
1179void VPlan::dump() const { print(dbgs()); }
1180#endif
1181
1182void VPlan::addLiveOut(PHINode *PN, VPValue *V) {
1183 assert(LiveOuts.count(PN) == 0 && "an exit value for PN already exists");
1184 LiveOuts.insert(KV: {PN, new VPLiveOut(PN, V)});
1185}
1186
1187static void remapOperands(VPBlockBase *Entry, VPBlockBase *NewEntry,
1188 DenseMap<VPValue *, VPValue *> &Old2NewVPValues) {
1189 // Update the operands of all cloned recipes starting at NewEntry. This
1190 // traverses all reachable blocks. This is done in two steps, to handle cycles
1191 // in PHI recipes.
1192 ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
1193 OldDeepRPOT(Entry);
1194 ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
1195 NewDeepRPOT(NewEntry);
1196 // First, collect all mappings from old to new VPValues defined by cloned
1197 // recipes.
1198 for (const auto &[OldBB, NewBB] :
1199 zip(t: VPBlockUtils::blocksOnly<VPBasicBlock>(Range: OldDeepRPOT),
1200 u: VPBlockUtils::blocksOnly<VPBasicBlock>(Range: NewDeepRPOT))) {
1201 assert(OldBB->getRecipeList().size() == NewBB->getRecipeList().size() &&
1202 "blocks must have the same number of recipes");
1203 for (const auto &[OldR, NewR] : zip(t&: *OldBB, u&: *NewBB)) {
1204 assert(OldR.getNumOperands() == NewR.getNumOperands() &&
1205 "recipes must have the same number of operands");
1206 assert(OldR.getNumDefinedValues() == NewR.getNumDefinedValues() &&
1207 "recipes must define the same number of operands");
1208 for (const auto &[OldV, NewV] :
1209 zip(t: OldR.definedValues(), u: NewR.definedValues()))
1210 Old2NewVPValues[OldV] = NewV;
1211 }
1212 }
1213
1214 // Update all operands to use cloned VPValues.
1215 for (VPBasicBlock *NewBB :
1216 VPBlockUtils::blocksOnly<VPBasicBlock>(Range: NewDeepRPOT)) {
1217 for (VPRecipeBase &NewR : *NewBB)
1218 for (unsigned I = 0, E = NewR.getNumOperands(); I != E; ++I) {
1219 VPValue *NewOp = Old2NewVPValues.lookup(Val: NewR.getOperand(N: I));
1220 NewR.setOperand(I, New: NewOp);
1221 }
1222 }
1223}
1224
1225VPlan *VPlan::duplicate() {
1226 // Clone blocks.
1227 VPBasicBlock *NewPreheader = Preheader->clone();
1228 const auto &[NewEntry, __] = cloneFrom(Entry);
1229
1230 // Create VPlan, clone live-ins and remap operands in the cloned blocks.
1231 auto *NewPlan = new VPlan(NewPreheader, cast<VPBasicBlock>(Val: NewEntry));
1232 DenseMap<VPValue *, VPValue *> Old2NewVPValues;
1233 for (VPValue *OldLiveIn : VPLiveInsToFree) {
1234 Old2NewVPValues[OldLiveIn] =
1235 NewPlan->getOrAddLiveIn(V: OldLiveIn->getLiveInIRValue());
1236 }
1237 Old2NewVPValues[&VectorTripCount] = &NewPlan->VectorTripCount;
1238 Old2NewVPValues[&VFxUF] = &NewPlan->VFxUF;
1239 if (BackedgeTakenCount) {
1240 NewPlan->BackedgeTakenCount = new VPValue();
1241 Old2NewVPValues[BackedgeTakenCount] = NewPlan->BackedgeTakenCount;
1242 }
1243 assert(TripCount && "trip count must be set");
1244 if (TripCount->isLiveIn())
1245 Old2NewVPValues[TripCount] =
1246 NewPlan->getOrAddLiveIn(V: TripCount->getLiveInIRValue());
1247 // else NewTripCount will be created and inserted into Old2NewVPValues when
1248 // TripCount is cloned. In any case NewPlan->TripCount is updated below.
1249
1250 remapOperands(Entry: Preheader, NewEntry: NewPreheader, Old2NewVPValues);
1251 remapOperands(Entry, NewEntry, Old2NewVPValues);
1252
1253 // Clone live-outs.
1254 for (const auto &[_, LO] : LiveOuts)
1255 NewPlan->addLiveOut(PN: LO->getPhi(), V: Old2NewVPValues[LO->getOperand(N: 0)]);
1256
1257 // Initialize remaining fields of cloned VPlan.
1258 NewPlan->VFs = VFs;
1259 NewPlan->UFs = UFs;
1260 // TODO: Adjust names.
1261 NewPlan->Name = Name;
1262 assert(Old2NewVPValues.contains(TripCount) &&
1263 "TripCount must have been added to Old2NewVPValues");
1264 NewPlan->TripCount = Old2NewVPValues[TripCount];
1265 return NewPlan;
1266}
1267
1268#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1269
1270Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
1271 return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
1272 Twine(getOrCreateBID(Block));
1273}
1274
1275Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
1276 const std::string &Name = Block->getName();
1277 if (!Name.empty())
1278 return Name;
1279 return "VPB" + Twine(getOrCreateBID(Block));
1280}
1281
1282void VPlanPrinter::dump() {
1283 Depth = 1;
1284 bumpIndent(0);
1285 OS << "digraph VPlan {\n";
1286 OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
1287 if (!Plan.getName().empty())
1288 OS << "\\n" << DOT::EscapeString(Plan.getName());
1289
1290 {
1291 // Print live-ins.
1292 std::string Str;
1293 raw_string_ostream SS(Str);
1294 Plan.printLiveIns(SS);
1295 SmallVector<StringRef, 0> Lines;
1296 StringRef(Str).rtrim('\n').split(Lines, "\n");
1297 for (auto Line : Lines)
1298 OS << DOT::EscapeString(Line.str()) << "\\n";
1299 }
1300
1301 OS << "\"]\n";
1302 OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
1303 OS << "edge [fontname=Courier, fontsize=30]\n";
1304 OS << "compound=true\n";
1305
1306 dumpBlock(Plan.getPreheader());
1307
1308 for (const VPBlockBase *Block : vp_depth_first_shallow(Plan.getEntry()))
1309 dumpBlock(Block);
1310
1311 OS << "}\n";
1312}
1313
1314void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
1315 if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
1316 dumpBasicBlock(BasicBlock);
1317 else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
1318 dumpRegion(Region);
1319 else
1320 llvm_unreachable("Unsupported kind of VPBlock.");
1321}
1322
1323void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
1324 bool Hidden, const Twine &Label) {
1325 // Due to "dot" we print an edge between two regions as an edge between the
  // exiting basic block and the entry basic block of the respective regions.
1327 const VPBlockBase *Tail = From->getExitingBasicBlock();
1328 const VPBlockBase *Head = To->getEntryBasicBlock();
1329 OS << Indent << getUID(Tail) << " -> " << getUID(Head);
1330 OS << " [ label=\"" << Label << '\"';
1331 if (Tail != From)
1332 OS << " ltail=" << getUID(From);
1333 if (Head != To)
1334 OS << " lhead=" << getUID(To);
1335 if (Hidden)
1336 OS << "; splines=none";
1337 OS << "]\n";
1338}
1339
1340void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
1341 auto &Successors = Block->getSuccessors();
1342 if (Successors.size() == 1)
1343 drawEdge(Block, Successors.front(), false, "");
1344 else if (Successors.size() == 2) {
1345 drawEdge(Block, Successors.front(), false, "T");
1346 drawEdge(Block, Successors.back(), false, "F");
1347 } else {
1348 unsigned SuccessorNumber = 0;
1349 for (auto *Successor : Successors)
1350 drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
1351 }
1352}
1353
1354void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
1355 // Implement dot-formatted dump by performing plain-text dump into the
1356 // temporary storage followed by some post-processing.
1357 OS << Indent << getUID(BasicBlock) << " [label =\n";
1358 bumpIndent(1);
1359 std::string Str;
1360 raw_string_ostream SS(Str);
1361 // Use no indentation as we need to wrap the lines into quotes ourselves.
1362 BasicBlock->print(SS, "", SlotTracker);
1363
1364 // We need to process each line of the output separately, so split
1365 // single-string plain-text dump.
1366 SmallVector<StringRef, 0> Lines;
1367 StringRef(Str).rtrim('\n').split(Lines, "\n");
1368
1369 auto EmitLine = [&](StringRef Line, StringRef Suffix) {
1370 OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
1371 };
1372
1373 // Don't need the "+" after the last line.
1374 for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
1375 EmitLine(Line, " +\n");
1376 EmitLine(Lines.back(), "\n");
1377
1378 bumpIndent(-1);
1379 OS << Indent << "]\n";
1380
1381 dumpEdges(BasicBlock);
1382}
1383
1384void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
1385 OS << Indent << "subgraph " << getUID(Region) << " {\n";
1386 bumpIndent(1);
1387 OS << Indent << "fontname=Courier\n"
1388 << Indent << "label=\""
1389 << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
1390 << DOT::EscapeString(Region->getName()) << "\"\n";
1391 // Dump the blocks of the region.
1392 assert(Region->getEntry() && "Region contains no inner blocks.");
1393 for (const VPBlockBase *Block : vp_depth_first_shallow(Region->getEntry()))
1394 dumpBlock(Block);
1395 bumpIndent(-1);
1396 OS << Indent << "}\n";
1397 dumpEdges(Region);
1398}
1399
1400void VPlanIngredient::print(raw_ostream &O) const {
1401 if (auto *Inst = dyn_cast<Instruction>(V)) {
1402 if (!Inst->getType()->isVoidTy()) {
1403 Inst->printAsOperand(O, false);
1404 O << " = ";
1405 }
1406 O << Inst->getOpcodeName() << " ";
1407 unsigned E = Inst->getNumOperands();
1408 if (E > 0) {
1409 Inst->getOperand(0)->printAsOperand(O, false);
1410 for (unsigned I = 1; I < E; ++I)
1411 Inst->getOperand(I)->printAsOperand(O << ", ", false);
1412 }
1413 } else // !Inst
1414 V->printAsOperand(O, false);
1415}
1416
1417#endif
1418
1419template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);
1420
1421void VPValue::replaceAllUsesWith(VPValue *New) {
1422 replaceUsesWithIf(New, ShouldReplace: [](VPUser &, unsigned) { return true; });
1423}
1424
1425void VPValue::replaceUsesWithIf(
1426 VPValue *New,
1427 llvm::function_ref<bool(VPUser &U, unsigned Idx)> ShouldReplace) {
1428 // Note that this early exit is required for correctness; the implementation
1429 // below relies on the number of users for this VPValue to decrease, which
1430 // isn't the case if this == New.
1431 if (this == New)
1432 return;
1433
1434 for (unsigned J = 0; J < getNumUsers();) {
1435 VPUser *User = Users[J];
1436 bool RemovedUser = false;
1437 for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) {
1438 if (User->getOperand(N: I) != this || !ShouldReplace(*User, I))
1439 continue;
1440
1441 RemovedUser = true;
1442 User->setOperand(I, New);
1443 }
1444 // If a user got removed after updating the current user, the next user to
1445 // update will be moved to the current position, so we only need to
1446 // increment the index if the number of users did not change.
1447 if (!RemovedUser)
1448 J++;
1449 }
1450}
1451
1452#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1453void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
1454 OS << Tracker.getOrCreateName(this);
1455}
1456
1457void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
1458 interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
1459 Op->printAsOperand(O, SlotTracker);
1460 });
1461}
1462#endif
1463
1464void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
1465 Old2NewTy &Old2New,
1466 InterleavedAccessInfo &IAI) {
1467 ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
1468 RPOT(Region->getEntry());
1469 for (VPBlockBase *Base : RPOT) {
1470 visitBlock(Block: Base, Old2New, IAI);
1471 }
1472}
1473
1474void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
1475 InterleavedAccessInfo &IAI) {
1476 if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Val: Block)) {
1477 for (VPRecipeBase &VPI : *VPBB) {
1478 if (isa<VPWidenPHIRecipe>(Val: &VPI))
1479 continue;
1480 assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
1481 auto *VPInst = cast<VPInstruction>(Val: &VPI);
1482
1483 auto *Inst = dyn_cast_or_null<Instruction>(Val: VPInst->getUnderlyingValue());
1484 if (!Inst)
1485 continue;
1486 auto *IG = IAI.getInterleaveGroup(Instr: Inst);
1487 if (!IG)
1488 continue;
1489
1490 auto NewIGIter = Old2New.find(Val: IG);
1491 if (NewIGIter == Old2New.end())
1492 Old2New[IG] = new InterleaveGroup<VPInstruction>(
1493 IG->getFactor(), IG->isReverse(), IG->getAlign());
1494
1495 if (Inst == IG->getInsertPos())
1496 Old2New[IG]->setInsertPos(VPInst);
1497
1498 InterleaveGroupMap[VPInst] = Old2New[IG];
1499 InterleaveGroupMap[VPInst]->insertMember(
1500 Instr: VPInst, Index: IG->getIndex(Instr: Inst),
1501 NewAlign: Align(IG->isReverse() ? (-1) * int(IG->getFactor())
1502 : IG->getFactor()));
1503 }
1504 } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Val: Block))
1505 visitRegion(Region, Old2New, IAI);
1506 else
1507 llvm_unreachable("Unsupported kind of VPBlock.");
1508}
1509
1510VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
1511 InterleavedAccessInfo &IAI) {
1512 Old2NewTy Old2New;
1513 visitRegion(Region: Plan.getVectorLoopRegion(), Old2New, IAI);
1514}
1515
1516void VPSlotTracker::assignName(const VPValue *V) {
1517 assert(!VPValue2Name.contains(V) && "VPValue already has a name!");
1518 auto *UV = V->getUnderlyingValue();
1519 if (!UV) {
1520 VPValue2Name[V] = (Twine("vp<%") + Twine(NextSlot) + ">").str();
1521 NextSlot++;
1522 return;
1523 }
1524
1525 // Use the name of the underlying Value, wrapped in "ir<>", and versioned by
1526 // appending ".Number" to the name if there are multiple uses.
1527 std::string Name;
1528 raw_string_ostream S(Name);
1529 UV->printAsOperand(O&: S, PrintType: false);
1530 assert(!Name.empty() && "Name cannot be empty.");
1531 std::string BaseName = (Twine("ir<") + Name + Twine(">")).str();
1532
1533 // First assign the base name for V.
1534 const auto &[A, _] = VPValue2Name.insert(KV: {V, BaseName});
  // Integer or FP constants with different types will result in the same
  // string due to stripping types.
1537 if (V->isLiveIn() && isa<ConstantInt, ConstantFP>(Val: UV))
1538 return;
1539
1540 // If it is already used by C > 0 other VPValues, increase the version counter
1541 // C and use it for V.
1542 const auto &[C, UseInserted] = BaseName2Version.insert(KV: {BaseName, 0});
1543 if (!UseInserted) {
1544 C->second++;
1545 A->second = (BaseName + Twine(".") + Twine(C->second)).str();
1546 }
1547}
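// For example, two distinct VPValues wrapping the same underlying IR value %x
// are named ir<%x> and ir<%x>.1 respectively, while VPValues without an
// underlying value get sequential slots vp<%0>, vp<%1>, ...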
1548
1549void VPSlotTracker::assignNames(const VPlan &Plan) {
1550 if (Plan.VFxUF.getNumUsers() > 0)
1551 assignName(V: &Plan.VFxUF);
1552 assignName(V: &Plan.VectorTripCount);
1553 if (Plan.BackedgeTakenCount)
1554 assignName(V: Plan.BackedgeTakenCount);
1555 for (VPValue *LI : Plan.VPLiveInsToFree)
1556 assignName(V: LI);
1557 assignNames(VPBB: Plan.getPreheader());
1558
1559 ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<const VPBlockBase *>>
1560 RPOT(VPBlockDeepTraversalWrapper<const VPBlockBase *>(Plan.getEntry()));
1561 for (const VPBasicBlock *VPBB :
1562 VPBlockUtils::blocksOnly<const VPBasicBlock>(Range: RPOT))
1563 assignNames(VPBB);
1564}
1565
1566void VPSlotTracker::assignNames(const VPBasicBlock *VPBB) {
1567 for (const VPRecipeBase &Recipe : *VPBB)
1568 for (VPValue *Def : Recipe.definedValues())
1569 assignName(V: Def);
1570}
1571
1572std::string VPSlotTracker::getOrCreateName(const VPValue *V) const {
1573 std::string Name = VPValue2Name.lookup(Val: V);
1574 if (!Name.empty())
1575 return Name;
1576
1577 // If no name was assigned, no VPlan was provided when creating the slot
1578 // tracker or it is not reachable from the provided VPlan. This can happen,
1579 // e.g. when trying to print a recipe that has not been inserted into a VPlan
1580 // in a debugger.
1581 // TODO: Update VPSlotTracker constructor to assign names to recipes &
1582 // VPValues not associated with a VPlan, instead of constructing names ad-hoc
1583 // here.
1584 const VPRecipeBase *DefR = V->getDefiningRecipe();
1585 (void)DefR;
1586 assert((!DefR || !DefR->getParent() || !DefR->getParent()->getPlan()) &&
1587 "VPValue defined by a recipe in a VPlan?");
1588
1589 // Use the underlying value's name, if there is one.
1590 if (auto *UV = V->getUnderlyingValue()) {
1591 std::string Name;
1592 raw_string_ostream S(Name);
1593 UV->printAsOperand(O&: S, PrintType: false);
1594 return (Twine("ir<") + Name + ">").str();
1595 }
1596
1597 return "<badref>";
1598}
1599
1600bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
1601 return all_of(Range: Def->users(),
1602 P: [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Op: Def); });
1603}
1604
1605bool vputils::onlyFirstPartUsed(const VPValue *Def) {
1606 return all_of(Range: Def->users(),
1607 P: [Def](const VPUser *U) { return U->onlyFirstPartUsed(Op: Def); });
1608}
1609
1610VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
1611 ScalarEvolution &SE) {
1612 if (auto *Expanded = Plan.getSCEVExpansion(S: Expr))
1613 return Expanded;
1614 VPValue *Expanded = nullptr;
1615 if (auto *E = dyn_cast<SCEVConstant>(Val: Expr))
1616 Expanded = Plan.getOrAddLiveIn(V: E->getValue());
1617 else if (auto *E = dyn_cast<SCEVUnknown>(Val: Expr))
1618 Expanded = Plan.getOrAddLiveIn(V: E->getValue());
1619 else {
1620 Expanded = new VPExpandSCEVRecipe(Expr, SE);
1621 Plan.getPreheader()->appendRecipe(Recipe: Expanded->getDefiningRecipe());
1622 }
1623 Plan.addSCEVExpansion(S: Expr, V: Expanded);
1624 return Expanded;
1625}
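// For example, a SCEVConstant becomes a plain live-in VPValue, whereas a
// non-trivial expression such as an AddRec is wrapped in a VPExpandSCEVRecipe
// appended to the preheader; in both cases the result is memoized so repeated
// queries for the same SCEV return the same VPValue.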
1626
1627bool vputils::isHeaderMask(VPValue *V, VPlan &Plan) {
1628 if (isa<VPActiveLaneMaskPHIRecipe>(Val: V))
1629 return true;
1630
1631 auto IsWideCanonicalIV = [](VPValue *A) {
1632 return isa<VPWidenCanonicalIVRecipe>(Val: A) ||
1633 (isa<VPWidenIntOrFpInductionRecipe>(Val: A) &&
1634 cast<VPWidenIntOrFpInductionRecipe>(Val: A)->isCanonical());
1635 };
1636
1637 VPValue *A, *B;
1638 if (match(V, P: m_ActiveLaneMask(Op0: m_VPValue(V&: A), Op1: m_VPValue(V&: B))))
1639 return B == Plan.getTripCount() &&
1640 (match(V: A, P: m_ScalarIVSteps(Op0: m_CanonicalIV(), Op1: m_SpecificInt(V: 1))) ||
1641 IsWideCanonicalIV(A));
1642
1643 return match(V, P: m_Binary<Instruction::ICmp>(Op0: m_VPValue(V&: A), Op1: m_VPValue(V&: B))) &&
1644 IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
1645}
1646