//===- VPlanAnalysis.cpp - Various Analyses working on VPlan ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9#include "VPlanAnalysis.h"
10#include "VPlan.h"
11#include "VPlanCFG.h"
12#include "VPlanDominatorTree.h"
13#include "VPlanHelpers.h"
14#include "VPlanPatternMatch.h"
15#include "llvm/ADT/PostOrderIterator.h"
16#include "llvm/ADT/TypeSwitch.h"
17#include "llvm/Analysis/ScalarEvolution.h"
18#include "llvm/Analysis/TargetTransformInfo.h"
19#include "llvm/IR/Instruction.h"
20#include "llvm/IR/PatternMatch.h"
21
22using namespace llvm;
23using namespace VPlanPatternMatch;
24
25#define DEBUG_TYPE "vplan"
26
27VPTypeAnalysis::VPTypeAnalysis(const VPlan &Plan) : Ctx(Plan.getContext()) {
28 if (auto LoopRegion = Plan.getVectorLoopRegion()) {
29 if (const auto *CanIV = dyn_cast<VPCanonicalIVPHIRecipe>(
30 Val: &LoopRegion->getEntryBasicBlock()->front())) {
31 CanonicalIVTy = CanIV->getScalarType();
32 return;
33 }
34 }
35
36 // If there's no canonical IV, retrieve the type from the trip count
37 // expression.
38 auto *TC = Plan.getTripCount();
39 if (auto *TCIRV = dyn_cast<VPIRValue>(Val: TC)) {
40 CanonicalIVTy = TCIRV->getType();
41 return;
42 }
43 CanonicalIVTy = cast<VPExpandSCEVRecipe>(Val: TC)->getSCEV()->getType();
44}
45
46Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPBlendRecipe *R) {
47 Type *ResTy = inferScalarType(V: R->getIncomingValue(Idx: 0));
48 for (unsigned I = 1, E = R->getNumIncomingValues(); I != E; ++I) {
49 VPValue *Inc = R->getIncomingValue(Idx: I);
50 assert(inferScalarType(Inc) == ResTy &&
51 "different types inferred for different incoming values");
52 CachedTypes[Inc] = ResTy;
53 }
54 return ResTy;
55}
56
57Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
58 // Set the result type from the first operand, check if the types for all
59 // other operands match and cache them.
60 auto SetResultTyFromOp = [this, R]() {
61 Type *ResTy = inferScalarType(V: R->getOperand(N: 0));
62 unsigned NumOperands = R->getNumOperandsWithoutMask();
63 for (unsigned Op = 1; Op != NumOperands; ++Op) {
64 VPValue *OtherV = R->getOperand(N: Op);
65 assert(inferScalarType(OtherV) == ResTy &&
66 "different types inferred for different operands");
67 CachedTypes[OtherV] = ResTy;
68 }
69 return ResTy;
70 };
71
72 unsigned Opcode = R->getOpcode();
73 if (Instruction::isBinaryOp(Opcode) || Instruction::isUnaryOp(Opcode))
74 return SetResultTyFromOp();
75
76 switch (Opcode) {
77 case Instruction::ExtractElement:
78 case Instruction::Freeze:
79 case Instruction::PHI:
80 case VPInstruction::Broadcast:
81 case VPInstruction::ComputeReductionResult:
82 case VPInstruction::ExitingIVValue:
83 case VPInstruction::ExtractLastLane:
84 case VPInstruction::ExtractPenultimateElement:
85 case VPInstruction::ExtractLastPart:
86 case VPInstruction::ExtractLastActive:
87 case VPInstruction::PtrAdd:
88 case VPInstruction::WidePtrAdd:
89 case VPInstruction::ReductionStartVector:
90 case VPInstruction::ResumeForEpilogue:
91 case VPInstruction::Reverse:
92 return inferScalarType(V: R->getOperand(N: 0));
93 case Instruction::Select: {
94 Type *ResTy = inferScalarType(V: R->getOperand(N: 1));
95 VPValue *OtherV = R->getOperand(N: 2);
96 assert(inferScalarType(OtherV) == ResTy &&
97 "different types inferred for different operands");
98 CachedTypes[OtherV] = ResTy;
99 return ResTy;
100 }
101 case Instruction::ICmp:
102 case Instruction::FCmp:
103 case VPInstruction::ActiveLaneMask:
104 assert(inferScalarType(R->getOperand(0)) ==
105 inferScalarType(R->getOperand(1)) &&
106 "different types inferred for different operands");
107 return IntegerType::get(C&: Ctx, NumBits: 1);
108 case VPInstruction::ComputeAnyOfResult:
109 return inferScalarType(V: R->getOperand(N: 1));
110 case VPInstruction::ExplicitVectorLength:
111 return Type::getIntNTy(C&: Ctx, N: 32);
112 case VPInstruction::FirstOrderRecurrenceSplice:
113 case VPInstruction::Not:
114 case VPInstruction::CalculateTripCountMinusVF:
115 case VPInstruction::CanonicalIVIncrementForPart:
116 case VPInstruction::AnyOf:
117 case VPInstruction::BuildStructVector:
118 case VPInstruction::BuildVector:
119 case VPInstruction::Unpack:
120 return SetResultTyFromOp();
121 case VPInstruction::ExtractLane:
122 return inferScalarType(V: R->getOperand(N: 1));
123 case VPInstruction::FirstActiveLane:
124 case VPInstruction::LastActiveLane:
125 return Type::getIntNTy(C&: Ctx, N: 64);
126 case VPInstruction::LogicalAnd:
127 case VPInstruction::LogicalOr:
128 assert(inferScalarType(R->getOperand(0))->isIntegerTy(1) &&
129 inferScalarType(R->getOperand(1))->isIntegerTy(1) &&
130 "LogicalAnd/Or operands should be bool");
131 return IntegerType::get(C&: Ctx, NumBits: 1);
132 case VPInstruction::BranchOnCond:
133 case VPInstruction::BranchOnTwoConds:
134 case VPInstruction::BranchOnCount:
135 case Instruction::Store:
136 return Type::getVoidTy(C&: Ctx);
137 case Instruction::Load:
138 return cast<LoadInst>(Val: R->getUnderlyingValue())->getType();
139 case Instruction::Alloca:
140 return cast<AllocaInst>(Val: R->getUnderlyingValue())->getType();
141 case Instruction::Call: {
142 unsigned CallIdx = R->getNumOperandsWithoutMask() - 1;
143 return cast<Function>(Val: R->getOperand(N: CallIdx)->getLiveInIRValue())
144 ->getReturnType();
145 }
146 case Instruction::GetElementPtr:
147 return inferScalarType(V: R->getOperand(N: 0));
148 case Instruction::ExtractValue:
149 return cast<ExtractValueInst>(Val: R->getUnderlyingValue())->getType();
150 default:
151 break;
152 }
153 // Type inference not implemented for opcode.
154 LLVM_DEBUG({
155 dbgs() << "LV: Found unhandled opcode for: ";
156 R->getVPSingleValue()->dump();
157 });
158 llvm_unreachable("Unhandled opcode!");
159}
160
161Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenRecipe *R) {
162 unsigned Opcode = R->getOpcode();
163 if (Instruction::isBinaryOp(Opcode) || Instruction::isShift(Opcode) ||
164 Instruction::isBitwiseLogicOp(Opcode)) {
165 Type *ResTy = inferScalarType(V: R->getOperand(N: 0));
166 assert(ResTy == inferScalarType(R->getOperand(1)) &&
167 "types for both operands must match for binary op");
168 CachedTypes[R->getOperand(N: 1)] = ResTy;
169 return ResTy;
170 }
171
172 switch (Opcode) {
173 case Instruction::ICmp:
174 case Instruction::FCmp:
175 return IntegerType::get(C&: Ctx, NumBits: 1);
176 case Instruction::FNeg:
177 case Instruction::Freeze:
178 return inferScalarType(V: R->getOperand(N: 0));
179 case Instruction::ExtractValue: {
180 assert(R->getNumOperands() == 2 && "expected single level extractvalue");
181 auto *StructTy = cast<StructType>(Val: inferScalarType(V: R->getOperand(N: 0)));
182 return StructTy->getTypeAtIndex(
183 N: cast<VPConstantInt>(Val: R->getOperand(N: 1))->getZExtValue());
184 }
185 case Instruction::Select: {
186 Type *ResTy = inferScalarType(V: R->getOperand(N: 1));
187 VPValue *OtherV = R->getOperand(N: 2);
188 assert(inferScalarType(OtherV) == ResTy &&
189 "different types inferred for different operands");
190 CachedTypes[OtherV] = ResTy;
191 return ResTy;
192 }
193 default:
194 break;
195 }
196
197 // Type inference not implemented for opcode.
198 LLVM_DEBUG({
199 dbgs() << "LV: Found unhandled opcode for: ";
200 R->getVPSingleValue()->dump();
201 });
202 llvm_unreachable("Unhandled opcode!");
203}
204
205Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
206 auto &CI = *cast<CallInst>(Val: R->getUnderlyingInstr());
207 return CI.getType();
208}
209
210Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
211 assert((isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(R)) &&
212 "Store recipes should not define any values");
213 return cast<LoadInst>(Val: &R->getIngredient())->getType();
214}
215
216Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPReplicateRecipe *R) {
217 unsigned Opcode = R->getUnderlyingInstr()->getOpcode();
218
219 if (Instruction::isBinaryOp(Opcode) || Instruction::isShift(Opcode) ||
220 Instruction::isBitwiseLogicOp(Opcode)) {
221 Type *ResTy = inferScalarType(V: R->getOperand(N: 0));
222 assert(ResTy == inferScalarType(R->getOperand(1)) &&
223 "inferred types for operands of binary op don't match");
224 CachedTypes[R->getOperand(N: 1)] = ResTy;
225 return ResTy;
226 }
227
228 if (Instruction::isCast(Opcode))
229 return R->getUnderlyingInstr()->getType();
230
231 switch (Opcode) {
232 case Instruction::Call: {
233 unsigned CallIdx = R->getNumOperands() - (R->isPredicated() ? 2 : 1);
234 return cast<Function>(Val: R->getOperand(N: CallIdx)->getLiveInIRValue())
235 ->getReturnType();
236 }
237 case Instruction::Select: {
238 Type *ResTy = inferScalarType(V: R->getOperand(N: 1));
239 assert(ResTy == inferScalarType(R->getOperand(2)) &&
240 "inferred types for operands of select op don't match");
241 CachedTypes[R->getOperand(N: 2)] = ResTy;
242 return ResTy;
243 }
244 case Instruction::ICmp:
245 case Instruction::FCmp:
246 return IntegerType::get(C&: Ctx, NumBits: 1);
247 case Instruction::Alloca:
248 case Instruction::ExtractValue:
249 return R->getUnderlyingInstr()->getType();
250 case Instruction::Freeze:
251 case Instruction::FNeg:
252 case Instruction::GetElementPtr:
253 return inferScalarType(V: R->getOperand(N: 0));
254 case Instruction::Load:
255 return cast<LoadInst>(Val: R->getUnderlyingInstr())->getType();
256 case Instruction::Store:
257 // FIXME: VPReplicateRecipes with store opcodes still define a result
258 // VPValue, so we need to handle them here. Remove the code here once this
259 // is modeled accurately in VPlan.
260 return Type::getVoidTy(C&: Ctx);
261 default:
262 break;
263 }
264 // Type inference not implemented for opcode.
265 LLVM_DEBUG({
266 dbgs() << "LV: Found unhandled opcode for: ";
267 R->getVPSingleValue()->dump();
268 });
269 llvm_unreachable("Unhandled opcode");
270}
271
272Type *VPTypeAnalysis::inferScalarType(const VPValue *V) {
273 if (Type *CachedTy = CachedTypes.lookup(Val: V))
274 return CachedTy;
275
276 if (auto *IRV = dyn_cast<VPIRValue>(Val: V))
277 return IRV->getType();
278
279 if (isa<VPSymbolicValue>(Val: V)) {
280 // All VPValues without any underlying IR value (like the vector trip count
281 // or the backedge-taken count) have the same type as the canonical IV.
282 return CanonicalIVTy;
283 }
284
285 Type *ResultTy =
286 TypeSwitch<const VPRecipeBase *, Type *>(V->getDefiningRecipe())
287 .Case<VPActiveLaneMaskPHIRecipe, VPCanonicalIVPHIRecipe,
288 VPFirstOrderRecurrencePHIRecipe, VPReductionPHIRecipe,
289 VPWidenPointerInductionRecipe, VPCurrentIterationPHIRecipe>(
290 caseFn: [this](const auto *R) {
291 // Handle header phi recipes, except VPWidenIntOrFpInduction
292 // which needs special handling due it being possibly truncated.
293 // TODO: consider inferring/caching type of siblings, e.g.,
294 // backedge value, here and in cases below.
295 return inferScalarType(V: R->getStartValue());
296 })
297 .Case<VPWidenIntOrFpInductionRecipe, VPDerivedIVRecipe>(
298 caseFn: [](const auto *R) { return R->getScalarType(); })
299 .Case<VPReductionRecipe, VPPredInstPHIRecipe, VPWidenPHIRecipe,
300 VPScalarIVStepsRecipe, VPWidenGEPRecipe, VPVectorPointerRecipe,
301 VPVectorEndPointerRecipe, VPWidenCanonicalIVRecipe>(
302 caseFn: [this](const VPRecipeBase *R) {
303 return inferScalarType(V: R->getOperand(N: 0));
304 })
305 // VPInstructionWithType must be handled before VPInstruction.
306 .Case<VPInstructionWithType, VPWidenIntrinsicRecipe,
307 VPWidenCastRecipe>(
308 caseFn: [](const auto *R) { return R->getResultType(); })
309 .Case<VPBlendRecipe, VPInstruction, VPWidenRecipe, VPReplicateRecipe,
310 VPWidenCallRecipe, VPWidenMemoryRecipe>(
311 caseFn: [this](const auto *R) { return inferScalarTypeForRecipe(R); })
312 .Case(caseFn: [V](const VPInterleaveBase *R) {
313 // TODO: Use info from interleave group.
314 return V->getUnderlyingValue()->getType();
315 })
316 .Case(caseFn: [](const VPExpandSCEVRecipe *R) {
317 return R->getSCEV()->getType();
318 })
319 .Case(caseFn: [this](const VPReductionRecipe *R) {
320 return inferScalarType(V: R->getChainOp());
321 })
322 .Case(caseFn: [this](const VPExpressionRecipe *R) {
323 return inferScalarType(V: R->getOperandOfResultType());
324 });
325
326 assert(ResultTy && "could not infer type for the given VPValue");
327 CachedTypes[V] = ResultTy;
328 return ResultTy;
329}
330
331void llvm::collectEphemeralRecipesForVPlan(
332 VPlan &Plan, DenseSet<VPRecipeBase *> &EphRecipes) {
333 // First, collect seed recipes which are operands of assumes.
334 SmallVector<VPRecipeBase *> Worklist;
335 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
336 Range: vp_depth_first_deep(G: Plan.getVectorLoopRegion()->getEntry()))) {
337 for (VPRecipeBase &R : *VPBB) {
338 auto *RepR = dyn_cast<VPReplicateRecipe>(Val: &R);
339 if (!RepR || !match(V: RepR, P: m_Intrinsic<Intrinsic::assume>()))
340 continue;
341 Worklist.push_back(Elt: RepR);
342 EphRecipes.insert(V: RepR);
343 }
344 }
345
346 // Process operands of candidates in worklist and add them to the set of
347 // ephemeral recipes, if they don't have side-effects and are only used by
348 // other ephemeral recipes.
349 while (!Worklist.empty()) {
350 VPRecipeBase *Cur = Worklist.pop_back_val();
351 for (VPValue *Op : Cur->operands()) {
352 auto *OpR = Op->getDefiningRecipe();
353 if (!OpR || OpR->mayHaveSideEffects() || EphRecipes.contains(V: OpR))
354 continue;
355 if (any_of(Range: Op->users(), P: [EphRecipes](VPUser *U) {
356 auto *UR = dyn_cast<VPRecipeBase>(Val: U);
357 return !UR || !EphRecipes.contains(V: UR);
358 }))
359 continue;
360 EphRecipes.insert(V: OpR);
361 Worklist.push_back(Elt: OpR);
362 }
363 }
364}
365
// Explicitly instantiate the generic dominator-tree construction algorithm
// for the VPlan CFG (forward dominators over VPBlockBase).
template void DomTreeBuilder::Calculate<DominatorTreeBase<VPBlockBase, false>>(
    DominatorTreeBase<VPBlockBase, false> &DT);
368
369bool VPDominatorTree::properlyDominates(const VPRecipeBase *A,
370 const VPRecipeBase *B) {
371 if (A == B)
372 return false;
373
374 auto LocalComesBefore = [](const VPRecipeBase *A, const VPRecipeBase *B) {
375 for (auto &R : *A->getParent()) {
376 if (&R == A)
377 return true;
378 if (&R == B)
379 return false;
380 }
381 llvm_unreachable("recipe not found");
382 };
383 const VPBlockBase *ParentA = A->getParent();
384 const VPBlockBase *ParentB = B->getParent();
385 if (ParentA == ParentB)
386 return LocalComesBefore(A, B);
387
388#ifndef NDEBUG
389 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
390 VPRegionBlock *Region = R->getRegion();
391 if (Region && Region->isReplicator()) {
392 assert(Region->getNumSuccessors() == 1 &&
393 Region->getNumPredecessors() == 1 && "Expected SESE region!");
394 assert(R->getParent()->size() == 1 &&
395 "A recipe in an original replicator region must be the only "
396 "recipe in its block");
397 return Region;
398 }
399 return nullptr;
400 };
401 assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(A)) &&
402 "No replicate regions expected at this point");
403 assert(!GetReplicateRegion(const_cast<VPRecipeBase *>(B)) &&
404 "No replicate regions expected at this point");
405#endif
406 return Base::properlyDominates(A: ParentA, B: ParentB);
407}
408
409bool VPRegisterUsage::exceedsMaxNumRegs(const TargetTransformInfo &TTI,
410 unsigned OverrideMaxNumRegs) const {
411 return any_of(Range: MaxLocalUsers, P: [&TTI, &OverrideMaxNumRegs](auto &LU) {
412 return LU.second > (OverrideMaxNumRegs > 0
413 ? OverrideMaxNumRegs
414 : TTI.getNumberOfRegisters(ClassID: LU.first));
415 });
416}
417
// Estimate per-VF register pressure for a VPlan via a linear-scan live-
// interval analysis over the recipes of the vector loop region. Returns one
// VPRegisterUsage (max local users + loop-invariant users, per register
// class) for each candidate VF.
SmallVector<VPRegisterUsage, 8> llvm::calculateRegisterUsageForPlan(
    VPlan &Plan, ArrayRef<ElementCount> VFs, const TargetTransformInfo &TTI,
    const SmallPtrSetImpl<const Value *> &ValuesToIgnore) {
  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // VPValue that is the key.
  using IntervalMap = SmallDenseMap<VPValue *, unsigned, 16>;

  // Maps indices to recipes.
  SmallVector<VPRecipeBase *, 64> Idx2Recipe;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of VPValues that are used in the loop.
  SmallPtrSet<VPValue *, 8> Ends;
  // Saves the list of values that are used in the loop but are defined outside
  // the loop (not including non-recipe values such as arguments and
  // constants). The vector trip count is always treated as loop-invariant.
  SmallSetVector<VPValue *, 8> LoopInvariants;
  LoopInvariants.insert(&Plan.getVectorTripCount());

  // We scan the loop in a topological order in order and assign a number to
  // each recipe. We use RPO to ensure that defs are met before their users. We
  // assume that each recipe that has in-loop users starts an interval. We
  // record every time that an in-loop value is used, so we have a list of the
  // first occurrences of each recipe and last occurrence of each VPValue.
  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      LoopRegion);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    // Stop once the traversal leaves the loop region (blocks with no parent
    // region come after it in this RPO).
    if (!VPBB->getParent())
      break;
    for (VPRecipeBase &R : *VPBB) {
      Idx2Recipe.push_back(&R);

      // Save the end location of each USE.
      for (VPValue *U : R.operands()) {
        auto *DefR = U->getDefiningRecipe();

        // Ignore non-recipe values such as arguments, constants, etc.
        // FIXME: Might need some motivation why these values are ignored. If
        // for example an argument is used inside the loop it will increase the
        // register pressure (so shouldn't we add it to LoopInvariants).
        auto *IRV = dyn_cast<VPIRValue>(U);
        if (!DefR && (!IRV || !isa<Instruction>(IRV->getValue())))
          continue;

        // If this recipe is outside the loop then record it and continue.
        if (!DefR) {
          LoopInvariants.insert(U);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[U] = Idx2Recipe.size();
        Ends.insert(U);
      }
    }
    if (VPBB == LoopRegion->getExiting()) {
      // VPWidenIntOrFpInductionRecipes are used implicitly at the end of the
      // exiting block, where their increment will get materialized eventually.
      for (auto &R : LoopRegion->getEntryBasicBlock()->phis()) {
        if (auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R)) {
          EndPoint[WideIV] = Idx2Recipe.size();
          Ends.insert(WideIV);
        }
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  using VPValueList = SmallVector<VPValue *, 2>;
  SmallDenseMap<unsigned, VPValueList, 16> TransposeEnds;

  // Next, we transpose the EndPoints into a multi map that holds the list of
  // intervals that *end* at a specific location.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<VPValue *, 8> OpenIntervals;
  SmallVector<VPRegisterUsage, 8> RUs(VFs.size());
  // Per-VF, per-register-class running maximum of simultaneously live values.
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  VPTypeAnalysis TypeInfo(Plan);

  // Number of registers needed to hold one value of type Ty widened to VF
  // lanes; 0 for types that cannot be vectorized at this VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty) ||
        (VF.isScalable() &&
         !TTICapture.isElementTypeLegalForScalableVector(Ty)))
      return 0;
    return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
  };

  // We scan the instructions linearly and record each time that a new interval
  // starts, by placing it in a set. If we find this value in TransposeEnds then
  // we remove it from the set. The max register usage is the maximum register
  // usage of the recipes of the set.
  for (unsigned int Idx = 0, Sz = Idx2Recipe.size(); Idx < Sz; ++Idx) {
    VPRecipeBase *R = Idx2Recipe[Idx];

    // Remove all of the VPValues that end at this location.
    VPValueList &List = TransposeEnds[Idx];
    for (VPValue *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore recipes that are never used within the loop and do not have side
    // effects.
    if (none_of(R->definedValues(),
                [&Ends](VPValue *Def) { return Ends.count(Def); }) &&
        !R->mayHaveSideEffects())
      continue;

    // Skip recipes for ignored values.
    // TODO: Should mark recipes for ephemeral values that cannot be removed
    // explicitly in VPlan.
    if (isa<VPSingleDefRecipe>(R) &&
        ValuesToIgnore.contains(
            cast<VPSingleDefRecipe>(R)->getUnderlyingValue()))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned J = 0, E = VFs.size(); J < E; ++J) {
      // Count the number of registers used, per register class, given all open
      // intervals.
      // Note that elements in this SmallMapVector will be default constructed
      // as 0. So we can use "RegUsage[ClassID] += n" in the code below even if
      // there is no previous entry for ClassID.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      for (auto *VPV : OpenIntervals) {
        // Skip artificial values or values that weren't present in the original
        // loop.
        // TODO: Remove skipping values that weren't present in the original
        // loop after removing the legacy
        // LoopVectorizationCostModel::calculateRegisterUsage
        if (isa<VPVectorPointerRecipe, VPVectorEndPointerRecipe,
                VPBranchOnMaskRecipe>(VPV) ||
            match(VPV, m_ExtractLastPart(m_VPValue())))
          continue;

        // Values that stay scalar (scalar VF, inherently scalar recipes, or
        // values with only scalar uses) occupy a single scalar register.
        if (VFs[J].isScalar() ||
            isa<VPCanonicalIVPHIRecipe, VPReplicateRecipe, VPDerivedIVRecipe,
                VPCurrentIterationPHIRecipe, VPScalarIVStepsRecipe>(VPV) ||
            (isa<VPInstruction>(VPV) && vputils::onlyScalarValuesUsed(VPV)) ||
            (isa<VPReductionPHIRecipe>(VPV) &&
             (cast<VPReductionPHIRecipe>(VPV))->isInLoop())) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, TypeInfo.inferScalarType(VPV));
          // FIXME: The target might use more than one register for the type
          // even in the scalar case.
          RegUsage[ClassID] += 1;
        } else {
          // The output from scaled phis and scaled reductions actually has
          // fewer lanes than the VF.
          unsigned ScaleFactor =
              vputils::getVFScaleFactor(VPV->getDefiningRecipe());
          ElementCount VF = VFs[J];
          if (ScaleFactor > 1) {
            VF = VFs[J].divideCoefficientBy(ScaleFactor);
            LLVM_DEBUG(dbgs() << "LV(REG): Scaled down VF from " << VFs[J]
                              << " to " << VF << " for " << *R << "\n";);
          }

          Type *ScalarTy = TypeInfo.inferScalarType(VPV);
          unsigned ClassID = TTI.getRegisterClassForType(true, ScalarTy);
          RegUsage[ClassID] += GetRegUsage(ScalarTy, VF);
        }
      }

      // Fold this point's usage into the running per-class maximum.
      for (const auto &Pair : RegUsage) {
        auto &Entry = MaxUsages[J][Pair.first];
        Entry = std::max(Entry, Pair.second);
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << Idx << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add used VPValues defined by the current recipe to the list of open
    // intervals.
    for (VPValue *DefV : R->definedValues())
      if (Ends.contains(DefV))
        OpenIntervals.insert(DefV);
  }

  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more register.
  VPRegisterUsage RU;
  for (unsigned Idx = 0, End = VFs.size(); Idx < End; ++Idx) {
    // Note that elements in this SmallMapVector will be default constructed
    // as 0. So we can use "Invariant[ClassID] += n" in the code below even if
    // there is no previous entry for ClassID.
    SmallMapVector<unsigned, unsigned, 4> Invariant;

    for (auto *In : LoopInvariants) {
      // FIXME: The target might use more than one register for the type
      // even in the scalar case.
      bool IsScalar = vputils::onlyScalarValuesUsed(In);

      ElementCount VF = IsScalar ? ElementCount::getFixed(1) : VFs[Idx];
      unsigned ClassID = TTI.getRegisterClassForType(
          VF.isVector(), TypeInfo.inferScalarType(In));
      Invariant[ClassID] += GetRegUsage(TypeInfo.inferScalarType(In), VF);
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[Idx] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[Idx].size()
             << " item\n";
      for (const auto &pair : MaxUsages[Idx]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[Idx];
    RUs[Idx] = RU;
  }

  return RUs;
}
652