//===- VPlanUtils.cpp - VPlan-related utilities ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "VPlanUtils.h"
#include "VPlanCFG.h"
#include "VPlanPatternMatch.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"

using namespace llvm;
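
// Returns true if all users of \p Def only use its first lane.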
bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Def); });
}
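
// Returns true if all users of \p Def only use its first part.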
bool vputils::onlyFirstPartUsed(const VPValue *Def) {
  return all_of(Def->users(),
                [Def](const VPUser *U) { return U->onlyFirstPartUsed(Def); });
}
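
// Returns a cached expansion of \p Expr if one already exists in \p Plan.
// Otherwise SCEV constants and non-instruction SCEVUnknowns become live-ins,
// while all other expressions are expanded via a VPExpandSCEVRecipe appended
// to the plan's entry block. The result is cached before it is returned.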
VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
                                                ScalarEvolution &SE) {
  if (auto *Expanded = Plan.getSCEVExpansion(Expr))
    return Expanded;
  VPValue *Expanded = nullptr;
  if (auto *E = dyn_cast<SCEVConstant>(Expr))
    Expanded = Plan.getOrAddLiveIn(E->getValue());
  else {
    auto *U = dyn_cast<SCEVUnknown>(Expr);
    // Skip SCEV expansion if Expr is a SCEVUnknown wrapping a non-instruction
    // value. Otherwise the value may be defined in a loop and using it directly
    // will break LCSSA form. The SCEV expansion takes care of preserving LCSSA
    // form.
    if (U && !isa<Instruction>(U->getValue())) {
      Expanded = Plan.getOrAddLiveIn(U->getValue());
    } else {
      Expanded = new VPExpandSCEVRecipe(Expr, SE);
      Plan.getEntry()->appendRecipe(Expanded->getDefiningRecipe());
    }
  }
  Plan.addSCEVExpansion(Expr, Expanded);
  return Expanded;
}
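
// Returns true if \p V is a header mask of \p Plan: an active-lane-mask phi,
// an active-lane-mask of the canonical IV (or its scalar steps) and the trip
// count, or an ICmp of a wide canonical IV against the backedge-taken count.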
bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) {
  if (isa<VPActiveLaneMaskPHIRecipe>(V))
    return true;

  auto IsWideCanonicalIV = [](VPValue *A) {
    return isa<VPWidenCanonicalIVRecipe>(A) ||
           (isa<VPWidenIntOrFpInductionRecipe>(A) &&
            cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical());
  };

  VPValue *A, *B;
  using namespace VPlanPatternMatch;

  if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B))))
    return B == Plan.getTripCount() &&
           (match(A, m_ScalarIVSteps(m_Specific(Plan.getCanonicalIV()),
                                     m_SpecificInt(1),
                                     m_Specific(&Plan.getVF()))) ||
            IsWideCanonicalIV(A));

  return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) &&
         IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
}
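
// Returns the SCEV expression for \p V: the SCEV of the wrapped IR value for
// live-ins and the expanded expression for VPExpandSCEVRecipes; all other
// recipes yield SCEVCouldNotCompute.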
const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) {
  if (V->isLiveIn())
    return SE.getSCEV(V->getLiveInIRValue());

  // TODO: Support constructing SCEVs for more recipes as needed.
  return TypeSwitch<const VPRecipeBase *, const SCEV *>(V->getDefiningRecipe())
      .Case<VPExpandSCEVRecipe>(
          [](const VPExpandSCEVRecipe *R) { return R->getSCEV(); })
      .Default([&SE](const VPRecipeBase *) { return SE.getCouldNotCompute(); });
}
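
// Returns true if \p V produces the same value for all VF lanes and UF parts.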
bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
  using namespace VPlanPatternMatch;
  // Live-ins are uniform.
  if (V->isLiveIn())
    return true;

  VPRecipeBase *R = V->getDefiningRecipe();
  if (R && V->isDefinedOutsideLoopRegions()) {
    if (match(V->getDefiningRecipe(),
              m_VPInstruction<VPInstruction::CanonicalIVIncrementForPart>(
                  m_VPValue())))
      return false;
    return all_of(R->operands(), isUniformAcrossVFsAndUFs);
  }

  auto *CanonicalIV = R->getParent()->getPlan()->getCanonicalIV();
  // Canonical IV chain is uniform.
  if (V == CanonicalIV || V == CanonicalIV->getBackedgeValue())
    return true;

  return TypeSwitch<const VPRecipeBase *, bool>(R)
      .Case<VPDerivedIVRecipe>([](const auto *R) { return true; })
      .Case<VPReplicateRecipe>([](const auto *R) {
        // Loads and stores that are uniform across VF lanes are handled by
        // VPReplicateRecipe.IsUniform. They are also uniform across UF parts if
        // all their operands are invariant.
        // TODO: Further relax the restrictions.
        return R->isSingleScalar() &&
               (isa<LoadInst, StoreInst>(R->getUnderlyingValue())) &&
               all_of(R->operands(), isUniformAcrossVFsAndUFs);
      })
      .Case<VPInstruction>([](const auto *VPI) {
        return VPI->isScalarCast() &&
               isUniformAcrossVFsAndUFs(VPI->getOperand(0));
      })
      .Case<VPWidenCastRecipe>([](const auto *R) {
        // A cast is uniform according to its operand.
        return isUniformAcrossVFsAndUFs(R->getOperand(0));
      })
      .Default([](const VPRecipeBase *) { // A value is considered non-uniform
                                          // unless proven otherwise.
        return false;
      });
}
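
// Returns the first loop header found in a shallow depth-first traversal of
// the blocks reachable from \p Plan's entry, or nullptr if there is none.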
VPBasicBlock *vputils::getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT) {
  auto DepthFirst = vp_depth_first_shallow(Plan.getEntry());
  auto I = find_if(DepthFirst, [&VPDT](VPBlockBase *VPB) {
    return VPBlockUtils::isHeader(VPB, VPDT);
  });
  return I == DepthFirst.end() ? nullptr : cast<VPBasicBlock>(*I);
}