//===- VPlanAnalysis.cpp - Various Analyses working on VPlan ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "VPlanAnalysis.h"
#include "VPlan.h"
#include "VPlanCFG.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

#define DEBUG_TYPE "vplan"

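// Infer the scalar result type of a blend from its first incoming value. All
// incoming values of a blend must have the same type, so the remaining ones
// are only asserted against the first and cached.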
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPBlendRecipe *R) {
  Type *ResTy = inferScalarType(R->getIncomingValue(0));
  for (unsigned I = 1, E = R->getNumIncomingValues(); I != E; ++I) {
    VPValue *Inc = R->getIncomingValue(I);
    assert(inferScalarType(Inc) == ResTy &&
           "different types inferred for different incoming values");
    CachedTypes[Inc] = ResTy;
  }
  return ResTy;
}

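// Infer the scalar result type of a VPInstruction. Generic unary and binary
// opcodes take the type of their first operand; VPlan-specific opcodes are
// handled individually below.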
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
  // Set the result type from the first operand, check that the types of all
  // other operands match, and cache them.
  auto SetResultTyFromOp = [this, R]() {
    Type *ResTy = inferScalarType(R->getOperand(0));
    for (unsigned Op = 1; Op != R->getNumOperands(); ++Op) {
      VPValue *OtherV = R->getOperand(Op);
      assert(inferScalarType(OtherV) == ResTy &&
             "different types inferred for different operands");
      CachedTypes[OtherV] = ResTy;
    }
    return ResTy;
  };

  unsigned Opcode = R->getOpcode();
  if (Instruction::isBinaryOp(Opcode) || Instruction::isUnaryOp(Opcode))
    return SetResultTyFromOp();

  switch (Opcode) {
  case Instruction::Select: {
    Type *ResTy = inferScalarType(R->getOperand(1));
    VPValue *OtherV = R->getOperand(2);
    assert(inferScalarType(OtherV) == ResTy &&
           "different types inferred for different operands");
    CachedTypes[OtherV] = ResTy;
    return ResTy;
  }
  case Instruction::ICmp:
  case VPInstruction::ActiveLaneMask:
    return inferScalarType(R->getOperand(1));
  case VPInstruction::FirstOrderRecurrenceSplice:
  case VPInstruction::Not:
    return SetResultTyFromOp();
  case VPInstruction::ExtractFromEnd: {
    Type *BaseTy = inferScalarType(R->getOperand(0));
    if (auto *VecTy = dyn_cast<VectorType>(BaseTy))
      return VecTy->getElementType();
    return BaseTy;
  }
  case VPInstruction::LogicalAnd:
    return IntegerType::get(Ctx, 1);
  case VPInstruction::PtrAdd:
    // Return the type based on the pointer argument (i.e. first operand).
    return inferScalarType(R->getOperand(0));
  case VPInstruction::BranchOnCond:
  case VPInstruction::BranchOnCount:
    return Type::getVoidTy(Ctx);
  default:
    break;
  }
  // Type inference not implemented for opcode.
  LLVM_DEBUG({
    dbgs() << "LV: Found unhandled opcode for: ";
    R->getVPSingleValue()->dump();
  });
  llvm_unreachable("Unhandled opcode!");
}

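// Infer the scalar result type of a widened instruction: compares produce i1,
// while the other supported unary and binary opcodes preserve the (matching)
// type of their operands.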
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenRecipe *R) {
  unsigned Opcode = R->getOpcode();
  switch (Opcode) {
  case Instruction::ICmp:
  case Instruction::FCmp:
    return IntegerType::get(Ctx, 1);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    Type *ResTy = inferScalarType(R->getOperand(0));
    assert(ResTy == inferScalarType(R->getOperand(1)) &&
           "types for both operands must match for binary op");
    CachedTypes[R->getOperand(1)] = ResTy;
    return ResTy;
  }
  case Instruction::FNeg:
  case Instruction::Freeze:
    return inferScalarType(R->getOperand(0));
  default:
    break;
  }

  // Type inference not implemented for opcode.
  LLVM_DEBUG({
    dbgs() << "LV: Found unhandled opcode for: ";
    R->getVPSingleValue()->dump();
  });
  llvm_unreachable("Unhandled opcode!");
}

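// A widened call has the scalar return type of the underlying call.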
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
  auto &CI = *cast<CallInst>(R->getUnderlyingInstr());
  return CI.getType();
}

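// Among the widened memory recipes only loads define a value; their scalar
// type is the type of the underlying load instruction.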
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
  assert((isa<VPWidenLoadRecipe>(R) || isa<VPWidenLoadEVLRecipe>(R)) &&
         "Store recipes should not define any values");
  return cast<LoadInst>(&R->getIngredient())->getType();
}

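// A widened select has the common type of its two selected operands (operands
// 1 and 2); the condition operand does not contribute to the result type.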
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenSelectRecipe *R) {
  Type *ResTy = inferScalarType(R->getOperand(1));
  VPValue *OtherV = R->getOperand(2);
  assert(inferScalarType(OtherV) == ResTy &&
         "different types inferred for different operands");
  CachedTypes[OtherV] = ResTy;
  return ResTy;
}

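// Replicate recipes execute their underlying instruction once per lane, so
// the scalar result type follows the underlying instruction's opcode:
// derived from the operands for binary ops, selects and GEPs, and taken
// directly from the IR instruction for casts, loads and calls.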
Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPReplicateRecipe *R) {
  switch (R->getUnderlyingInstr()->getOpcode()) {
  case Instruction::Call: {
    unsigned CallIdx = R->getNumOperands() - (R->isPredicated() ? 2 : 1);
    return cast<Function>(R->getOperand(CallIdx)->getLiveInIRValue())
        ->getReturnType();
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    Type *ResTy = inferScalarType(R->getOperand(0));
    assert(ResTy == inferScalarType(R->getOperand(1)) &&
           "inferred types for operands of binary op don't match");
    CachedTypes[R->getOperand(1)] = ResTy;
    return ResTy;
  }
  case Instruction::Select: {
    Type *ResTy = inferScalarType(R->getOperand(1));
    assert(ResTy == inferScalarType(R->getOperand(2)) &&
           "inferred types for operands of select op don't match");
    CachedTypes[R->getOperand(2)] = ResTy;
    return ResTy;
  }
  case Instruction::ICmp:
  case Instruction::FCmp:
    return IntegerType::get(Ctx, 1);
  case Instruction::AddrSpaceCast:
  case Instruction::Alloca:
  case Instruction::BitCast:
  case Instruction::Trunc:
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::FPExt:
  case Instruction::FPTrunc:
  case Instruction::ExtractValue:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::FPToSI:
  case Instruction::FPToUI:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    return R->getUnderlyingInstr()->getType();
  case Instruction::Freeze:
  case Instruction::FNeg:
  case Instruction::GetElementPtr:
    return inferScalarType(R->getOperand(0));
  case Instruction::Load:
    return cast<LoadInst>(R->getUnderlyingInstr())->getType();
  case Instruction::Store:
    // FIXME: VPReplicateRecipes with store opcodes still define a result
    // VPValue, so we need to handle them here. Remove the code here once this
    // is modeled accurately in VPlan.
    return Type::getVoidTy(Ctx);
  default:
    break;
  }
  // Type inference not implemented for opcode.
  LLVM_DEBUG({
    dbgs() << "LV: Found unhandled opcode for: ";
    R->getVPSingleValue()->dump();
  });
  llvm_unreachable("Unhandled opcode");
}

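// Infer and cache the scalar type of the VPValue: consult the cache first,
// handle live-ins directly via their IR type (falling back to the canonical
// IV type for values without underlying IR), and otherwise dispatch on the
// defining recipe.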
Type *VPTypeAnalysis::inferScalarType(const VPValue *V) {
  if (Type *CachedTy = CachedTypes.lookup(V))
    return CachedTy;

  if (V->isLiveIn()) {
    if (auto *IRValue = V->getLiveInIRValue())
      return IRValue->getType();
    // All VPValues without any underlying IR value (like the vector trip count
    // or the backedge-taken count) have the same type as the canonical IV.
    return CanonicalIVTy;
  }

  Type *ResultTy =
      TypeSwitch<const VPRecipeBase *, Type *>(V->getDefiningRecipe())
          .Case<VPActiveLaneMaskPHIRecipe, VPCanonicalIVPHIRecipe,
                VPFirstOrderRecurrencePHIRecipe, VPReductionPHIRecipe,
                VPWidenPointerInductionRecipe, VPEVLBasedIVPHIRecipe>(
              [this](const auto *R) {
                // Handle header phi recipes, except VPWidenIntOrFpInduction
                // which needs special handling due to it possibly being
                // truncated.
                // TODO: consider inferring/caching type of siblings, e.g.,
                // backedge value, here and in cases below.
                return inferScalarType(R->getStartValue());
              })
          .Case<VPWidenIntOrFpInductionRecipe, VPDerivedIVRecipe>(
              [](const auto *R) { return R->getScalarType(); })
          .Case<VPReductionRecipe, VPPredInstPHIRecipe, VPWidenPHIRecipe,
                VPScalarIVStepsRecipe, VPWidenGEPRecipe, VPVectorPointerRecipe,
                VPWidenCanonicalIVRecipe>([this](const VPRecipeBase *R) {
            return inferScalarType(R->getOperand(0));
          })
          .Case<VPBlendRecipe, VPInstruction, VPWidenRecipe, VPReplicateRecipe,
                VPWidenCallRecipe, VPWidenMemoryRecipe, VPWidenSelectRecipe>(
              [this](const auto *R) { return inferScalarTypeForRecipe(R); })
          .Case<VPInterleaveRecipe>([V](const VPInterleaveRecipe *R) {
            // TODO: Use info from interleave group.
            return V->getUnderlyingValue()->getType();
          })
          .Case<VPWidenCastRecipe>(
              [](const VPWidenCastRecipe *R) { return R->getResultType(); })
          .Case<VPScalarCastRecipe>(
              [](const VPScalarCastRecipe *R) { return R->getResultType(); })
          .Case<VPExpandSCEVRecipe>([](const VPExpandSCEVRecipe *R) {
            return R->getSCEV()->getType();
          });

  assert(ResultTy && "could not infer type for the given VPValue");
  CachedTypes[V] = ResultTy;
  return ResultTy;
}

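// Collect all recipes that only compute values feeding llvm.assume calls.
// As an illustrative example (in IR terms), given
//   %c = icmp ult i64 %i, %n
//   call void @llvm.assume(i1 %c)
// both the assume and the icmp are ephemeral, provided %c has no other users.
// The walk seeds the worklist with the assume replicate recipes and follows
// operands backwards, adding recipes that are free of side effects and only
// used by recipes already known to be ephemeral.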
void llvm::collectEphemeralRecipesForVPlan(
    VPlan &Plan, DenseSet<VPRecipeBase *> &EphRecipes) {
  // First, collect seed recipes which are operands of assumes.
  SmallVector<VPRecipeBase *> Worklist;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
    for (VPRecipeBase &R : *VPBB) {
      auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
      if (!RepR || !match(RepR->getUnderlyingInstr(),
                          PatternMatch::m_Intrinsic<Intrinsic::assume>()))
        continue;
      Worklist.push_back(RepR);
      EphRecipes.insert(RepR);
    }
  }

  // Process operands of candidates in the worklist and add them to the set of
  // ephemeral recipes, if they don't have side effects and are only used by
  // other ephemeral recipes.
  while (!Worklist.empty()) {
    VPRecipeBase *Cur = Worklist.pop_back_val();
    for (VPValue *Op : Cur->operands()) {
      auto *OpR = Op->getDefiningRecipe();
      if (!OpR || OpR->mayHaveSideEffects() || EphRecipes.contains(OpR))
        continue;
      if (any_of(Op->users(), [&EphRecipes](VPUser *U) {
            auto *UR = dyn_cast<VPRecipeBase>(U);
            return !UR || !EphRecipes.contains(UR);
          }))
        continue;
      EphRecipes.insert(OpR);
      Worklist.push_back(OpR);
    }
  }
}