//===- VPlanPatternMatch.h - Match on VPValues and recipes ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a simple and efficient mechanism for performing general
// tree-based pattern matches on VPlan values and recipes, based on
// LLVM's IR pattern matchers.
//
// Currently it provides generic matchers for VPInstructions and recipes with
// up to four operands, as well as specialized matchers like m_Not,
// m_ActiveLaneMask, m_BranchOnCond and m_BranchOnCount for specific
// VPInstructions.
// TODO: Add missing matchers for additional opcodes and recipes as needed.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
#define LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H

#include "VPlan.h"

namespace llvm {
namespace VPlanPatternMatch {

template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  return P.match(V);
}
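
// Example usage of match() with the matchers defined below (a sketch; `Def` is
// a hypothetical VPValue, and X captures the operand if the match succeeds):
//   VPValue *X;
//   if (match(Def, m_Not(m_VPValue(X))))
//     ...; // Def is defined by a VPInstruction::Not; X is its operand.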

template <typename Pattern> bool match(VPUser *U, const Pattern &P) {
  auto *R = dyn_cast<VPRecipeBase>(U);
  return R && match(R, P);
}

template <typename Class> struct class_match {
  template <typename ITy> bool match(ITy *V) const { return isa<Class>(V); }
};

/// Match an arbitrary VPValue and ignore it.
inline class_match<VPValue> m_VPValue() { return class_match<VPValue>(); }

template <typename Class> struct bind_ty {
  Class *&VR;

  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) const {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};

/// Match a specified VPValue.
struct specificval_ty {
  const VPValue *Val;

  specificval_ty(const VPValue *V) : Val(V) {}

  bool match(VPValue *VPV) const { return VPV == Val; }
};

inline specificval_ty m_Specific(const VPValue *VPV) { return VPV; }

/// Stores a reference to the VPValue *, not the VPValue * itself, so it can be
/// used in commutative matchers.
struct deferredval_ty {
  VPValue *const &Val;

  deferredval_ty(VPValue *const &V) : Val(V) {}

  bool match(VPValue *const V) const { return V == Val; }
};

/// Like m_Specific(), but works if the specific value to match is determined
/// as part of the same match() expression. For example:
/// m_Mul(m_VPValue(X), m_Specific(X)) is incorrect, because m_Specific() will
/// bind X before the pattern match starts.
/// m_Mul(m_VPValue(X), m_Deferred(X)) is correct, and will check against
/// whichever value m_VPValue(X) populated.
inline deferredval_ty m_Deferred(VPValue *const &V) { return V; }

/// Match an integer constant or vector of constants if Pred::isValue returns
/// true for the APInt. \p BitWidth optionally specifies the bitwidth the
/// matched constant must have. If it is 0, the matched constant can have any
/// bitwidth.
template <typename Pred, unsigned BitWidth = 0> struct int_pred_ty {
  Pred P;

  int_pred_ty(Pred P) : P(std::move(P)) {}
  int_pred_ty() : P() {}

  bool match(VPValue *VPV) const {
    if (!VPV->isLiveIn())
      return false;
    Value *V = VPV->getLiveInIRValue();
    if (!V)
      return false;
    const auto *CI = dyn_cast<ConstantInt>(V);
    if (!CI && V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        CI = dyn_cast_or_null<ConstantInt>(
            C->getSplatValue(/*AllowPoison=*/false));
    if (!CI)
      return false;

    if (BitWidth != 0 && CI->getBitWidth() != BitWidth)
      return false;
    return P.isValue(CI->getValue());
  }
};

/// Match a specified integer value or vector of all elements of that
/// value. \p BitWidth optionally specifies the bitwidth the matched constant
/// must have. If it is 0, the matched constant can have any bitwidth.
struct is_specific_int {
  APInt Val;

  is_specific_int(APInt Val) : Val(std::move(Val)) {}

  bool isValue(const APInt &C) const { return APInt::isSameValue(Val, C); }
};

template <unsigned Bitwidth = 0>
using specific_intval = int_pred_ty<is_specific_int, Bitwidth>;

inline specific_intval<0> m_SpecificInt(uint64_t V) {
  return specific_intval<0>(is_specific_int(APInt(64, V)));
}
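
// Example (a sketch): match a multiply of a captured value by the constant 4
// (`Def` is a hypothetical VPValue; m_Mul is defined further below):
//   VPValue *X;
//   if (match(Def, m_Mul(m_VPValue(X), m_SpecificInt(4))))
//     ...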

inline specific_intval<1> m_False() {
  return specific_intval<1>(is_specific_int(APInt(64, 0)));
}

inline specific_intval<1> m_True() {
  return specific_intval<1>(is_specific_int(APInt(64, 1)));
}

struct is_all_ones {
  bool isValue(const APInt &C) const { return C.isAllOnes(); }
};

/// Match an integer or vector with all bits set.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_all_ones> m_AllOnes() {
  return int_pred_ty<is_all_ones>();
}
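
// Example (a sketch): recognize a bitwise not written as `xor X, -1`
// (`Def` is a hypothetical VPValue; m_c_Binary is defined further below):
//   VPValue *X;
//   if (match(Def, m_c_Binary<Instruction::Xor>(m_VPValue(X), m_AllOnes())))
//     ...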

/// Matching combinators
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) const {
    if (L.match(V))
      return true;
    if (R.match(V))
      return true;
    return false;
  }
};

template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) && R.match(V);
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}
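
// Example (a sketch): accept either a truncate of a value or the value itself,
// trying the more specific pattern first (`Def` is a hypothetical VPValue):
//   VPValue *X;
//   if (match(Def, m_CombineOr(m_Trunc(m_VPValue(X)), m_VPValue(X))))
//     ...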

/// Match a VPValue, capturing it if we match.
inline bind_ty<VPValue> m_VPValue(VPValue *&V) { return V; }

/// Match a VPInstruction, capturing it if we match.
inline bind_ty<VPInstruction> m_VPInstruction(VPInstruction *&V) { return V; }

template <typename Ops_t, unsigned Opcode, bool Commutative,
          typename... RecipeTys>
struct Recipe_match {
  Ops_t Ops;

  Recipe_match() : Ops() {
    static_assert(std::tuple_size<Ops_t>::value == 0 &&
                  "constructor can only be used with zero operands");
  }
  Recipe_match(Ops_t Ops) : Ops(Ops) {}
  template <typename A_t, typename B_t>
  Recipe_match(A_t A, B_t B) : Ops({A, B}) {
    static_assert(std::tuple_size<Ops_t>::value == 2 &&
                  "constructor can only be used for binary matcher");
  }

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPSingleDefRecipe *R) const {
    return match(static_cast<const VPRecipeBase *>(R));
  }

  bool match(const VPRecipeBase *R) const {
    if (std::tuple_size<Ops_t>::value == 0) {
      assert(Opcode == VPInstruction::BuildVector &&
             "can only match BuildVector with empty ops");
      auto *VPI = dyn_cast<VPInstruction>(R);
      return VPI && VPI->getOpcode() == VPInstruction::BuildVector;
    }

    if ((!matchRecipeAndOpcode<RecipeTys>(R) && ...))
      return false;

    assert(R->getNumOperands() == std::tuple_size<Ops_t>::value &&
           "recipe with matched opcode does not have the expected number of "
           "operands");

    auto IdxSeq = std::make_index_sequence<std::tuple_size<Ops_t>::value>();
    if (all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
          return Op.match(R->getOperand(Idx));
        }))
      return true;

    return Commutative &&
           all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
             return Op.match(R->getOperand(R->getNumOperands() - Idx - 1));
           });
  }

private:
  template <typename RecipeTy>
  static bool matchRecipeAndOpcode(const VPRecipeBase *R) {
    auto *DefR = dyn_cast<RecipeTy>(R);
    // Check for recipes that do not have opcodes.
    if constexpr (std::is_same<RecipeTy, VPScalarIVStepsRecipe>::value ||
                  std::is_same<RecipeTy, VPCanonicalIVPHIRecipe>::value ||
                  std::is_same<RecipeTy, VPWidenSelectRecipe>::value ||
                  std::is_same<RecipeTy, VPDerivedIVRecipe>::value ||
                  std::is_same<RecipeTy, VPWidenGEPRecipe>::value)
      return DefR;
    else
      return DefR && DefR->getOpcode() == Opcode;
  }

  /// Helper to check if predicate \p P holds on all tuple elements in Ops
  /// using the provided index sequence.
  template <typename Fn, std::size_t... Is>
  bool all_of_tuple_elements(std::index_sequence<Is...>, Fn P) const {
    return (P(std::get<Is>(Ops), Is) && ...);
  }
};

template <unsigned Opcode, typename... RecipeTys>
using ZeroOpRecipe_match =
    Recipe_match<std::tuple<>, Opcode, false, RecipeTys...>;

template <typename Op0_t, unsigned Opcode, typename... RecipeTys>
using UnaryRecipe_match =
    Recipe_match<std::tuple<Op0_t>, Opcode, false, RecipeTys...>;

template <typename Op0_t, unsigned Opcode>
using UnaryVPInstruction_match =
    UnaryRecipe_match<Op0_t, Opcode, VPInstruction>;

template <unsigned Opcode>
using ZeroOpVPInstruction_match = ZeroOpRecipe_match<Opcode, VPInstruction>;

template <typename Op0_t, unsigned Opcode>
using AllUnaryRecipe_match =
    UnaryRecipe_match<Op0_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
                      VPWidenCastRecipe, VPInstruction>;

template <typename Op0_t, typename Op1_t, unsigned Opcode, bool Commutative,
          typename... RecipeTys>
using BinaryRecipe_match =
    Recipe_match<std::tuple<Op0_t, Op1_t>, Opcode, Commutative, RecipeTys...>;

template <typename Op0_t, typename Op1_t, unsigned Opcode>
using BinaryVPInstruction_match =
    BinaryRecipe_match<Op0_t, Op1_t, Opcode, /*Commutative*/ false,
                       VPInstruction>;

template <typename Op0_t, typename Op1_t, typename Op2_t, unsigned Opcode,
          bool Commutative, typename... RecipeTys>
using TernaryRecipe_match = Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>,
                                         Opcode, Commutative, RecipeTys...>;

template <typename Op0_t, typename Op1_t, typename Op2_t, unsigned Opcode>
using TernaryVPInstruction_match =
    TernaryRecipe_match<Op0_t, Op1_t, Op2_t, Opcode, /*Commutative*/ false,
                        VPInstruction>;

template <typename Op0_t, typename Op1_t, unsigned Opcode,
          bool Commutative = false>
using AllBinaryRecipe_match =
    BinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative, VPWidenRecipe,
                       VPReplicateRecipe, VPWidenCastRecipe, VPInstruction>;

/// BuildVector is matched by its opcode only; its operands are not matched, as
/// the number of operands is not fixed.
inline ZeroOpVPInstruction_match<VPInstruction::BuildVector> m_BuildVector() {
  return ZeroOpVPInstruction_match<VPInstruction::BuildVector>();
}

template <unsigned Opcode, typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, Opcode>
m_VPInstruction(const Op0_t &Op0) {
  return UnaryVPInstruction_match<Op0_t, Opcode>(Op0);
}

template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline BinaryVPInstruction_match<Op0_t, Op1_t, Opcode>
m_VPInstruction(const Op0_t &Op0, const Op1_t &Op1) {
  return BinaryVPInstruction_match<Op0_t, Op1_t, Opcode>(Op0, Op1);
}
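
// Example (a sketch): the generic form matches any VPInstruction opcode, e.g.
// an active-lane-mask without the dedicated m_ActiveLaneMask helper below
// (`Def` is a hypothetical VPValue):
//   VPValue *A, *B;
//   if (match(Def, m_VPInstruction<VPInstruction::ActiveLaneMask>(
//                      m_VPValue(A), m_VPValue(B))))
//     ...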

template <unsigned Opcode, typename Op0_t, typename Op1_t, typename Op2_t>
inline TernaryVPInstruction_match<Op0_t, Op1_t, Op2_t, Opcode>
m_VPInstruction(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return TernaryVPInstruction_match<Op0_t, Op1_t, Op2_t, Opcode>(
      {Op0, Op1, Op2});
}

template <typename Op0_t, typename Op1_t, typename Op2_t, typename Op3_t,
          unsigned Opcode, bool Commutative, typename... RecipeTys>
using Recipe4Op_match = Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t, Op3_t>,
                                     Opcode, Commutative, RecipeTys...>;

template <typename Op0_t, typename Op1_t, typename Op2_t, typename Op3_t,
          unsigned Opcode>
using VPInstruction4Op_match =
    Recipe4Op_match<Op0_t, Op1_t, Op2_t, Op3_t, Opcode, /*Commutative*/ false,
                    VPInstruction>;

template <unsigned Opcode, typename Op0_t, typename Op1_t, typename Op2_t,
          typename Op3_t>
inline VPInstruction4Op_match<Op0_t, Op1_t, Op2_t, Op3_t, Opcode>
m_VPInstruction(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2,
                const Op3_t &Op3) {
  return VPInstruction4Op_match<Op0_t, Op1_t, Op2_t, Op3_t, Opcode>(
      {Op0, Op1, Op2, Op3});
}

template <typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, Instruction::Freeze>
m_Freeze(const Op0_t &Op0) {
  return m_VPInstruction<Instruction::Freeze>(Op0);
}

template <typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, VPInstruction::Not>
m_Not(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Not>(Op0);
}

template <typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, VPInstruction::BranchOnCond>
m_BranchOnCond(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::BranchOnCond>(Op0);
}

template <typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, VPInstruction::Broadcast>
m_Broadcast(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Broadcast>(Op0);
}

template <typename Op0_t, typename Op1_t>
inline BinaryVPInstruction_match<Op0_t, Op1_t, VPInstruction::ActiveLaneMask>
m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::ActiveLaneMask>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline BinaryVPInstruction_match<Op0_t, Op1_t, VPInstruction::BranchOnCount>
m_BranchOnCount(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnCount>(Op0, Op1);
}
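
// Example (a sketch): recognize the canonical latch terminator
// branch-on-count(iv-next, trip-count) (`Term` is a hypothetical recipe):
//   VPValue *IV, *TC;
//   if (match(Term, m_BranchOnCount(m_VPValue(IV), m_VPValue(TC))))
//     ...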

template <unsigned Opcode, typename Op0_t>
inline AllUnaryRecipe_match<Op0_t, Opcode> m_Unary(const Op0_t &Op0) {
  return AllUnaryRecipe_match<Op0_t, Opcode>(Op0);
}

template <typename Op0_t>
inline AllUnaryRecipe_match<Op0_t, Instruction::Trunc>
m_Trunc(const Op0_t &Op0) {
  return m_Unary<Instruction::Trunc, Op0_t>(Op0);
}

template <typename Op0_t>
inline AllUnaryRecipe_match<Op0_t, Instruction::ZExt> m_ZExt(const Op0_t &Op0) {
  return m_Unary<Instruction::ZExt, Op0_t>(Op0);
}

template <typename Op0_t>
inline AllUnaryRecipe_match<Op0_t, Instruction::SExt> m_SExt(const Op0_t &Op0) {
  return m_Unary<Instruction::SExt, Op0_t>(Op0);
}

template <typename Op0_t>
inline match_combine_or<AllUnaryRecipe_match<Op0_t, Instruction::ZExt>,
                        AllUnaryRecipe_match<Op0_t, Instruction::SExt>>
m_ZExtOrSExt(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}
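
// Example (a sketch): strip a zero- or sign-extend and inspect its source
// (`Def` is a hypothetical VPValue):
//   VPValue *Src;
//   if (match(Def, m_ZExtOrSExt(m_VPValue(Src))))
//     ...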

template <unsigned Opcode, typename Op0_t, typename Op1_t,
          bool Commutative = false>
inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>
m_Binary(const Op0_t &Op0, const Op1_t &Op1) {
  return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>(Op0, Op1);
}

template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, true>
m_c_Binary(const Op0_t &Op0, const Op1_t &Op1) {
  return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, true>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Mul>
m_Mul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Mul,
                             /*Commutative*/ true>
m_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Mul, Op0_t, Op1_t, true>(Op0, Op1);
}
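
// Example (a sketch): match a multiply by a previously bound value X in either
// operand position (`Def` is a hypothetical VPValue, X a VPValue * bound
// earlier):
//   VPValue *Y;
//   if (match(Def, m_c_Mul(m_Specific(X), m_VPValue(Y))))
//     ...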

/// Match a binary OR operation. Note that while conceptually the operands can
/// be matched commutatively, \p Commutative defaults to false in line with the
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
/// version of the matcher.
template <typename Op0_t, typename Op1_t, bool Commutative = false>
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or, Commutative>
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Or, Op0_t, Op1_t, Commutative>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or,
                             /*Commutative*/ true>
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_BinaryOr<Op0_t, Op1_t, /*Commutative*/ true>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
using GEPLikeRecipe_match =
    BinaryRecipe_match<Op0_t, Op1_t, Instruction::GetElementPtr, false,
                       VPWidenRecipe, VPReplicateRecipe, VPWidenGEPRecipe,
                       VPInstruction>;

template <typename Op0_t, typename Op1_t>
inline GEPLikeRecipe_match<Op0_t, Op1_t> m_GetElementPtr(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return GEPLikeRecipe_match<Op0_t, Op1_t>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t, typename Op2_t, unsigned Opcode>
using AllTernaryRecipe_match =
    Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, Opcode, false,
                 VPReplicateRecipe, VPInstruction, VPWidenSelectRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline AllTernaryRecipe_match<Op0_t, Op1_t, Op2_t, Instruction::Select>
m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return AllTernaryRecipe_match<Op0_t, Op1_t, Op2_t, Instruction::Select>(
      {Op0, Op1, Op2});
}

template <typename Op0_t, typename Op1_t>
inline match_combine_or<
    BinaryVPInstruction_match<Op0_t, Op1_t, VPInstruction::LogicalAnd>,
    AllTernaryRecipe_match<Op0_t, Op1_t, specific_intval<1>,
                           Instruction::Select>>
m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
      m_Select(Op0, Op1, m_False()));
}
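
// For example (a sketch, in pseudo-notation), m_LogicalAnd(m_VPValue(A),
// m_VPValue(B)) matches both of the following forms:
//   %r = logical-and %a, %b
//   %r = select %a, %b, false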

template <typename Op0_t, typename Op1_t>
inline AllTernaryRecipe_match<Op0_t, specific_intval<1>, Op1_t,
                              Instruction::Select>
m_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Select(Op0, m_True(), Op1);
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPScalarIVSteps_match =
    TernaryRecipe_match<Op0_t, Op1_t, Op2_t, 0, false, VPScalarIVStepsRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>
m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPDerivedIV_match =
    Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false, VPDerivedIVRecipe>;

template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPDerivedIV_match<Op0_t, Op1_t, Op2_t>
m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPDerivedIV_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

/// Match a call argument at a given argument index.
template <typename Opnd_t> struct Argument_match {
  /// Call argument index to match.
  unsigned OpI;
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return Val.match(R->getOperand(OpI));
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (isa<CallInst>(R->getUnderlyingInstr()))
        return Val.match(R->getOperand(OpI + 1));
    return false;
  }
};

/// Match a call argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}

/// Intrinsic matchers.
struct IntrinsicID_match {
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) const {
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return R->getVectorIntrinsicID() == ID;
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return R->getCalledScalarFunction()->getIntrinsicID() == ID;
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (const auto *CI = dyn_cast<CallInst>(R->getUnderlyingInstr()))
        if (const auto *F = CI->getCalledFunction())
          return F->getIntrinsicID() == ID;
    return false;
  }
};

/// Intrinsic matchers are combinations of an ID matcher and argument matchers.
/// Higher-arity matchers are defined recursively by and-ing them with
/// lower-arity matchers. Here are some convenient typedefs for up to four
/// arguments; more can be added as needed.
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void>
struct m_Intrinsic_Ty;
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};

/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_VPValue(X), ...)
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}
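
// Example (a sketch): match a widened or replicated call to llvm.smax and
// capture both arguments (`Def` is a hypothetical VPValue):
//   VPValue *A, *B;
//   if (match(Def, m_Intrinsic<Intrinsic::smax>(m_VPValue(A), m_VPValue(B))))
//     ...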

} // namespace VPlanPatternMatch
} // namespace llvm

#endif