1//===- VPlanPatternMatch.h - Match on VPValues and recipes ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a simple and efficient mechanism for performing general
10// tree-based pattern matches on the VPlan values and recipes, based on
11// LLVM's IR pattern matchers.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
16#define LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
17
18#include "VPlan.h"
19
20namespace llvm::VPlanPatternMatch {
21
/// Entry point of the matcher framework: dispatch to the pattern's own
/// match() implementation for value \p V.
template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  const bool DidMatch = P.match(V);
  return DidMatch;
}
25
/// A match functor that can be used as a UnaryPredicate in functional
/// algorithms like all_of. Binds the pattern \p P as the trailing argument
/// of match<Val, Pattern>.
template <typename Val, typename Pattern> auto match_fn(const Pattern &P) {
  return bind_back<match<Val, Pattern>>(P);
}
31
32template <typename Pattern> bool match(VPUser *U, const Pattern &P) {
33 auto *R = dyn_cast<VPRecipeBase>(Val: U);
34 return R && match(R, P);
35}
36
/// Match functor for VPUser. Returns a unary predicate applying the VPUser
/// overload of match with pattern \p P.
template <typename Pattern> auto match_fn(const Pattern &P) {
  return bind_back<match<Pattern>>(P);
}
41
/// Match a VPSingleDefRecipe by treating it as a plain VPRecipeBase, so the
/// patterns' VPRecipeBase overloads apply.
template <typename Pattern> bool match(VPSingleDefRecipe *R, const Pattern &P) {
  return P.match(static_cast<const VPRecipeBase *>(R));
}
45
/// A match-wrapper around isa.
template <typename... To> struct match_isapred {
  // Succeeds iff the value is an instance of one of the To... types.
  template <typename ArgTy> bool match(const ArgTy *V) const {
    return isa<To...>(V);
  }
};

/// Match any value of one of the types \p To..., without binding it.
template <typename... To> inline match_isapred<To...> m_Isa() {
  return match_isapred<To...>();
}

/// Match an arbitrary VPValue and ignore it.
inline auto m_VPValue() { return m_Isa<VPValue>(); }
59
60template <typename Class> struct bind_ty {
61 Class *&VR;
62
63 bind_ty(Class *&V) : VR(V) {}
64
65 template <typename ITy> bool match(ITy *V) const {
66 if (auto *CV = dyn_cast<Class>(V)) {
67 VR = CV;
68 return true;
69 }
70 return false;
71 }
72};
73
74/// Match a specified VPValue.
75struct specificval_ty {
76 const VPValue *Val;
77
78 specificval_ty(const VPValue *V) : Val(V) {}
79
80 bool match(const VPValue *VPV) const { return VPV == Val; }
81};
82
83inline specificval_ty m_Specific(const VPValue *VPV) { return VPV; }
84
/// Stores a reference to the VPValue *, not the VPValue * itself,
/// thus can be used in commutative matchers.
struct deferredval_ty {
  VPValue *const &Val;

  deferredval_ty(VPValue *const &V) : Val(V) {}

  // Reads Val at match time, so it sees the value an earlier sub-pattern in
  // the same match expression bound.
  bool match(const VPValue *const V) const { return V == Val; }
};

/// Like m_Specific(), but works if the specific value to match is determined
/// as part of the same match() expression. For example:
/// m_Mul(m_VPValue(X), m_Specific(X)) is incorrect, because m_Specific() will
/// bind X before the pattern match starts.
/// m_Mul(m_VPValue(X), m_Deferred(X)) is correct, and will check against
/// whichever value m_VPValue(X) populated.
inline deferredval_ty m_Deferred(VPValue *const &V) { return V; }
102
/// Match an integer constant if Pred::isValue returns true for the APInt. \p
/// BitWidth optionally specifies the bitwidth the matched constant must have.
/// If it is 0, the matched constant can have any bitwidth.
template <typename Pred, unsigned BitWidth = 0> struct int_pred_ty {
  Pred P;

  int_pred_ty(Pred P) : P(std::move(P)) {}
  int_pred_ty() : P() {}

  bool match(const VPValue *VPV) const {
    // Look through broadcasts, so a broadcast of a matching constant also
    // matches.
    auto *VPI = dyn_cast<VPInstruction>(Val: VPV);
    if (VPI && VPI->getOpcode() == VPInstruction::Broadcast)
      VPV = VPI->getOperand(N: 0);
    auto *CI = dyn_cast<VPConstantInt>(Val: VPV);
    if (!CI)
      return false;

    // Enforce the optional bitwidth restriction before consulting the
    // predicate.
    if (BitWidth != 0 && CI->getBitWidth() != BitWidth)
      return false;
    return P.isValue(CI->getAPInt());
  }
};
125
/// Match a specified signed or unsigned integer value.
struct is_specific_int {
  APInt Val;
  bool IsSigned;

  is_specific_int(APInt Val, bool IsSigned = false)
      : Val(std::move(Val)), IsSigned(IsSigned) {}

  // Bitwidth-insensitive comparison; IsSigned selects signed vs. unsigned
  // extension semantics.
  bool isValue(const APInt &C) const {
    return APInt::isSameValue(I1: Val, I2: C, SignedCompare: IsSigned);
  }
};
138
/// Matcher for a specific integer constant, with an optional bitwidth
/// restriction on the matched constant.
template <unsigned Bitwidth = 0>
using specific_intval = int_pred_ty<is_specific_int, Bitwidth>;

/// Match an integer constant with unsigned value \p V, of any bitwidth.
inline specific_intval<0> m_SpecificInt(uint64_t V) {
  return specific_intval<0>(is_specific_int(APInt(64, V)));
}

/// Match an integer constant with signed value \p V, of any bitwidth.
inline specific_intval<0> m_SpecificSInt(int64_t V) {
  return specific_intval<0>(
      is_specific_int(APInt(64, V, /*isSigned=*/true), /*IsSigned=*/true));
}

/// Match the i1 constant false.
inline specific_intval<1> m_False() {
  return specific_intval<1>(is_specific_int(APInt(64, 0)));
}

/// Match the i1 constant true.
inline specific_intval<1> m_True() {
  return specific_intval<1>(is_specific_int(APInt(64, 1)));
}
158
/// Predicate for an all-ones integer constant.
struct is_all_ones {
  bool isValue(const APInt &C) const { return C.isAllOnes(); }
};

/// Match an integer or vector with all bits set.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_all_ones> m_AllOnes() {
  return int_pred_ty<is_all_ones>();
}

/// Predicate for the integer constant zero.
struct is_zero_int {
  bool isValue(const APInt &C) const { return C.isZero(); }
};

/// Predicate for the integer constant one.
struct is_one {
  bool isValue(const APInt &C) const { return C.isOne(); }
};

/// Match an integer 0 or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_zero_int> m_ZeroInt() {
  return int_pred_ty<is_zero_int>();
}

/// Match an integer 1 or a vector with all elements equal to 1.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_one> m_One() { return int_pred_ty<is_one>(); }
186
187struct bind_apint {
188 const APInt *&Res;
189
190 bind_apint(const APInt *&Res) : Res(Res) {}
191
192 bool match(const VPValue *VPV) const {
193 auto *CI = dyn_cast<VPConstantInt>(Val: VPV);
194 if (!CI)
195 return false;
196 Res = &CI->getAPInt();
197 return true;
198 }
199};
200
201inline bind_apint m_APInt(const APInt *&C) { return C; }
202
203struct bind_const_int {
204 uint64_t &Res;
205
206 bind_const_int(uint64_t &Res) : Res(Res) {}
207
208 bool match(const VPValue *VPV) const {
209 const APInt *APConst;
210 if (!bind_apint(APConst).match(VPV))
211 return false;
212 if (auto C = APConst->tryZExtValue()) {
213 Res = *C;
214 return true;
215 }
216 return false;
217 }
218};
219
/// Matches a VPIRValue whose underlying IR value is poison.
struct match_poison {
  bool match(const VPValue *V) const {
    return isa<VPIRValue>(Val: V) &&
           isa<PoisonValue>(Val: cast<VPIRValue>(Val: V)->getValue());
  }
};

/// Match a VPIRValue that's poison.
inline match_poison m_Poison() { return match_poison(); }

/// Match a plain integer constant no wider than 64-bits, capturing it if we
/// match.
inline bind_const_int m_ConstantInt(uint64_t &C) { return C; }
233
/// Matching combinators
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  // Short-circuits: R is only tried if L fails.
  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) || R.match(V);
  }
};

template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  // Short-circuits: R is only tried if L succeeds.
  template <typename ITy> bool match(ITy *V) const {
    return L.match(V) && R.match(V);
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}
268
/// Match a VPValue, capturing it if we match.
inline bind_ty<VPValue> m_VPValue(VPValue *&V) { return V; }

/// Match a VPIRValue, capturing it if we match.
inline bind_ty<VPIRValue> m_VPIRValue(VPIRValue *&V) { return V; }

/// Match a VPInstruction, capturing if we match.
inline bind_ty<VPInstruction> m_VPInstruction(VPInstruction *&V) { return V; }
277
/// Matches a recipe of one of the types \p RecipeTys with opcode \p Opcode
/// whose operands match the sub-patterns in \p Ops_t. If \p Commutative is
/// true (binary ops only), the operands may also match in swapped order.
template <typename Ops_t, unsigned Opcode, bool Commutative,
          typename... RecipeTys>
struct Recipe_match {
  Ops_t Ops;

  template <typename... OpTy> Recipe_match(OpTy... Ops) : Ops(Ops...) {
    static_assert(std::tuple_size<Ops_t>::value == sizeof...(Ops) &&
                  "number of operands in constructor doesn't match Ops_t");
    static_assert((!Commutative || std::tuple_size<Ops_t>::value == 2) &&
                  "only binary ops can be commutative");
  }

  bool match(const VPValue *V) const {
    // A VPValue matches via its defining recipe; live-ins have none and
    // never match.
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPSingleDefRecipe *R) const {
    return match(static_cast<const VPRecipeBase *>(R));
  }

  bool match(const VPRecipeBase *R) const {
    // With zero operand patterns only the opcode is checked; this form is
    // used for recipes whose operand count is not fixed.
    if (std::tuple_size_v<Ops_t> == 0) {
      auto *VPI = dyn_cast<VPInstruction>(Val: R);
      return VPI && VPI->getOpcode() == Opcode;
    }

    // R must be one of RecipeTys (and, where applicable, carry Opcode).
    if ((!matchRecipeAndOpcode<RecipeTys>(R) && ...))
      return false;

    if (R->getNumOperands() < std::tuple_size<Ops_t>::value) {
      // Fewer operands than patterns is only legitimate for variadic
      // VPInstructions or predicated replicate recipes (extra mask operand
      // accounted for); anything else indicates a matcher/recipe mismatch.
      [[maybe_unused]] auto *RepR = dyn_cast<VPReplicateRecipe>(Val: R);
      assert(((isa<VPInstruction>(R) &&
               cast<VPInstruction>(R)->getNumOperandsForOpcode() == -1u) ||
              (RepR && std::tuple_size_v<Ops_t> ==
                           RepR->getNumOperands() - RepR->isPredicated())) &&
             "non-variadic recipe with matched opcode does not have the "
             "expected number of operands");
      return false;
    }

    // If the recipe has more operands than expected, we only support matching
    // masked VPInstructions where the number of operands of the matcher is the
    // same as the number of operands excluding mask.
    if (R->getNumOperands() > std::tuple_size<Ops_t>::value) {
      auto *VPI = dyn_cast<VPInstruction>(Val: R);
      if (!VPI || !VPI->isMasked() ||
          VPI->getNumOperandsWithoutMask() != std::tuple_size<Ops_t>::value)
        return false;
    }

    // Try operands in order first; fall back to the swapped order for
    // commutative matchers.
    auto IdxSeq = std::make_index_sequence<std::tuple_size<Ops_t>::value>();
    if (all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
          return Op.match(R->getOperand(N: Idx));
        }))
      return true;

    return Commutative &&
           all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
             return Op.match(R->getOperand(N: R->getNumOperands() - Idx - 1));
           });
  }

private:
  template <typename RecipeTy>
  static bool matchRecipeAndOpcode(const VPRecipeBase *R) {
    auto *DefR = dyn_cast<RecipeTy>(R);
    // Check for recipes that do not have opcodes.
    if constexpr (std::is_same_v<RecipeTy, VPScalarIVStepsRecipe> ||
                  std::is_same_v<RecipeTy, VPCanonicalIVPHIRecipe> ||
                  std::is_same_v<RecipeTy, VPDerivedIVRecipe> ||
                  std::is_same_v<RecipeTy, VPVectorEndPointerRecipe>)
      return DefR;
    else
      return DefR && DefR->getOpcode() == Opcode;
  }

  /// Helper to check if predicate \p P holds on all tuple elements in Ops using
  /// the provided index sequence.
  template <typename Fn, std::size_t... Is>
  bool all_of_tuple_elements(std::index_sequence<Is...>, Fn P) const {
    return (P(std::get<Is>(Ops), Is) && ...);
  }
};
362
/// Matches \p Opcode across widened, replicated, cast, and VPInstruction
/// recipe forms.
template <unsigned Opcode, typename... OpTys>
using AllRecipe_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ false,
                 VPWidenRecipe, VPReplicateRecipe, VPWidenCastRecipe,
                 VPInstruction>;

/// Commutative variant of AllRecipe_match (no cast recipes; casts are unary).
template <unsigned Opcode, typename... OpTys>
using AllRecipe_commutative_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ true,
                 VPWidenRecipe, VPReplicateRecipe, VPInstruction>;

/// Matches \p Opcode only on VPInstruction recipes.
template <unsigned Opcode, typename... OpTys>
using VPInstruction_match = Recipe_match<std::tuple<OpTys...>, Opcode,
                                         /*Commutative*/ false, VPInstruction>;

/// Commutative variant of VPInstruction_match.
template <unsigned Opcode, typename... OpTys>
using VPInstruction_commutative_match =
    Recipe_match<std::tuple<OpTys...>, Opcode,
                 /*Commutative*/ true, VPInstruction>;

/// Match a VPInstruction with opcode \p Opcode and operands \p Ops.
template <unsigned Opcode, typename... OpTys>
inline VPInstruction_match<Opcode, OpTys...>
m_VPInstruction(const OpTys &...Ops) {
  return VPInstruction_match<Opcode, OpTys...>(Ops...);
}

/// Match a binary VPInstruction with opcode \p Opcode, with the operands
/// matched in either order.
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline VPInstruction_commutative_match<Opcode, Op0_t, Op1_t>
m_c_VPInstruction(const Op0_t &Op0, const Op1_t &Op1) {
  return VPInstruction_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}
394
/// BuildVector matches only its opcode, w/o matching its operands, as the
/// number of operands is not fixed.
inline VPInstruction_match<VPInstruction::BuildVector> m_BuildVector() {
  return m_VPInstruction<VPInstruction::BuildVector>();
}

/// Match a freeze of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<Instruction::Freeze, Op0_t>
m_Freeze(const Op0_t &Op0) {
  return m_VPInstruction<Instruction::Freeze>(Op0);
}

/// Match a BranchOnCond without inspecting its condition.
inline VPInstruction_match<VPInstruction::BranchOnCond> m_BranchOnCond() {
  return m_VPInstruction<VPInstruction::BranchOnCond>();
}

/// Match a BranchOnCond whose condition matches \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::BranchOnCond, Op0_t>
m_BranchOnCond(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::BranchOnCond>(Op0);
}

/// Match a BranchOnTwoConds without inspecting its conditions.
inline VPInstruction_match<VPInstruction::BranchOnTwoConds>
m_BranchOnTwoConds() {
  return m_VPInstruction<VPInstruction::BranchOnTwoConds>();
}

/// Match a BranchOnTwoConds whose conditions match \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnTwoConds, Op0_t, Op1_t>
m_BranchOnTwoConds(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnTwoConds>(Op0, Op1);
}

/// Match a broadcast of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Broadcast, Op0_t>
m_Broadcast(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Broadcast>(Op0);
}
433
/// Match an ExplicitVectorLength computation on \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExplicitVectorLength, Op0_t>
m_EVL(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExplicitVectorLength>(Op0);
}

/// Match an extract of the last lane of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastLane, Op0_t>
m_ExtractLastLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractLastLane>(Op0);
}

/// Match an extractelement with vector \p Op0 and index \p Op1.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<Instruction::ExtractElement, Op0_t, Op1_t>
m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<Instruction::ExtractElement>(Op0, Op1);
}

/// Match an ExtractLane with lane index \p Op0 applied to \p Op1.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::ExtractLane, Op0_t, Op1_t>
m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::ExtractLane>(Op0, Op1);
}

/// Match an extract of the last part of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>
m_ExtractLastPart(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractLastPart>(Op0);
}

/// Match ExtractLastLane applied to ExtractLastPart of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<
    VPInstruction::ExtractLastLane,
    VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>>
m_ExtractLastLaneOfLastPart(const Op0_t &Op0) {
  return m_ExtractLastLane(m_ExtractLastPart(Op0));
}

/// Match an extract of the penultimate element of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractPenultimateElement, Op0_t>
m_ExtractPenultimateElement(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractPenultimateElement>(Op0);
}

/// Match an active-lane-mask with operands \p Op0, \p Op1 and \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t>
m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_VPInstruction<VPInstruction::ActiveLaneMask>(Op0, Op1, Op2);
}

/// Match a BranchOnCount without inspecting its operands.
inline VPInstruction_match<VPInstruction::BranchOnCount> m_BranchOnCount() {
  return m_VPInstruction<VPInstruction::BranchOnCount>();
}

/// Match a BranchOnCount whose operands match \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnCount, Op0_t, Op1_t>
m_BranchOnCount(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnCount>(Op0, Op1);
}

/// Match an AnyOf without inspecting its operands.
inline VPInstruction_match<VPInstruction::AnyOf> m_AnyOf() {
  return m_VPInstruction<VPInstruction::AnyOf>();
}

/// Match an AnyOf whose single operand matches \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::AnyOf, Op0_t>
m_AnyOf(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::AnyOf>(Op0);
}

/// Match a FirstActiveLane of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::FirstActiveLane, Op0_t>
m_FirstActiveLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::FirstActiveLane>(Op0);
}

/// Match a LastActiveLane of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::LastActiveLane, Op0_t>
m_LastActiveLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::LastActiveLane>(Op0);
}

/// Match an ExtractLastActive with operands \p Op0, \p Op1 and \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ExtractLastActive, Op0_t, Op1_t,
                           Op2_t>
m_ExtractLastActive(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_VPInstruction<VPInstruction::ExtractLastActive>(Op0, Op1, Op2);
}

/// Match a ComputeReductionResult of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ComputeReductionResult, Op0_t>
m_ComputeReductionResult(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ComputeReductionResult>(Op0);
}
528
/// Match FindIV result pattern:
///   select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),
///          ComputeReductionResult(ReducedIV), Start.
/// i.e. the reduction result is used when it differs from the sentinel,
/// otherwise the start value is used.
template <typename Op0_t, typename Op1_t>
inline bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start) {
  return match(VPI, m_Select(m_SpecificICmp(ICmpInst::ICMP_NE,
                                            m_ComputeReductionResult(ReducedIV),
                                            m_VPValue()),
                             m_ComputeReductionResult(ReducedIV), Start));
}

/// Match a ComputeAnyOfResult with operands \p Op0, \p Op1 and \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ComputeAnyOfResult, Op0_t, Op1_t,
                           Op2_t>
m_ComputeAnyOfResult(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_VPInstruction<VPInstruction::ComputeAnyOfResult>(Op0, Op1, Op2);
}

/// Match a Reverse of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Reverse, Op0_t>
m_Reverse(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Reverse>(Op0);
}

/// Match a StepVector.
inline VPInstruction_match<VPInstruction::StepVector> m_StepVector() {
  return m_VPInstruction<VPInstruction::StepVector>();
}

/// Match an ExitingIVValue of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExitingIVValue, Op0_t>
m_ExitingIVValue(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExitingIVValue>(Op0);
}
562
/// Match a unary recipe with opcode \p Opcode and operand \p Op0, across all
/// recipe forms.
template <unsigned Opcode, typename Op0_t>
inline AllRecipe_match<Opcode, Op0_t> m_Unary(const Op0_t &Op0) {
  return AllRecipe_match<Opcode, Op0_t>(Op0);
}

/// Match a trunc of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::Trunc, Op0_t> m_Trunc(const Op0_t &Op0) {
  return m_Unary<Instruction::Trunc, Op0_t>(Op0);
}

/// Match either a trunc of \p Op0 or \p Op0 itself.
template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::Trunc, Op0_t>, Op0_t>
m_TruncOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_Trunc(Op0), Op0);
}

/// Match a zext of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::ZExt, Op0_t> m_ZExt(const Op0_t &Op0) {
  return m_Unary<Instruction::ZExt, Op0_t>(Op0);
}

/// Match a sext of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::SExt, Op0_t> m_SExt(const Op0_t &Op0) {
  return m_Unary<Instruction::SExt, Op0_t>(Op0);
}

/// Match an fpext of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::FPExt, Op0_t> m_FPExt(const Op0_t &Op0) {
  return m_Unary<Instruction::FPExt, Op0_t>(Op0);
}

/// Match either a zext or sext of \p Op0.
template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
                        AllRecipe_match<Instruction::SExt, Op0_t>>
m_ZExtOrSExt(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}

/// A variant of m_Isa that also matches SubPattern.
template <typename... To, typename SubPattern>
inline auto m_Isa(const SubPattern &P) {
  return m_CombineAnd(m_Isa<To...>(), P);
}

/// Match any widening extend (zext, sext or fpext) of \p Op0 implemented by a
/// VPWidenCastRecipe.
template <typename Op0_t> inline auto m_WidenAnyExtend(const Op0_t &Op0) {
  return m_Isa<VPWidenCastRecipe>(m_CombineOr(m_ZExtOrSExt(Op0), m_FPExt(Op0)));
}

/// Match either a zext of \p Op0 or \p Op0 itself.
template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>, Op0_t>
m_ZExtOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), Op0);
}

/// Match a zext of \p Op0, a trunc of \p Op0, or \p Op0 itself.
template <typename Op0_t>
inline match_combine_or<
    match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
                     AllRecipe_match<Instruction::Trunc, Op0_t>>,
    Op0_t>
m_ZExtOrTruncOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_CombineOr(m_ZExt(Op0), m_Trunc(Op0)), Op0);
}
625
/// Match a binary recipe with opcode \p Opcode and operands \p Op0, \p Op1.
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_match<Opcode, Op0_t, Op1_t> m_Binary(const Op0_t &Op0,
                                                      const Op1_t &Op1) {
  return AllRecipe_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

/// Commutative variant of m_Binary.
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>
m_c_Binary(const Op0_t &Op0, const Op1_t &Op1) {
  return AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an integer add of \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Add, Op0_t, Op1_t> m_Add(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an integer add of \p Op0 and \p Op1 in either operand order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Add, Op0_t, Op1_t>
m_c_Add(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an integer sub of \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Sub, Op0_t, Op1_t> m_Sub(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Sub, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an integer mul of \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Mul, Op0_t, Op1_t> m_Mul(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an integer mul of \p Op0 and \p Op1 in either operand order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Mul, Op0_t, Op1_t>
m_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a floating-point mul of \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::FMul, Op0_t, Op1_t>
m_FMul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::FMul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a floating-point add of \p Op0 and \p Op1.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::FAdd, Op0_t, Op1_t>
m_FAdd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::FAdd, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a floating-point add of \p Op0 and \p Op1 in either operand order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::FAdd, Op0_t, Op1_t>
m_c_FAdd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::FAdd, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an unsigned integer division of \p Op0 by \p Op1.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::UDiv, Op0_t, Op1_t>
m_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::UDiv, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary AND operation.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>
m_c_BinaryAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::And, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary OR operation. Note that while conceptually the operands can
/// be matched commutatively, \p Commutative defaults to false in line with the
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
/// version of the matcher.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Or, Op0_t, Op1_t>
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary OR of \p Op0 and \p Op1 in either operand order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Or, Op0_t, Op1_t>
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
}
714
/// Cmp_match is a variant of BinaryRecipe_match that also binds the comparison
/// predicate. Opcodes must either be Instruction::ICmp or Instruction::FCmp, or
/// both.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct Cmp_match {
  static_assert((sizeof...(Opcodes) == 1 || sizeof...(Opcodes) == 2) &&
                "Expected one or two opcodes");
  static_assert(
      ((Opcodes == Instruction::ICmp || Opcodes == Instruction::FCmp) && ...) &&
      "Expected a compare instruction opcode");

  // Optional out-parameter; left null when the caller does not need the
  // predicate bound.
  CmpPredicate *Predicate = nullptr;
  Op0_t Op0;
  Op1_t Op1;

  Cmp_match(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1)
      : Predicate(&Pred), Op0(Op0), Op1(Op1) {}
  Cmp_match(const Op0_t &Op0, const Op1_t &Op1) : Op0(Op0), Op1(Op1) {}

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPRecipeBase *V) const {
    // Try each compare opcode in turn; on success bind the recipe's
    // predicate if requested.
    if ((m_Binary<Opcodes>(Op0, Op1).match(V) || ...)) {
      if (Predicate)
        *Predicate = cast<VPRecipeWithIRFlags>(Val: V)->getPredicate();
      return true;
    }
    return false;
  }
};
748
/// SpecificCmp_match is a variant of Cmp_match that matches the comparison
/// predicate, instead of binding it.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct SpecificCmp_match {
  const CmpPredicate Predicate;
  Op0_t Op0;
  Op1_t Op1;

  SpecificCmp_match(CmpPredicate Pred, const Op0_t &LHS, const Op1_t &RHS)
      : Predicate(Pred), Op0(LHS), Op1(RHS) {}

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPRecipeBase *V) const {
    // Delegate to Cmp_match to bind the recipe's predicate, then require it
    // to be compatible with the expected one.
    CmpPredicate CurrentPred;
    return Cmp_match<Op0_t, Op1_t, Opcodes...>(CurrentPred, Op0, Op1)
               .match(V) &&
           CmpPredicate::getMatching(A: CurrentPred, B: Predicate);
  }
};
772
/// Match an integer compare of \p Op0 and \p Op1, ignoring the predicate.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp> m_ICmp(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Op0, Op1);
}

/// Match an integer compare of \p Op0 and \p Op1, binding the predicate to
/// \p Pred.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_ICmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Pred, Op0, Op1);
}

/// Match an integer compare of \p Op0 and \p Op1 with predicate \p MatchPred.
template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_SpecificICmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>(MatchPred, Op0,
                                                            Op1);
}

/// Match an integer or floating-point compare, ignoring the predicate.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(Op0,
                                                                       Op1);
}

/// Match an integer or floating-point compare, binding the predicate to
/// \p Pred.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      Pred, Op0, Op1);
}

/// Match an integer or floating-point compare with predicate \p MatchPred.
template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_SpecificCmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      MatchPred, Op0, Op1);
}
812
/// Match any address computation with base \p Op0 and offset \p Op1: a GEP
/// (replicated or widened) or a PtrAdd/WidePtrAdd VPInstruction.
template <typename Op0_t, typename Op1_t>
inline auto m_GetElementPtr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                   /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>(
          Op0, Op1),
      m_CombineOr(
          VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>(Op0, Op1),
          VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>(Op0,
                                                                       Op1)));
}

/// Match a select with condition \p Op0, true value \p Op1 and false value
/// \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>
m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>(
      {Op0, Op1, Op2});
}

/// Match a logical negation of \p Op0: either a Not VPInstruction or an xor
/// with an all-ones constant.
template <typename Op0_t> inline auto m_Not(const Op0_t &Op0) {
  return m_CombineOr(m_VPInstruction<VPInstruction::Not>(Op0),
                     m_c_Binary<Instruction::Xor>(m_AllOnes(), Op0));
}

/// Match a select on condition \p Op0, or the equivalent select on the
/// negated condition with the true/false values swapped.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline auto m_c_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_CombineOr(m_Select(Op0, Op1, Op2), m_Select(m_Not(Op0), Op2, Op1));
}

/// Match a logical AND of \p Op0 and \p Op1: either a LogicalAnd
/// VPInstruction or the select form select(Op0, Op1, false).
template <typename Op0_t, typename Op1_t>
inline auto m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
      m_Select(Op0, Op1, m_False()));
}

/// Commutative variant of m_LogicalAnd.
template <typename Op0_t, typename Op1_t>
inline auto m_c_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_c_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
      m_c_Select(Op0, Op1, m_False()));
}

/// Match a logical OR of \p Op0 and \p Op1: either a LogicalOr VPInstruction
/// or the select form select(Op0, true, Op1).
/// NOTE(review): this non-commutative matcher uses m_c_VPInstruction while
/// m_LogicalAnd above uses the non-commutative m_VPInstruction — confirm the
/// asymmetry is intended.
template <typename Op0_t, typename Op1_t>
inline auto m_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_c_VPInstruction<VPInstruction::LogicalOr, Op0_t, Op1_t>(Op0, Op1),
      m_Select(Op0, m_True(), Op1));
}

/// Commutative variant of the select form of logical OR.
template <typename Op0_t, typename Op1_t>
inline auto m_c_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Select(Op0, m_True(), Op1);
}

/// Match the canonical induction variable phi, without binding it.
inline auto m_CanonicalIV() { return m_Isa<VPCanonicalIVPHIRecipe>(); }

/// Match a VPScalarIVStepsRecipe with operands \p Op0, \p Op1 and \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline auto m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1,
                            const Op2_t &Op2) {
  return Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false,
                      VPScalarIVStepsRecipe>({Op0, Op1, Op2});
}

/// Match a VPDerivedIVRecipe with operands \p Op0, \p Op1 and \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline auto m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false,
                      VPDerivedIVRecipe>({Op0, Op1, Op2});
}
882
883template <typename Addr_t, typename Mask_t> struct Load_match {
884 Addr_t Addr;
885 Mask_t Mask;
886
887 Load_match(Addr_t Addr, Mask_t Mask) : Addr(Addr), Mask(Mask) {}
888
889 template <typename OpTy> bool match(const OpTy *V) const {
890 auto *Load = dyn_cast<VPWidenLoadRecipe>(V);
891 if (!Load || !Addr.match(Load->getAddr()) || !Load->isMasked() ||
892 !Mask.match(Load->getMask()))
893 return false;
894 return true;
895 }
896};
897
898/// Match a (possibly reversed) masked load.
899template <typename Addr_t, typename Mask_t>
900inline Load_match<Addr_t, Mask_t> m_MaskedLoad(const Addr_t &Addr,
901 const Mask_t &Mask) {
902 return Load_match<Addr_t, Mask_t>(Addr, Mask);
903}
904
905template <typename Addr_t, typename Val_t, typename Mask_t> struct Store_match {
906 Addr_t Addr;
907 Val_t Val;
908 Mask_t Mask;
909
910 Store_match(Addr_t Addr, Val_t Val, Mask_t Mask)
911 : Addr(Addr), Val(Val), Mask(Mask) {}
912
913 template <typename OpTy> bool match(const OpTy *V) const {
914 auto *Store = dyn_cast<VPWidenStoreRecipe>(V);
915 if (!Store || !Addr.match(Store->getAddr()) ||
916 !Val.match(Store->getStoredValue()) || !Store->isMasked() ||
917 !Mask.match(Store->getMask()))
918 return false;
919 return true;
920 }
921};
922
923/// Match a (possibly reversed) masked store.
924template <typename Addr_t, typename Val_t, typename Mask_t>
925inline Store_match<Addr_t, Val_t, Mask_t>
926m_MaskedStore(const Addr_t &Addr, const Val_t &Val, const Mask_t &Mask) {
927 return Store_match<Addr_t, Val_t, Mask_t>(Addr, Val, Mask);
928}
929
/// Matcher type for a two-operand VPVectorEndPointerRecipe.
template <typename Op0_t, typename Op1_t>
using VectorEndPointerRecipe_match =
    Recipe_match<std::tuple<Op0_t, Op1_t>, 0,
                 /*Commutative*/ false, VPVectorEndPointerRecipe>;
934
935template <typename Op0_t, typename Op1_t>
936VectorEndPointerRecipe_match<Op0_t, Op1_t> m_VecEndPtr(const Op0_t &Op0,
937 const Op1_t &Op1) {
938 return VectorEndPointerRecipe_match<Op0_t, Op1_t>(Op0, Op1);
939}
940
941/// Match a call argument at a given argument index.
942template <typename Opnd_t> struct Argument_match {
943 /// Call argument index to match.
944 unsigned OpI;
945 Opnd_t Val;
946
947 Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
948
949 template <typename OpTy> bool match(OpTy *V) const {
950 if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
951 return Val.match(R->getOperand(OpI));
952 if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
953 return Val.match(R->getOperand(OpI));
954 if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
955 if (R->getOpcode() == Instruction::Call)
956 return Val.match(R->getOperand(OpI));
957 if (const auto *R = dyn_cast<VPInstruction>(V))
958 if (R->getOpcode() == Instruction::Call)
959 return Val.match(R->getOperand(OpI));
960 return false;
961 }
962};
963
964/// Match a call argument.
965template <unsigned OpI, typename Opnd_t>
966inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
967 return Argument_match<Opnd_t>(OpI, Op);
968}
969
970/// Intrinsic matchers.
971struct IntrinsicID_match {
972 unsigned ID;
973
974 IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
975
976 template <typename OpTy> bool match(OpTy *V) const {
977 if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
978 return R->getVectorIntrinsicID() == ID;
979 if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
980 return R->getCalledScalarFunction()->getIntrinsicID() == ID;
981
982 auto MatchCalleeIntrinsic = [&](VPValue *CalleeOp) {
983 if (!isa<VPIRValue>(Val: CalleeOp))
984 return false;
985 auto *F = cast<Function>(Val: CalleeOp->getLiveInIRValue());
986 return F->getIntrinsicID() == ID;
987 };
988 if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
989 if (R->getOpcode() == Instruction::Call) {
990 // The mask is always the last operand if predicated.
991 return MatchCalleeIntrinsic(
992 R->getOperand(R->getNumOperands() - 1 - R->isPredicated()));
993 }
994 if (const auto *R = dyn_cast<VPInstruction>(V))
995 if (R->getOpcode() == Instruction::Call)
996 return MatchCalleeIntrinsic(R->getOperand(R->getNumOperands() - 1));
997 return false;
998 }
999};
1000
/// Intrinsic matches are combinations of ID matchers and argument matchers.
/// Higher-arity matchers are defined recursively by and-ing them with
/// lower-arity matchers. Here are some convenient typedefs for up to several
/// arguments; more can be added as needed.
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void>
struct m_Intrinsic_Ty;
// 1-argument form: the ID matcher AND-ed with the first argument matcher.
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
// 2-argument form: the 1-argument form AND-ed with the second argument.
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
// 3-argument form: the 2-argument form AND-ed with the third argument.
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
// 4-argument form (definition of the primary template declared above):
// the 3-argument form AND-ed with the fourth argument.
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};
1025
1026/// Match intrinsic calls like this:
1027/// m_Intrinsic<Intrinsic::fabs>(m_VPValue(X), ...)
1028template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
1029 return IntrinsicID_match(IntrID);
1030}
1031
1032/// Match intrinsic calls with a runtime intrinsic ID.
1033inline IntrinsicID_match m_Intrinsic(Intrinsic::ID IntrID) {
1034 return IntrinsicID_match(IntrID);
1035}
1036
1037template <Intrinsic::ID IntrID, typename T0>
1038inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
1039 return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
1040}
1041
1042template <Intrinsic::ID IntrID, typename T0, typename T1>
1043inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
1044 const T1 &Op1) {
1045 return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
1046}
1047
1048template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
1049inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
1050m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
1051 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
1052}
1053
1054template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
1055 typename T3>
1056inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
1057m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
1058 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
1059}
1060
/// Match any live-in value: either a VPIRValue or a VPSymbolicValue.
inline auto m_LiveIn() { return m_Isa<VPIRValue, VPSymbolicValue>(); }
1062
1063/// Match a GEP recipe (VPWidenGEPRecipe, VPInstruction, or VPReplicateRecipe)
1064/// and bind the source element type and operands.
1065struct GetElementPtr_match {
1066 Type *&SourceElementType;
1067 ArrayRef<VPValue *> &Operands;
1068
1069 GetElementPtr_match(Type *&SourceElementType, ArrayRef<VPValue *> &Operands)
1070 : SourceElementType(SourceElementType), Operands(Operands) {}
1071
1072 template <typename ITy> bool match(ITy *V) const {
1073 return matchRecipeAndBind<VPWidenGEPRecipe>(V) ||
1074 matchRecipeAndBind<VPInstruction>(V) ||
1075 matchRecipeAndBind<VPReplicateRecipe>(V);
1076 }
1077
1078private:
1079 template <typename RecipeTy> bool matchRecipeAndBind(const VPValue *V) const {
1080 auto *DefR = dyn_cast<RecipeTy>(V);
1081 if (!DefR)
1082 return false;
1083
1084 if constexpr (std::is_same_v<RecipeTy, VPWidenGEPRecipe>) {
1085 SourceElementType = DefR->getSourceElementType();
1086 } else if (DefR->getOpcode() == Instruction::GetElementPtr) {
1087 SourceElementType = cast<GetElementPtrInst>(DefR->getUnderlyingInstr())
1088 ->getSourceElementType();
1089 } else if constexpr (std::is_same_v<RecipeTy, VPInstruction>) {
1090 if (DefR->getOpcode() == VPInstruction::PtrAdd) {
1091 // PtrAdd is a byte-offset GEP with i8 element type.
1092 LLVMContext &Ctx = DefR->getParent()->getPlan()->getContext();
1093 SourceElementType = Type::getInt8Ty(C&: Ctx);
1094 } else {
1095 return false;
1096 }
1097 } else {
1098 return false;
1099 }
1100
1101 Operands = ArrayRef<VPValue *>(DefR->op_begin(), DefR->op_end());
1102 return true;
1103 }
1104};
1105
1106/// Match a GEP recipe with any number of operands and bind source element type
1107/// and operands.
1108inline GetElementPtr_match m_GetElementPtr(Type *&SourceElementType,
1109 ArrayRef<VPValue *> &Operands) {
1110 return GetElementPtr_match(SourceElementType, Operands);
1111}
1112
/// Wraps a sub-pattern and additionally requires the matched value to have
/// exactly one use.
template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (!V->hasOneUse())
      return false;
    return SubPattern.match(V);
  }
};
1122
1123template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
1124 return SubPattern;
1125}
1126
1127inline bind_ty<VPReductionPHIRecipe> m_ReductionPhi(VPReductionPHIRecipe *&V) {
1128 return V;
1129}
1130
1131template <typename Op0_t, typename Op1_t>
1132inline auto m_VPPhi(const Op0_t &Op0, const Op1_t &Op1) {
1133 return Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::PHI,
1134 /*Commutative*/ false, VPInstruction>({Op0, Op1});
1135}
1136
1137} // namespace llvm::VPlanPatternMatch
1138
1139#endif
1140