1//===- VPlanPatternMatch.h - Match on VPValues and recipes ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a simple and efficient mechanism for performing general
10// tree-based pattern matches on the VPlan values and recipes, based on
11// LLVM's IR pattern matchers.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
16#define LLVM_TRANSFORM_VECTORIZE_VPLANPATTERNMATCH_H
17
18#include "VPlan.h"
19
20namespace llvm::VPlanPatternMatch {
21
/// Match \p V against pattern \p P; returns true on success. Matchers may
/// bind captured values as a side effect of matching.
template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  return P.match(V);
}

/// A match functor that can be used as a UnaryPredicate in functional
/// algorithms like all_of.
template <typename Val, typename Pattern> auto match_fn(const Pattern &P) {
  return bind_back<match<Val, Pattern>>(P);
}

/// Match a VPUser against \p P by matching it as a recipe; fails if the user
/// is not a VPRecipeBase.
template <typename Pattern> bool match(VPUser *U, const Pattern &P) {
  auto *R = dyn_cast<VPRecipeBase>(U);
  return R && match(R, P);
}

/// Match functor for VPUser.
template <typename Pattern> auto match_fn(const Pattern &P) {
  return bind_back<match<Pattern>>(P);
}

/// Match a VPSingleDefRecipe against \p P via its recipe base class.
template <typename Pattern> bool match(VPSingleDefRecipe *R, const Pattern &P) {
  return P.match(static_cast<const VPRecipeBase *>(R));
}
45
/// Matches any value that is-a one of \p Classes, without capturing it.
template <typename... Classes> struct class_match {
  template <typename ITy> bool match(ITy *V) const {
    return isa<Classes...>(V);
  }
};

/// Match an arbitrary VPValue and ignore it.
inline class_match<VPValue> m_VPValue() { return class_match<VPValue>(); }
54
/// Matches any value that is-a \p Class and, on success, binds it to the
/// captured pointer reference.
template <typename Class> struct bind_ty {
  Class *&VR; // Reference to the caller's pointer, written on a match.

  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) const {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};
68
/// Match a specified VPValue (by pointer identity).
struct specificval_ty {
  const VPValue *Val; // Captured eagerly at matcher-construction time.

  specificval_ty(const VPValue *V) : Val(V) {}

  bool match(VPValue *VPV) const { return VPV == Val; }
};

/// Match only the given VPValue \p VPV.
inline specificval_ty m_Specific(const VPValue *VPV) { return VPV; }
79
/// Stores a reference to the VPValue *, not the VPValue * itself,
/// thus can be used in commutative matchers.
struct deferredval_ty {
  VPValue *const &Val; // Read at match time, not at construction time.

  deferredval_ty(VPValue *const &V) : Val(V) {}

  bool match(VPValue *const V) const { return V == Val; }
};

/// Like m_Specific(), but works if the specific value to match is determined
/// as part of the same match() expression. For example:
/// m_Mul(m_VPValue(X), m_Specific(X)) is incorrect, because m_Specific() will
/// bind X before the pattern match starts.
/// m_Mul(m_VPValue(X), m_Deferred(X)) is correct, and will check against
/// whichever value m_VPValue(X) populated.
inline deferredval_ty m_Deferred(VPValue *const &V) { return V; }
97
/// Match an integer constant if Pred::isValue returns true for the APInt. \p
/// BitWidth optionally specifies the bitwidth the matched constant must have.
/// If it is 0, the matched constant can have any bitwidth.
template <typename Pred, unsigned BitWidth = 0> struct int_pred_ty {
  Pred P;

  int_pred_ty(Pred P) : P(std::move(P)) {}
  int_pred_ty() : P() {}

  bool match(VPValue *VPV) const {
    // Look through a Broadcast, so a splatted scalar constant also matches.
    auto *VPI = dyn_cast<VPInstruction>(VPV);
    if (VPI && VPI->getOpcode() == VPInstruction::Broadcast)
      VPV = VPI->getOperand(0);
    auto *CI = dyn_cast<VPConstantInt>(VPV);
    if (!CI)
      return false;

    if (BitWidth != 0 && CI->getBitWidth() != BitWidth)
      return false;
    return P.isValue(CI->getAPInt());
  }
};
120
/// Match a specified signed or unsigned integer value.
struct is_specific_int {
  APInt Val;
  bool IsSigned; // Selects sign- vs zero-extension for the width-agnostic
                 // comparison below.

  is_specific_int(APInt Val, bool IsSigned = false)
      : Val(std::move(Val)), IsSigned(IsSigned) {}

  // isSameValue compares across differing bitwidths, so the matched constant
  // need not be 64 bits wide.
  bool isValue(const APInt &C) const {
    return APInt::isSameValue(Val, C, IsSigned);
  }
};

template <unsigned Bitwidth = 0>
using specific_intval = int_pred_ty<is_specific_int, Bitwidth>;

/// Match an integer constant equal to the unsigned value \p V.
inline specific_intval<0> m_SpecificInt(uint64_t V) {
  return specific_intval<0>(is_specific_int(APInt(64, V)));
}

/// Match an integer constant equal to the signed value \p V.
inline specific_intval<0> m_SpecificSInt(int64_t V) {
  return specific_intval<0>(
      is_specific_int(APInt(64, V, /*isSigned=*/true), /*IsSigned=*/true));
}

/// Match the i1 constant false.
inline specific_intval<1> m_False() {
  return specific_intval<1>(is_specific_int(APInt(64, 0)));
}

/// Match the i1 constant true.
inline specific_intval<1> m_True() {
  return specific_intval<1>(is_specific_int(APInt(64, 1)));
}
153
/// Predicate: constant has all bits set.
struct is_all_ones {
  bool isValue(const APInt &C) const { return C.isAllOnes(); }
};

/// Match an integer or vector with all bits set.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_all_ones> m_AllOnes() {
  return int_pred_ty<is_all_ones>();
}

/// Predicate: constant is zero.
struct is_zero_int {
  bool isValue(const APInt &C) const { return C.isZero(); }
};

/// Predicate: constant is one.
struct is_one {
  bool isValue(const APInt &C) const { return C.isOne(); }
};

/// Match an integer 0 or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_zero_int> m_ZeroInt() {
  return int_pred_ty<is_zero_int>();
}

/// Match an integer 1 or a vector with all elements equal to 1.
/// For vectors, this includes constants with undefined elements.
inline int_pred_ty<is_one> m_One() { return int_pred_ty<is_one>(); }
181
/// Matches any VPConstantInt and binds a pointer to its APInt.
struct bind_apint {
  const APInt *&Res;

  bind_apint(const APInt *&Res) : Res(Res) {}

  bool match(VPValue *VPV) const {
    auto *CI = dyn_cast<VPConstantInt>(VPV);
    if (!CI)
      return false;
    Res = &CI->getAPInt();
    return true;
  }
};

/// Match an integer constant, capturing a pointer to its APInt value.
inline bind_apint m_APInt(const APInt *&C) { return C; }
197
/// Matches an integer constant whose value fits in a uint64_t and binds its
/// zero-extended value.
struct bind_const_int {
  uint64_t &Res;

  bind_const_int(uint64_t &Res) : Res(Res) {}

  bool match(VPValue *VPV) const {
    const APInt *APConst;
    if (!bind_apint(APConst).match(VPV))
      return false;
    // tryZExtValue yields nullopt for constants wider than 64 bits.
    if (auto C = APConst->tryZExtValue()) {
      Res = *C;
      return true;
    }
    return false;
  }
};
214
/// Matches a VPIRValue wrapping an IR PoisonValue.
struct match_poison {
  bool match(VPValue *V) const {
    return isa<VPIRValue>(V) &&
           isa<PoisonValue>(cast<VPIRValue>(V)->getValue());
  }
};

/// Match a VPIRValue that's poison.
inline match_poison m_Poison() { return match_poison(); }

/// Match a plain integer constant no wider than 64-bits, capturing it if we
/// match.
inline bind_const_int m_ConstantInt(uint64_t &C) { return C; }
228
229/// Matching combinators
230template <typename LTy, typename RTy> struct match_combine_or {
231 LTy L;
232 RTy R;
233
234 match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
235
236 template <typename ITy> bool match(ITy *V) const {
237 return L.match(V) || R.match(V);
238 }
239};
240
241template <typename LTy, typename RTy> struct match_combine_and {
242 LTy L;
243 RTy R;
244
245 match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
246
247 template <typename ITy> bool match(ITy *V) const {
248 return L.match(V) && R.match(V);
249 }
250};
251
/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}
263
/// Match any VPValue, capturing it if we match.
inline bind_ty<VPValue> m_VPValue(VPValue *&V) { return V; }

/// Match a VPIRValue, capturing it if we match.
inline bind_ty<VPIRValue> m_VPIRValue(VPIRValue *&V) { return V; }

/// Match a VPInstruction, capturing it if we match.
inline bind_ty<VPInstruction> m_VPInstruction(VPInstruction *&V) { return V; }
272
/// Matcher for a recipe with opcode \p Opcode and fixed operand sub-patterns.
/// \tparam Ops_t       std::tuple of operand sub-pattern types.
/// \tparam Opcode      Opcode the matched recipe must have.
/// \tparam Commutative If true, the two operand patterns are also tried in
///                     reverse order.
/// \tparam RecipeTys   Recipe classes the match is attempted against.
template <typename Ops_t, unsigned Opcode, bool Commutative,
          typename... RecipeTys>
struct Recipe_match {
  Ops_t Ops;

  template <typename... OpTy> Recipe_match(OpTy... Ops) : Ops(Ops...) {
    static_assert(std::tuple_size<Ops_t>::value == sizeof...(Ops) &&
                  "number of operands in constructor doesn't match Ops_t");
    static_assert((!Commutative || std::tuple_size<Ops_t>::value == 2) &&
                  "only binary ops can be commutative");
  }

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPSingleDefRecipe *R) const {
    return match(static_cast<const VPRecipeBase *>(R));
  }

  bool match(const VPRecipeBase *R) const {
    // With no operand patterns, only the opcode is checked, regardless of the
    // recipe's operand count.
    if (std::tuple_size_v<Ops_t> == 0) {
      auto *VPI = dyn_cast<VPInstruction>(R);
      return VPI && VPI->getOpcode() == Opcode;
    }

    // Fold over RecipeTys: fail unless R is one of them with the right opcode.
    if ((!matchRecipeAndOpcode<RecipeTys>(R) && ...))
      return false;

    if (R->getNumOperands() < std::tuple_size<Ops_t>::value) {
      // Fewer operands than patterns is only legitimate for variadic
      // VPInstructions and predicated replicate recipes.
      [[maybe_unused]] auto *RepR = dyn_cast<VPReplicateRecipe>(R);
      assert(((isa<VPInstruction>(R) &&
               cast<VPInstruction>(R)->getNumOperandsForOpcode() == -1u) ||
              (RepR && std::tuple_size_v<Ops_t> ==
                           RepR->getNumOperands() - RepR->isPredicated())) &&
             "non-variadic recipe with matched opcode does not have the "
             "expected number of operands");
      return false;
    }

    // If the recipe has more operands than expected, we only support matching
    // masked VPInstructions where the number of operands of the matcher is the
    // same as the number of operands excluding mask.
    if (R->getNumOperands() > std::tuple_size<Ops_t>::value) {
      auto *VPI = dyn_cast<VPInstruction>(R);
      if (!VPI || !VPI->isMasked() ||
          VPI->getNumOperandsWithoutMask() != std::tuple_size<Ops_t>::value)
        return false;
    }

    // Match operands in order; on failure, retry in reverse order for
    // commutative matchers.
    auto IdxSeq = std::make_index_sequence<std::tuple_size<Ops_t>::value>();
    if (all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
          return Op.match(R->getOperand(Idx));
        }))
      return true;

    return Commutative &&
           all_of_tuple_elements(IdxSeq, [R](auto Op, unsigned Idx) {
             return Op.match(R->getOperand(R->getNumOperands() - Idx - 1));
           });
  }

private:
  template <typename RecipeTy>
  static bool matchRecipeAndOpcode(const VPRecipeBase *R) {
    auto *DefR = dyn_cast<RecipeTy>(R);
    // Check for recipes that do not have opcodes.
    if constexpr (std::is_same_v<RecipeTy, VPScalarIVStepsRecipe> ||
                  std::is_same_v<RecipeTy, VPCanonicalIVPHIRecipe> ||
                  std::is_same_v<RecipeTy, VPDerivedIVRecipe> ||
                  std::is_same_v<RecipeTy, VPVectorEndPointerRecipe>)
      return DefR;
    else
      return DefR && DefR->getOpcode() == Opcode;
  }

  /// Helper to check if predicate \p P holds on all tuple elements in Ops using
  /// the provided index sequence.
  template <typename Fn, std::size_t... Is>
  bool all_of_tuple_elements(std::index_sequence<Is...>, Fn P) const {
    return (P(std::get<Is>(Ops), Is) && ...);
  }
};
357
/// Matches \p Opcode across widen, replicate, cast and VPInstruction recipes.
template <unsigned Opcode, typename... OpTys>
using AllRecipe_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ false,
                 VPWidenRecipe, VPReplicateRecipe, VPWidenCastRecipe,
                 VPInstruction>;

/// Commutative variant of AllRecipe_match (no cast recipes, which are unary).
template <unsigned Opcode, typename... OpTys>
using AllRecipe_commutative_match =
    Recipe_match<std::tuple<OpTys...>, Opcode, /*Commutative*/ true,
                 VPWidenRecipe, VPReplicateRecipe, VPInstruction>;

/// Matches only VPInstructions with the given opcode.
template <unsigned Opcode, typename... OpTys>
using VPInstruction_match = Recipe_match<std::tuple<OpTys...>, Opcode,
                                         /*Commutative*/ false, VPInstruction>;

/// Commutative variant of VPInstruction_match.
template <unsigned Opcode, typename... OpTys>
using VPInstruction_commutative_match =
    Recipe_match<std::tuple<OpTys...>, Opcode,
                 /*Commutative*/ true, VPInstruction>;

/// Match a VPInstruction with opcode \p Opcode and the given operand patterns.
template <unsigned Opcode, typename... OpTys>
inline VPInstruction_match<Opcode, OpTys...>
m_VPInstruction(const OpTys &...Ops) {
  return VPInstruction_match<Opcode, OpTys...>(Ops...);
}

/// Commutative match of a binary VPInstruction with opcode \p Opcode.
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline VPInstruction_commutative_match<Opcode, Op0_t, Op1_t>
m_c_VPInstruction(const Op0_t &Op0, const Op1_t &Op1) {
  return VPInstruction_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}
389
/// BuildVector matches only its opcode, w/o matching its operands as the
/// number of operands is not fixed.
inline VPInstruction_match<VPInstruction::BuildVector> m_BuildVector() {
  return m_VPInstruction<VPInstruction::BuildVector>();
}

/// Match a Freeze instruction.
template <typename Op0_t>
inline VPInstruction_match<Instruction::Freeze, Op0_t>
m_Freeze(const Op0_t &Op0) {
  return m_VPInstruction<Instruction::Freeze>(Op0);
}
401
/// Match a BranchOnCond, ignoring its condition operand.
inline VPInstruction_match<VPInstruction::BranchOnCond> m_BranchOnCond() {
  return m_VPInstruction<VPInstruction::BranchOnCond>();
}

/// Match a BranchOnCond with condition \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::BranchOnCond, Op0_t>
m_BranchOnCond(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::BranchOnCond>(Op0);
}

/// Match a BranchOnTwoConds, ignoring its operands.
inline VPInstruction_match<VPInstruction::BranchOnTwoConds>
m_BranchOnTwoConds() {
  return m_VPInstruction<VPInstruction::BranchOnTwoConds>();
}

/// Match a BranchOnTwoConds with the two given condition patterns.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnTwoConds, Op0_t, Op1_t>
m_BranchOnTwoConds(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnTwoConds>(Op0, Op1);
}

/// Match a Broadcast of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Broadcast, Op0_t>
m_Broadcast(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Broadcast>(Op0);
}

/// Match an ExplicitVectorLength computation on \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExplicitVectorLength, Op0_t>
m_EVL(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExplicitVectorLength>(Op0);
}

/// Match an ExtractLastLane of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastLane, Op0_t>
m_ExtractLastLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractLastLane>(Op0);
}

/// Match an ExtractElement of vector \p Op0 at index \p Op1.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<Instruction::ExtractElement, Op0_t, Op1_t>
m_ExtractElement(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<Instruction::ExtractElement>(Op0, Op1);
}

/// Match an ExtractLane of \p Op1 at lane \p Op0.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::ExtractLane, Op0_t, Op1_t>
m_ExtractLane(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::ExtractLane>(Op0, Op1);
}

/// Match an ExtractLastPart of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>
m_ExtractLastPart(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractLastPart>(Op0);
}
458
/// Match ExtractLastLane applied to ExtractLastPart of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<
    VPInstruction::ExtractLastLane,
    VPInstruction_match<VPInstruction::ExtractLastPart, Op0_t>>
m_ExtractLastLaneOfLastPart(const Op0_t &Op0) {
  return m_ExtractLastLane(m_ExtractLastPart(Op0));
}

/// Match an ExtractPenultimateElement of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ExtractPenultimateElement, Op0_t>
m_ExtractPenultimateElement(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ExtractPenultimateElement>(Op0);
}

/// Match an ActiveLaneMask with the three given operand patterns.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ActiveLaneMask, Op0_t, Op1_t, Op2_t>
m_ActiveLaneMask(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_VPInstruction<VPInstruction::ActiveLaneMask>(Op0, Op1, Op2);
}

/// Match a BranchOnCount, ignoring its operands.
inline VPInstruction_match<VPInstruction::BranchOnCount> m_BranchOnCount() {
  return m_VPInstruction<VPInstruction::BranchOnCount>();
}

/// Match a BranchOnCount with the two given operand patterns.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::BranchOnCount, Op0_t, Op1_t>
m_BranchOnCount(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::BranchOnCount>(Op0, Op1);
}
488
/// Match an AnyOf reduction, ignoring its operands.
inline VPInstruction_match<VPInstruction::AnyOf> m_AnyOf() {
  return m_VPInstruction<VPInstruction::AnyOf>();
}

/// Match an AnyOf reduction of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::AnyOf, Op0_t>
m_AnyOf(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::AnyOf>(Op0);
}

/// Match a FirstActiveLane of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::FirstActiveLane, Op0_t>
m_FirstActiveLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::FirstActiveLane>(Op0);
}

/// Match a LastActiveLane of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::LastActiveLane, Op0_t>
m_LastActiveLane(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::LastActiveLane>(Op0);
}

/// Match a ComputeReductionResult of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::ComputeReductionResult, Op0_t>
m_ComputeReductionResult(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::ComputeReductionResult>(Op0);
}
516
517/// Match FindIV result pattern:
518/// select(icmp ne ComputeReductionResult(ReducedIV), Sentinel),
519/// ComputeReductionResult(ReducedIV), Start.
520template <typename Op0_t, typename Op1_t>
521inline bool matchFindIVResult(VPInstruction *VPI, Op0_t ReducedIV, Op1_t Start) {
522 return match(VPI, m_Select(m_SpecificICmp(ICmpInst::ICMP_NE,
523 m_ComputeReductionResult(ReducedIV),
524 m_VPValue()),
525 m_ComputeReductionResult(ReducedIV), Start));
526}
527
/// Match a ComputeAnyOfResult with the three given operand patterns.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPInstruction_match<VPInstruction::ComputeAnyOfResult, Op0_t, Op1_t,
                           Op2_t>
m_ComputeAnyOfResult(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_VPInstruction<VPInstruction::ComputeAnyOfResult>(Op0, Op1, Op2);
}

/// Match a Reverse of \p Op0.
template <typename Op0_t>
inline VPInstruction_match<VPInstruction::Reverse, Op0_t>
m_Reverse(const Op0_t &Op0) {
  return m_VPInstruction<VPInstruction::Reverse>(Op0);
}

/// Match a StepVector, which has no operands.
inline VPInstruction_match<VPInstruction::StepVector> m_StepVector() {
  return m_VPInstruction<VPInstruction::StepVector>();
}

/// Match an ExitingIVValue with the two given operand patterns.
template <typename Op0_t, typename Op1_t>
inline VPInstruction_match<VPInstruction::ExitingIVValue, Op0_t, Op1_t>
m_ExitingIVValue(const Op0_t &Op0, const Op1_t &Op1) {
  return m_VPInstruction<VPInstruction::ExitingIVValue>(Op0, Op1);
}
550
/// Match any recipe kind with unary opcode \p Opcode and operand \p Op0.
template <unsigned Opcode, typename Op0_t>
inline AllRecipe_match<Opcode, Op0_t> m_Unary(const Op0_t &Op0) {
  return AllRecipe_match<Opcode, Op0_t>(Op0);
}

/// Match a Trunc of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::Trunc, Op0_t> m_Trunc(const Op0_t &Op0) {
  return m_Unary<Instruction::Trunc, Op0_t>(Op0);
}

/// Match a Trunc of \p Op0, or \p Op0 itself.
template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::Trunc, Op0_t>, Op0_t>
m_TruncOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_Trunc(Op0), Op0);
}

/// Match a ZExt of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::ZExt, Op0_t> m_ZExt(const Op0_t &Op0) {
  return m_Unary<Instruction::ZExt, Op0_t>(Op0);
}

/// Match a SExt of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::SExt, Op0_t> m_SExt(const Op0_t &Op0) {
  return m_Unary<Instruction::SExt, Op0_t>(Op0);
}

/// Match an FPExt of \p Op0.
template <typename Op0_t>
inline AllRecipe_match<Instruction::FPExt, Op0_t> m_FPExt(const Op0_t &Op0) {
  return m_Unary<Instruction::FPExt, Op0_t>(Op0);
}

/// Match either a ZExt or a SExt of \p Op0.
template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
                        AllRecipe_match<Instruction::SExt, Op0_t>>
m_ZExtOrSExt(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}

/// Match a ZExt of \p Op0, or \p Op0 itself.
template <typename Op0_t>
inline match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>, Op0_t>
m_ZExtOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_ZExt(Op0), Op0);
}

/// Match a ZExt or Trunc of \p Op0, or \p Op0 itself.
template <typename Op0_t>
inline match_combine_or<
    match_combine_or<AllRecipe_match<Instruction::ZExt, Op0_t>,
                     AllRecipe_match<Instruction::Trunc, Op0_t>>,
    Op0_t>
m_ZExtOrTruncOrSelf(const Op0_t &Op0) {
  return m_CombineOr(m_CombineOr(m_ZExt(Op0), m_Trunc(Op0)), Op0);
}
603
/// Match any recipe kind with binary opcode \p Opcode and the given operands.
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_match<Opcode, Op0_t, Op1_t> m_Binary(const Op0_t &Op0,
                                                      const Op1_t &Op1) {
  return AllRecipe_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

/// Commutative variant of m_Binary.
template <unsigned Opcode, typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>
m_c_Binary(const Op0_t &Op0, const Op1_t &Op1) {
  return AllRecipe_commutative_match<Opcode, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an Add with the given operands, in order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Add, Op0_t, Op1_t> m_Add(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an Add with the given operands, in either order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Add, Op0_t, Op1_t>
m_c_Add(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Add, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a Sub with the given operands.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Sub, Op0_t, Op1_t> m_Sub(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Sub, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a Mul with the given operands, in order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Mul, Op0_t, Op1_t> m_Mul(const Op0_t &Op0,
                                                             const Op1_t &Op1) {
  return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a Mul with the given operands, in either order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Mul, Op0_t, Op1_t>
m_c_Mul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an FMul with the given operands, in order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::FMul, Op0_t, Op1_t>
m_FMul(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::FMul, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an FAdd with the given operands, in order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::FAdd, Op0_t, Op1_t>
m_FAdd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::FAdd, Op0_t, Op1_t>(Op0, Op1);
}

/// Match an FAdd with the given operands, in either order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::FAdd, Op0_t, Op1_t>
m_c_FAdd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::FAdd, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a UDiv with the given operands.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::UDiv, Op0_t, Op1_t>
m_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::UDiv, Op0_t, Op1_t>(Op0, Op1);
}
669
/// Match a binary AND operation, with the operands in either order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::And, Op0_t, Op1_t>
m_c_BinaryAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::And, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary OR operation. Note that while conceptually the operands can
/// be matched commutatively, \p Commutative defaults to false in line with the
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
/// version of the matcher.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_match<Instruction::Or, Op0_t, Op1_t>
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
}

/// Match a binary OR operation, with the operands in either order.
template <typename Op0_t, typename Op1_t>
inline AllRecipe_commutative_match<Instruction::Or, Op0_t, Op1_t>
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
}
692
/// Cmp_match is a variant of BinaryRecipe_match that also binds the comparison
/// predicate. Opcodes must either be Instruction::ICmp or Instruction::FCmp, or
/// both.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct Cmp_match {
  static_assert((sizeof...(Opcodes) == 1 || sizeof...(Opcodes) == 2) &&
                "Expected one or two opcodes");
  static_assert(
      ((Opcodes == Instruction::ICmp || Opcodes == Instruction::FCmp) && ...) &&
      "Expected a compare instruction opcode");

  // Null when the caller does not want the predicate bound.
  CmpPredicate *Predicate = nullptr;
  Op0_t Op0;
  Op1_t Op1;

  Cmp_match(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1)
      : Predicate(&Pred), Op0(Op0), Op1(Op1) {}
  Cmp_match(const Op0_t &Op0, const Op1_t &Op1) : Op0(Op0), Op1(Op1) {}

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPRecipeBase *V) const {
    // Try each compare opcode in turn; bind the predicate on first success.
    if ((m_Binary<Opcodes>(Op0, Op1).match(V) || ...)) {
      if (Predicate)
        *Predicate = cast<VPRecipeWithIRFlags>(V)->getPredicate();
      return true;
    }
    return false;
  }
};
726
/// SpecificCmp_match is a variant of Cmp_match that matches the comparison
/// predicate, instead of binding it.
template <typename Op0_t, typename Op1_t, unsigned... Opcodes>
struct SpecificCmp_match {
  const CmpPredicate Predicate;
  Op0_t Op0;
  Op1_t Op1;

  SpecificCmp_match(CmpPredicate Pred, const Op0_t &LHS, const Op1_t &RHS)
      : Predicate(Pred), Op0(LHS), Op1(RHS) {}

  bool match(const VPValue *V) const {
    auto *DefR = V->getDefiningRecipe();
    return DefR && match(DefR);
  }

  bool match(const VPRecipeBase *V) const {
    // Delegate to Cmp_match for structure, then check the bound predicate is
    // compatible with the requested one.
    CmpPredicate CurrentPred;
    return Cmp_match<Op0_t, Op1_t, Opcodes...>(CurrentPred, Op0, Op1)
               .match(V) &&
           CmpPredicate::getMatching(CurrentPred, Predicate);
  }
};
750
/// Match an integer compare with the given operands, any predicate.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp> m_ICmp(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Op0, Op1);
}

/// Match an integer compare, binding its predicate to \p Pred.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_ICmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp>(Pred, Op0, Op1);
}

/// Match an integer compare with exactly the predicate \p MatchPred.
template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>
m_SpecificICmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp>(MatchPred, Op0,
                                                            Op1);
}

/// Match an integer or floating-point compare, any predicate.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(Op0,
                                                                       Op1);
}

/// Match an integer or floating-point compare, binding its predicate.
template <typename Op0_t, typename Op1_t>
inline Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_Cmp(CmpPredicate &Pred, const Op0_t &Op0, const Op1_t &Op1) {
  return Cmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      Pred, Op0, Op1);
}

/// Match an integer or floating-point compare with predicate \p MatchPred.
template <typename Op0_t, typename Op1_t>
inline SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>
m_SpecificCmp(CmpPredicate MatchPred, const Op0_t &Op0, const Op1_t &Op1) {
  return SpecificCmp_match<Op0_t, Op1_t, Instruction::ICmp, Instruction::FCmp>(
      MatchPred, Op0, Op1);
}
790
/// Matches any GEP-like address computation: a GetElementPtr (replicated or
/// widened) or a PtrAdd/WidePtrAdd VPInstruction.
template <typename Op0_t, typename Op1_t>
using GEPLikeRecipe_match = match_combine_or<
    Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                 /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>,
    match_combine_or<
        VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>,
        VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>>>;

/// Match a GEP-like recipe with base \p Op0 and offset \p Op1.
template <typename Op0_t, typename Op1_t>
inline GEPLikeRecipe_match<Op0_t, Op1_t> m_GetElementPtr(const Op0_t &Op0,
                                                         const Op1_t &Op1) {
  return m_CombineOr(
      Recipe_match<std::tuple<Op0_t, Op1_t>, Instruction::GetElementPtr,
                   /*Commutative*/ false, VPReplicateRecipe, VPWidenGEPRecipe>(
          Op0, Op1),
      m_CombineOr(
          VPInstruction_match<VPInstruction::PtrAdd, Op0_t, Op1_t>(Op0, Op1),
          VPInstruction_match<VPInstruction::WidePtrAdd, Op0_t, Op1_t>(Op0,
                                                                       Op1)));
}
811
/// Match a Select with condition \p Op0 and values \p Op1 / \p Op2.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>
m_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return AllRecipe_match<Instruction::Select, Op0_t, Op1_t, Op2_t>(
      {Op0, Op1, Op2});
}

/// Match a logical negation of \p Op0: either a Not VPInstruction or an xor
/// with an all-ones constant (in either operand position).
template <typename Op0_t>
inline match_combine_or<VPInstruction_match<VPInstruction::Not, Op0_t>,
                        AllRecipe_commutative_match<
                            Instruction::Xor, int_pred_ty<is_all_ones>, Op0_t>>
m_Not(const Op0_t &Op0) {
  return m_CombineOr(m_VPInstruction<VPInstruction::Not>(Op0),
                     m_c_Binary<Instruction::Xor>(m_AllOnes(), Op0));
}

/// Match select(Op0, Op1, Op2) or the equivalent select(!Op0, Op2, Op1).
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline auto m_c_Select(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return m_CombineOr(m_Select(Op0, Op1, Op2), m_Select(m_Not(Op0), Op2, Op1));
}
832
/// Match a logical AND: a LogicalAnd VPInstruction or its select form
/// select(Op0, Op1, false).
template <typename Op0_t, typename Op1_t>
inline match_combine_or<
    VPInstruction_match<VPInstruction::LogicalAnd, Op0_t, Op1_t>,
    AllRecipe_match<Instruction::Select, Op0_t, Op1_t, specific_intval<1>>>
m_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
      m_Select(Op0, Op1, m_False()));
}

/// Commutative variant of m_LogicalAnd.
template <typename Op0_t, typename Op1_t>
inline auto m_c_LogicalAnd(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_c_VPInstruction<VPInstruction::LogicalAnd, Op0_t, Op1_t>(Op0, Op1),
      m_c_Select(Op0, Op1, m_False()));
}

/// Match a logical OR: a LogicalOr VPInstruction or its select form
/// select(Op0, true, Op1).
template <typename Op0_t, typename Op1_t>
inline auto m_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_CombineOr(
      m_c_VPInstruction<VPInstruction::LogicalOr, Op0_t, Op1_t>(Op0, Op1),
      m_Select(Op0, m_True(), Op1));
}

/// Commutative select-form logical OR.
template <typename Op0_t, typename Op1_t>
inline auto m_c_LogicalOr(const Op0_t &Op0, const Op1_t &Op1) {
  return m_c_Select(Op0, m_True(), Op1);
}
861
// VPScalarIVStepsRecipe has no opcode, so the Opcode parameter (0) is unused.
template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPScalarIVSteps_match = Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0,
                                           false, VPScalarIVStepsRecipe>;

/// Match a VPScalarIVStepsRecipe with the three given operand patterns.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>
m_ScalarIVSteps(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPScalarIVSteps_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}

// VPDerivedIVRecipe also has no opcode; 0 is a placeholder.
template <typename Op0_t, typename Op1_t, typename Op2_t>
using VPDerivedIV_match =
    Recipe_match<std::tuple<Op0_t, Op1_t, Op2_t>, 0, false, VPDerivedIVRecipe>;

/// Match a VPDerivedIVRecipe with the three given operand patterns.
template <typename Op0_t, typename Op1_t, typename Op2_t>
inline VPDerivedIV_match<Op0_t, Op1_t, Op2_t>
m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
  return VPDerivedIV_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
}
881
882template <typename Addr_t, typename Mask_t> struct Load_match {
883 Addr_t Addr;
884 Mask_t Mask;
885
886 Load_match(Addr_t Addr, Mask_t Mask) : Addr(Addr), Mask(Mask) {}
887
888 template <typename OpTy> bool match(const OpTy *V) const {
889 auto *Load = dyn_cast<VPWidenLoadRecipe>(V);
890 if (!Load || !Addr.match(Load->getAddr()) || !Load->isMasked() ||
891 !Mask.match(Load->getMask()))
892 return false;
893 return true;
894 }
895};
896
897/// Match a (possibly reversed) masked load.
898template <typename Addr_t, typename Mask_t>
899inline Load_match<Addr_t, Mask_t> m_MaskedLoad(const Addr_t &Addr,
900 const Mask_t &Mask) {
901 return Load_match<Addr_t, Mask_t>(Addr, Mask);
902}
903
904template <typename Addr_t, typename Val_t, typename Mask_t> struct Store_match {
905 Addr_t Addr;
906 Val_t Val;
907 Mask_t Mask;
908
909 Store_match(Addr_t Addr, Val_t Val, Mask_t Mask)
910 : Addr(Addr), Val(Val), Mask(Mask) {}
911
912 template <typename OpTy> bool match(const OpTy *V) const {
913 auto *Store = dyn_cast<VPWidenStoreRecipe>(V);
914 if (!Store || !Addr.match(Store->getAddr()) ||
915 !Val.match(Store->getStoredValue()) || !Store->isMasked() ||
916 !Mask.match(Store->getMask()))
917 return false;
918 return true;
919 }
920};
921
922/// Match a (possibly reversed) masked store.
923template <typename Addr_t, typename Val_t, typename Mask_t>
924inline Store_match<Addr_t, Val_t, Mask_t>
925m_MaskedStore(const Addr_t &Addr, const Val_t &Val, const Mask_t &Mask) {
926 return Store_match<Addr_t, Val_t, Mask_t>(Addr, Val, Mask);
927}
928
/// Matcher type for a VPVectorEndPointerRecipe with two operand patterns.
template <typename Op0_t, typename Op1_t>
using VectorEndPointerRecipe_match =
    Recipe_match<std::tuple<Op0_t, Op1_t>, 0,
                 /*Commutative*/ false, VPVectorEndPointerRecipe>;

/// Match a VPVectorEndPointerRecipe whose operands match Op0 and Op1.
template <typename Op0_t, typename Op1_t>
VectorEndPointerRecipe_match<Op0_t, Op1_t> m_VecEndPtr(const Op0_t &Op0,
                                                       const Op1_t &Op1) {
  return VectorEndPointerRecipe_match<Op0_t, Op1_t>(Op0, Op1);
}
939
940/// Match a call argument at a given argument index.
941template <typename Opnd_t> struct Argument_match {
942 /// Call argument index to match.
943 unsigned OpI;
944 Opnd_t Val;
945
946 Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
947
948 template <typename OpTy> bool match(OpTy *V) const {
949 if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
950 return Val.match(R->getOperand(OpI));
951 if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
952 return Val.match(R->getOperand(OpI));
953 if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
954 if (R->getOpcode() == Instruction::Call)
955 return Val.match(R->getOperand(OpI));
956 if (const auto *R = dyn_cast<VPInstruction>(V))
957 if (R->getOpcode() == Instruction::Call)
958 return Val.match(R->getOperand(OpI));
959 return false;
960 }
961};
962
963/// Match a call argument.
964template <unsigned OpI, typename Opnd_t>
965inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
966 return Argument_match<Opnd_t>(OpI, Op);
967}
968
/// Intrinsic matchers.
struct IntrinsicID_match {
  /// Intrinsic ID the matched call must have.
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) const {
    // Widened intrinsic recipes carry the intrinsic ID directly.
    if (const auto *R = dyn_cast<VPWidenIntrinsicRecipe>(V))
      return R->getVectorIntrinsicID() == ID;
    // Widened call recipes expose the scalar callee Function.
    if (const auto *R = dyn_cast<VPWidenCallRecipe>(V))
      return R->getCalledScalarFunction()->getIntrinsicID() == ID;

    // For replicate recipes and VPInstructions the callee is an operand:
    // it must be a live-in IR Function with the requested intrinsic ID.
    auto MatchCalleeIntrinsic = [&](VPValue *CalleeOp) {
      if (!isa<VPIRValue>(Val: CalleeOp))
        return false;
      auto *F = cast<Function>(Val: CalleeOp->getLiveInIRValue());
      return F->getIntrinsicID() == ID;
    };
    if (const auto *R = dyn_cast<VPReplicateRecipe>(V))
      if (R->getOpcode() == Instruction::Call) {
        // The mask is always the last operand if predicated, so the callee
        // sits one slot earlier in that case.
        return MatchCalleeIntrinsic(
            R->getOperand(R->getNumOperands() - 1 - R->isPredicated()));
      }
    if (const auto *R = dyn_cast<VPInstruction>(V))
      if (R->getOpcode() == Instruction::Call)
        // For a VPInstruction call the callee is simply the last operand.
        return MatchCalleeIntrinsic(R->getOperand(R->getNumOperands() - 1));
    return false;
  }
};
999
/// Intrinsic matches are combinations of ID matchers, and argument
/// matchers. Higher arity matcher are defined recursively in terms of and-ing
/// them with lower arity matchers. Here's some convenient typedefs for up to
/// several arguments, and more can be added as needed
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void>
struct m_Intrinsic_Ty;
/// One argument: the ID matcher AND-ed with a matcher for argument 0.
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
/// Two arguments: the one-argument matcher AND-ed with argument 1.
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
/// Three arguments: the two-argument matcher AND-ed with argument 2.
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
/// Four arguments (the primary template): the three-argument matcher AND-ed
/// with argument 3.
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};
1024
1025/// Match intrinsic calls like this:
1026/// m_Intrinsic<Intrinsic::fabs>(m_VPValue(X), ...)
1027template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
1028 return IntrinsicID_match(IntrID);
1029}
1030
1031/// Match intrinsic calls with a runtime intrinsic ID.
1032inline IntrinsicID_match m_Intrinsic(Intrinsic::ID IntrID) {
1033 return IntrinsicID_match(IntrID);
1034}
1035
1036template <Intrinsic::ID IntrID, typename T0>
1037inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
1038 return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
1039}
1040
1041template <Intrinsic::ID IntrID, typename T0, typename T1>
1042inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
1043 const T1 &Op1) {
1044 return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
1045}
1046
1047template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
1048inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
1049m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
1050 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
1051}
1052
1053template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
1054 typename T3>
1055inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
1056m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
1057 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
1058}
1059
/// Match any live-in value: either a VPIRValue or a VPSymbolicValue.
inline auto m_LiveIn() { return class_match<VPIRValue, VPSymbolicValue>(); }
1061
/// Match a GEP recipe (VPWidenGEPRecipe, VPInstruction, or VPReplicateRecipe)
/// and bind the source element type and operands.
struct GetElementPtr_match {
  // Out-parameter: receives the GEP's source element type on a match.
  Type *&SourceElementType;
  // Out-parameter: receives all operands of the matched recipe on a match.
  ArrayRef<VPValue *> &Operands;

  GetElementPtr_match(Type *&SourceElementType, ArrayRef<VPValue *> &Operands)
      : SourceElementType(SourceElementType), Operands(Operands) {}

  template <typename ITy> bool match(ITy *V) const {
    // Try each recipe kind that can represent a GEP.
    return matchRecipeAndBind<VPWidenGEPRecipe>(V) ||
           matchRecipeAndBind<VPInstruction>(V) ||
           matchRecipeAndBind<VPReplicateRecipe>(V);
  }

private:
  // If \p V is a RecipeTy that represents a GEP, bind SourceElementType and
  // Operands and return true; otherwise return false.
  template <typename RecipeTy> bool matchRecipeAndBind(const VPValue *V) const {
    auto *DefR = dyn_cast<RecipeTy>(V);
    if (!DefR)
      return false;

    if constexpr (std::is_same_v<RecipeTy, VPWidenGEPRecipe>) {
      // Widened GEP recipes carry the source element type directly.
      SourceElementType = DefR->getSourceElementType();
    } else if (DefR->getOpcode() == Instruction::GetElementPtr) {
      // NOTE(review): assumes any non-widened recipe with GEP opcode has an
      // underlying GetElementPtrInst -- confirm this holds for all callers.
      SourceElementType = cast<GetElementPtrInst>(DefR->getUnderlyingInstr())
                              ->getSourceElementType();
    } else if constexpr (std::is_same_v<RecipeTy, VPInstruction>) {
      if (DefR->getOpcode() == VPInstruction::PtrAdd) {
        // PtrAdd is a byte-offset GEP with i8 element type.
        LLVMContext &Ctx = DefR->getParent()->getPlan()->getContext();
        SourceElementType = Type::getInt8Ty(C&: Ctx);
      } else {
        return false;
      }
    } else {
      return false;
    }

    // Bind all of the recipe's operands.
    Operands = ArrayRef<VPValue *>(DefR->op_begin(), DefR->op_end());
    return true;
  }
};
1104
1105/// Match a GEP recipe with any number of operands and bind source element type
1106/// and operands.
1107inline GetElementPtr_match m_GetElementPtr(Type *&SourceElementType,
1108 ArrayRef<VPValue *> &Operands) {
1109 return GetElementPtr_match(SourceElementType, Operands);
1110}
1111
/// Matcher that succeeds only when the matched value has exactly one use and
/// the wrapped sub-pattern also matches it.
template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t Sub;

  OneUse_match(const SubPattern_t &SP) : Sub(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (!V->hasOneUse())
      return false;
    return Sub.match(V);
  }
};
1121
1122template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
1123 return SubPattern;
1124}
1125
1126inline bind_ty<VPReductionPHIRecipe> m_ReductionPhi(VPReductionPHIRecipe *&V) {
1127 return V;
1128}
1129
1130} // namespace llvm::VPlanPatternMatch
1131
1132#endif
1133