//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <optional>

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

#define VPINTERNAL_CASE(X) "|" #X
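// With the stringizing VPINTERNAL_CASE above, VPINTERNAL_VPLEGAL_CASES
// expands to the string literal "|Legal|Discard|Convert", which the option
// descriptions below append to list the accepted values.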

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of lowered vector predication operations");

//// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
static bool isAllTrueMask(Value *MaskVal) {
  if (Value *SplattedVal = getSplatValue(MaskVal))
    if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
      return ConstValue->isAllOnesValue();

  return false;
}

/// \returns A non-excepting divisor constant for this type.
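/// Dividing or taking a remainder by 1 never traps, so 1 is a safe value for
/// the masked-off lanes of the integer division operators.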
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);

  if (isa<Instruction>(NewOp) && !NewOp.hasName() && OldOp.hasName())
    NewOp.takeName(&OldOp);

  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

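/// \returns Whether all lanes of \p VPI may be executed speculatively, i.e.
/// whether the mask and %evl can be ignored during expansion. For example, a
/// vp.add can run on every lane without side effects, whereas a vp.sdiv
/// cannot: a masked-off lane could divide by zero.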
static bool maySpeculateLanes(VPIntrinsic &VPI) {
  // The result of VP reductions depends on the mask and evl.
  if (isa<VPReductionIntrinsic>(VPI))
    return false;
  // Fall back to whether the intrinsic is speculatable.
  if (auto IntrID = VPI.getFunctionalIntrinsicID())
    return Intrinsic::getFnAttributes(VPI.getContext(), *IntrID)
        .hasAttribute(Attribute::AttrKind::Speculatable);
  if (auto Opc = VPI.getFunctionalOpcode())
    return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
  return false;
}

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  const TargetTransformInfo &TTI;

  /// \returns A bitmask that is true where the lane position is less than
  /// \p EVLParam.
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  /// If needed, folds the EVL into the mask operand and discards the EVL
  /// parameter. Returns true if the mask was actually folded.
  bool foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation. Returns true if the %evl (if any) was
  /// effectively changed.
  bool discardEVLParameter(VPIntrinsic &PI);

  /// Lower this VP binary operator to an unpredicated binary operator.
  bool expandPredicationInBinaryOperator(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP integer call to an unpredicated integer call.
  bool expandPredicationToIntCall(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP floating-point call to an unpredicated floating-point call.
  bool expandPredicationToFPCall(IRBuilder<> &Builder, VPIntrinsic &PI,
                                 unsigned UnpredicatedIntrinsicID);

  /// Lower this VP reduction to a call to an unpredicated reduction intrinsic.
  bool expandPredicationInReduction(IRBuilder<> &Builder,
                                    VPReductionIntrinsic &PI);

  /// Lower this VP cast operation to a non-VP intrinsic.
  bool expandPredicationToCastIntrinsic(IRBuilder<> &Builder, VPIntrinsic &VPI);

  /// Lower this VP memory operation to a non-VP intrinsic.
  bool expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                          VPIntrinsic &VPI);

  /// Lower this VP comparison to a call to an unpredicated comparison.
  bool expandPredicationInComparison(IRBuilder<> &Builder, VPCmpIntrinsic &PI);

  /// Query TTI and expand the vector predication in \p PI accordingly.
  bool expandPredication(VPIntrinsic &PI);

  /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
  /// overrides TTI with the cl::opts listed at the top of this file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(const TargetTransformInfo &TTI)
      : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  /// Expand llvm.vp.* intrinsics as requested by \p TTI.
  /// Returns the details of the expansion.
  VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
};

//// CachingVPExpander {

Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO: add caching.
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    // `get_active_lane_mask` performs an implicit less-than comparison.
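    // With a base index of 0, the resulting mask is true exactly for the
    // lanes in [0, %evl).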
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {BoolVecTy, EVLParam->getType()},
                                   {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
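  // Illustrative example (assuming a <4 x i32> operand and %evl == 2):
  //   %splat = <2, 2, 2, 2>,  %step = <0, 1, 2, 3>
  //   icmp ult %step, %splat  -->  <true, true, false, false>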
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = Builder.CreateStepVector(VectorType::get(LaneTy, ElemCount));
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}

bool CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                          VPIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor on masked-off lanes (1).
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // 2nd operand must not be zero.
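      // Illustrative example (value names assumed): for
      //   %r = vp.udiv(%a, %b, %mask, %evl)
      // the masked-off lanes of %b are replaced with 1, so the unpredicated
      // udiv below cannot trap on them:
      //   %safe = select %mask, %b, splat(1)
      //   %r    = udiv %a, %safe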
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1);

  replaceOperation(*NewBinOp, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationToIntCall(IRBuilder<> &Builder,
                                                   VPIntrinsic &VPI) {
  std::optional<unsigned> FID = VPI.getFunctionalIntrinsicID();
  if (!FID)
    return false;
  SmallVector<Value *, 2> Argument;
  for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
    Argument.push_back(VPI.getOperand(i));
  }
  Value *NewOp =
      Builder.CreateIntrinsic(FID.value(), {VPI.getType()}, Argument);
  replaceOperation(*NewOp, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationToFPCall(
    IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  switch (UnpredicatedIntrinsicID) {
  case Intrinsic::fabs:
  case Intrinsic::sqrt:
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum:
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::lrint:
  case Intrinsic::llrint: {
    SmallVector<Value *, 2> Argument;
    for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
      Argument.push_back(VPI.getOperand(i));
    }
    Value *NewOp = Builder.CreateIntrinsic(VPI.getType(),
                                           UnpredicatedIntrinsicID, Argument);
    replaceOperation(*NewOp, VPI);
    return true;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd: {
    Value *Op0 = VPI.getOperand(0);
    Value *Op1 = VPI.getOperand(1);
    Value *Op2 = VPI.getOperand(2);
    Function *Fn = Intrinsic::getOrInsertDeclaration(
        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
    Value *NewOp;
    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
      NewOp = Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2});
    else
      NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2});
    replaceOperation(*NewOp, VPI);
    return true;
  }
  }

  return false;
}

static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  Intrinsic::ID RdxID = *VPI.getFunctionalIntrinsicID();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(VPI))
    FMF = VPI.getFastMathFlags();
  return getReductionIdentity(RdxID, EltTy, FMF);
}

bool CachingVPExpander::expandPredicationInReduction(
    IRBuilder<> &Builder, VPReductionIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert the neutral element in masked-out positions.
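  // Illustrative example (values assumed): for a vp.reduce.add over
  // %v = <5, 6, 7, 8> with %mask = <1, 0, 1, 0>, the select below yields
  // <5, 0, 7, 0>; the masked-off lanes contribute the neutral element 0 and
  // cannot change the sum.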
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_mul:
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    unsigned Opc = getArithmeticReductionInstruction(RedID);
    assert(Instruction::isBinaryOp(Opc));
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    Reduction =
        Builder.CreateBinOp((Instruction::BinaryOps)Opc, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_smax:
  case Intrinsic::vp_reduce_smin:
  case Intrinsic::vp_reduce_umax:
  case Intrinsic::vp_reduce_umin:
  case Intrinsic::vp_reduce_fmax:
  case Intrinsic::vp_reduce_fmin:
  case Intrinsic::vp_reduce_fmaximum:
  case Intrinsic::vp_reduce_fminimum: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    Intrinsic::ID ScalarID = getMinMaxReductionIntrinsicOp(RedID);
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction = Builder.CreateBinaryIntrinsic(ScalarID, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                                         VPIntrinsic &VPI) {
  Intrinsic::ID VPID = VPI.getIntrinsicID();
  unsigned CastOpcode = VPIntrinsic::getFunctionalOpcodeForVP(VPID).value();
  assert(Instruction::isCast(CastOpcode));
  Value *CastOp = Builder.CreateCast(Instruction::CastOps(CastOpcode),
                                     VPI.getOperand(0), VPI.getType());

  replaceOperation(*CastOp, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                                           VPIntrinsic &VPI) {
  assert(VPI.canIgnoreVectorLengthParam());

  const auto &DL = VPI.getDataLayout();

  Value *MaskParam = VPI.getMaskParam();
  Value *PtrParam = VPI.getMemoryPointerParam();
  Value *DataParam = VPI.getMemoryDataParam();
  bool IsUnmasked = isAllTrueMask(MaskParam);

  MaybeAlign AlignOpt = VPI.getPointerAlignment();

  Value *NewMemoryInst = nullptr;
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Not a VP memory intrinsic");
  case Intrinsic::vp_store:
    if (IsUnmasked) {
      StoreInst *NewStore =
          Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewStore->setAlignment(*AlignOpt);
      NewMemoryInst = NewStore;
    } else
      NewMemoryInst = Builder.CreateMaskedStore(
          DataParam, PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_load:
    if (IsUnmasked) {
      LoadInst *NewLoad =
          Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewLoad->setAlignment(*AlignOpt);
      NewMemoryInst = NewLoad;
    } else
      NewMemoryInst = Builder.CreateMaskedLoad(
          VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_scatter: {
    auto *ElementType =
        cast<VectorType>(DataParam->getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedScatter(
        DataParam, PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam);
    break;
  }
  case Intrinsic::vp_gather: {
    auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedGather(
        VPI.getType(), PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam,
        nullptr);
    break;
  }
  }

  assert(NewMemoryInst);
  replaceOperation(*NewMemoryInst, VPI);
  return true;
}

bool CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder,
                                                      VPCmpIntrinsic &VPI) {
  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
         *VPI.getFunctionalOpcode() == Instruction::FCmp);

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  auto Pred = VPI.getPredicate();

  auto *NewCmp = Builder.CreateCmp(Pred, Op0, Op1);

  replaceOperation(*NewCmp, VPI);
  return true;
}

bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return false;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return false;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
  if (StaticElemCount.isScalable()) {
    // TODO: add caching.
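    // E.g. for a <vscale x 4 x i32> operation, the maximum vector length is
    // vscale * 4, materialized here as an explicit multiply.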
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateVScale(Int32Ty, "vscale");
    MaxEVL = Builder.CreateNUWMul(VScale, FactorConst, "scalable_size");
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
  return true;
}

bool CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // The %evl parameter is ineffective, so there is nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return false;

  // vp.merge and vp.select carry their predicate as the first operand rather
  // than as a regular mask parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  if (!OldMaskParam) {
    assert((VPI.getIntrinsicID() == Intrinsic::vp_merge ||
            VPI.getIntrinsicID() == Intrinsic::vp_select) &&
           "Unexpected VP intrinsic without mask operand");
    OldMaskParam = VPI.getArgOperand(0);
  }

  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
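  // The combined predicate is (lane index < %evl) AND %mask; e.g. with
  // %evl == 3 and %mask = <1, 1, 0, 1>, the folded mask is <1, 1, 0, 0>.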
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  if (VPI.getIntrinsicID() == Intrinsic::vp_merge ||
      VPI.getIntrinsicID() == Intrinsic::vp_select)
    VPI.setArgOperand(0, NewMaskParam);
  else
    VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return true;
}

bool CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
    return expandPredicationInComparison(Builder, *VPCmp);

  if (VPCastIntrinsic::isVPCast(VPI.getIntrinsicID()))
    return expandPredicationToCastIntrinsic(Builder, VPI);

  switch (VPI.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::vp_fneg: {
    Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0));
    replaceOperation(*NewNegOp, VPI);
    return true;
  }
  case Intrinsic::vp_select:
  case Intrinsic::vp_merge: {
    assert(maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam());
    Value *NewSelectOp = Builder.CreateSelect(
        VPI.getOperand(0), VPI.getOperand(1), VPI.getOperand(2));
    replaceOperation(*NewSelectOp, VPI);
    return true;
  }
  case Intrinsic::vp_abs:
  case Intrinsic::vp_smax:
  case Intrinsic::vp_smin:
  case Intrinsic::vp_umax:
  case Intrinsic::vp_umin:
  case Intrinsic::vp_bswap:
  case Intrinsic::vp_bitreverse:
  case Intrinsic::vp_ctpop:
  case Intrinsic::vp_ctlz:
  case Intrinsic::vp_cttz:
  case Intrinsic::vp_sadd_sat:
  case Intrinsic::vp_uadd_sat:
  case Intrinsic::vp_ssub_sat:
  case Intrinsic::vp_usub_sat:
  case Intrinsic::vp_fshl:
  case Intrinsic::vp_fshr:
    return expandPredicationToIntCall(Builder, VPI);
  case Intrinsic::vp_fabs:
  case Intrinsic::vp_sqrt:
  case Intrinsic::vp_maxnum:
  case Intrinsic::vp_minnum:
  case Intrinsic::vp_maximum:
  case Intrinsic::vp_minimum:
  case Intrinsic::vp_ceil:
  case Intrinsic::vp_floor:
  case Intrinsic::vp_round:
  case Intrinsic::vp_roundeven:
  case Intrinsic::vp_roundtozero:
  case Intrinsic::vp_rint:
  case Intrinsic::vp_nearbyint:
  case Intrinsic::vp_lrint:
  case Intrinsic::vp_llrint:
  case Intrinsic::vp_fma:
  case Intrinsic::vp_fmuladd:
    return expandPredicationToFPCall(Builder, VPI,
                                     VPI.getFunctionalIntrinsicID().value());
  case Intrinsic::vp_load:
  case Intrinsic::vp_store:
  case Intrinsic::vp_gather:
  case Intrinsic::vp_scatter:
    return expandPredicationInMemoryIntrinsic(Builder, VPI);
  }

  if (auto CID = VPI.getConstrainedIntrinsicID())
    if (expandPredicationToFPCall(Builder, VPI, *CID))
      return true;

  return false;
}

//// } CachingVPExpander

void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
  // Operations with speculatable lanes do not strictly need predication.
  if (maySpeculateLanes(VPI)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // There is no need to expand %evl into the %mask only to ignore that code.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing; the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

VPExpansionDetails
CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
  auto Strategy = getVPLegalizationStrategy(VPI);
  sanitizeStrategy(VPI, Strategy);
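  // After sanitizeStrategy, an OpStrategy of Convert implies that the %evl
  // parameter is discarded or folded into the mask below first, so the
  // operation expansion only needs to honor the mask.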

  VPExpansionDetails Changed = VPExpansionDetails::IntrinsicUnchanged;

  // Transform the EVL parameter.
  switch (Strategy.EVLParamStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    if (discardEVLParameter(VPI))
      Changed = VPExpansionDetails::IntrinsicUpdated;
    break;
  case VPLegalization::Convert:
    if (foldEVLIntoMask(VPI)) {
      Changed = VPExpansionDetails::IntrinsicUpdated;
      ++NumFoldedVL;
    }
    break;
  }

  // Replace with a non-predicated operation.
  switch (Strategy.OpStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    llvm_unreachable("Invalid strategy for operators.");
  case VPLegalization::Convert:
    if (expandPredication(VPI)) {
      ++NumLoweredVPOps;
      Changed = VPExpansionDetails::IntrinsicReplaced;
    }
    break;
  }

  return Changed;
}
} // namespace

VPExpansionDetails
llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
                                       const TargetTransformInfo &TTI) {
  return CachingVPExpander(TTI).expandVectorPredication(VPI);
}