//===- ScalarizeMaskedMemIntrin.cpp - Scalarize unsupported masked mem ----===//
//                                    intrinsics
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass replaces masked memory intrinsics - when unsupported by the target
// - with a chain of basic blocks that handle the elements one by one if the
// appropriate mask bit is set.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <cassert>
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "scalarize-masked-mem-intrin"

namespace {

class ScalarizeMaskedMemIntrinLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  explicit ScalarizeMaskedMemIntrinLegacyPass() : FunctionPass(ID) {
    initializeScalarizeMaskedMemIntrinLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "Scalarize Masked Memory Intrinsics";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace

static bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT,
                          const TargetTransformInfo &TTI, const DataLayout &DL,
                          bool HasBranchDivergence, DomTreeUpdater *DTU);
static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
                             const TargetTransformInfo &TTI,
                             const DataLayout &DL, bool HasBranchDivergence,
                             DomTreeUpdater *DTU);

char ScalarizeMaskedMemIntrinLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ScalarizeMaskedMemIntrinLegacyPass, DEBUG_TYPE,
                      "Scalarize unsupported masked memory intrinsics", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizeMaskedMemIntrinLegacyPass, DEBUG_TYPE,
                    "Scalarize unsupported masked memory intrinsics", false,
                    false)

FunctionPass *llvm::createScalarizeMaskedMemIntrinLegacyPass() {
  return new ScalarizeMaskedMemIntrinLegacyPass();
}

static bool isConstantIntVector(Value *Mask) {
  Constant *C = dyn_cast<Constant>(Mask);
  if (!C)
    return false;

  unsigned NumElts = cast<FixedVectorType>(Mask->getType())->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *CElt = C->getAggregateElement(i);
    if (!CElt || !isa<ConstantInt>(CElt))
      return false;
  }

  return true;
}

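// Map a logical element index to the position of its mask bit inside the
// integer that the mask vector is bitcast to below. On big-endian targets the
// bitcast places element 0 in the most significant bit, so the index is
// mirrored. Illustrative example (not from the original source): with
// VectorWidth = 4 on a big-endian target, Idx 0 maps to bit 3 and Idx 3 maps
// to bit 0; on little-endian targets the index is returned unchanged.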
static unsigned adjustForEndian(const DataLayout &DL, unsigned VectorWidth,
                                unsigned Idx) {
  return DL.isBigEndian() ? VectorWidth - 1 - Idx : Idx;
}

// Translate a masked load intrinsic like
// <16 x i32 > @llvm.masked.load( <16 x i32>* %addr,
//                                <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, loading the elements one by one if the
// appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  br i1 %2, label %cond.load, label %else
//
// cond.load:                                             ; preds = %0
//  %3 = getelementptr i32* %1, i32 0
//  %4 = load i32* %3
//  %5 = insertelement <16 x i32> %passthru, i32 %4, i32 0
//  br label %else
//
// else:                                        ; preds = %0, %cond.load
//  %res.phi.else = phi <16 x i32> [ %5, %cond.load ], [ poison, %0 ]
//  %6 = extractelement <16 x i1> %mask, i32 1
//  br i1 %6, label %cond.load1, label %else2
//
// cond.load1:                                            ; preds = %else
//  %7 = getelementptr i32* %1, i32 1
//  %8 = load i32* %7
//  %9 = insertelement <16 x i32> %res.phi.else, i32 %8, i32 1
//  br label %else2
//
// else2:                              ; preds = %else, %cond.load1
//  %res.phi.else3 = phi <16 x i32> [ %9, %cond.load1 ], [ %res.phi.else, %else ]
//  %10 = extractelement <16 x i1> %mask, i32 2
//  br i1 %10, label %cond.load4, label %else5
//
static void scalarizeMaskedLoad(const DataLayout &DL, bool HasBranchDivergence,
                                CallInst *CI, DomTreeUpdater *DTU,
                                bool &ModifiedDT) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(1);
  Value *Src0 = CI->getArgOperand(2);

  const Align AlignVal = CI->getParamAlign(0).valueOrOne();
  VectorType *VecType = cast<FixedVectorType>(CI->getType());

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
    LoadInst *NewI = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal);
    NewI->copyMetadata(*CI);
    NewI->takeName(CI);
    CI->replaceAllUsesWith(NewI);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
  const Align AdjustedAlignVal =
      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();

  // The result vector
  Value *VResult = Src0;

  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
      VResult = Builder.CreateInsertElement(VResult, Load, Idx);
    }
    CI->replaceAllUsesWith(VResult);
    CI->eraseFromParent();
    return;
  }

  // Optimize the case where the "masked load" is a predicated load - that is,
  // where the mask is the splat of a non-constant scalar boolean. In that
  // case, use that splatted value as the guard on a conditional vector load.
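  //
  // Illustrative shape of the resulting IR for a <16 x i32> load (value names
  // are examples invented here, not taken from a regression test):
  //  %mask.first = extractelement <16 x i1> %mask, i64 0
  //  br i1 %mask.first, label %cond.load, label %post
  // cond.load:
  //  %wide.load = load <16 x i32>, ptr %addr
  //  br label %post
  // post:
  //  %res = phi <16 x i32> [ %wide.load, %cond.load ], [ %passthru, %0 ]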
  if (isSplatValue(Mask, /*Index=*/0)) {
    Value *Predicate = Builder.CreateExtractElement(Mask, uint64_t(0ull),
                                                    Mask->getName() + ".first");
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.load");
    Builder.SetInsertPoint(CondBlock->getTerminator());
    LoadInst *Load = Builder.CreateAlignedLoad(VecType, Ptr, AlignVal,
                                               CI->getName() + ".cond.load");
    Load->copyMetadata(*CI);

    BasicBlock *PostLoad = ThenTerm->getSuccessor(0);
    Builder.SetInsertPoint(PostLoad, PostLoad->begin());
    PHINode *Phi = Builder.CreatePHI(VecType, /*NumReservedValues=*/2);
    Phi->addIncoming(Load, CondBlock);
    Phi->addIncoming(Src0, IfBlock);
    Phi->takeName(CI);

    CI->replaceAllUsesWith(Phi);
    CI->eraseFromParent();
    ModifiedDT = true;
    return;
  }
  // If the mask is not v1i1, use scalar bit test operations. This generates
  // better results on X86 at least. However, don't do this on GPUs and other
  // machines with divergence, as there each i1 needs a vector register.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = and i16 %scalar_mask, i32 1 << Idx
    //  %cond = icmp ne i16 %mask_1, 0
    //  br i1 %mask_1, label %cond.load, label %else
    //
    // On GPUs, use
    //  %cond = extractelement %mask, Idx
    // instead
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx);
    }

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.load");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");
    BasicBlock *PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;

    // Create the phi to join the new and previous value.
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
    PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    Phi->addIncoming(NewVResult, CondBlock);
    Phi->addIncoming(VResult, PrevIfBlock);
    VResult = Phi;
  }

  CI->replaceAllUsesWith(VResult);
  CI->eraseFromParent();

  ModifiedDT = true;
}

// Translate a masked store intrinsic, like
// void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr,
//                         <16 x i1> %mask)
// to a chain of basic blocks that store the elements one by one if the
// appropriate mask bit is set
//
//  %1 = bitcast i8* %addr to i32*
//  %2 = extractelement <16 x i1> %mask, i32 0
//  br i1 %2, label %cond.store, label %else
//
// cond.store:                                        ; preds = %0
//  %3 = extractelement <16 x i32> %val, i32 0
//  %4 = getelementptr i32* %1, i32 0
//  store i32 %3, i32* %4
//  br label %else
//
// else:                                    ; preds = %0, %cond.store
//  %5 = extractelement <16 x i1> %mask, i32 1
//  br i1 %5, label %cond.store1, label %else2
//
// cond.store1:                                       ; preds = %else
//  %6 = extractelement <16 x i32> %val, i32 1
//  %7 = getelementptr i32* %1, i32 1
//  store i32 %6, i32* %7
//  br label %else2
//  . . .
static void scalarizeMaskedStore(const DataLayout &DL, bool HasBranchDivergence,
                                 CallInst *CI, DomTreeUpdater *DTU,
                                 bool &ModifiedDT) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);

  const Align AlignVal = CI->getParamAlign(1).valueOrOne();
  auto *VecType = cast<VectorType>(Src->getType());

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // Short-cut if the mask is all-true.
  if (isa<Constant>(Mask) && cast<Constant>(Mask)->isAllOnesValue()) {
    StoreInst *Store = Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    Store->takeName(CI);
    Store->copyMetadata(*CI);
    CI->eraseFromParent();
    return;
  }

  // Adjust alignment for the scalar instruction.
  const Align AdjustedAlignVal =
      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
  unsigned VectorWidth = cast<FixedVectorType>(VecType)->getNumElements();

  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt = Builder.CreateExtractElement(Src, Idx);
      Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
      Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);
    }
    CI->eraseFromParent();
    return;
  }

  // Optimize the case where the "masked store" is a predicated store - that
  // is, when the mask is the splat of a non-constant scalar boolean. In that
  // case, optimize to a conditional store.
  if (isSplatValue(Mask, /*Index=*/0)) {
    Value *Predicate = Builder.CreateExtractElement(Mask, uint64_t(0ull),
                                                    Mask->getName() + ".first");
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);
    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.store");
    Builder.SetInsertPoint(CondBlock->getTerminator());

    StoreInst *Store = Builder.CreateAlignedStore(Src, Ptr, AlignVal);
    Store->takeName(CI);
    Store->copyMetadata(*CI);

    CI->eraseFromParent();
    ModifiedDT = true;
    return;
  }

  // If the mask is not v1i1, use scalar bit test operations. This generates
  // better results on X86 at least. However, don't do this on GPUs or other
  // machines with branch divergence, as there each i1 takes up a register.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %mask_1 = and i16 %scalar_mask, i32 1 << Idx
    //  %cond = icmp ne i16 %mask_1, 0
    //  br i1 %mask_1, label %cond.store, label %else
    //
    // On GPUs, use
    //  %cond = extractelement %mask, Idx
    // instead
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx);
    }

    // Create "cond" block
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %store i32 %OneElt, i32* %EltAddr
    //
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.store");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    Value *OneElt = Builder.CreateExtractElement(Src, Idx);
    Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, Idx);
    Builder.CreateAlignedStore(OneElt, Gep, AdjustedAlignVal);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");

    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
  }
  CI->eraseFromParent();

  ModifiedDT = true;
}

// Translate a masked gather intrinsic like
// <16 x i32 > @llvm.masked.gather.v16i32( <16 x i32*> %Ptrs, i32 4,
//                                         <16 x i1> %Mask, <16 x i32> %Src)
// to a chain of basic blocks, loading the elements one by one if the
// appropriate mask bit is set
//
//  %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
//  %Mask0 = extractelement <16 x i1> %Mask, i32 0
//  br i1 %Mask0, label %cond.load, label %else
//
// cond.load:
//  %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
//  %Load0 = load i32, i32* %Ptr0, align 4
//  %Res0 = insertelement <16 x i32> poison, i32 %Load0, i32 0
//  br label %else
//
// else:
//  %res.phi.else = phi <16 x i32>[%Res0, %cond.load], [poison, %0]
//  %Mask1 = extractelement <16 x i1> %Mask, i32 1
//  br i1 %Mask1, label %cond.load1, label %else2
//
// cond.load1:
//  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
//  %Load1 = load i32, i32* %Ptr1, align 4
//  %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
//  br label %else2
//  . . .
//  %Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src
//  ret <16 x i32> %Result
static void scalarizeMaskedGather(const DataLayout &DL,
                                  bool HasBranchDivergence, CallInst *CI,
                                  DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Ptrs = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(1);
  Value *Src0 = CI->getArgOperand(2);

  auto *VecType = cast<FixedVectorType>(CI->getType());
  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();
  Builder.SetInsertPoint(InsertPt);
  Align AlignVal = CI->getParamAlign(0).valueOrOne();

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // The result vector
  Value *VResult = Src0;
  unsigned VectorWidth = VecType->getNumElements();

  // Shorten the way if the mask is a vector of constants.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
      LoadInst *Load =
          Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
      VResult =
          Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
    }
    CI->replaceAllUsesWith(VResult);
    CI->eraseFromParent();
    return;
  }

  // If the mask is not v1i1, use scalar bit test operations. This generates
  // better results on X86 at least. However, don't do this on GPUs or other
  // machines with branch divergence, as there, each i1 takes up a register.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %Mask1 = and i16 %scalar_mask, i32 1 << Idx
    //  %cond = icmp ne i16 %mask_1, 0
    //  br i1 %Mask1, label %cond.load, label %else
    //
    // On GPUs, use
    //  %cond = extractelement %mask, Idx
    // instead

    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    // We mark the branch weights as explicitly unknown given they would only
    // be derivable from the mask which we do not have VP information for.
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  getExplicitlyUnknownBranchWeightsIfProfiled(
                                      *CI->getFunction(), DEBUG_TYPE),
                                  DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.load");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
    LoadInst *Load =
        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
    Value *NewVResult =
        Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");
    BasicBlock *PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;

    // Create the phi to join the new and previous value.
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
    PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    Phi->addIncoming(NewVResult, CondBlock);
    Phi->addIncoming(VResult, PrevIfBlock);
    VResult = Phi;
  }

  CI->replaceAllUsesWith(VResult);
  CI->eraseFromParent();

  ModifiedDT = true;
}

// Translate a masked scatter intrinsic, like
// void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*>* %Ptrs, i32 4,
//                                  <16 x i1> %Mask)
// to a chain of basic blocks that store the elements one by one if the
// appropriate mask bit is set.
//
//  %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
//  %Mask0 = extractelement <16 x i1> %Mask, i32 0
//  br i1 %Mask0, label %cond.store, label %else
//
// cond.store:
//  %Elt0 = extractelement <16 x i32> %Src, i32 0
//  %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
//  store i32 %Elt0, i32* %Ptr0, align 4
//  br label %else
//
// else:
//  %Mask1 = extractelement <16 x i1> %Mask, i32 1
//  br i1 %Mask1, label %cond.store1, label %else2
//
// cond.store1:
//  %Elt1 = extractelement <16 x i32> %Src, i32 1
//  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
//  store i32 %Elt1, i32* %Ptr1, align 4
//  br label %else2
//  . . .
static void scalarizeMaskedScatter(const DataLayout &DL,
                                   bool HasBranchDivergence, CallInst *CI,
                                   DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptrs = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);

  auto *SrcFVTy = cast<FixedVectorType>(Src->getType());

  assert(
      isa<VectorType>(Ptrs->getType()) &&
      isa<PointerType>(cast<VectorType>(Ptrs->getType())->getElementType()) &&
      "Vector of pointers is expected in masked scatter intrinsic");

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  Align AlignVal = CI->getParamAlign(1).valueOrOne();
  unsigned VectorWidth = SrcFVTy->getNumElements();

  // Shorten the way if the mask is a vector of constants.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt =
          Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
      Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
    }
    CI->eraseFromParent();
    return;
  }

  // If the mask is not v1i1, use scalar bit test operations. This generates
  // better results on X86 at least.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %Mask1 = and i16 %scalar_mask, i32 1 << Idx
    //  %cond = icmp ne i16 %mask_1, 0
    //  br i1 %Mask1, label %cond.store, label %else
    //
    // On GPUs, use
    //  %cond = extractelement %mask, Idx
    // instead
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }

    // Create "cond" block
    //
    //  %Elt1 = extractelement <16 x i32> %Src, i32 1
    //  %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
    //  %store i32 %Elt1, i32* %Ptr1
    //
    // We mark the branch weights as explicitly unknown given they would only
    // be derivable from the mask which we do not have VP information for.
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  getExplicitlyUnknownBranchWeightsIfProfiled(
                                      *CI->getFunction(), DEBUG_TYPE),
                                  DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.store");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    Value *OneElt = Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
    Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");

    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
  }
  CI->eraseFromParent();

  ModifiedDT = true;
}

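// Translate a masked expandload intrinsic, like
// <4 x i32> @llvm.masked.expandload.v4i32(ptr %ptr, <4 x i1> %mask,
//                                         <4 x i32> %passthru)
// into loads from consecutive memory locations for the enabled lanes only.
// With a constant mask this becomes a straight-line build_vector/shuffle
// sequence; otherwise each lane gets its own conditional block and the
// pointer is only advanced after a lane has actually been loaded. The IR
// below is an illustrative sketch (value names invented here, not taken from
// a regression test):
//
//  %mask0 = extractelement <4 x i1> %mask, i32 0
//  br i1 %mask0, label %cond.load, label %else
// cond.load:
//  %elt0 = load i32, ptr %ptr
//  %res0 = insertelement <4 x i32> %passthru, i32 %elt0, i32 0
//  %ptr.next = getelementptr inbounds i32, ptr %ptr, i32 1
//  br label %else
// else:
//  %res.phi.else = phi <4 x i32> [ %res0, %cond.load ], [ %passthru, %0 ]
//  %ptr.phi.else = phi ptr [ %ptr.next, %cond.load ], [ %ptr, %0 ]
//  . . .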
static void scalarizeMaskedExpandLoad(const DataLayout &DL,
                                      bool HasBranchDivergence, CallInst *CI,
                                      DomTreeUpdater *DTU, bool &ModifiedDT) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Mask = CI->getArgOperand(1);
  Value *PassThru = CI->getArgOperand(2);
  Align Alignment = CI->getParamAlign(0).valueOrOne();

  auto *VecType = cast<FixedVectorType>(CI->getType());

  Type *EltTy = VecType->getElementType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  unsigned VectorWidth = VecType->getNumElements();

  // The result vector
  Value *VResult = PassThru;

  // Adjust alignment for the scalar instruction.
  const Align AdjustedAlignment =
      commonAlignment(Alignment, EltTy->getPrimitiveSizeInBits() / 8);

  // Shorten the way if the mask is a vector of constants.
  // Create a build_vector pattern, with loads/poisons as necessary and then
  // shuffle blend with the pass through value.
  if (isConstantIntVector(Mask)) {
    unsigned MemIndex = 0;
    VResult = PoisonValue::get(VecType);
    SmallVector<int, 16> ShuffleMask(VectorWidth, PoisonMaskElem);
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      Value *InsertElt;
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue()) {
        InsertElt = PoisonValue::get(EltTy);
        ShuffleMask[Idx] = Idx + VectorWidth;
      } else {
        Value *NewPtr =
            Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
        InsertElt = Builder.CreateAlignedLoad(EltTy, NewPtr, AdjustedAlignment,
                                              "Load" + Twine(Idx));
        ShuffleMask[Idx] = Idx;
        ++MemIndex;
      }
      VResult = Builder.CreateInsertElement(VResult, InsertElt, Idx,
                                            "Res" + Twine(Idx));
    }
    VResult = Builder.CreateShuffleVector(VResult, PassThru, ShuffleMask);
    CI->replaceAllUsesWith(VResult);
    CI->eraseFromParent();
    return;
  }

  // If the mask is not v1i1, use scalar bit test operations. This generates
  // better results on X86 at least. However, don't do this on GPUs or other
  // machines with branch divergence, as there, each i1 takes up a register.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  br i1 %mask_1, label %cond.load, label %else
    //
    // On GPUs, use
    //  %cond = extractelement %mask, Idx
    // instead

    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }

    // Create "cond" block
    //
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %Elt = load i32* %EltAddr
    //  VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
    //
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.load");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, AdjustedAlignment);
    Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);

    // Move the pointer if there are more blocks to come.
    Value *NewPtr;
    if ((Idx + 1) != VectorWidth)
      NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, 1);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");
    BasicBlock *PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;

    // Create the phi to join the new and previous value.
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
    PHINode *ResultPhi = Builder.CreatePHI(VecType, 2, "res.phi.else");
    ResultPhi->addIncoming(NewVResult, CondBlock);
    ResultPhi->addIncoming(VResult, PrevIfBlock);
    VResult = ResultPhi;

    // Add a PHI for the pointer if this isn't the last iteration.
    if ((Idx + 1) != VectorWidth) {
      PHINode *PtrPhi = Builder.CreatePHI(Ptr->getType(), 2, "ptr.phi.else");
      PtrPhi->addIncoming(NewPtr, CondBlock);
      PtrPhi->addIncoming(Ptr, PrevIfBlock);
      Ptr = PtrPhi;
    }
  }

  CI->replaceAllUsesWith(VResult);
  CI->eraseFromParent();

  ModifiedDT = true;
}

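// Translate a masked compressstore intrinsic, like
// void @llvm.masked.compressstore.v4i32(<4 x i32> %src, ptr %ptr,
//                                       <4 x i1> %mask)
// into stores of the enabled lanes to consecutive memory locations. With a
// constant mask this is a straight-line sequence of extractelement/store
// pairs; otherwise each lane gets its own conditional block and the pointer
// is only advanced after a lane has actually been stored. Illustrative sketch
// (value names invented here, not taken from a regression test):
//
//  %mask0 = extractelement <4 x i1> %mask, i32 0
//  br i1 %mask0, label %cond.store, label %else
// cond.store:
//  %elt0 = extractelement <4 x i32> %src, i32 0
//  store i32 %elt0, ptr %ptr
//  %ptr.next = getelementptr inbounds i32, ptr %ptr, i32 1
//  br label %else
// else:
//  %ptr.phi.else = phi ptr [ %ptr.next, %cond.store ], [ %ptr, %0 ]
//  . . .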
static void scalarizeMaskedCompressStore(const DataLayout &DL,
                                         bool HasBranchDivergence, CallInst *CI,
                                         DomTreeUpdater *DTU,
                                         bool &ModifiedDT) {
  Value *Src = CI->getArgOperand(0);
  Value *Ptr = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);
  Align Alignment = CI->getParamAlign(1).valueOrOne();

  auto *VecType = cast<FixedVectorType>(Src->getType());

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  BasicBlock *IfBlock = CI->getParent();

  Builder.SetInsertPoint(InsertPt);
  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  Type *EltTy = VecType->getElementType();

  // Adjust alignment for the scalar instruction.
  const Align AdjustedAlignment =
      commonAlignment(Alignment, EltTy->getPrimitiveSizeInBits() / 8);

  unsigned VectorWidth = VecType->getNumElements();

  // Shorten the way if the mask is a vector of constants.
  if (isConstantIntVector(Mask)) {
    unsigned MemIndex = 0;
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *OneElt =
          Builder.CreateExtractElement(Src, Idx, "Elt" + Twine(Idx));
      Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
      Builder.CreateAlignedStore(OneElt, NewPtr, AdjustedAlignment);
      ++MemIndex;
    }
    CI->eraseFromParent();
    return;
  }

  // If the mask is not v1i1, use scalar bit test operations. This generates
  // better results on X86 at least. However, don't do this on GPUs or other
  // machines with branch divergence, as there, each i1 takes up a register.
  Value *SclrMask = nullptr;
  if (VectorWidth != 1 && !HasBranchDivergence) {
    Type *SclrMaskTy = Builder.getIntNTy(VectorWidth);
    SclrMask = Builder.CreateBitCast(Mask, SclrMaskTy, "scalar_mask");
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    // Fill the "else" block, created in the previous iteration
    //
    //  %mask_1 = extractelement <16 x i1> %mask, i32 Idx
    //  br i1 %mask_1, label %cond.store, label %else
    //
    // On GPUs, use
    //  %cond = extractelement %mask, Idx
    // instead
    Value *Predicate;
    if (SclrMask != nullptr) {
      Value *Mask = Builder.getInt(APInt::getOneBitSet(
          VectorWidth, adjustForEndian(DL, VectorWidth, Idx)));
      Predicate = Builder.CreateICmpNE(Builder.CreateAnd(SclrMask, Mask),
                                       Builder.getIntN(VectorWidth, 0));
    } else {
      Predicate = Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));
    }

    // Create "cond" block
    //
    //  %OneElt = extractelement <16 x i32> %Src, i32 Idx
    //  %EltAddr = getelementptr i32* %1, i32 0
    //  %store i32 %OneElt, i32* %EltAddr
    //
    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.store");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    Value *OneElt = Builder.CreateExtractElement(Src, Idx);
    Builder.CreateAlignedStore(OneElt, Ptr, AdjustedAlignment);

    // Move the pointer if there are more blocks to come.
    Value *NewPtr;
    if ((Idx + 1) != VectorWidth)
      NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, 1);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");
    BasicBlock *PrevIfBlock = IfBlock;
    IfBlock = NewIfBlock;

    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());

    // Add a PHI for the pointer if this isn't the last iteration.
    if ((Idx + 1) != VectorWidth) {
      PHINode *PtrPhi = Builder.CreatePHI(Ptr->getType(), 2, "ptr.phi.else");
      PtrPhi->addIncoming(NewPtr, CondBlock);
      PtrPhi->addIncoming(Ptr, PrevIfBlock);
      Ptr = PtrPhi;
    }
  }
  CI->eraseFromParent();

  ModifiedDT = true;
}

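// Scalarize a masked vector histogram intrinsic, e.g.
// void @llvm.experimental.vector.histogram.add.v4p0.i32(<4 x ptr> %ptrs,
//                                                       i32 %inc,
//                                                       <4 x i1> %mask)
// into a per-lane read-modify-write: for each enabled lane, load the bucket,
// apply the intrinsic's update operation (add / uadd.sat / umin / umax) with
// %inc, and store the result back. Illustrative sketch for one lane (value
// names invented here, not taken from a regression test):
//
//  %mask0 = extractelement <4 x i1> %mask, i32 0
//  br i1 %mask0, label %cond.histogram.update, label %else
// cond.histogram.update:
//  %ptr0 = extractelement <4 x ptr> %ptrs, i32 0
//  %load0 = load i32, ptr %ptr0
//  %upd0 = add i32 %load0, %inc
//  store i32 %upd0, ptr %ptr0
//  br label %else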
static void scalarizeMaskedVectorHistogram(const DataLayout &DL, CallInst *CI,
                                           DomTreeUpdater *DTU,
                                           bool &ModifiedDT) {
  // If we extend histogram to return a result someday (like the updated
  // vector) then we'll need to support it here.
  assert(CI->getType()->isVoidTy() && "Histogram with non-void return.");
  Value *Ptrs = CI->getArgOperand(0);
  Value *Inc = CI->getArgOperand(1);
  Value *Mask = CI->getArgOperand(2);

  auto *AddrType = cast<FixedVectorType>(Ptrs->getType());
  Type *EltTy = Inc->getType();

  IRBuilder<> Builder(CI->getContext());
  Instruction *InsertPt = CI;
  Builder.SetInsertPoint(InsertPt);

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  // FIXME: Do we need to add an alignment parameter to the intrinsic?
  unsigned VectorWidth = AddrType->getNumElements();
  auto CreateHistogramUpdateValue = [&](IntrinsicInst *CI, Value *Load,
                                        Value *Inc) -> Value * {
    Value *UpdateOp;
    switch (CI->getIntrinsicID()) {
    case Intrinsic::experimental_vector_histogram_add:
      UpdateOp = Builder.CreateAdd(Load, Inc);
      break;
    case Intrinsic::experimental_vector_histogram_uadd_sat:
      UpdateOp =
          Builder.CreateIntrinsic(Intrinsic::uadd_sat, {EltTy}, {Load, Inc});
      break;
    case Intrinsic::experimental_vector_histogram_umin:
      UpdateOp = Builder.CreateIntrinsic(Intrinsic::umin, {EltTy}, {Load, Inc});
      break;
    case Intrinsic::experimental_vector_histogram_umax:
      UpdateOp = Builder.CreateIntrinsic(Intrinsic::umax, {EltTy}, {Load, Inc});
      break;

    default:
      llvm_unreachable("Unexpected histogram intrinsic");
    }
    return UpdateOp;
  };

  // Shorten the way if the mask is a vector of constants.
  if (isConstantIntVector(Mask)) {
    for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
      if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
        continue;
      Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
      LoadInst *Load = Builder.CreateLoad(EltTy, Ptr, "Load" + Twine(Idx));
      Value *Update =
          CreateHistogramUpdateValue(cast<IntrinsicInst>(CI), Load, Inc);
      Builder.CreateStore(Update, Ptr);
    }
    CI->eraseFromParent();
    return;
  }

  for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
    Value *Predicate =
        Builder.CreateExtractElement(Mask, Idx, "Mask" + Twine(Idx));

    Instruction *ThenTerm =
        SplitBlockAndInsertIfThen(Predicate, InsertPt, /*Unreachable=*/false,
                                  /*BranchWeights=*/nullptr, DTU);

    BasicBlock *CondBlock = ThenTerm->getParent();
    CondBlock->setName("cond.histogram.update");

    Builder.SetInsertPoint(CondBlock->getTerminator());
    Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
    LoadInst *Load = Builder.CreateLoad(EltTy, Ptr, "Load" + Twine(Idx));
    Value *UpdateOp =
        CreateHistogramUpdateValue(cast<IntrinsicInst>(CI), Load, Inc);
    Builder.CreateStore(UpdateOp, Ptr);

    // Create "else" block, fill it in the next iteration
    BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
    NewIfBlock->setName("else");
    Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
  }

  CI->eraseFromParent();
  ModifiedDT = true;
}

static bool runImpl(Function &F, const TargetTransformInfo &TTI,
                    DominatorTree *DT) {
  std::optional<DomTreeUpdater> DTU;
  if (DT)
    DTU.emplace(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  bool EverMadeChange = false;
  bool MadeChange = true;
  auto &DL = F.getDataLayout();
  bool HasBranchDivergence = TTI.hasBranchDivergence(&F);
  while (MadeChange) {
    MadeChange = false;
    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration, TTI, DL,
                                  HasBranchDivergence, DTU ? &*DTU : nullptr);

      // Restart BB iteration if the dominator tree of the Function was changed
      if (ModifiedDTOnIteration)
        break;
    }

    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}

bool ScalarizeMaskedMemIntrinLegacyPass::runOnFunction(Function &F) {
  auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  DominatorTree *DT = nullptr;
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &DTWP->getDomTree();
  return runImpl(F, TTI, DT);
}

PreservedAnalyses
ScalarizeMaskedMemIntrinPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TTI, DT))
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

static bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT,
                          const TargetTransformInfo &TTI, const DataLayout &DL,
                          bool HasBranchDivergence, DomTreeUpdater *DTU) {
  bool MadeChange = false;

  BasicBlock::iterator CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    if (CallInst *CI = dyn_cast<CallInst>(&*CurInstIterator++))
      MadeChange |=
          optimizeCallInst(CI, ModifiedDT, TTI, DL, HasBranchDivergence, DTU);
    if (ModifiedDT)
      return true;
  }

  return MadeChange;
}

static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
                             const TargetTransformInfo &TTI,
                             const DataLayout &DL, bool HasBranchDivergence,
                             DomTreeUpdater *DTU) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II) {
    // The scalarization code below does not work for scalable vectors.
    if (isa<ScalableVectorType>(II->getType()) ||
        any_of(II->args(),
               [](Value *V) { return isa<ScalableVectorType>(V->getType()); }))
      return false;
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::experimental_vector_histogram_add:
    case Intrinsic::experimental_vector_histogram_uadd_sat:
    case Intrinsic::experimental_vector_histogram_umin:
    case Intrinsic::experimental_vector_histogram_umax:
      if (TTI.isLegalMaskedVectorHistogram(CI->getArgOperand(0)->getType(),
                                           CI->getArgOperand(1)->getType()))
        return false;
      scalarizeMaskedVectorHistogram(DL, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_load:
      // Scalarize unsupported vector masked load
      if (TTI.isLegalMaskedLoad(
              CI->getType(), CI->getParamAlign(0).valueOrOne(),
              cast<PointerType>(CI->getArgOperand(0)->getType())
                  ->getAddressSpace(),
              isConstantIntVector(CI->getArgOperand(1))
                  ? TTI::MaskKind::ConstantMask
                  : TTI::MaskKind::VariableOrConstantMask))
        return false;
      scalarizeMaskedLoad(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_store:
      if (TTI.isLegalMaskedStore(
              CI->getArgOperand(0)->getType(),
              CI->getParamAlign(1).valueOrOne(),
              cast<PointerType>(CI->getArgOperand(1)->getType())
                  ->getAddressSpace(),
              isConstantIntVector(CI->getArgOperand(2))
                  ? TTI::MaskKind::ConstantMask
                  : TTI::MaskKind::VariableOrConstantMask))
        return false;
      scalarizeMaskedStore(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_gather: {
      Align Alignment = CI->getParamAlign(0).valueOrOne();
      Type *LoadTy = CI->getType();
      if (TTI.isLegalMaskedGather(LoadTy, Alignment) &&
          !TTI.forceScalarizeMaskedGather(cast<VectorType>(LoadTy), Alignment))
        return false;
      scalarizeMaskedGather(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    }
    case Intrinsic::masked_scatter: {
      Align Alignment = CI->getParamAlign(1).valueOrOne();
      Type *StoreTy = CI->getArgOperand(0)->getType();
      if (TTI.isLegalMaskedScatter(StoreTy, Alignment) &&
          !TTI.forceScalarizeMaskedScatter(cast<VectorType>(StoreTy),
                                           Alignment))
        return false;
      scalarizeMaskedScatter(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    }
    case Intrinsic::masked_expandload:
      if (TTI.isLegalMaskedExpandLoad(
              CI->getType(),
              CI->getAttributes().getParamAttrs(0).getAlignment().valueOrOne()))
        return false;
      scalarizeMaskedExpandLoad(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
      return true;
    case Intrinsic::masked_compressstore:
      if (TTI.isLegalMaskedCompressStore(
              CI->getArgOperand(0)->getType(),
              CI->getAttributes().getParamAttrs(1).getAlignment().valueOrOne()))
        return false;
      scalarizeMaskedCompressStore(DL, HasBranchDivergence, CI, DTU,
                                   ModifiedDT);
      return true;
    }
  }

  return false;
}