1//===-- RISCVInterleavedAccess.cpp - RISC-V Interleaved Access Transform --===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Functions and callbacks related to the InterleavedAccessPass.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCV.h"
14#include "RISCVISelLowering.h"
15#include "RISCVSubtarget.h"
16#include "llvm/Analysis/ValueTracking.h"
17#include "llvm/Analysis/VectorUtils.h"
18#include "llvm/CodeGen/ValueTypes.h"
19#include "llvm/IR/IRBuilder.h"
20#include "llvm/IR/Instructions.h"
21#include "llvm/IR/IntrinsicsRISCV.h"
22#include "llvm/IR/Module.h"
23#include "llvm/IR/PatternMatch.h"
24
25using namespace llvm;
26
27bool RISCVTargetLowering::isLegalInterleavedAccessType(
28 VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
29 const DataLayout &DL) const {
30 EVT VT = getValueType(DL, Ty: VTy);
31 // Don't lower vlseg/vsseg for vector types that can't be split.
32 if (!isTypeLegal(VT))
33 return false;
34
35 if (!isLegalElementTypeForRVV(ScalarTy: VT.getScalarType()) ||
36 !allowsMemoryAccessForAlignment(Context&: VTy->getContext(), DL, VT, AddrSpace,
37 Alignment))
38 return false;
39
40 MVT ContainerVT = VT.getSimpleVT();
41
42 if (auto *FVTy = dyn_cast<FixedVectorType>(Val: VTy)) {
43 if (!Subtarget.useRVVForFixedLengthVectors())
44 return false;
45 // Sometimes the interleaved access pass picks up splats as interleaves of
46 // one element. Don't lower these.
47 if (FVTy->getNumElements() < 2)
48 return false;
49
50 ContainerVT = getContainerForFixedLengthVector(VT: VT.getSimpleVT());
51 }
52
53 // Need to make sure that EMUL * NFIELDS ≤ 8
54 auto [LMUL, Fractional] = RISCVVType::decodeVLMUL(VLMul: getLMUL(VT: ContainerVT));
55 if (Fractional)
56 return true;
57 return Factor * LMUL <= 8;
58}
59
// Intrinsic IDs for the segment load/store variants used by the lowering
// routines below. Each table is indexed by (Factor - 2), covering interleave
// factors 2 through 8.

// Fixed-length masked segment loads.
static const Intrinsic::ID FixedVlsegIntrIds[] = {
    Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
    Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
    Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
    Intrinsic::riscv_seg8_load_mask};

// Fixed-length masked *strided* segment loads (used to skip gap fields).
static const Intrinsic::ID FixedVlssegIntrIds[] = {
    Intrinsic::riscv_sseg2_load_mask, Intrinsic::riscv_sseg3_load_mask,
    Intrinsic::riscv_sseg4_load_mask, Intrinsic::riscv_sseg5_load_mask,
    Intrinsic::riscv_sseg6_load_mask, Intrinsic::riscv_sseg7_load_mask,
    Intrinsic::riscv_sseg8_load_mask};

// Scalable (tuple-based) masked segment loads.
static const Intrinsic::ID ScalableVlsegIntrIds[] = {
    Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
    Intrinsic::riscv_vlseg4_mask, Intrinsic::riscv_vlseg5_mask,
    Intrinsic::riscv_vlseg6_mask, Intrinsic::riscv_vlseg7_mask,
    Intrinsic::riscv_vlseg8_mask};

// Fixed-length masked segment stores.
static const Intrinsic::ID FixedVssegIntrIds[] = {
    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
    Intrinsic::riscv_seg8_store_mask};

// Fixed-length masked *strided* segment stores (used to skip gap fields).
static const Intrinsic::ID FixedVsssegIntrIds[] = {
    Intrinsic::riscv_sseg2_store_mask, Intrinsic::riscv_sseg3_store_mask,
    Intrinsic::riscv_sseg4_store_mask, Intrinsic::riscv_sseg5_store_mask,
    Intrinsic::riscv_sseg6_store_mask, Intrinsic::riscv_sseg7_store_mask,
    Intrinsic::riscv_sseg8_store_mask};

// Scalable (tuple-based) masked segment stores.
static const Intrinsic::ID ScalableVssegIntrIds[] = {
    Intrinsic::riscv_vsseg2_mask, Intrinsic::riscv_vsseg3_mask,
    Intrinsic::riscv_vsseg4_mask, Intrinsic::riscv_vsseg5_mask,
    Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
    Intrinsic::riscv_vsseg8_mask};
95
96static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
97 assert(N);
98 if (N == 1)
99 return true;
100
101 using namespace PatternMatch;
102 // Right now we're only recognizing the simplest pattern.
103 uint64_t C;
104 if (match(V, P: m_CombineOr(L: m_ConstantInt(V&: C),
105 R: m_NUWMul(L: m_Value(), R: m_ConstantInt(V&: C)))) &&
106 C && C % N == 0)
107 return true;
108
109 if (isPowerOf2_32(Value: N)) {
110 KnownBits KB = llvm::computeKnownBits(V, DL);
111 return KB.countMinTrailingZeros() >= Log2_32(Value: N);
112 }
113
114 return false;
115}
116
/// Do the common operand retrieval and validation required by the
/// routines below.
///
/// \p I is the memory operation being lowered: a simple load/store, a
/// vp.load/vp.store, or a masked.load/masked.store. On success, returns true
/// and fills in:
///   \p Ptr       - the pointer operand of the access.
///   \p Mask      - the lane mask (created as all-ones for plain load/store;
///                  expected to be pre-populated by the caller for the masked
///                  and VP intrinsics).
///   \p VL        - the per-field vector length, of type \p XLenTy.
///   \p Alignment - the alignment of the access.
/// Returns false if the operation cannot be handled (non-exact EVL, or a
/// masked.load with a non-undef passthru).
static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy,
                           Instruction *I, Value *&Ptr, Value *&Mask,
                           Value *&VL, Align &Alignment) {

  IRBuilder<> Builder(I);
  const DataLayout &DL = I->getDataLayout();
  ElementCount EC = VTy->getElementCount();
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    assert(LI->isSimple());
    Ptr = LI->getPointerOperand();
    Alignment = LI->getAlign();
    assert(!Mask && "Unexpected mask on a load");
    Mask = Builder.getAllOnesMask(EC);
    // Fixed vectors use the exact element count as VL; for scalable vectors
    // an all-ones value requests VLMAX.
    VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
                                   : Constant::getAllOnesValue(XLenTy);
    return true;
  }
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    assert(SI->isSimple());
    Ptr = SI->getPointerOperand();
    Alignment = SI->getAlign();
    assert(!Mask && "Unexpected mask on a store");
    Mask = Builder.getAllOnesMask(EC);
    VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
                                   : Constant::getAllOnesValue(XLenTy);
    return true;
  }

  auto *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default:
    llvm_unreachable("Unsupported intrinsic type");
  case Intrinsic::vp_load:
  case Intrinsic::vp_store: {
    auto *VPLdSt = cast<VPIntrinsic>(I);
    Ptr = VPLdSt->getMemoryPointerParam();
    // If no explicit pointer alignment is attached, fall back to the element
    // type's ABI alignment.
    Alignment = VPLdSt->getPointerAlignment().value_or(
        DL.getABITypeAlign(VTy->getElementType()));

    assert(Mask && "vp.load and vp.store needs a mask!");

    Value *WideEVL = VPLdSt->getVectorLengthParam();
    // Conservatively check if EVL is a multiple of factor, otherwise some
    // (trailing) elements might be lost after the transformation.
    if (!isMultipleOfN(WideEVL, I->getDataLayout(), Factor))
      return false;

    // The segment intrinsics take the per-field element count, so divide the
    // wide EVL by Factor (exact by the check above).
    auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
    VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
    return true;
  }
  case Intrinsic::masked_load: {
    Ptr = II->getOperand(0);
    Alignment = II->getParamAlign(0).valueOrOne();

    // Only handle loads whose passthru (operand 2) is undef/poison; a real
    // passthru would have to be blended back in, which we don't do here.
    if (!isa<UndefValue>(II->getOperand(2)))
      return false;

    assert(Mask && "masked.load needs a mask!");

    VL = isa<FixedVectorType>(VTy)
             ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
             : Constant::getAllOnesValue(XLenTy);
    return true;
  }
  case Intrinsic::masked_store: {
    // For masked.store the pointer is operand 1 (operand 0 is the value).
    Ptr = II->getOperand(1);
    Alignment = II->getParamAlign(1).valueOrOne();

    assert(Mask && "masked.store needs a mask!");

    VL = isa<FixedVectorType>(VTy)
             ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
             : Constant::getAllOnesValue(XLenTy);
    return true;
  }
  }
}
197
/// Lower an interleaved load into a vlsegN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
/// %wide.vec = load <8 x i32>, <8 x i32>* %ptr
/// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
/// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
/// Into:
/// %ld2 = { <4 x i32>, <4 x i32> } call llvm.riscv.seg2.load.v4i32.p0.i64(
///                                        %ptr, i64 4)
/// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
/// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
///
/// \p GapMask has one bit per field; a zero bit marks a field that no shuffle
/// actually reads. Only gaps in the trailing fields are supported.
bool RISCVTargetLowering::lowerInterleavedLoad(
    Instruction *Load, Value *Mask, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor, const APInt &GapMask) const {
  assert(Indices.size() == Shuffles.size());
  assert(GapMask.getBitWidth() == Factor);

  // We only support cases where the skipped fields are the trailing ones.
  if (!GapMask.isMask())
    return false;
  IRBuilder<> Builder(Load);

  // Number of fields actually read; the remaining trailing fields are gaps.
  unsigned MaskFactor = GapMask.popcount();
  const DataLayout &DL = Load->getDataLayout();
  auto *VTy = cast<FixedVectorType>(Shuffles[0]->getType());
  auto *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(MaskFactor, VTy, XLenTy, Load, Ptr, Mask, VL, Alignment))
    return false;

  Type *PtrTy = Ptr->getType();
  unsigned AS = PtrTy->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(VTy, MaskFactor, Alignment, AS, DL))
    return false;

  CallInst *SegLoad = nullptr;
  if (MaskFactor < Factor && MaskFactor != 1) {
    // Lower to strided segmented load: read MaskFactor contiguous fields per
    // segment and use a byte stride covering all Factor fields to skip the
    // trailing gaps.
    unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
    Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
    SegLoad = Builder.CreateIntrinsic(FixedVlssegIntrIds[MaskFactor - 2],
                                      {VTy, PtrTy, XLenTy, XLenTy},
                                      {Ptr, Stride, Mask, VL});
  } else {
    // Lower to normal segmented load.
    // NOTE(review): when MaskFactor == 1 (and Factor > 1) this still emits a
    // full Factor-wide vlseg even though the legality check above used
    // MaskFactor -- confirm that is intended.
    SegLoad = Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
                                      {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
  }

  for (unsigned i = 0; i < Shuffles.size(); i++) {
    unsigned FactorIdx = Indices[i];
    if (FactorIdx >= MaskFactor) {
      // Replace masked-off factors (that are still extracted) with poison.
      Shuffles[i]->replaceAllUsesWith(PoisonValue::get(VTy));
    } else {
      Value *SubVec = Builder.CreateExtractValue(SegLoad, FactorIdx);
      Shuffles[i]->replaceAllUsesWith(SubVec);
    }
  }

  return true;
}
263
/// Lower an interleaved store into a vssegN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
/// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
/// store <12 x i32> %i.vec, <12 x i32>* %ptr
///
/// Into:
/// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
/// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
/// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
/// call void llvm.riscv.seg3.store.v4i32.p0.i64(%sub.v0, %sub.v1, %sub.v2,
///                                              %ptr, i32 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vsseg3 instruction in CodeGen.
///
/// \p GapMask has one bit per field; zero bits mark fields not actually
/// written. Only trailing gaps are supported, and at least two fields must be
/// active.
bool RISCVTargetLowering::lowerInterleavedStore(Instruction *Store,
                                                Value *LaneMask,
                                                ShuffleVectorInst *SVI,
                                                unsigned Factor,
                                                const APInt &GapMask) const {
  assert(GapMask.getBitWidth() == Factor);

  // We only support cases where the skipped fields are the trailing ones.
  // TODO: Lower to strided store if there is only a single active field.
  unsigned MaskFactor = GapMask.popcount();
  if (MaskFactor < 2 || !GapMask.isMask())
    return false;

  IRBuilder<> Builder(Store);
  const DataLayout &DL = Store->getDataLayout();
  auto Mask = SVI->getShuffleMask();
  auto *ShuffleVTy = cast<FixedVectorType>(SVI->getType());
  // Given SVI : <n*factor x ty>, then VTy : <n x ty>
  auto *VTy = FixedVectorType::get(ShuffleVTy->getElementType(),
                                   ShuffleVTy->getNumElements() / Factor);
  auto *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(MaskFactor, VTy, XLenTy, Store, Ptr, LaneMask, VL,
                      Alignment))
    return false;

  Type *PtrTy = Ptr->getType();
  unsigned AS = PtrTy->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(VTy, MaskFactor, Alignment, AS, DL))
    return false;

  Function *SegStoreFunc;
  if (MaskFactor < Factor)
    // Strided segmented store: write MaskFactor fields per segment and skip
    // the trailing gap fields via the stride argument added below.
    SegStoreFunc = Intrinsic::getOrInsertDeclaration(
        Store->getModule(), FixedVsssegIntrIds[MaskFactor - 2],
        {VTy, PtrTy, XLenTy, XLenTy});
  else
    // Normal segmented store.
    SegStoreFunc = Intrinsic::getOrInsertDeclaration(
        Store->getModule(), FixedVssegIntrIds[Factor - 2],
        {VTy, PtrTy, XLenTy});

  SmallVector<Value *, 10> Ops;
  SmallVector<int, 16> NewShuffleMask;

  // De-interleave the wide shuffle into one narrow shuffle per active field;
  // field i takes mask elements i, i+Factor, i+2*Factor, ...
  for (unsigned i = 0; i < MaskFactor; i++) {
    // Collect shuffle mask for this lane.
    for (unsigned j = 0; j < VTy->getNumElements(); j++)
      NewShuffleMask.push_back(Mask[i + Factor * j]);

    Value *Shuffle = Builder.CreateShuffleVector(
        SVI->getOperand(0), SVI->getOperand(1), NewShuffleMask);
    Ops.push_back(Shuffle);

    NewShuffleMask.clear();
  }
  Ops.push_back(Ptr);
  if (MaskFactor < Factor) {
    // Insert the stride argument: the distance in bytes between consecutive
    // segments, covering all Factor fields (including the skipped ones).
    unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
    Ops.push_back(ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes));
  }
  Ops.append({LaneMask, VL});
  Builder.CreateCall(SegStoreFunc, Ops);

  return true;
}
350
/// Lower the load feeding a vector.deinterleaveN intrinsic \p DI into a
/// segmented load, replacing all uses of \p DI with the segment results.
bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
    Instruction *Load, Value *Mask, IntrinsicInst *DI) const {
  const unsigned Factor = getDeinterleaveIntrinsicFactor(DI->getIntrinsicID());
  // Segment loads only exist for factors up to 8.
  if (Factor > 8)
    return false;

  IRBuilder<> Builder(Load);

  VectorType *ResVTy = getDeinterleavedVectorType(DI);

  const DataLayout &DL = Load->getDataLayout();
  auto *XLenTy = Builder.getIntNTy(Subtarget.getXLen());

  Value *Ptr, *VL;
  Align Alignment;
  if (!getMemOperands(Factor, ResVTy, XLenTy, Load, Ptr, Mask, VL, Alignment))
    return false;

  Type *PtrTy = Ptr->getType();
  unsigned AS = PtrTy->getPointerAddressSpace();
  if (!isLegalInterleavedAccessType(ResVTy, Factor, Alignment, AS, DL))
    return false;

  Value *Return;
  if (isa<FixedVectorType>(ResVTy)) {
    // Fixed vectors lower directly to a seg load, whose struct result matches
    // the deinterleave intrinsic's result shape.
    Return = Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
                                     {ResVTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
  } else {
    // Scalable vectors go through the tuple-based vlseg intrinsics: build the
    // riscv.vector.tuple type (sized in i8 units: NumElts * SEW / 8 bytes per
    // field), emit the masked segment load, then unpack the tuple into the
    // struct value the deinterleave intrinsic's users expect.
    unsigned SEW = DL.getTypeSizeInBits(ResVTy->getElementType());
    unsigned NumElts = ResVTy->getElementCount().getKnownMinValue();
    Type *VecTupTy = TargetExtType::get(
        Load->getContext(), "riscv.vector.tuple",
        ScalableVectorType::get(Builder.getInt8Ty(), NumElts * SEW / 8),
        Factor);
    Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
        Load->getModule(), ScalableVlsegIntrIds[Factor - 2],
        {VecTupTy, PtrTy, Mask->getType(), VL->getType()});

    Value *Operands[] = {
        PoisonValue::get(VecTupTy),
        Ptr,
        Mask,
        VL,
        // No merge value is supplied, so tail/mask policy can be agnostic.
        ConstantInt::get(XLenTy,
                         RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC),
        ConstantInt::get(XLenTy, Log2_64(SEW))};

    CallInst *Vlseg = Builder.CreateCall(VlsegNFunc, Operands);

    SmallVector<Type *, 2> AggrTypes{Factor, ResVTy};
    Return = PoisonValue::get(StructType::get(Load->getContext(), AggrTypes));
    for (unsigned i = 0; i < Factor; ++i) {
      Value *VecExtract = Builder.CreateIntrinsic(
          Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy},
          {Vlseg, Builder.getInt32(i)});
      Return = Builder.CreateInsertValue(Return, VecExtract, i);
    }
  }

  DI->replaceAllUsesWith(Return);
  return true;
}
413
414bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
415 Instruction *Store, Value *Mask, ArrayRef<Value *> InterleaveValues) const {
416 unsigned Factor = InterleaveValues.size();
417 if (Factor > 8)
418 return false;
419
420 IRBuilder<> Builder(Store);
421
422 auto *InVTy = cast<VectorType>(Val: InterleaveValues[0]->getType());
423 const DataLayout &DL = Store->getDataLayout();
424 Type *XLenTy = Builder.getIntNTy(N: Subtarget.getXLen());
425
426 Value *Ptr, *VL;
427 Align Alignment;
428 if (!getMemOperands(Factor, VTy: InVTy, XLenTy, I: Store, Ptr, Mask, VL, Alignment))
429 return false;
430 Type *PtrTy = Ptr->getType();
431 unsigned AS = Ptr->getType()->getPointerAddressSpace();
432 if (!isLegalInterleavedAccessType(VTy: InVTy, Factor, Alignment, AddrSpace: AS, DL))
433 return false;
434
435 if (isa<FixedVectorType>(Val: InVTy)) {
436 Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
437 M: Store->getModule(), id: FixedVssegIntrIds[Factor - 2],
438 Tys: {InVTy, PtrTy, XLenTy});
439 SmallVector<Value *, 10> Ops(InterleaveValues);
440 Ops.append(IL: {Ptr, Mask, VL});
441 Builder.CreateCall(Callee: VssegNFunc, Args: Ops);
442 return true;
443 }
444 unsigned SEW = DL.getTypeSizeInBits(Ty: InVTy->getElementType());
445 unsigned NumElts = InVTy->getElementCount().getKnownMinValue();
446 Type *VecTupTy = TargetExtType::get(
447 Context&: Store->getContext(), Name: "riscv.vector.tuple",
448 Types: ScalableVectorType::get(ElementType: Builder.getInt8Ty(), MinNumElts: NumElts * SEW / 8), Ints: Factor);
449
450 Value *StoredVal = PoisonValue::get(T: VecTupTy);
451 for (unsigned i = 0; i < Factor; ++i)
452 StoredVal = Builder.CreateIntrinsic(
453 ID: Intrinsic::riscv_tuple_insert, Types: {VecTupTy, InVTy},
454 Args: {StoredVal, InterleaveValues[i], Builder.getInt32(C: i)});
455
456 Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
457 M: Store->getModule(), id: ScalableVssegIntrIds[Factor - 2],
458 Tys: {VecTupTy, PtrTy, Mask->getType(), VL->getType()});
459
460 Value *Operands[] = {StoredVal, Ptr, Mask, VL,
461 ConstantInt::get(Ty: XLenTy, V: Log2_64(Value: SEW))};
462 Builder.CreateCall(Callee: VssegNFunc, Args: Operands);
463 return true;
464}
465