1 | //===-- RISCVTargetTransformInfo.cpp - RISC-V specific TTI ----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "RISCVTargetTransformInfo.h" |
10 | #include "MCTargetDesc/RISCVMatInt.h" |
11 | #include "llvm/ADT/STLExtras.h" |
12 | #include "llvm/Analysis/TargetTransformInfo.h" |
13 | #include "llvm/CodeGen/BasicTTIImpl.h" |
14 | #include "llvm/CodeGen/CostTable.h" |
15 | #include "llvm/CodeGen/TargetLowering.h" |
16 | #include "llvm/CodeGen/ValueTypes.h" |
17 | #include "llvm/IR/Instructions.h" |
18 | #include "llvm/IR/PatternMatch.h" |
19 | #include <cmath> |
20 | #include <optional> |
21 | using namespace llvm; |
22 | using namespace llvm::PatternMatch; |
23 | |
24 | #define DEBUG_TYPE "riscvtti" |
25 | |
static cl::opt<unsigned> RVVRegisterWidthLMUL(
"riscv-v-register-bit-width-lmul",
cl::desc(
"The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
"by autovectorized code. Fractional LMULs are not supported."),
cl::init(Val: 2), cl::Hidden);
32 | |
static cl::opt<unsigned> SLPMaxVF(
"riscv-v-slp-max-vf",
cl::desc(
"Overrides result used for getMaximumVF query which is used "
"exclusively by SLP vectorizer."),
cl::Hidden);
39 | |
static cl::opt<unsigned>
RVVMinTripCount("riscv-v-min-trip-count",
cl::desc("Set the lower bound of a trip count to decide on "
"vectorization while tail-folding."),
cl::init(Val: 5), cl::Hidden);
45 | |
46 | InstructionCost |
47 | RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT, |
48 | TTI::TargetCostKind CostKind) const { |
49 | // Check if the type is valid for all CostKind |
50 | if (!VT.isVector()) |
51 | return InstructionCost::getInvalid(); |
52 | size_t NumInstr = OpCodes.size(); |
53 | if (CostKind == TTI::TCK_CodeSize) |
54 | return NumInstr; |
55 | InstructionCost LMULCost = TLI->getLMULCost(VT); |
56 | if ((CostKind != TTI::TCK_RecipThroughput) && (CostKind != TTI::TCK_Latency)) |
57 | return LMULCost * NumInstr; |
58 | InstructionCost Cost = 0; |
59 | for (auto Op : OpCodes) { |
60 | switch (Op) { |
61 | case RISCV::VRGATHER_VI: |
62 | Cost += TLI->getVRGatherVICost(VT); |
63 | break; |
64 | case RISCV::VRGATHER_VV: |
65 | Cost += TLI->getVRGatherVVCost(VT); |
66 | break; |
67 | case RISCV::VSLIDEUP_VI: |
68 | case RISCV::VSLIDEDOWN_VI: |
69 | Cost += TLI->getVSlideVICost(VT); |
70 | break; |
71 | case RISCV::VSLIDEUP_VX: |
72 | case RISCV::VSLIDEDOWN_VX: |
73 | Cost += TLI->getVSlideVXCost(VT); |
74 | break; |
75 | case RISCV::VREDMAX_VS: |
76 | case RISCV::VREDMIN_VS: |
77 | case RISCV::VREDMAXU_VS: |
78 | case RISCV::VREDMINU_VS: |
79 | case RISCV::VREDSUM_VS: |
80 | case RISCV::VREDAND_VS: |
81 | case RISCV::VREDOR_VS: |
82 | case RISCV::VREDXOR_VS: |
83 | case RISCV::VFREDMAX_VS: |
84 | case RISCV::VFREDMIN_VS: |
85 | case RISCV::VFREDUSUM_VS: { |
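// Unordered reductions are assumed to lower to a log2(VL)-deep reduction
// tree, hence the Log2 cost below.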
86 | unsigned VL = VT.getVectorMinNumElements(); |
87 | if (!VT.isFixedLengthVector()) |
88 | VL *= *getVScaleForTuning(); |
89 | Cost += Log2_32_Ceil(Value: VL); |
90 | break; |
91 | } |
92 | case RISCV::VFREDOSUM_VS: { |
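// Ordered floating-point reductions must accumulate elements sequentially,
// so the cost scales linearly with VL.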
93 | unsigned VL = VT.getVectorMinNumElements(); |
94 | if (!VT.isFixedLengthVector()) |
95 | VL *= *getVScaleForTuning(); |
96 | Cost += VL; |
97 | break; |
98 | } |
99 | case RISCV::VMV_X_S: |
100 | case RISCV::VMV_S_X: |
101 | case RISCV::VFMV_F_S: |
102 | case RISCV::VFMV_S_F: |
103 | case RISCV::VMOR_MM: |
104 | case RISCV::VMXOR_MM: |
105 | case RISCV::VMAND_MM: |
106 | case RISCV::VMANDN_MM: |
107 | case RISCV::VMNAND_MM: |
108 | case RISCV::VCPOP_M: |
109 | case RISCV::VFIRST_M: |
110 | Cost += 1; |
111 | break; |
112 | default: |
113 | Cost += LMULCost; |
114 | } |
115 | } |
116 | return Cost; |
117 | } |
118 | |
119 | static InstructionCost getIntImmCostImpl(const DataLayout &DL, |
120 | const RISCVSubtarget *ST, |
121 | const APInt &Imm, Type *Ty, |
122 | TTI::TargetCostKind CostKind, |
123 | bool FreeZeroes) { |
assert(Ty->isIntegerTy() &&
"getIntImmCost can only estimate cost of materialising integers");
126 | |
127 | // We have a Zero register, so 0 is always free. |
128 | if (Imm == 0) |
129 | return TTI::TCC_Free; |
130 | |
131 | // Otherwise, we check how many instructions it will take to materialise. |
132 | return RISCVMatInt::getIntMatCost(Val: Imm, Size: DL.getTypeSizeInBits(Ty), STI: *ST, |
133 | /*CompressionCost=*/false, FreeZeroes); |
134 | } |
135 | |
136 | InstructionCost |
137 | RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, |
138 | TTI::TargetCostKind CostKind) const { |
139 | return getIntImmCostImpl(DL: getDataLayout(), ST: getST(), Imm, Ty, CostKind, FreeZeroes: false); |
140 | } |
141 | |
142 | // Look for patterns of shift followed by AND that can be turned into a pair of |
143 | // shifts. We won't need to materialize an immediate for the AND so these can |
144 | // be considered free. |
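// For example, on RV64 (illustrative values):
//   (and (shl x, 4), 0xFF0)  ; Mask = 0xFF0 has 4 trailing zeros == ShAmt
// can be lowered to (srli (slli x, 56), 52), so no AND immediate is needed.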
145 | static bool canUseShiftPair(Instruction *Inst, const APInt &Imm) { |
146 | uint64_t Mask = Imm.getZExtValue(); |
147 | auto *BO = dyn_cast<BinaryOperator>(Val: Inst->getOperand(i: 0)); |
148 | if (!BO || !BO->hasOneUse()) |
149 | return false; |
150 | |
151 | if (BO->getOpcode() != Instruction::Shl) |
152 | return false; |
153 | |
154 | if (!isa<ConstantInt>(Val: BO->getOperand(i_nocapture: 1))) |
155 | return false; |
156 | |
157 | unsigned ShAmt = cast<ConstantInt>(Val: BO->getOperand(i_nocapture: 1))->getZExtValue(); |
158 | // (and (shl x, c2), c1) will be matched to (srli (slli x, c2+c3), c3) if c1 |
159 | // is a mask shifted by c2 bits with c3 leading zeros. |
160 | if (isShiftedMask_64(Value: Mask)) { |
161 | unsigned Trailing = llvm::countr_zero(Val: Mask); |
162 | if (ShAmt == Trailing) |
163 | return true; |
164 | } |
165 | |
166 | return false; |
167 | } |
168 | |
169 | InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, |
170 | const APInt &Imm, Type *Ty, |
171 | TTI::TargetCostKind CostKind, |
172 | Instruction *Inst) const { |
assert(Ty->isIntegerTy() &&
"getIntImmCost can only estimate cost of materialising integers");
175 | |
176 | // We have a Zero register, so 0 is always free. |
177 | if (Imm == 0) |
178 | return TTI::TCC_Free; |
179 | |
// Some instructions in RISC-V can take a 12-bit immediate. Some of these are
// commutative; in others the immediate comes from a specific argument index.
182 | bool Takes12BitImm = false; |
183 | unsigned ImmArgIdx = ~0U; |
184 | |
185 | switch (Opcode) { |
186 | case Instruction::GetElementPtr: |
187 | // Never hoist any arguments to a GetElementPtr. CodeGenPrepare will |
188 | // split up large offsets in GEP into better parts than ConstantHoisting |
189 | // can. |
190 | return TTI::TCC_Free; |
191 | case Instruction::Store: { |
// Use the materialization cost regardless of whether it's the address or
// the value that is constant, except when the store is misaligned and
// misaligned accesses are not legal (experience shows constant hoisting
// can sometimes be harmful in such cases).
196 | if (Idx == 1 || !Inst) |
197 | return getIntImmCostImpl(DL: getDataLayout(), ST: getST(), Imm, Ty, CostKind, |
198 | /*FreeZeroes=*/true); |
199 | |
200 | StoreInst *ST = cast<StoreInst>(Val: Inst); |
201 | if (!getTLI()->allowsMemoryAccessForAlignment( |
202 | Context&: Ty->getContext(), DL, VT: getTLI()->getValueType(DL, Ty), |
203 | AddrSpace: ST->getPointerAddressSpace(), Alignment: ST->getAlign())) |
204 | return TTI::TCC_Free; |
205 | |
206 | return getIntImmCostImpl(DL: getDataLayout(), ST: getST(), Imm, Ty, CostKind, |
207 | /*FreeZeroes=*/true); |
208 | } |
209 | case Instruction::Load: |
210 | // If the address is a constant, use the materialization cost. |
211 | return getIntImmCost(Imm, Ty, CostKind); |
212 | case Instruction::And: |
213 | // zext.h |
214 | if (Imm == UINT64_C(0xffff) && ST->hasStdExtZbb()) |
215 | return TTI::TCC_Free; |
216 | // zext.w |
217 | if (Imm == UINT64_C(0xffffffff) && |
218 | ((ST->hasStdExtZba() && ST->isRV64()) || ST->isRV32())) |
219 | return TTI::TCC_Free; |
220 | // bclri |
221 | if (ST->hasStdExtZbs() && (~Imm).isPowerOf2()) |
222 | return TTI::TCC_Free; |
223 | if (Inst && Idx == 1 && Imm.getBitWidth() <= ST->getXLen() && |
224 | canUseShiftPair(Inst, Imm)) |
225 | return TTI::TCC_Free; |
226 | Takes12BitImm = true; |
227 | break; |
228 | case Instruction::Add: |
229 | Takes12BitImm = true; |
230 | break; |
231 | case Instruction::Or: |
232 | case Instruction::Xor: |
233 | // bseti/binvi |
234 | if (ST->hasStdExtZbs() && Imm.isPowerOf2()) |
235 | return TTI::TCC_Free; |
236 | Takes12BitImm = true; |
237 | break; |
238 | case Instruction::Mul: |
239 | // Power of 2 is a shift. Negated power of 2 is a shift and a negate. |
240 | if (Imm.isPowerOf2() || Imm.isNegatedPowerOf2()) |
241 | return TTI::TCC_Free; |
242 | // One more or less than a power of 2 can use SLLI+ADD/SUB. |
243 | if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2()) |
244 | return TTI::TCC_Free; |
245 | // FIXME: There is no MULI instruction. |
246 | Takes12BitImm = true; |
247 | break; |
248 | case Instruction::Sub: |
249 | case Instruction::Shl: |
250 | case Instruction::LShr: |
251 | case Instruction::AShr: |
252 | Takes12BitImm = true; |
253 | ImmArgIdx = 1; |
254 | break; |
255 | default: |
256 | break; |
257 | } |
258 | |
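// A 12-bit, sign-extended immediate covers [-2048, 2047]; the
// isLegalAddImmediate check below enforces that range.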
259 | if (Takes12BitImm) { |
260 | // Check immediate is the correct argument... |
261 | if (Instruction::isCommutative(Opcode) || Idx == ImmArgIdx) { |
262 | // ... and fits into the 12-bit immediate. |
263 | if (Imm.getSignificantBits() <= 64 && |
264 | getTLI()->isLegalAddImmediate(Imm: Imm.getSExtValue())) { |
265 | return TTI::TCC_Free; |
266 | } |
267 | } |
268 | |
269 | // Otherwise, use the full materialisation cost. |
270 | return getIntImmCost(Imm, Ty, CostKind); |
271 | } |
272 | |
273 | // By default, prevent hoisting. |
274 | return TTI::TCC_Free; |
275 | } |
276 | |
277 | InstructionCost |
278 | RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, |
279 | const APInt &Imm, Type *Ty, |
280 | TTI::TargetCostKind CostKind) const { |
281 | // Prevent hoisting in unknown cases. |
282 | return TTI::TCC_Free; |
283 | } |
284 | |
285 | bool RISCVTTIImpl::hasActiveVectorLength() const { |
286 | return ST->hasVInstructions(); |
287 | } |
288 | |
289 | TargetTransformInfo::PopcntSupportKind |
290 | RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) const { |
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
292 | return ST->hasStdExtZbb() || (ST->hasVendorXCVbitmanip() && !ST->is64Bit()) |
293 | ? TTI::PSK_FastHardware |
294 | : TTI::PSK_Software; |
295 | } |
296 | |
297 | InstructionCost RISCVTTIImpl::getPartialReductionCost( |
298 | unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, |
299 | ElementCount VF, TTI::PartialReductionExtendKind OpAExtend, |
300 | TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp, |
301 | TTI::TargetCostKind CostKind) const { |
302 | |
303 | // zve32x is broken for partial_reduce_umla, but let's make sure we |
304 | // don't generate them. |
305 | if (!ST->hasStdExtZvqdotq() || ST->getELen() < 64 || |
306 | Opcode != Instruction::Add || !BinOp || *BinOp != Instruction::Mul || |
307 | InputTypeA != InputTypeB || !InputTypeA->isIntegerTy(Bitwidth: 8) || |
308 | !AccumType->isIntegerTy(Bitwidth: 32) || !VF.isKnownMultipleOf(RHS: 4)) |
309 | return InstructionCost::getInvalid(); |
310 | |
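// Each vqdot*.vv accumulates a dot product of 4 i8 elements into one i32
// lane, so cost the operation on a vector of VF/4 accumulator elements.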
311 | Type *Tp = VectorType::get(ElementType: AccumType, EC: VF.divideCoefficientBy(RHS: 4)); |
312 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: Tp); |
// Note: Assuming all vqdot* variants have equal cost.
314 | return LT.first * |
315 | getRISCVInstructionCost(OpCodes: RISCV::VQDOT_VV, VT: LT.second, CostKind); |
316 | } |
317 | |
318 | bool RISCVTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const { |
319 | // Currently, the ExpandReductions pass can't expand scalable-vector |
320 | // reductions, but we still request expansion as RVV doesn't support certain |
321 | // reductions and the SelectionDAG can't legalize them either. |
322 | switch (II->getIntrinsicID()) { |
323 | default: |
324 | return false; |
325 | // These reductions have no equivalent in RVV |
326 | case Intrinsic::vector_reduce_mul: |
327 | case Intrinsic::vector_reduce_fmul: |
328 | return true; |
329 | } |
330 | } |
331 | |
332 | std::optional<unsigned> RISCVTTIImpl::getMaxVScale() const { |
333 | if (ST->hasVInstructions()) |
334 | return ST->getRealMaxVLen() / RISCV::RVVBitsPerBlock; |
335 | return BaseT::getMaxVScale(); |
336 | } |
337 | |
338 | std::optional<unsigned> RISCVTTIImpl::getVScaleForTuning() const { |
339 | if (ST->hasVInstructions()) |
340 | if (unsigned MinVLen = ST->getRealMinVLen(); |
341 | MinVLen >= RISCV::RVVBitsPerBlock) |
342 | return MinVLen / RISCV::RVVBitsPerBlock; |
343 | return BaseT::getVScaleForTuning(); |
344 | } |
345 | |
346 | TypeSize |
347 | RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { |
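// Clamp the requested LMUL to [1, 8] and round down to a power of two;
// fractional LMULs are not supported for this query.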
348 | unsigned LMUL = |
349 | llvm::bit_floor(Value: std::clamp<unsigned>(val: RVVRegisterWidthLMUL, lo: 1, hi: 8)); |
350 | switch (K) { |
351 | case TargetTransformInfo::RGK_Scalar: |
352 | return TypeSize::getFixed(ExactSize: ST->getXLen()); |
353 | case TargetTransformInfo::RGK_FixedWidthVector: |
354 | return TypeSize::getFixed( |
355 | ExactSize: ST->useRVVForFixedLengthVectors() ? LMUL * ST->getRealMinVLen() : 0); |
356 | case TargetTransformInfo::RGK_ScalableVector: |
357 | return TypeSize::getScalable( |
358 | MinimumSize: (ST->hasVInstructions() && |
359 | ST->getRealMinVLen() >= RISCV::RVVBitsPerBlock) |
360 | ? LMUL * RISCV::RVVBitsPerBlock |
361 | : 0); |
362 | } |
363 | |
llvm_unreachable("Unsupported register kind");
365 | } |
366 | |
367 | InstructionCost |
368 | RISCVTTIImpl::getConstantPoolLoadCost(Type *Ty, |
369 | TTI::TargetCostKind CostKind) const { |
370 | // Add a cost of address generation + the cost of the load. The address |
371 | // is expected to be a PC relative offset to a constant pool entry |
372 | // using auipc/addi. |
373 | return 2 + getMemoryOpCost(Opcode: Instruction::Load, Src: Ty, Alignment: DL.getABITypeAlign(Ty), |
374 | /*AddressSpace=*/0, CostKind); |
375 | } |
376 | |
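// Returns true if Mask is a concatenation of repeated copies of the same
// subvector, e.g. <0, 1, 0, 1, 0, 1, 0, 1> repeats a 2-element subvector
// four times, so SubVectorSize is set to 2.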
377 | static bool isRepeatedConcatMask(ArrayRef<int> Mask, int &SubVectorSize) { |
378 | unsigned Size = Mask.size(); |
379 | if (!isPowerOf2_32(Value: Size)) |
380 | return false; |
381 | for (unsigned I = 0; I != Size; ++I) { |
382 | if (static_cast<unsigned>(Mask[I]) == I) |
383 | continue; |
384 | if (Mask[I] != 0) |
385 | return false; |
386 | if (Size % I != 0) |
387 | return false; |
388 | for (unsigned J = I + 1; J != Size; ++J) |
389 | // Check the pattern is repeated. |
390 | if (static_cast<unsigned>(Mask[J]) != J % I) |
391 | return false; |
392 | SubVectorSize = I; |
393 | return true; |
394 | } |
// If we get here, Mask is the identity (e.g. <0, 1, 2, 3>), which is not a
// concatenation.
396 | return false; |
397 | } |
398 | |
399 | static VectorType *getVRGatherIndexType(MVT DataVT, const RISCVSubtarget &ST, |
400 | LLVMContext &C) { |
401 | assert((DataVT.getScalarSizeInBits() != 8 || |
DataVT.getVectorNumElements() <= 256) && "unhandled case in lowering");
403 | MVT IndexVT = DataVT.changeTypeToInteger(); |
404 | if (IndexVT.getScalarType().bitsGT(VT: ST.getXLenVT())) |
405 | IndexVT = IndexVT.changeVectorElementType(EltVT: MVT::i16); |
406 | return cast<VectorType>(Val: EVT(IndexVT).getTypeForEVT(Context&: C)); |
407 | } |
408 | |
409 | /// Attempt to approximate the cost of a shuffle which will require splitting |
410 | /// during legalization. Note that processShuffleMasks is not an exact proxy |
411 | /// for the algorithm used in LegalizeVectorTypes, but hopefully it's a |
/// reasonably close upper bound.
413 | static InstructionCost costShuffleViaSplitting(const RISCVTTIImpl &TTI, |
414 | MVT LegalVT, VectorType *Tp, |
415 | ArrayRef<int> Mask, |
416 | TTI::TargetCostKind CostKind) { |
assert(LegalVT.isFixedLengthVector() && !Mask.empty() &&
"Expected fixed vector type and non-empty mask");
419 | unsigned LegalNumElts = LegalVT.getVectorNumElements(); |
420 | // Number of destination vectors after legalization: |
421 | unsigned NumOfDests = divideCeil(Numerator: Mask.size(), Denominator: LegalNumElts); |
422 | // We are going to permute multiple sources and the result will be in |
423 | // multiple destinations. Providing an accurate cost only for splits where |
424 | // the element type remains the same. |
425 | if (NumOfDests <= 1 || |
426 | LegalVT.getVectorElementType().getSizeInBits() != |
427 | Tp->getElementType()->getPrimitiveSizeInBits() || |
428 | LegalNumElts >= Tp->getElementCount().getFixedValue()) |
429 | return InstructionCost::getInvalid(); |
430 | |
431 | unsigned VecTySize = TTI.getDataLayout().getTypeStoreSize(Ty: Tp); |
432 | unsigned LegalVTSize = LegalVT.getStoreSize(); |
433 | // Number of source vectors after legalization: |
434 | unsigned NumOfSrcs = divideCeil(Numerator: VecTySize, Denominator: LegalVTSize); |
435 | |
436 | auto *SingleOpTy = FixedVectorType::get(ElementType: Tp->getElementType(), NumElts: LegalNumElts); |
437 | |
438 | unsigned NormalizedVF = LegalNumElts * std::max(a: NumOfSrcs, b: NumOfDests); |
439 | unsigned NumOfSrcRegs = NormalizedVF / LegalNumElts; |
440 | unsigned NumOfDestRegs = NormalizedVF / LegalNumElts; |
441 | SmallVector<int> NormalizedMask(NormalizedVF, PoisonMaskElem); |
assert(NormalizedVF >= Mask.size() &&
"Normalized mask expected to be not shorter than original mask.");
444 | copy(Range&: Mask, Out: NormalizedMask.begin()); |
445 | InstructionCost Cost = 0; |
446 | SmallDenseSet<std::pair<ArrayRef<int>, unsigned>> ReusedSingleSrcShuffles; |
447 | processShuffleMasks( |
448 | Mask: NormalizedMask, NumOfSrcRegs, NumOfDestRegs, NumOfUsedRegs: NumOfDestRegs, NoInputAction: []() {}, |
449 | SingleInputAction: [&](ArrayRef<int> RegMask, unsigned SrcReg, unsigned DestReg) { |
450 | if (ShuffleVectorInst::isIdentityMask(Mask: RegMask, NumSrcElts: RegMask.size())) |
451 | return; |
452 | if (!ReusedSingleSrcShuffles.insert(V: std::make_pair(x&: RegMask, y&: SrcReg)) |
453 | .second) |
454 | return; |
455 | Cost += TTI.getShuffleCost( |
456 | Kind: TTI::SK_PermuteSingleSrc, |
457 | DstTy: FixedVectorType::get(ElementType: SingleOpTy->getElementType(), NumElts: RegMask.size()), |
458 | SrcTy: SingleOpTy, Mask: RegMask, CostKind, Index: 0, SubTp: nullptr); |
459 | }, |
460 | ManyInputsAction: [&](ArrayRef<int> RegMask, unsigned Idx1, unsigned Idx2, bool NewReg) { |
461 | Cost += TTI.getShuffleCost( |
462 | Kind: TTI::SK_PermuteTwoSrc, |
463 | DstTy: FixedVectorType::get(ElementType: SingleOpTy->getElementType(), NumElts: RegMask.size()), |
464 | SrcTy: SingleOpTy, Mask: RegMask, CostKind, Index: 0, SubTp: nullptr); |
465 | }); |
466 | return Cost; |
467 | } |
468 | |
469 | /// Try to perform better estimation of the permutation. |
470 | /// 1. Split the source/destination vectors into real registers. |
471 | /// 2. Do the mask analysis to identify which real registers are |
472 | /// permuted. If more than 1 source registers are used for the |
473 | /// destination register building, the cost for this destination register |
474 | /// is (Number_of_source_register - 1) * Cost_PermuteTwoSrc. If only one |
475 | /// source register is used, build mask and calculate the cost as a cost |
476 | /// of PermuteSingleSrc. |
477 | /// Also, for the single register permute we try to identify if the |
478 | /// destination register is just a copy of the source register or the |
479 | /// copy of the previous destination register (the cost is |
480 | /// TTI::TCC_Basic). If the source register is just reused, the cost for |
481 | /// this operation is 0. |
482 | static InstructionCost |
483 | costShuffleViaVRegSplitting(const RISCVTTIImpl &TTI, MVT LegalVT, |
484 | std::optional<unsigned> VLen, VectorType *Tp, |
485 | ArrayRef<int> Mask, TTI::TargetCostKind CostKind) { |
486 | assert(LegalVT.isFixedLengthVector()); |
487 | if (!VLen || Mask.empty()) |
488 | return InstructionCost::getInvalid(); |
489 | MVT ElemVT = LegalVT.getVectorElementType(); |
490 | unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits(); |
491 | LegalVT = TTI.getTypeLegalizationCost( |
492 | Ty: FixedVectorType::get(ElementType: Tp->getElementType(), NumElts: ElemsPerVReg)) |
493 | .second; |
494 | // Number of destination vectors after legalization: |
495 | InstructionCost NumOfDests = |
496 | divideCeil(Numerator: Mask.size(), Denominator: LegalVT.getVectorNumElements()); |
497 | if (NumOfDests <= 1 || |
498 | LegalVT.getVectorElementType().getSizeInBits() != |
499 | Tp->getElementType()->getPrimitiveSizeInBits() || |
500 | LegalVT.getVectorNumElements() >= Tp->getElementCount().getFixedValue()) |
501 | return InstructionCost::getInvalid(); |
502 | |
503 | unsigned VecTySize = TTI.getDataLayout().getTypeStoreSize(Ty: Tp); |
504 | unsigned LegalVTSize = LegalVT.getStoreSize(); |
505 | // Number of source vectors after legalization: |
506 | unsigned NumOfSrcs = divideCeil(Numerator: VecTySize, Denominator: LegalVTSize); |
507 | |
508 | auto *SingleOpTy = FixedVectorType::get(ElementType: Tp->getElementType(), |
509 | NumElts: LegalVT.getVectorNumElements()); |
510 | |
511 | unsigned E = NumOfDests.getValue(); |
512 | unsigned NormalizedVF = |
513 | LegalVT.getVectorNumElements() * std::max(a: NumOfSrcs, b: E); |
514 | unsigned NumOfSrcRegs = NormalizedVF / LegalVT.getVectorNumElements(); |
515 | unsigned NumOfDestRegs = NormalizedVF / LegalVT.getVectorNumElements(); |
516 | SmallVector<int> NormalizedMask(NormalizedVF, PoisonMaskElem); |
assert(NormalizedVF >= Mask.size() &&
"Normalized mask expected to be not shorter than original mask.");
519 | copy(Range&: Mask, Out: NormalizedMask.begin()); |
520 | InstructionCost Cost = 0; |
521 | int NumShuffles = 0; |
522 | SmallDenseSet<std::pair<ArrayRef<int>, unsigned>> ReusedSingleSrcShuffles; |
523 | processShuffleMasks( |
524 | Mask: NormalizedMask, NumOfSrcRegs, NumOfDestRegs, NumOfUsedRegs: NumOfDestRegs, NoInputAction: []() {}, |
525 | SingleInputAction: [&](ArrayRef<int> RegMask, unsigned SrcReg, unsigned DestReg) { |
526 | if (ShuffleVectorInst::isIdentityMask(Mask: RegMask, NumSrcElts: RegMask.size())) |
527 | return; |
528 | if (!ReusedSingleSrcShuffles.insert(V: std::make_pair(x&: RegMask, y&: SrcReg)) |
529 | .second) |
530 | return; |
531 | ++NumShuffles; |
532 | Cost += TTI.getShuffleCost(Kind: TTI::SK_PermuteSingleSrc, DstTy: SingleOpTy, |
533 | SrcTy: SingleOpTy, Mask: RegMask, CostKind, Index: 0, SubTp: nullptr); |
534 | }, |
535 | ManyInputsAction: [&](ArrayRef<int> RegMask, unsigned Idx1, unsigned Idx2, bool NewReg) { |
536 | Cost += TTI.getShuffleCost(Kind: TTI::SK_PermuteTwoSrc, DstTy: SingleOpTy, |
537 | SrcTy: SingleOpTy, Mask: RegMask, CostKind, Index: 0, SubTp: nullptr); |
538 | NumShuffles += 2; |
539 | }); |
540 | // Note: check that we do not emit too many shuffles here to prevent code |
541 | // size explosion. |
// TODO: investigate whether this can be improved by extra analysis of the
// masks to check if the resulting code is more profitable.
544 | if ((NumOfDestRegs > 2 && NumShuffles <= static_cast<int>(NumOfDestRegs)) || |
545 | (NumOfDestRegs <= 2 && NumShuffles < 4)) |
546 | return Cost; |
547 | return InstructionCost::getInvalid(); |
548 | } |
549 | |
550 | InstructionCost RISCVTTIImpl::getSlideCost(FixedVectorType *Tp, |
551 | ArrayRef<int> Mask, |
552 | TTI::TargetCostKind CostKind) const { |
// Avoid missing masks and length-changing shuffles.
554 | if (Mask.size() <= 2 || Mask.size() != Tp->getNumElements()) |
555 | return InstructionCost::getInvalid(); |
556 | |
557 | int NumElts = Tp->getNumElements(); |
558 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: Tp); |
559 | // Avoid scalarization cases |
560 | if (!LT.second.isFixedLengthVector()) |
561 | return InstructionCost::getInvalid(); |
562 | |
563 | // Requires moving elements between parts, which requires additional |
564 | // unmodeled instructions. |
565 | if (LT.first != 1) |
566 | return InstructionCost::getInvalid(); |
567 | |
568 | auto GetSlideOpcode = [&](int SlideAmt) { |
569 | assert(SlideAmt != 0); |
570 | bool IsVI = isUInt<5>(x: std::abs(x: SlideAmt)); |
571 | if (SlideAmt < 0) |
572 | return IsVI ? RISCV::VSLIDEDOWN_VI : RISCV::VSLIDEDOWN_VX; |
573 | return IsVI ? RISCV::VSLIDEUP_VI : RISCV::VSLIDEUP_VX; |
574 | }; |
575 | |
576 | std::array<std::pair<int, int>, 2> SrcInfo; |
577 | if (!isMaskedSlidePair(Mask, NumElts, SrcInfo)) |
578 | return InstructionCost::getInvalid(); |
579 | |
580 | if (SrcInfo[1].second == 0) |
581 | std::swap(x&: SrcInfo[0], y&: SrcInfo[1]); |
582 | |
583 | InstructionCost FirstSlideCost = 0; |
584 | if (SrcInfo[0].second != 0) { |
585 | unsigned Opcode = GetSlideOpcode(SrcInfo[0].second); |
586 | FirstSlideCost = getRISCVInstructionCost(OpCodes: Opcode, VT: LT.second, CostKind); |
587 | } |
588 | |
589 | if (SrcInfo[1].first == -1) |
590 | return FirstSlideCost; |
591 | |
592 | InstructionCost SecondSlideCost = 0; |
593 | if (SrcInfo[1].second != 0) { |
594 | unsigned Opcode = GetSlideOpcode(SrcInfo[1].second); |
595 | SecondSlideCost = getRISCVInstructionCost(OpCodes: Opcode, VT: LT.second, CostKind); |
596 | } else { |
597 | SecondSlideCost = |
598 | getRISCVInstructionCost(OpCodes: RISCV::VMERGE_VVM, VT: LT.second, CostKind); |
599 | } |
600 | |
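// Combining the two sources requires a mask (for the masked slide or the
// vmerge); assume it is materialized via a constant pool load.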
601 | auto EC = Tp->getElementCount(); |
602 | VectorType *MaskTy = |
603 | VectorType::get(ElementType: IntegerType::getInt1Ty(C&: Tp->getContext()), EC); |
604 | InstructionCost MaskCost = getConstantPoolLoadCost(Ty: MaskTy, CostKind); |
605 | return FirstSlideCost + SecondSlideCost + MaskCost; |
606 | } |
607 | |
608 | InstructionCost |
609 | RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, |
610 | VectorType *SrcTy, ArrayRef<int> Mask, |
611 | TTI::TargetCostKind CostKind, int Index, |
612 | VectorType *SubTp, ArrayRef<const Value *> Args, |
613 | const Instruction *CxtI) const { |
614 | assert((Mask.empty() || DstTy->isScalableTy() || |
Mask.size() == DstTy->getElementCount().getKnownMinValue()) &&
"Expected the Mask to match the return size if given");
assert(SrcTy->getScalarType() == DstTy->getScalarType() &&
"Expected the same scalar types");
619 | |
620 | Kind = improveShuffleKindFromMask(Kind, Mask, SrcTy, Index, SubTy&: SubTp); |
621 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: SrcTy); |
622 | |
623 | // First, handle cases where having a fixed length vector enables us to |
624 | // give a more accurate cost than falling back to generic scalable codegen. |
625 | // TODO: Each of these cases hints at a modeling gap around scalable vectors. |
626 | if (auto *FVTp = dyn_cast<FixedVectorType>(Val: SrcTy); |
627 | FVTp && ST->hasVInstructions() && LT.second.isFixedLengthVector()) { |
628 | InstructionCost VRegSplittingCost = costShuffleViaVRegSplitting( |
629 | TTI: *this, LegalVT: LT.second, VLen: ST->getRealVLen(), |
630 | Tp: Kind == TTI::SK_InsertSubvector ? DstTy : SrcTy, Mask, CostKind); |
631 | if (VRegSplittingCost.isValid()) |
632 | return VRegSplittingCost; |
633 | switch (Kind) { |
634 | default: |
635 | break; |
636 | case TTI::SK_PermuteSingleSrc: { |
637 | if (Mask.size() >= 2) { |
638 | MVT EltTp = LT.second.getVectorElementType(); |
// If the element size is < ELEN, then interleave and deinterleave
// shuffles of 2 vectors can be lowered into the following
// sequences.
642 | if (EltTp.getScalarSizeInBits() < ST->getELen()) { |
643 | // Example sequence: |
644 | // vsetivli zero, 4, e8, mf4, ta, ma (ignored) |
645 | // vwaddu.vv v10, v8, v9 |
646 | // li a0, -1 (ignored) |
647 | // vwmaccu.vx v10, a0, v9 |
648 | if (ShuffleVectorInst::isInterleaveMask(Mask, Factor: 2, NumInputElts: Mask.size())) |
649 | return 2 * LT.first * TLI->getLMULCost(VT: LT.second); |
650 | |
651 | if (Mask[0] == 0 || Mask[0] == 1) { |
652 | auto DeinterleaveMask = createStrideMask(Start: Mask[0], Stride: 2, VF: Mask.size()); |
653 | // Example sequence: |
654 | // vnsrl.wi v10, v8, 0 |
655 | if (equal(LRange&: DeinterleaveMask, RRange&: Mask)) |
656 | return LT.first * getRISCVInstructionCost(OpCodes: RISCV::VNSRL_WI, |
657 | VT: LT.second, CostKind); |
658 | } |
659 | } |
660 | int SubVectorSize; |
661 | if (LT.second.getScalarSizeInBits() != 1 && |
662 | isRepeatedConcatMask(Mask, SubVectorSize)) { |
663 | InstructionCost Cost = 0; |
664 | unsigned NumSlides = Log2_32(Value: Mask.size() / SubVectorSize); |
665 | // The cost of extraction from a subvector is 0 if the index is 0. |
666 | for (unsigned I = 0; I != NumSlides; ++I) { |
667 | unsigned InsertIndex = SubVectorSize * (1 << I); |
668 | FixedVectorType *SubTp = |
669 | FixedVectorType::get(ElementType: SrcTy->getElementType(), NumElts: InsertIndex); |
670 | FixedVectorType *DestTp = |
671 | FixedVectorType::getDoubleElementsVectorType(VTy: SubTp); |
672 | std::pair<InstructionCost, MVT> DestLT = |
673 | getTypeLegalizationCost(Ty: DestTp); |
674 | // Add the cost of whole vector register move because the |
675 | // destination vector register group for vslideup cannot overlap the |
676 | // source. |
677 | Cost += DestLT.first * TLI->getLMULCost(VT: DestLT.second); |
678 | Cost += getShuffleCost(Kind: TTI::SK_InsertSubvector, DstTy: DestTp, SrcTy: DestTp, Mask: {}, |
679 | CostKind, Index: InsertIndex, SubTp); |
680 | } |
681 | return Cost; |
682 | } |
683 | } |
684 | |
685 | if (InstructionCost SlideCost = getSlideCost(Tp: FVTp, Mask, CostKind); |
686 | SlideCost.isValid()) |
687 | return SlideCost; |
688 | |
689 | // vrgather + cost of generating the mask constant. |
690 | // We model this for an unknown mask with a single vrgather. |
691 | if (LT.first == 1 && (LT.second.getScalarSizeInBits() != 8 || |
692 | LT.second.getVectorNumElements() <= 256)) { |
693 | VectorType *IdxTy = |
694 | getVRGatherIndexType(DataVT: LT.second, ST: *ST, C&: SrcTy->getContext()); |
695 | InstructionCost IndexCost = getConstantPoolLoadCost(Ty: IdxTy, CostKind); |
696 | return IndexCost + |
697 | getRISCVInstructionCost(OpCodes: RISCV::VRGATHER_VV, VT: LT.second, CostKind); |
698 | } |
699 | break; |
700 | } |
701 | case TTI::SK_Transpose: |
702 | case TTI::SK_PermuteTwoSrc: { |
703 | |
704 | if (InstructionCost SlideCost = getSlideCost(Tp: FVTp, Mask, CostKind); |
705 | SlideCost.isValid()) |
706 | return SlideCost; |
707 | |
708 | // 2 x (vrgather + cost of generating the mask constant) + cost of mask |
709 | // register for the second vrgather. We model this for an unknown |
710 | // (shuffle) mask. |
711 | if (LT.first == 1 && (LT.second.getScalarSizeInBits() != 8 || |
712 | LT.second.getVectorNumElements() <= 256)) { |
713 | auto &C = SrcTy->getContext(); |
714 | auto EC = SrcTy->getElementCount(); |
715 | VectorType *IdxTy = getVRGatherIndexType(DataVT: LT.second, ST: *ST, C); |
716 | VectorType *MaskTy = VectorType::get(ElementType: IntegerType::getInt1Ty(C), EC); |
717 | InstructionCost IndexCost = getConstantPoolLoadCost(Ty: IdxTy, CostKind); |
718 | InstructionCost MaskCost = getConstantPoolLoadCost(Ty: MaskTy, CostKind); |
719 | return 2 * IndexCost + |
720 | getRISCVInstructionCost(OpCodes: {RISCV::VRGATHER_VV, RISCV::VRGATHER_VV}, |
721 | VT: LT.second, CostKind) + |
722 | MaskCost; |
723 | } |
724 | break; |
725 | } |
726 | } |
727 | |
728 | auto shouldSplit = [](TTI::ShuffleKind Kind) { |
729 | switch (Kind) { |
730 | default: |
731 | return false; |
732 | case TTI::SK_PermuteSingleSrc: |
733 | case TTI::SK_Transpose: |
734 | case TTI::SK_PermuteTwoSrc: |
735 | return true; |
736 | } |
737 | }; |
738 | |
739 | if (!Mask.empty() && LT.first.isValid() && LT.first != 1 && |
740 | shouldSplit(Kind)) { |
741 | InstructionCost SplitCost = |
742 | costShuffleViaSplitting(TTI: *this, LegalVT: LT.second, Tp: FVTp, Mask, CostKind); |
743 | if (SplitCost.isValid()) |
744 | return SplitCost; |
745 | } |
746 | } |
747 | |
748 | // Handle scalable vectors (and fixed vectors legalized to scalable vectors). |
749 | switch (Kind) { |
750 | default: |
751 | // Fallthrough to generic handling. |
752 | // TODO: Most of these cases will return getInvalid in generic code, and |
753 | // must be implemented here. |
754 | break; |
755 | case TTI::SK_ExtractSubvector: |
756 | // Extract at zero is always a subregister extract |
757 | if (Index == 0) |
758 | return TTI::TCC_Free; |
759 | |
// If we're extracting a subvector of at most m1 size at a sub-register
// boundary - which unfortunately requires exact vlen to identify - this is
// a subregister extract at worst and thus won't require a vslidedown.
763 | // TODO: Extend for aligned m2, m4 subvector extracts |
// TODO: Extend for misaligned (but contained) extracts
765 | // TODO: Extend for scalable subvector types |
766 | if (std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(Ty: SubTp); |
767 | SubLT.second.isValid() && SubLT.second.isFixedLengthVector()) { |
768 | if (std::optional<unsigned> VLen = ST->getRealVLen(); |
769 | VLen && SubLT.second.getScalarSizeInBits() * Index % *VLen == 0 && |
770 | SubLT.second.getSizeInBits() <= *VLen) |
771 | return TTI::TCC_Free; |
772 | } |
773 | |
774 | // Example sequence: |
775 | // vsetivli zero, 4, e8, mf2, tu, ma (ignored) |
776 | // vslidedown.vi v8, v9, 2 |
777 | return LT.first * |
778 | getRISCVInstructionCost(OpCodes: RISCV::VSLIDEDOWN_VI, VT: LT.second, CostKind); |
779 | case TTI::SK_InsertSubvector: |
780 | // Example sequence: |
781 | // vsetivli zero, 4, e8, mf2, tu, ma (ignored) |
782 | // vslideup.vi v8, v9, 2 |
783 | LT = getTypeLegalizationCost(Ty: DstTy); |
784 | return LT.first * |
785 | getRISCVInstructionCost(OpCodes: RISCV::VSLIDEUP_VI, VT: LT.second, CostKind); |
786 | case TTI::SK_Select: { |
787 | // Example sequence: |
788 | // li a0, 90 |
789 | // vsetivli zero, 8, e8, mf2, ta, ma (ignored) |
790 | // vmv.s.x v0, a0 |
791 | // vmerge.vvm v8, v9, v8, v0 |
792 | // We use 2 for the cost of the mask materialization as this is the true |
793 | // cost for small masks and most shuffles are small. At worst, this cost |
794 | // should be a very small constant for the constant pool load. As such, |
795 | // we may bias towards large selects slightly more than truly warranted. |
796 | return LT.first * |
797 | (1 + getRISCVInstructionCost(OpCodes: {RISCV::VMV_S_X, RISCV::VMERGE_VVM}, |
798 | VT: LT.second, CostKind)); |
799 | } |
800 | case TTI::SK_Broadcast: { |
801 | bool HasScalar = (Args.size() > 0) && (Operator::getOpcode(V: Args[0]) == |
802 | Instruction::InsertElement); |
803 | if (LT.second.getScalarSizeInBits() == 1) { |
804 | if (HasScalar) { |
805 | // Example sequence: |
806 | // andi a0, a0, 1 |
807 | // vsetivli zero, 2, e8, mf8, ta, ma (ignored) |
808 | // vmv.v.x v8, a0 |
809 | // vmsne.vi v0, v8, 0 |
810 | return LT.first * |
811 | (1 + getRISCVInstructionCost(OpCodes: {RISCV::VMV_V_X, RISCV::VMSNE_VI}, |
812 | VT: LT.second, CostKind)); |
813 | } |
814 | // Example sequence: |
815 | // vsetivli zero, 2, e8, mf8, ta, mu (ignored) |
816 | // vmv.v.i v8, 0 |
817 | // vmerge.vim v8, v8, 1, v0 |
818 | // vmv.x.s a0, v8 |
819 | // andi a0, a0, 1 |
820 | // vmv.v.x v8, a0 |
821 | // vmsne.vi v0, v8, 0 |
822 | |
823 | return LT.first * |
824 | (1 + getRISCVInstructionCost(OpCodes: {RISCV::VMV_V_I, RISCV::VMERGE_VIM, |
825 | RISCV::VMV_X_S, RISCV::VMV_V_X, |
826 | RISCV::VMSNE_VI}, |
827 | VT: LT.second, CostKind)); |
828 | } |
829 | |
830 | if (HasScalar) { |
831 | // Example sequence: |
832 | // vmv.v.x v8, a0 |
833 | return LT.first * |
834 | getRISCVInstructionCost(OpCodes: RISCV::VMV_V_X, VT: LT.second, CostKind); |
835 | } |
836 | |
837 | // Example sequence: |
838 | // vrgather.vi v9, v8, 0 |
839 | return LT.first * |
840 | getRISCVInstructionCost(OpCodes: RISCV::VRGATHER_VI, VT: LT.second, CostKind); |
841 | } |
842 | case TTI::SK_Splice: { |
843 | // vslidedown+vslideup. |
844 | // TODO: Multiplying by LT.first implies this legalizes into multiple copies |
845 | // of similar code, but I think we expand through memory. |
846 | unsigned Opcodes[2] = {RISCV::VSLIDEDOWN_VX, RISCV::VSLIDEUP_VX}; |
847 | if (Index >= 0 && Index < 32) |
848 | Opcodes[0] = RISCV::VSLIDEDOWN_VI; |
849 | else if (Index < 0 && Index > -32) |
850 | Opcodes[1] = RISCV::VSLIDEUP_VI; |
851 | return LT.first * getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
852 | } |
853 | case TTI::SK_Reverse: { |
854 | |
855 | if (!LT.second.isVector()) |
856 | return InstructionCost::getInvalid(); |
857 | |
858 | // TODO: Cases to improve here: |
859 | // * Illegal vector types |
860 | // * i64 on RV32 |
861 | if (SrcTy->getElementType()->isIntegerTy(Bitwidth: 1)) { |
862 | VectorType *WideTy = |
863 | VectorType::get(ElementType: IntegerType::get(C&: SrcTy->getContext(), NumBits: 8), |
864 | EC: cast<VectorType>(Val: SrcTy)->getElementCount()); |
865 | return getCastInstrCost(Opcode: Instruction::ZExt, Dst: WideTy, Src: SrcTy, |
866 | CCH: TTI::CastContextHint::None, CostKind) + |
867 | getShuffleCost(Kind: TTI::SK_Reverse, DstTy: WideTy, SrcTy: WideTy, Mask: {}, CostKind, Index: 0, |
868 | SubTp: nullptr) + |
869 | getCastInstrCost(Opcode: Instruction::Trunc, Dst: SrcTy, Src: WideTy, |
870 | CCH: TTI::CastContextHint::None, CostKind); |
871 | } |
872 | |
873 | MVT ContainerVT = LT.second; |
874 | if (LT.second.isFixedLengthVector()) |
875 | ContainerVT = TLI->getContainerForFixedLengthVector(VT: LT.second); |
876 | MVT M1VT = RISCVTargetLowering::getM1VT(VT: ContainerVT); |
877 | if (ContainerVT.bitsLE(VT: M1VT)) { |
878 | // Example sequence: |
879 | // csrr a0, vlenb |
880 | // srli a0, a0, 3 |
881 | // addi a0, a0, -1 |
882 | // vsetvli a1, zero, e8, mf8, ta, mu (ignored) |
883 | // vid.v v9 |
884 | // vrsub.vx v10, v9, a0 |
885 | // vrgather.vv v9, v8, v10 |
886 | InstructionCost LenCost = 3; |
887 | if (LT.second.isFixedLengthVector()) |
888 | // vrsub.vi has a 5 bit immediate field, otherwise an li suffices |
889 | LenCost = isInt<5>(x: LT.second.getVectorNumElements() - 1) ? 0 : 1; |
890 | unsigned Opcodes[] = {RISCV::VID_V, RISCV::VRSUB_VX, RISCV::VRGATHER_VV}; |
891 | if (LT.second.isFixedLengthVector() && |
892 | isInt<5>(x: LT.second.getVectorNumElements() - 1)) |
893 | Opcodes[1] = RISCV::VRSUB_VI; |
894 | InstructionCost GatherCost = |
895 | getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
896 | return LT.first * (LenCost + GatherCost); |
897 | } |
898 | |
899 | // At high LMUL, we split into a series of M1 reverses (see |
900 | // lowerVECTOR_REVERSE) and then do a single slide at the end to eliminate |
901 | // the resulting gap at the bottom (for fixed vectors only). The important |
902 | // bit is that the cost scales linearly, not quadratically with LMUL. |
903 | unsigned M1Opcodes[] = {RISCV::VID_V, RISCV::VRSUB_VX}; |
904 | InstructionCost FixedCost = |
905 | getRISCVInstructionCost(OpCodes: M1Opcodes, VT: M1VT, CostKind) + 3; |
906 | unsigned Ratio = |
907 | ContainerVT.getVectorMinNumElements() / M1VT.getVectorMinNumElements(); |
908 | InstructionCost GatherCost = |
909 | getRISCVInstructionCost(OpCodes: {RISCV::VRGATHER_VV}, VT: M1VT, CostKind) * Ratio; |
910 | InstructionCost SlideCost = !LT.second.isFixedLengthVector() ? 0 : |
911 | getRISCVInstructionCost(OpCodes: {RISCV::VSLIDEDOWN_VX}, VT: LT.second, CostKind); |
912 | return FixedCost + LT.first * (GatherCost + SlideCost); |
913 | } |
914 | } |
915 | return BaseT::getShuffleCost(Kind, DstTy, SrcTy, Mask, CostKind, Index, |
916 | SubTp); |
917 | } |
918 | |
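// Returns true if VT occupies at most one vector register, i.e. its LMUL is
// fractional or exactly 1.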
static bool isM1OrSmaller(MVT VT) {
920 | RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); |
921 | return (LMUL == RISCVVType::VLMUL::LMUL_F8 || |
922 | LMUL == RISCVVType::VLMUL::LMUL_F4 || |
923 | LMUL == RISCVVType::VLMUL::LMUL_F2 || |
924 | LMUL == RISCVVType::VLMUL::LMUL_1); |
925 | } |
926 | |
927 | InstructionCost RISCVTTIImpl::getScalarizationOverhead( |
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
929 | TTI::TargetCostKind CostKind, bool ForPoisonSrc, |
930 | ArrayRef<Value *> VL) const { |
931 | if (isa<ScalableVectorType>(Val: Ty)) |
932 | return InstructionCost::getInvalid(); |
933 | |
934 | // A build_vector (which is m1 sized or smaller) can be done in no |
935 | // worse than one vslide1down.vx per element in the type. We could |
936 | // in theory do an explode_vector in the inverse manner, but our |
937 | // lowering today does not have a first class node for this pattern. |
938 | InstructionCost Cost = BaseT::getScalarizationOverhead( |
939 | InTy: Ty, DemandedElts, Insert, Extract, CostKind); |
940 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty); |
941 | if (Insert && !Extract && LT.first.isValid() && LT.second.isVector()) { |
942 | if (Ty->getScalarSizeInBits() == 1) { |
943 | auto *WideVecTy = cast<VectorType>(Val: Ty->getWithNewBitWidth(NewBitWidth: 8)); |
944 | // Note: Implicit scalar anyextend is assumed to be free since the i1 |
945 | // must be stored in a GPR. |
946 | return getScalarizationOverhead(Ty: WideVecTy, DemandedElts, Insert, Extract, |
947 | CostKind) + |
948 | getCastInstrCost(Opcode: Instruction::Trunc, Dst: Ty, Src: WideVecTy, |
949 | CCH: TTI::CastContextHint::None, CostKind, I: nullptr); |
950 | } |
951 | |
952 | assert(LT.second.isFixedLengthVector()); |
953 | MVT ContainerVT = TLI->getContainerForFixedLengthVector(VT: LT.second); |
954 | if (isM1OrSmaller(VT: ContainerVT)) { |
955 | InstructionCost BV = |
956 | cast<FixedVectorType>(Val: Ty)->getNumElements() * |
957 | getRISCVInstructionCost(OpCodes: RISCV::VSLIDE1DOWN_VX, VT: LT.second, CostKind); |
958 | if (BV < Cost) |
959 | Cost = BV; |
960 | } |
961 | } |
962 | return Cost; |
963 | } |
964 | |
965 | InstructionCost |
966 | RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, |
967 | unsigned AddressSpace, |
968 | TTI::TargetCostKind CostKind) const { |
969 | if (!isLegalMaskedLoadStore(DataType: Src, Alignment) || |
970 | CostKind != TTI::TCK_RecipThroughput) |
971 | return BaseT::getMaskedMemoryOpCost(Opcode, DataTy: Src, Alignment, AddressSpace, |
972 | CostKind); |
973 | |
974 | return getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind); |
975 | } |
976 | |
977 | InstructionCost RISCVTTIImpl::getInterleavedMemoryOpCost( |
978 | unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, |
979 | Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, |
980 | bool UseMaskForCond, bool UseMaskForGaps) const { |
981 | |
// The interleaved memory access pass will lower interleaved memory ops (i.e.
983 | // a load and store followed by a specific shuffle) to vlseg/vsseg |
984 | // intrinsics. |
985 | if (!UseMaskForCond && !UseMaskForGaps && |
986 | Factor <= TLI->getMaxSupportedInterleaveFactor()) { |
987 | auto *VTy = cast<VectorType>(Val: VecTy); |
988 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: VTy); |
// Need to make sure type hasn't been scalarized
990 | if (LT.second.isVector()) { |
991 | auto *SubVecTy = |
992 | VectorType::get(ElementType: VTy->getElementType(), |
993 | EC: VTy->getElementCount().divideCoefficientBy(RHS: Factor)); |
994 | if (VTy->getElementCount().isKnownMultipleOf(RHS: Factor) && |
995 | TLI->isLegalInterleavedAccessType(VTy: SubVecTy, Factor, Alignment, |
996 | AddrSpace: AddressSpace, DL)) { |
997 | |
998 | // Some processors optimize segment loads/stores as one wide memory op + |
999 | // Factor * LMUL shuffle ops. |
1000 | if (ST->hasOptimizedSegmentLoadStore(NF: Factor)) { |
1001 | InstructionCost Cost = |
1002 | getMemoryOpCost(Opcode, Src: VTy, Alignment, AddressSpace, CostKind); |
1003 | MVT SubVecVT = getTLI()->getValueType(DL, Ty: SubVecTy).getSimpleVT(); |
1004 | Cost += Factor * TLI->getLMULCost(VT: SubVecVT); |
1005 | return LT.first * Cost; |
1006 | } |
1007 | |
1008 | // Otherwise, the cost is proportional to the number of elements (VL * |
1009 | // Factor ops). |
1010 | InstructionCost MemOpCost = |
1011 | getMemoryOpCost(Opcode, Src: VTy->getElementType(), Alignment, AddressSpace: 0, |
1012 | CostKind, OpdInfo: {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None}); |
1013 | unsigned NumLoads = getEstimatedVLFor(Ty: VTy); |
1014 | return NumLoads * MemOpCost; |
1015 | } |
1016 | } |
1017 | } |
1018 | |
// TODO: Return the cost of interleaved accesses for scalable vectors when
// unable to convert to segment access instructions.
1021 | if (isa<ScalableVectorType>(Val: VecTy)) |
1022 | return InstructionCost::getInvalid(); |
1023 | |
1024 | auto *FVTy = cast<FixedVectorType>(Val: VecTy); |
1025 | InstructionCost MemCost = |
1026 | getMemoryOpCost(Opcode, Src: VecTy, Alignment, AddressSpace, CostKind); |
1027 | unsigned VF = FVTy->getNumElements() / Factor; |
1028 | |
1029 | // An interleaved load will look like this for Factor=3: |
1030 | // %wide.vec = load <12 x i32>, ptr %3, align 4 |
1031 | // %strided.vec = shufflevector %wide.vec, poison, <4 x i32> <stride mask> |
1032 | // %strided.vec1 = shufflevector %wide.vec, poison, <4 x i32> <stride mask> |
1033 | // %strided.vec2 = shufflevector %wide.vec, poison, <4 x i32> <stride mask> |
1034 | if (Opcode == Instruction::Load) { |
1035 | InstructionCost Cost = MemCost; |
1036 | for (unsigned Index : Indices) { |
1037 | FixedVectorType *VecTy = |
1038 | FixedVectorType::get(ElementType: FVTy->getElementType(), NumElts: VF * Factor); |
1039 | auto Mask = createStrideMask(Start: Index, Stride: Factor, VF); |
1040 | Mask.resize(N: VF * Factor, NV: -1); |
1041 | InstructionCost ShuffleCost = |
1042 | getShuffleCost(Kind: TTI::ShuffleKind::SK_PermuteSingleSrc, DstTy: VecTy, SrcTy: VecTy, |
1043 | Mask, CostKind, Index: 0, SubTp: nullptr, Args: {}); |
1044 | Cost += ShuffleCost; |
1045 | } |
1046 | return Cost; |
1047 | } |
1048 | |
1049 | // TODO: Model for NF > 2 |
1050 | // We'll need to enhance getShuffleCost to model shuffles that are just |
1051 | // inserts and extracts into subvectors, since they won't have the full cost |
1052 | // of a vrgather. |
1053 | // An interleaved store for 3 vectors of 4 lanes will look like |
1054 | // %11 = shufflevector <4 x i32> %4, <4 x i32> %6, <8 x i32> <0...7> |
1055 | // %12 = shufflevector <4 x i32> %9, <4 x i32> poison, <8 x i32> <0...3> |
1056 | // %13 = shufflevector <8 x i32> %11, <8 x i32> %12, <12 x i32> <0...11> |
1057 | // %interleaved.vec = shufflevector %13, poison, <12 x i32> <interleave mask> |
1058 | // store <12 x i32> %interleaved.vec, ptr %10, align 4 |
1059 | if (Factor != 2) |
1060 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, |
1061 | Alignment, AddressSpace, CostKind, |
1062 | UseMaskForCond, UseMaskForGaps); |
1063 | |
assert(Opcode == Instruction::Store && "Opcode must be a store");
1065 | // For an interleaving store of 2 vectors, we perform one large interleaving |
1066 | // shuffle that goes into the wide store |
1067 | auto Mask = createInterleaveMask(VF, NumVecs: Factor); |
1068 | InstructionCost ShuffleCost = |
1069 | getShuffleCost(Kind: TTI::ShuffleKind::SK_PermuteSingleSrc, DstTy: FVTy, SrcTy: FVTy, Mask, |
1070 | CostKind, Index: 0, SubTp: nullptr, Args: {}); |
1071 | return MemCost + ShuffleCost; |
1072 | } |
1073 | |
1074 | InstructionCost RISCVTTIImpl::getGatherScatterOpCost( |
1075 | unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, |
1076 | Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { |
1077 | if (CostKind != TTI::TCK_RecipThroughput) |
1078 | return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, |
1079 | Alignment, CostKind, I); |
1080 | |
1081 | if ((Opcode == Instruction::Load && |
1082 | !isLegalMaskedGather(DataType: DataTy, Alignment: Align(Alignment))) || |
1083 | (Opcode == Instruction::Store && |
1084 | !isLegalMaskedScatter(DataType: DataTy, Alignment: Align(Alignment)))) |
1085 | return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask, |
1086 | Alignment, CostKind, I); |
1087 | |
1088 | // Cost is proportional to the number of memory operations implied. For |
1089 | // scalable vectors, we use an estimate on that number since we don't |
1090 | // know exactly what VL will be. |
1091 | auto &VTy = *cast<VectorType>(Val: DataTy); |
1092 | InstructionCost MemOpCost = |
1093 | getMemoryOpCost(Opcode, Src: VTy.getElementType(), Alignment, AddressSpace: 0, CostKind, |
1094 | OpdInfo: {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None}, I); |
1095 | unsigned NumLoads = getEstimatedVLFor(Ty: &VTy); |
1096 | return NumLoads * MemOpCost; |
1097 | } |
1098 | |
1099 | InstructionCost RISCVTTIImpl::getExpandCompressMemoryOpCost( |
1100 | unsigned Opcode, Type *DataTy, bool VariableMask, Align Alignment, |
1101 | TTI::TargetCostKind CostKind, const Instruction *I) const { |
1102 | bool IsLegal = (Opcode == Instruction::Store && |
1103 | isLegalMaskedCompressStore(DataTy, Alignment)) || |
1104 | (Opcode == Instruction::Load && |
1105 | isLegalMaskedExpandLoad(DataType: DataTy, Alignment)); |
1106 | if (!IsLegal || CostKind != TTI::TCK_RecipThroughput) |
1107 | return BaseT::getExpandCompressMemoryOpCost(Opcode, DataTy, VariableMask, |
1108 | Alignment, CostKind, I); |
1109 | // Example compressstore sequence: |
1110 | // vsetivli zero, 8, e32, m2, ta, ma (ignored) |
1111 | // vcompress.vm v10, v8, v0 |
1112 | // vcpop.m a1, v0 |
1113 | // vsetvli zero, a1, e32, m2, ta, ma |
1114 | // vse32.v v10, (a0) |
1115 | // Example expandload sequence: |
1116 | // vsetivli zero, 8, e8, mf2, ta, ma (ignored) |
1117 | // vcpop.m a1, v0 |
1118 | // vsetvli zero, a1, e32, m2, ta, ma |
1119 | // vle32.v v10, (a0) |
1120 | // vsetivli zero, 8, e32, m2, ta, ma |
1121 | // viota.m v12, v0 |
1122 | // vrgather.vv v8, v10, v12, v0.t |
1123 | auto MemOpCost = |
1124 | getMemoryOpCost(Opcode, Src: DataTy, Alignment, /*AddressSpace*/ 0, CostKind); |
1125 | auto LT = getTypeLegalizationCost(Ty: DataTy); |
1126 | SmallVector<unsigned, 4> Opcodes{RISCV::VSETVLI}; |
1127 | if (VariableMask) |
1128 | Opcodes.push_back(Elt: RISCV::VCPOP_M); |
1129 | if (Opcode == Instruction::Store) |
1130 | Opcodes.append(IL: {RISCV::VCOMPRESS_VM}); |
1131 | else |
1132 | Opcodes.append(IL: {RISCV::VSETIVLI, RISCV::VIOTA_M, RISCV::VRGATHER_VV}); |
1133 | return MemOpCost + |
1134 | LT.first * getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
1135 | } |
1136 | |
1137 | InstructionCost RISCVTTIImpl::getStridedMemoryOpCost( |
1138 | unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask, |
1139 | Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { |
1140 | if (((Opcode == Instruction::Load || Opcode == Instruction::Store) && |
1141 | !isLegalStridedLoadStore(DataType: DataTy, Alignment)) || |
1142 | (Opcode != Instruction::Load && Opcode != Instruction::Store)) |
1143 | return BaseT::getStridedMemoryOpCost(Opcode, DataTy, Ptr, VariableMask, |
1144 | Alignment, CostKind, I); |
1145 | |
1146 | if (CostKind == TTI::TCK_CodeSize) |
1147 | return TTI::TCC_Basic; |
1148 | |
1149 | // Cost is proportional to the number of memory operations implied. For |
1150 | // scalable vectors, we use an estimate on that number since we don't |
1151 | // know exactly what VL will be. |
1152 | auto &VTy = *cast<VectorType>(Val: DataTy); |
1153 | InstructionCost MemOpCost = |
1154 | getMemoryOpCost(Opcode, Src: VTy.getElementType(), Alignment, AddressSpace: 0, CostKind, |
1155 | OpdInfo: {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None}, I); |
1156 | unsigned NumLoads = getEstimatedVLFor(Ty: &VTy); |
1157 | return NumLoads * MemOpCost; |
1158 | } |
1159 | |
1160 | InstructionCost |
1161 | RISCVTTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const { |
1162 | // FIXME: This is a property of the default vector convention, not |
1163 | // all possible calling conventions. Fixing that will require |
1164 | // some TTI API and SLP rework. |
1165 | InstructionCost Cost = 0; |
1166 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; |
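// Assume each live vector value is spilled before the call and reloaded
// afterwards, modeled as one store plus one load at its preferred alignment.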
1167 | for (auto *Ty : Tys) { |
1168 | if (!Ty->isVectorTy()) |
1169 | continue; |
1170 | Align A = DL.getPrefTypeAlign(Ty); |
1171 | Cost += getMemoryOpCost(Opcode: Instruction::Store, Src: Ty, Alignment: A, AddressSpace: 0, CostKind) + |
1172 | getMemoryOpCost(Opcode: Instruction::Load, Src: Ty, Alignment: A, AddressSpace: 0, CostKind); |
1173 | } |
1174 | return Cost; |
1175 | } |
1176 | |
1177 | // Currently, these represent both throughput and codesize costs |
1178 | // for the respective intrinsics. The costs in this table are simply |
1179 | // instruction counts with the following adjustments made: |
1180 | // * One vsetvli is considered free. |
1181 | static const CostTblEntry VectorIntrinsicCostTable[]{ |
1182 | {.ISD: Intrinsic::floor, .Type: MVT::f32, .Cost: 9}, |
1183 | {.ISD: Intrinsic::floor, .Type: MVT::f64, .Cost: 9}, |
1184 | {.ISD: Intrinsic::ceil, .Type: MVT::f32, .Cost: 9}, |
1185 | {.ISD: Intrinsic::ceil, .Type: MVT::f64, .Cost: 9}, |
1186 | {.ISD: Intrinsic::trunc, .Type: MVT::f32, .Cost: 7}, |
1187 | {.ISD: Intrinsic::trunc, .Type: MVT::f64, .Cost: 7}, |
1188 | {.ISD: Intrinsic::round, .Type: MVT::f32, .Cost: 9}, |
1189 | {.ISD: Intrinsic::round, .Type: MVT::f64, .Cost: 9}, |
1190 | {.ISD: Intrinsic::roundeven, .Type: MVT::f32, .Cost: 9}, |
1191 | {.ISD: Intrinsic::roundeven, .Type: MVT::f64, .Cost: 9}, |
1192 | {.ISD: Intrinsic::rint, .Type: MVT::f32, .Cost: 7}, |
1193 | {.ISD: Intrinsic::rint, .Type: MVT::f64, .Cost: 7}, |
1194 | {.ISD: Intrinsic::lrint, .Type: MVT::i32, .Cost: 1}, |
1195 | {.ISD: Intrinsic::lrint, .Type: MVT::i64, .Cost: 1}, |
1196 | {.ISD: Intrinsic::llrint, .Type: MVT::i64, .Cost: 1}, |
1197 | {.ISD: Intrinsic::nearbyint, .Type: MVT::f32, .Cost: 9}, |
1198 | {.ISD: Intrinsic::nearbyint, .Type: MVT::f64, .Cost: 9}, |
1199 | {.ISD: Intrinsic::bswap, .Type: MVT::i16, .Cost: 3}, |
1200 | {.ISD: Intrinsic::bswap, .Type: MVT::i32, .Cost: 12}, |
1201 | {.ISD: Intrinsic::bswap, .Type: MVT::i64, .Cost: 31}, |
1202 | {.ISD: Intrinsic::vp_bswap, .Type: MVT::i16, .Cost: 3}, |
1203 | {.ISD: Intrinsic::vp_bswap, .Type: MVT::i32, .Cost: 12}, |
1204 | {.ISD: Intrinsic::vp_bswap, .Type: MVT::i64, .Cost: 31}, |
1205 | {.ISD: Intrinsic::vp_fshl, .Type: MVT::i8, .Cost: 7}, |
1206 | {.ISD: Intrinsic::vp_fshl, .Type: MVT::i16, .Cost: 7}, |
1207 | {.ISD: Intrinsic::vp_fshl, .Type: MVT::i32, .Cost: 7}, |
1208 | {.ISD: Intrinsic::vp_fshl, .Type: MVT::i64, .Cost: 7}, |
1209 | {.ISD: Intrinsic::vp_fshr, .Type: MVT::i8, .Cost: 7}, |
1210 | {.ISD: Intrinsic::vp_fshr, .Type: MVT::i16, .Cost: 7}, |
1211 | {.ISD: Intrinsic::vp_fshr, .Type: MVT::i32, .Cost: 7}, |
1212 | {.ISD: Intrinsic::vp_fshr, .Type: MVT::i64, .Cost: 7}, |
1213 | {.ISD: Intrinsic::bitreverse, .Type: MVT::i8, .Cost: 17}, |
1214 | {.ISD: Intrinsic::bitreverse, .Type: MVT::i16, .Cost: 24}, |
1215 | {.ISD: Intrinsic::bitreverse, .Type: MVT::i32, .Cost: 33}, |
1216 | {.ISD: Intrinsic::bitreverse, .Type: MVT::i64, .Cost: 52}, |
1217 | {.ISD: Intrinsic::vp_bitreverse, .Type: MVT::i8, .Cost: 17}, |
1218 | {.ISD: Intrinsic::vp_bitreverse, .Type: MVT::i16, .Cost: 24}, |
1219 | {.ISD: Intrinsic::vp_bitreverse, .Type: MVT::i32, .Cost: 33}, |
1220 | {.ISD: Intrinsic::vp_bitreverse, .Type: MVT::i64, .Cost: 52}, |
1221 | {.ISD: Intrinsic::ctpop, .Type: MVT::i8, .Cost: 12}, |
1222 | {.ISD: Intrinsic::ctpop, .Type: MVT::i16, .Cost: 19}, |
1223 | {.ISD: Intrinsic::ctpop, .Type: MVT::i32, .Cost: 20}, |
1224 | {.ISD: Intrinsic::ctpop, .Type: MVT::i64, .Cost: 21}, |
1225 | {.ISD: Intrinsic::ctlz, .Type: MVT::i8, .Cost: 19}, |
1226 | {.ISD: Intrinsic::ctlz, .Type: MVT::i16, .Cost: 28}, |
1227 | {.ISD: Intrinsic::ctlz, .Type: MVT::i32, .Cost: 31}, |
1228 | {.ISD: Intrinsic::ctlz, .Type: MVT::i64, .Cost: 35}, |
1229 | {.ISD: Intrinsic::cttz, .Type: MVT::i8, .Cost: 16}, |
1230 | {.ISD: Intrinsic::cttz, .Type: MVT::i16, .Cost: 23}, |
1231 | {.ISD: Intrinsic::cttz, .Type: MVT::i32, .Cost: 24}, |
1232 | {.ISD: Intrinsic::cttz, .Type: MVT::i64, .Cost: 25}, |
1233 | {.ISD: Intrinsic::vp_ctpop, .Type: MVT::i8, .Cost: 12}, |
1234 | {.ISD: Intrinsic::vp_ctpop, .Type: MVT::i16, .Cost: 19}, |
1235 | {.ISD: Intrinsic::vp_ctpop, .Type: MVT::i32, .Cost: 20}, |
1236 | {.ISD: Intrinsic::vp_ctpop, .Type: MVT::i64, .Cost: 21}, |
1237 | {.ISD: Intrinsic::vp_ctlz, .Type: MVT::i8, .Cost: 19}, |
1238 | {.ISD: Intrinsic::vp_ctlz, .Type: MVT::i16, .Cost: 28}, |
1239 | {.ISD: Intrinsic::vp_ctlz, .Type: MVT::i32, .Cost: 31}, |
1240 | {.ISD: Intrinsic::vp_ctlz, .Type: MVT::i64, .Cost: 35}, |
1241 | {.ISD: Intrinsic::vp_cttz, .Type: MVT::i8, .Cost: 16}, |
1242 | {.ISD: Intrinsic::vp_cttz, .Type: MVT::i16, .Cost: 23}, |
1243 | {.ISD: Intrinsic::vp_cttz, .Type: MVT::i32, .Cost: 24}, |
1244 | {.ISD: Intrinsic::vp_cttz, .Type: MVT::i64, .Cost: 25}, |
1245 | }; |
1246 | |
1247 | static unsigned getISDForVPIntrinsicID(Intrinsic::ID ID) { |
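  // Map the VP intrinsic ID to its VP SDNode opcode via VPIntrinsics.def,
  // e.g. Intrinsic::vp_fadd maps to ISD::VP_FADD. IDs without a matching VP
  // SDNode fall through and return ISD::DELETED_NODE.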
1248 | switch (ID) { |
1249 | #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \ |
1250 | case Intrinsic::VPID: \ |
1251 | return ISD::VPSD; |
1252 | #include "llvm/IR/VPIntrinsics.def" |
1253 | #undef HELPER_MAP_VPID_TO_VPSD |
1254 | } |
1255 | return ISD::DELETED_NODE; |
1256 | } |
1257 | |
1258 | InstructionCost |
1259 | RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, |
1260 | TTI::TargetCostKind CostKind) const { |
1261 | auto *RetTy = ICA.getReturnType(); |
1262 | switch (ICA.getID()) { |
1263 | case Intrinsic::lrint: |
1264 | case Intrinsic::llrint: |
1265 | // We can't currently lower half or bfloat vector lrint/llrint. |
1266 | if (auto *VecTy = dyn_cast<VectorType>(Val: ICA.getArgTypes()[0]); |
1267 | VecTy && VecTy->getElementType()->is16bitFPTy()) |
1268 | return InstructionCost::getInvalid(); |
1269 | [[fallthrough]]; |
1270 | case Intrinsic::ceil: |
1271 | case Intrinsic::floor: |
1272 | case Intrinsic::trunc: |
1273 | case Intrinsic::rint: |
1274 | case Intrinsic::round: |
1275 | case Intrinsic::roundeven: { |
1276 | // These all use the same code. |
1277 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1278 | if (!LT.second.isVector() && TLI->isOperationCustom(Op: ISD::FCEIL, VT: LT.second)) |
1279 | return LT.first * 8; |
1280 | break; |
1281 | } |
1282 | case Intrinsic::umin: |
1283 | case Intrinsic::umax: |
1284 | case Intrinsic::smin: |
1285 | case Intrinsic::smax: { |
1286 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1287 | if (LT.second.isScalarInteger() && ST->hasStdExtZbb()) |
1288 | return LT.first; |
1289 | |
1290 | if (ST->hasVInstructions() && LT.second.isVector()) { |
1291 | unsigned Op; |
1292 | switch (ICA.getID()) { |
1293 | case Intrinsic::umin: |
1294 | Op = RISCV::VMINU_VV; |
1295 | break; |
1296 | case Intrinsic::umax: |
1297 | Op = RISCV::VMAXU_VV; |
1298 | break; |
1299 | case Intrinsic::smin: |
1300 | Op = RISCV::VMIN_VV; |
1301 | break; |
1302 | case Intrinsic::smax: |
1303 | Op = RISCV::VMAX_VV; |
1304 | break; |
1305 | } |
1306 | return LT.first * getRISCVInstructionCost(OpCodes: Op, VT: LT.second, CostKind); |
1307 | } |
1308 | break; |
1309 | } |
1310 | case Intrinsic::sadd_sat: |
1311 | case Intrinsic::ssub_sat: |
1312 | case Intrinsic::uadd_sat: |
1313 | case Intrinsic::usub_sat: { |
1314 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1315 | if (ST->hasVInstructions() && LT.second.isVector()) { |
1316 | unsigned Op; |
1317 | switch (ICA.getID()) { |
1318 | case Intrinsic::sadd_sat: |
1319 | Op = RISCV::VSADD_VV; |
1320 | break; |
1321 | case Intrinsic::ssub_sat: |
1322 | Op = RISCV::VSSUBU_VV; |
1323 | break; |
1324 | case Intrinsic::uadd_sat: |
1325 | Op = RISCV::VSADDU_VV; |
1326 | break; |
1327 | case Intrinsic::usub_sat: |
1328 | Op = RISCV::VSSUBU_VV; |
1329 | break; |
1330 | } |
1331 | return LT.first * getRISCVInstructionCost(OpCodes: Op, VT: LT.second, CostKind); |
1332 | } |
1333 | break; |
1334 | } |
1335 | case Intrinsic::fma: |
1336 | case Intrinsic::fmuladd: { |
    // TODO: Handle promotion of f16/bf16 with zvfhmin/zvfbfmin.
1338 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1339 | if (ST->hasVInstructions() && LT.second.isVector()) |
1340 | return LT.first * |
1341 | getRISCVInstructionCost(OpCodes: RISCV::VFMADD_VV, VT: LT.second, CostKind); |
1342 | break; |
1343 | } |
1344 | case Intrinsic::fabs: { |
1345 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1346 | if (ST->hasVInstructions() && LT.second.isVector()) { |
1347 | // lui a0, 8 |
1348 | // addi a0, a0, -1 |
1349 | // vsetvli a1, zero, e16, m1, ta, ma |
1350 | // vand.vx v8, v8, a0 |
      // f16 with zvfhmin and bf16 with zvfbfmin
1352 | if (LT.second.getVectorElementType() == MVT::bf16 || |
1353 | (LT.second.getVectorElementType() == MVT::f16 && |
1354 | !ST->hasVInstructionsF16())) |
1355 | return LT.first * getRISCVInstructionCost(OpCodes: RISCV::VAND_VX, VT: LT.second, |
1356 | CostKind) + |
1357 | 2; |
1358 | else |
1359 | return LT.first * |
1360 | getRISCVInstructionCost(OpCodes: RISCV::VFSGNJX_VV, VT: LT.second, CostKind); |
1361 | } |
1362 | break; |
1363 | } |
1364 | case Intrinsic::sqrt: { |
1365 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1366 | if (ST->hasVInstructions() && LT.second.isVector()) { |
1367 | SmallVector<unsigned, 4> ConvOp; |
1368 | SmallVector<unsigned, 2> FsqrtOp; |
1369 | MVT ConvType = LT.second; |
1370 | MVT FsqrtType = LT.second; |
      // f16 with zvfhmin and bf16 with zvfbfmin are promoted to f32, and the
      // nxv32[b]f16 types are additionally split.
1373 | if (LT.second.getVectorElementType() == MVT::bf16) { |
1374 | if (LT.second == MVT::nxv32bf16) { |
1375 | ConvOp = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFWCVTBF16_F_F_V, |
1376 | RISCV::VFNCVTBF16_F_F_W, RISCV::VFNCVTBF16_F_F_W}; |
1377 | FsqrtOp = {RISCV::VFSQRT_V, RISCV::VFSQRT_V}; |
          ConvType = MVT::nxv16bf16;
1379 | FsqrtType = MVT::nxv16f32; |
1380 | } else { |
1381 | ConvOp = {RISCV::VFWCVTBF16_F_F_V, RISCV::VFNCVTBF16_F_F_W}; |
1382 | FsqrtOp = {RISCV::VFSQRT_V}; |
1383 | FsqrtType = TLI->getTypeToPromoteTo(Op: ISD::FSQRT, VT: FsqrtType); |
1384 | } |
1385 | } else if (LT.second.getVectorElementType() == MVT::f16 && |
1386 | !ST->hasVInstructionsF16()) { |
1387 | if (LT.second == MVT::nxv32f16) { |
1388 | ConvOp = {RISCV::VFWCVT_F_F_V, RISCV::VFWCVT_F_F_V, |
1389 | RISCV::VFNCVT_F_F_W, RISCV::VFNCVT_F_F_W}; |
1390 | FsqrtOp = {RISCV::VFSQRT_V, RISCV::VFSQRT_V}; |
1391 | ConvType = MVT::nxv16f16; |
1392 | FsqrtType = MVT::nxv16f32; |
1393 | } else { |
1394 | ConvOp = {RISCV::VFWCVT_F_F_V, RISCV::VFNCVT_F_F_W}; |
1395 | FsqrtOp = {RISCV::VFSQRT_V}; |
1396 | FsqrtType = TLI->getTypeToPromoteTo(Op: ISD::FSQRT, VT: FsqrtType); |
1397 | } |
1398 | } else { |
1399 | FsqrtOp = {RISCV::VFSQRT_V}; |
1400 | } |
1401 | |
1402 | return LT.first * (getRISCVInstructionCost(OpCodes: FsqrtOp, VT: FsqrtType, CostKind) + |
1403 | getRISCVInstructionCost(OpCodes: ConvOp, VT: ConvType, CostKind)); |
1404 | } |
1405 | break; |
1406 | } |
1407 | case Intrinsic::cttz: |
1408 | case Intrinsic::ctlz: |
1409 | case Intrinsic::ctpop: { |
1410 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1411 | if (ST->hasVInstructions() && ST->hasStdExtZvbb() && LT.second.isVector()) { |
1412 | unsigned Op; |
1413 | switch (ICA.getID()) { |
1414 | case Intrinsic::cttz: |
1415 | Op = RISCV::VCTZ_V; |
1416 | break; |
1417 | case Intrinsic::ctlz: |
1418 | Op = RISCV::VCLZ_V; |
1419 | break; |
1420 | case Intrinsic::ctpop: |
1421 | Op = RISCV::VCPOP_V; |
1422 | break; |
1423 | } |
1424 | return LT.first * getRISCVInstructionCost(OpCodes: Op, VT: LT.second, CostKind); |
1425 | } |
1426 | break; |
1427 | } |
1428 | case Intrinsic::abs: { |
1429 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1430 | if (ST->hasVInstructions() && LT.second.isVector()) { |
1431 | // vrsub.vi v10, v8, 0 |
1432 | // vmax.vv v8, v8, v10 |
1433 | return LT.first * |
1434 | getRISCVInstructionCost(OpCodes: {RISCV::VRSUB_VI, RISCV::VMAX_VV}, |
1435 | VT: LT.second, CostKind); |
1436 | } |
1437 | break; |
1438 | } |
1439 | case Intrinsic::get_active_lane_mask: { |
1440 | if (ST->hasVInstructions()) { |
1441 | Type *ExpRetTy = VectorType::get( |
1442 | ElementType: ICA.getArgTypes()[0], EC: cast<VectorType>(Val: RetTy)->getElementCount()); |
1443 | auto LT = getTypeLegalizationCost(Ty: ExpRetTy); |
1444 | |
1445 | // vid.v v8 // considered hoisted |
1446 | // vsaddu.vx v8, v8, a0 |
1447 | // vmsltu.vx v0, v8, a1 |
1448 | return LT.first * |
1449 | getRISCVInstructionCost(OpCodes: {RISCV::VSADDU_VX, RISCV::VMSLTU_VX}, |
1450 | VT: LT.second, CostKind); |
1451 | } |
1452 | break; |
1453 | } |
  // TODO: Add more intrinsics.
1455 | case Intrinsic::stepvector: { |
1456 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1457 | // Legalisation of illegal types involves an `index' instruction plus |
1458 | // (LT.first - 1) vector adds. |
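    // For example, <vscale x 16 x i64> with ELEN=64 legalizes as two nxv8i64
    // halves: one vid.v plus a single vadd.vx to offset the second half.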
1459 | if (ST->hasVInstructions()) |
1460 | return getRISCVInstructionCost(OpCodes: RISCV::VID_V, VT: LT.second, CostKind) + |
1461 | (LT.first - 1) * |
1462 | getRISCVInstructionCost(OpCodes: RISCV::VADD_VX, VT: LT.second, CostKind); |
1463 | return 1 + (LT.first - 1); |
1464 | } |
1465 | case Intrinsic::experimental_cttz_elts: { |
1466 | Type *ArgTy = ICA.getArgTypes()[0]; |
1467 | EVT ArgType = TLI->getValueType(DL, Ty: ArgTy, AllowUnknown: true); |
1468 | if (getTLI()->shouldExpandCttzElements(VT: ArgType)) |
1469 | break; |
1470 | InstructionCost Cost = getRISCVInstructionCost( |
1471 | OpCodes: RISCV::VFIRST_M, VT: getTypeLegalizationCost(Ty: ArgTy).second, CostKind); |
1472 | |
1473 | // If zero_is_poison is false, then we will generate additional |
1474 | // cmp + select instructions to convert -1 to EVL. |
1475 | Type *BoolTy = Type::getInt1Ty(C&: RetTy->getContext()); |
1476 | if (ICA.getArgs().size() > 1 && |
1477 | cast<ConstantInt>(Val: ICA.getArgs()[1])->isZero()) |
1478 | Cost += getCmpSelInstrCost(Opcode: Instruction::ICmp, ValTy: BoolTy, CondTy: RetTy, |
1479 | VecPred: CmpInst::ICMP_SLT, CostKind) + |
1480 | getCmpSelInstrCost(Opcode: Instruction::Select, ValTy: RetTy, CondTy: BoolTy, |
1481 | VecPred: CmpInst::BAD_ICMP_PREDICATE, CostKind); |
1482 | |
1483 | return Cost; |
1484 | } |
1485 | case Intrinsic::vp_rint: { |
1486 | // RISC-V target uses at least 5 instructions to lower rounding intrinsics. |
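    // Roughly: vfabs.v + vmflt.vf to select the lanes that actually need
    // rounding, then a masked vfcvt.x.f.v, a masked vfcvt.f.x.v and a
    // vfsgnj.vv to restore the sign.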
1487 | unsigned Cost = 5; |
1488 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1489 | if (TLI->isOperationCustom(Op: ISD::VP_FRINT, VT: LT.second)) |
1490 | return Cost * LT.first; |
1491 | break; |
1492 | } |
1493 | case Intrinsic::vp_nearbyint: { |
    // One extra read and one extra write of fflags compared to vp_rint.
1495 | unsigned Cost = 7; |
1496 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1497 | if (TLI->isOperationCustom(Op: ISD::VP_FRINT, VT: LT.second)) |
1498 | return Cost * LT.first; |
1499 | break; |
1500 | } |
1501 | case Intrinsic::vp_ceil: |
1502 | case Intrinsic::vp_floor: |
1503 | case Intrinsic::vp_round: |
1504 | case Intrinsic::vp_roundeven: |
1505 | case Intrinsic::vp_roundtozero: { |
    // Rounding with a static rounding mode needs two more instructions than
    // vp_rint to swap/write FRM.
1508 | unsigned Cost = 7; |
1509 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1510 | unsigned VPISD = getISDForVPIntrinsicID(ID: ICA.getID()); |
1511 | if (TLI->isOperationCustom(Op: VPISD, VT: LT.second)) |
1512 | return Cost * LT.first; |
1513 | break; |
1514 | } |
1515 | case Intrinsic::vp_select: { |
1516 | Intrinsic::ID IID = ICA.getID(); |
1517 | std::optional<unsigned> FOp = VPIntrinsic::getFunctionalOpcodeForVP(ID: IID); |
1518 | assert(FOp.has_value()); |
1519 | return getCmpSelInstrCost(Opcode: *FOp, ValTy: ICA.getReturnType(), CondTy: ICA.getArgTypes()[0], |
1520 | VecPred: CmpInst::BAD_ICMP_PREDICATE, CostKind); |
1521 | } |
1522 | case Intrinsic::vp_merge: |
1523 | return getCmpSelInstrCost(Opcode: Instruction::Select, ValTy: ICA.getReturnType(), |
1524 | CondTy: ICA.getArgTypes()[0], VecPred: CmpInst::BAD_ICMP_PREDICATE, |
1525 | CostKind); |
1526 | case Intrinsic::experimental_vp_splat: { |
1527 | auto LT = getTypeLegalizationCost(Ty: RetTy); |
1528 | // TODO: Lower i1 experimental_vp_splat |
1529 | if (!ST->hasVInstructions() || LT.second.getScalarType() == MVT::i1) |
1530 | return InstructionCost::getInvalid(); |
1531 | return LT.first * getRISCVInstructionCost(OpCodes: LT.second.isFloatingPoint() |
1532 | ? RISCV::VFMV_V_F |
1533 | : RISCV::VMV_V_X, |
1534 | VT: LT.second, CostKind); |
1535 | } |
1536 | case Intrinsic::experimental_vp_splice: { |
    // To support type-based queries from the vectorizer, set the index to 0.
    // Note that the index only changes the cost from vslide.vx to vslide.vi,
    // and in the current implementation they have the same cost.
1540 | return getShuffleCost(Kind: TTI::SK_Splice, DstTy: cast<VectorType>(Val: ICA.getReturnType()), |
1541 | SrcTy: cast<VectorType>(Val: ICA.getArgTypes()[0]), Mask: {}, CostKind, |
1542 | Index: 0, SubTp: cast<VectorType>(Val: ICA.getReturnType())); |
1543 | } |
1544 | } |
1545 | |
1546 | if (ST->hasVInstructions() && RetTy->isVectorTy()) { |
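    // Fall back to the per-element-type cost table above; the matched entry
    // cost is scaled by the number of legalized parts.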
1547 | if (auto LT = getTypeLegalizationCost(Ty: RetTy); |
1548 | LT.second.isVector()) { |
1549 | MVT EltTy = LT.second.getVectorElementType(); |
1550 | if (const auto *Entry = CostTableLookup(Table: VectorIntrinsicCostTable, |
1551 | ISD: ICA.getID(), Ty: EltTy)) |
1552 | return LT.first * Entry->Cost; |
1553 | } |
1554 | } |
1555 | |
1556 | return BaseT::getIntrinsicInstrCost(ICA, CostKind); |
1557 | } |
1558 | |
1559 | InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, |
1560 | Type *Src, |
1561 | TTI::CastContextHint CCH, |
1562 | TTI::TargetCostKind CostKind, |
1563 | const Instruction *I) const { |
1564 | bool IsVectorType = isa<VectorType>(Val: Dst) && isa<VectorType>(Val: Src); |
1565 | if (!IsVectorType) |
1566 | return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
1567 | |
1568 | // FIXME: Need to compute legalizing cost for illegal types. The current |
1569 | // code handles only legal types and those which can be trivially |
1570 | // promoted to legal. |
1571 | if (!ST->hasVInstructions() || Src->getScalarSizeInBits() > ST->getELen() || |
1572 | Dst->getScalarSizeInBits() > ST->getELen()) |
1573 | return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
1574 | |
1575 | int ISD = TLI->InstructionOpcodeToISD(Opcode); |
1576 | assert(ISD && "Invalid opcode" ); |
1577 | std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Ty: Src); |
1578 | std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Ty: Dst); |
1579 | |
1580 | // Handle i1 source and dest cases *before* calling logic in BasicTTI. |
1581 | // The shared implementation doesn't model vector widening during legalization |
1582 | // and instead assumes scalarization. In order to scalarize an <N x i1> |
1583 | // vector, we need to extend/trunc to/from i8. If we don't special case |
1584 | // this, we can get an infinite recursion cycle. |
1585 | switch (ISD) { |
1586 | default: |
1587 | break; |
1588 | case ISD::SIGN_EXTEND: |
1589 | case ISD::ZERO_EXTEND: |
1590 | if (Src->getScalarSizeInBits() == 1) { |
      // We do not use vsext/vzext to extend from a mask vector.
      // Instead we extend from a mask vector with the following instructions:
1593 | // vmv.v.i v8, 0 |
1594 | // vmerge.vim v8, v8, -1, v0 (repeated per split) |
1595 | return getRISCVInstructionCost(OpCodes: RISCV::VMV_V_I, VT: DstLT.second, CostKind) + |
1596 | DstLT.first * getRISCVInstructionCost(OpCodes: RISCV::VMERGE_VIM, |
1597 | VT: DstLT.second, CostKind) + |
1598 | DstLT.first - 1; |
1599 | } |
1600 | break; |
1601 | case ISD::TRUNCATE: |
1602 | if (Dst->getScalarSizeInBits() == 1) { |
      // We do not use a sequence of vncvt instructions to truncate to a mask
      // vector, so we cannot use PowDiff to compute the cost.
      // Instead we truncate to a mask vector with the following instructions:
1606 | // vand.vi v8, v8, 1 |
1607 | // vmsne.vi v0, v8, 0 |
1608 | return SrcLT.first * |
1609 | getRISCVInstructionCost(OpCodes: {RISCV::VAND_VI, RISCV::VMSNE_VI}, |
1610 | VT: SrcLT.second, CostKind) + |
1611 | SrcLT.first - 1; |
1612 | } |
1613 | break; |
1614 | }; |
1615 | |
1616 | // Our actual lowering for the case where a wider legal type is available |
1617 | // uses promotion to the wider type. This is reflected in the result of |
1618 | // getTypeLegalizationCost, but BasicTTI assumes the widened cases are |
1619 | // scalarized if the legalized Src and Dst are not equal sized. |
1620 | const DataLayout &DL = this->getDataLayout(); |
1621 | if (!SrcLT.second.isVector() || !DstLT.second.isVector() || |
1622 | !TypeSize::isKnownLE(LHS: DL.getTypeSizeInBits(Ty: Src), |
1623 | RHS: SrcLT.second.getSizeInBits()) || |
1624 | !TypeSize::isKnownLE(LHS: DL.getTypeSizeInBits(Ty: Dst), |
1625 | RHS: DstLT.second.getSizeInBits())) |
1626 | return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
1627 | |
1628 | // The split cost is handled by the base getCastInstrCost |
1629 | assert((SrcLT.first == 1) && (DstLT.first == 1) && "Illegal type" ); |
1630 | |
1631 | int PowDiff = (int)Log2_32(Value: DstLT.second.getScalarSizeInBits()) - |
1632 | (int)Log2_32(Value: SrcLT.second.getScalarSizeInBits()); |
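  // For example, an i8 -> i32 extend has PowDiff == 2 and is costed as a
  // single vsext.vf4/vzext.vf4 below.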
1633 | switch (ISD) { |
1634 | case ISD::SIGN_EXTEND: |
1635 | case ISD::ZERO_EXTEND: { |
1636 | if ((PowDiff < 1) || (PowDiff > 3)) |
1637 | return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
1638 | unsigned SExtOp[] = {RISCV::VSEXT_VF2, RISCV::VSEXT_VF4, RISCV::VSEXT_VF8}; |
1639 | unsigned ZExtOp[] = {RISCV::VZEXT_VF2, RISCV::VZEXT_VF4, RISCV::VZEXT_VF8}; |
1640 | unsigned Op = |
1641 | (ISD == ISD::SIGN_EXTEND) ? SExtOp[PowDiff - 1] : ZExtOp[PowDiff - 1]; |
1642 | return getRISCVInstructionCost(OpCodes: Op, VT: DstLT.second, CostKind); |
1643 | } |
1644 | case ISD::TRUNCATE: |
1645 | case ISD::FP_EXTEND: |
1646 | case ISD::FP_ROUND: { |
1647 | // Counts of narrow/widen instructions. |
1648 | unsigned SrcEltSize = SrcLT.second.getScalarSizeInBits(); |
1649 | unsigned DstEltSize = DstLT.second.getScalarSizeInBits(); |
1650 | |
1651 | unsigned Op = (ISD == ISD::TRUNCATE) ? RISCV::VNSRL_WI |
1652 | : (ISD == ISD::FP_EXTEND) ? RISCV::VFWCVT_F_F_V |
1653 | : RISCV::VFNCVT_F_F_W; |
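    // For example, an f64 -> f16 fp_round is costed as two vfncvt.f.f.w steps
    // (f64 -> f32 -> f16), and an i64 -> i8 trunc as three vnsrl.wi steps.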
1654 | InstructionCost Cost = 0; |
1655 | for (; SrcEltSize != DstEltSize;) { |
1656 | MVT ElementMVT = (ISD == ISD::TRUNCATE) |
1657 | ? MVT::getIntegerVT(BitWidth: DstEltSize) |
1658 | : MVT::getFloatingPointVT(BitWidth: DstEltSize); |
1659 | MVT DstMVT = DstLT.second.changeVectorElementType(EltVT: ElementMVT); |
1660 | DstEltSize = |
1661 | (DstEltSize > SrcEltSize) ? DstEltSize >> 1 : DstEltSize << 1; |
1662 | Cost += getRISCVInstructionCost(OpCodes: Op, VT: DstMVT, CostKind); |
1663 | } |
1664 | return Cost; |
1665 | } |
1666 | case ISD::FP_TO_SINT: |
1667 | case ISD::FP_TO_UINT: { |
1668 | unsigned IsSigned = ISD == ISD::FP_TO_SINT; |
1669 | unsigned FCVT = IsSigned ? RISCV::VFCVT_RTZ_X_F_V : RISCV::VFCVT_RTZ_XU_F_V; |
1670 | unsigned FWCVT = |
1671 | IsSigned ? RISCV::VFWCVT_RTZ_X_F_V : RISCV::VFWCVT_RTZ_XU_F_V; |
1672 | unsigned FNCVT = |
1673 | IsSigned ? RISCV::VFNCVT_RTZ_X_F_W : RISCV::VFNCVT_RTZ_XU_F_W; |
1674 | unsigned SrcEltSize = Src->getScalarSizeInBits(); |
1675 | unsigned DstEltSize = Dst->getScalarSizeInBits(); |
1676 | InstructionCost Cost = 0; |
1677 | if ((SrcEltSize == 16) && |
1678 | (!ST->hasVInstructionsF16() || ((DstEltSize / 2) > SrcEltSize))) { |
      // If the target only supports zvfhmin, or this is an fp16-to-i64
      // conversion, pre-widen to f32 and then convert f32 to the integer type.
1681 | VectorType *VecF32Ty = |
1682 | VectorType::get(ElementType: Type::getFloatTy(C&: Dst->getContext()), |
1683 | EC: cast<VectorType>(Val: Dst)->getElementCount()); |
1684 | std::pair<InstructionCost, MVT> VecF32LT = |
1685 | getTypeLegalizationCost(Ty: VecF32Ty); |
1686 | Cost += |
1687 | VecF32LT.first * getRISCVInstructionCost(OpCodes: RISCV::VFWCVT_F_F_V, |
1688 | VT: VecF32LT.second, CostKind); |
1689 | Cost += getCastInstrCost(Opcode, Dst, Src: VecF32Ty, CCH, CostKind, I); |
1690 | return Cost; |
1691 | } |
1692 | if (DstEltSize == SrcEltSize) |
1693 | Cost += getRISCVInstructionCost(OpCodes: FCVT, VT: DstLT.second, CostKind); |
1694 | else if (DstEltSize > SrcEltSize) |
1695 | Cost += getRISCVInstructionCost(OpCodes: FWCVT, VT: DstLT.second, CostKind); |
1696 | else { // (SrcEltSize > DstEltSize) |
1697 | // First do a narrowing conversion to an integer half the size, then |
1698 | // truncate if needed. |
1699 | MVT ElementVT = MVT::getIntegerVT(BitWidth: SrcEltSize / 2); |
1700 | MVT VecVT = DstLT.second.changeVectorElementType(EltVT: ElementVT); |
1701 | Cost += getRISCVInstructionCost(OpCodes: FNCVT, VT: VecVT, CostKind); |
1702 | if ((SrcEltSize / 2) > DstEltSize) { |
1703 | Type *VecTy = EVT(VecVT).getTypeForEVT(Context&: Dst->getContext()); |
1704 | Cost += |
1705 | getCastInstrCost(Opcode: Instruction::Trunc, Dst, Src: VecTy, CCH, CostKind, I); |
1706 | } |
1707 | } |
1708 | return Cost; |
1709 | } |
1710 | case ISD::SINT_TO_FP: |
1711 | case ISD::UINT_TO_FP: { |
1712 | unsigned IsSigned = ISD == ISD::SINT_TO_FP; |
1713 | unsigned FCVT = IsSigned ? RISCV::VFCVT_F_X_V : RISCV::VFCVT_F_XU_V; |
1714 | unsigned FWCVT = IsSigned ? RISCV::VFWCVT_F_X_V : RISCV::VFWCVT_F_XU_V; |
1715 | unsigned FNCVT = IsSigned ? RISCV::VFNCVT_F_X_W : RISCV::VFNCVT_F_XU_W; |
1716 | unsigned SrcEltSize = Src->getScalarSizeInBits(); |
1717 | unsigned DstEltSize = Dst->getScalarSizeInBits(); |
1718 | |
1719 | InstructionCost Cost = 0; |
1720 | if ((DstEltSize == 16) && |
1721 | (!ST->hasVInstructionsF16() || ((SrcEltSize / 2) > DstEltSize))) { |
      // If the target only supports zvfhmin, or this is an i64-to-fp16
      // conversion, convert to f32 first and then narrow f32 to f16.
1724 | VectorType *VecF32Ty = |
1725 | VectorType::get(ElementType: Type::getFloatTy(C&: Dst->getContext()), |
1726 | EC: cast<VectorType>(Val: Dst)->getElementCount()); |
1727 | std::pair<InstructionCost, MVT> VecF32LT = |
1728 | getTypeLegalizationCost(Ty: VecF32Ty); |
1729 | Cost += getCastInstrCost(Opcode, Dst: VecF32Ty, Src, CCH, CostKind, I); |
1730 | Cost += VecF32LT.first * getRISCVInstructionCost(OpCodes: RISCV::VFNCVT_F_F_W, |
1731 | VT: DstLT.second, CostKind); |
1732 | return Cost; |
1733 | } |
1734 | |
1735 | if (DstEltSize == SrcEltSize) |
1736 | Cost += getRISCVInstructionCost(OpCodes: FCVT, VT: DstLT.second, CostKind); |
1737 | else if (DstEltSize > SrcEltSize) { |
1738 | if ((DstEltSize / 2) > SrcEltSize) { |
1739 | VectorType *VecTy = |
1740 | VectorType::get(ElementType: IntegerType::get(C&: Dst->getContext(), NumBits: DstEltSize / 2), |
1741 | EC: cast<VectorType>(Val: Dst)->getElementCount()); |
1742 | unsigned Op = IsSigned ? Instruction::SExt : Instruction::ZExt; |
1743 | Cost += getCastInstrCost(Opcode: Op, Dst: VecTy, Src, CCH, CostKind, I); |
1744 | } |
1745 | Cost += getRISCVInstructionCost(OpCodes: FWCVT, VT: DstLT.second, CostKind); |
1746 | } else |
1747 | Cost += getRISCVInstructionCost(OpCodes: FNCVT, VT: DstLT.second, CostKind); |
1748 | return Cost; |
1749 | } |
1750 | } |
1751 | return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I); |
1752 | } |
1753 | |
1754 | unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) const { |
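  // For scalable types, estimate the per-operation VL from the tuning vscale;
  // e.g. with getVScaleForTuning() == 2 (VLEN=128), a <vscale x 4 x i32> gives
  // an estimated VL of 8.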
1755 | if (isa<ScalableVectorType>(Val: Ty)) { |
1756 | const unsigned EltSize = DL.getTypeSizeInBits(Ty: Ty->getElementType()); |
1757 | const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue(); |
1758 | const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock; |
1759 | return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize); |
1760 | } |
1761 | return cast<FixedVectorType>(Val: Ty)->getNumElements(); |
1762 | } |
1763 | |
1764 | InstructionCost |
1765 | RISCVTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, |
1766 | FastMathFlags FMF, |
1767 | TTI::TargetCostKind CostKind) const { |
1768 | if (isa<FixedVectorType>(Val: Ty) && !ST->useRVVForFixedLengthVectors()) |
1769 | return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind); |
1770 | |
1771 | // Skip if scalar size of Ty is bigger than ELEN. |
1772 | if (Ty->getScalarSizeInBits() > ST->getELen()) |
1773 | return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind); |
1774 | |
1775 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty); |
1776 | if (Ty->getElementType()->isIntegerTy(Bitwidth: 1)) { |
1777 | // SelectionDAGBuilder does following transforms: |
1778 | // vector_reduce_{smin,umax}(<n x i1>) --> vector_reduce_or(<n x i1>) |
1779 | // vector_reduce_{smax,umin}(<n x i1>) --> vector_reduce_and(<n x i1>) |
1780 | if (IID == Intrinsic::umax || IID == Intrinsic::smin) |
1781 | return getArithmeticReductionCost(Opcode: Instruction::Or, Ty, FMF, CostKind); |
1782 | else |
1783 | return getArithmeticReductionCost(Opcode: Instruction::And, Ty, FMF, CostKind); |
1784 | } |
1785 | |
1786 | if (IID == Intrinsic::maximum || IID == Intrinsic::minimum) { |
1787 | SmallVector<unsigned, 3> Opcodes; |
    InstructionCost ExtraCost = 0;
1789 | switch (IID) { |
1790 | case Intrinsic::maximum: |
1791 | if (FMF.noNaNs()) { |
1792 | Opcodes = {RISCV::VFREDMAX_VS, RISCV::VFMV_F_S}; |
1793 | } else { |
1794 | Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMAX_VS, |
1795 | RISCV::VFMV_F_S}; |
        // Cost of the canonical NaN + branch
1797 | // lui a0, 523264 |
1798 | // fmv.w.x fa0, a0 |
1799 | Type *DstTy = Ty->getScalarType(); |
1800 | const unsigned EltTyBits = DstTy->getScalarSizeInBits(); |
1801 | Type *SrcTy = IntegerType::getIntNTy(C&: DstTy->getContext(), N: EltTyBits); |
1802 | ExtraCost = 1 + |
1803 | getCastInstrCost(Opcode: Instruction::UIToFP, Dst: DstTy, Src: SrcTy, |
1804 | CCH: TTI::CastContextHint::None, CostKind) + |
1805 | getCFInstrCost(Opcode: Instruction::Br, CostKind); |
1806 | } |
1807 | break; |
1808 | |
1809 | case Intrinsic::minimum: |
1810 | if (FMF.noNaNs()) { |
1811 | Opcodes = {RISCV::VFREDMIN_VS, RISCV::VFMV_F_S}; |
1812 | } else { |
1813 | Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMIN_VS, |
1814 | RISCV::VFMV_F_S}; |
        // Cost of the canonical NaN + branch
1816 | // lui a0, 523264 |
1817 | // fmv.w.x fa0, a0 |
1818 | Type *DstTy = Ty->getScalarType(); |
1819 | const unsigned EltTyBits = DL.getTypeSizeInBits(Ty: DstTy); |
1820 | Type *SrcTy = IntegerType::getIntNTy(C&: DstTy->getContext(), N: EltTyBits); |
1821 | ExtraCost = 1 + |
1822 | getCastInstrCost(Opcode: Instruction::UIToFP, Dst: DstTy, Src: SrcTy, |
1823 | CCH: TTI::CastContextHint::None, CostKind) + |
1824 | getCFInstrCost(Opcode: Instruction::Br, CostKind); |
1825 | } |
1826 | break; |
1827 | } |
1828 | return ExtraCost + getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
1829 | } |
1830 | |
  // An IR reduction is composed of one RVV reduction instruction and a vmv.
1832 | unsigned SplitOp; |
1833 | SmallVector<unsigned, 3> Opcodes; |
1834 | switch (IID) { |
1835 | default: |
1836 | llvm_unreachable("Unsupported intrinsic" ); |
1837 | case Intrinsic::smax: |
1838 | SplitOp = RISCV::VMAX_VV; |
1839 | Opcodes = {RISCV::VREDMAX_VS, RISCV::VMV_X_S}; |
1840 | break; |
1841 | case Intrinsic::smin: |
1842 | SplitOp = RISCV::VMIN_VV; |
1843 | Opcodes = {RISCV::VREDMIN_VS, RISCV::VMV_X_S}; |
1844 | break; |
1845 | case Intrinsic::umax: |
1846 | SplitOp = RISCV::VMAXU_VV; |
1847 | Opcodes = {RISCV::VREDMAXU_VS, RISCV::VMV_X_S}; |
1848 | break; |
1849 | case Intrinsic::umin: |
1850 | SplitOp = RISCV::VMINU_VV; |
1851 | Opcodes = {RISCV::VREDMINU_VS, RISCV::VMV_X_S}; |
1852 | break; |
1853 | case Intrinsic::maxnum: |
1854 | SplitOp = RISCV::VFMAX_VV; |
1855 | Opcodes = {RISCV::VFREDMAX_VS, RISCV::VFMV_F_S}; |
1856 | break; |
1857 | case Intrinsic::minnum: |
1858 | SplitOp = RISCV::VFMIN_VV; |
1859 | Opcodes = {RISCV::VFREDMIN_VS, RISCV::VFMV_F_S}; |
1860 | break; |
1861 | } |
1862 | // Add a cost for data larger than LMUL8 |
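  // (e.g. a <vscale x 32 x i32> umax reduction legalizes to two LMUL8 halves:
  // one vmaxu.vv to combine them, then vredmaxu.vs + vmv.x.s).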
1863 | InstructionCost SplitCost = |
1864 | (LT.first > 1) ? (LT.first - 1) * |
1865 | getRISCVInstructionCost(OpCodes: SplitOp, VT: LT.second, CostKind) |
1866 | : 0; |
1867 | return SplitCost + getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
1868 | } |
1869 | |
1870 | InstructionCost |
1871 | RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, |
1872 | std::optional<FastMathFlags> FMF, |
1873 | TTI::TargetCostKind CostKind) const { |
1874 | if (isa<FixedVectorType>(Val: Ty) && !ST->useRVVForFixedLengthVectors()) |
1875 | return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind); |
1876 | |
1877 | // Skip if scalar size of Ty is bigger than ELEN. |
1878 | if (Ty->getScalarSizeInBits() > ST->getELen()) |
1879 | return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind); |
1880 | |
1881 | int ISD = TLI->InstructionOpcodeToISD(Opcode); |
1882 | assert(ISD && "Invalid opcode" ); |
1883 | |
1884 | if (ISD != ISD::ADD && ISD != ISD::OR && ISD != ISD::XOR && ISD != ISD::AND && |
1885 | ISD != ISD::FADD) |
1886 | return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind); |
1887 | |
1888 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty); |
1889 | Type *ElementTy = Ty->getElementType(); |
1890 | if (ElementTy->isIntegerTy(Bitwidth: 1)) { |
1891 | // Example sequences: |
1892 | // vfirst.m a0, v0 |
1893 | // seqz a0, a0 |
1894 | if (LT.second == MVT::v1i1) |
1895 | return getRISCVInstructionCost(OpCodes: RISCV::VFIRST_M, VT: LT.second, CostKind) + |
1896 | getCmpSelInstrCost(Opcode: Instruction::ICmp, ValTy: ElementTy, CondTy: ElementTy, |
1897 | VecPred: CmpInst::ICMP_EQ, CostKind); |
1898 | |
1899 | if (ISD == ISD::AND) { |
1900 | // Example sequences: |
1901 | // vmand.mm v8, v9, v8 ; needed every time type is split |
1902 | // vmnot.m v8, v0 ; alias for vmnand |
1903 | // vcpop.m a0, v8 |
1904 | // seqz a0, a0 |
1905 | |
1906 | // See the discussion: https://github.com/llvm/llvm-project/pull/119160 |
1907 | // For LMUL <= 8, there is no splitting, |
1908 | // the sequences are vmnot, vcpop and seqz. |
1909 | // When LMUL > 8 and split = 1, |
1910 | // the sequences are vmnand, vcpop and seqz. |
1911 | // When LMUL > 8 and split > 1, |
1912 | // the sequences are (LT.first-2) * vmand, vmnand, vcpop and seqz. |
1913 | return ((LT.first > 2) ? (LT.first - 2) : 0) * |
1914 | getRISCVInstructionCost(OpCodes: RISCV::VMAND_MM, VT: LT.second, CostKind) + |
1915 | getRISCVInstructionCost(OpCodes: RISCV::VMNAND_MM, VT: LT.second, CostKind) + |
1916 | getRISCVInstructionCost(OpCodes: RISCV::VCPOP_M, VT: LT.second, CostKind) + |
1917 | getCmpSelInstrCost(Opcode: Instruction::ICmp, ValTy: ElementTy, CondTy: ElementTy, |
1918 | VecPred: CmpInst::ICMP_EQ, CostKind); |
1919 | } else if (ISD == ISD::XOR || ISD == ISD::ADD) { |
1920 | // Example sequences: |
1921 | // vsetvli a0, zero, e8, mf8, ta, ma |
1922 | // vmxor.mm v8, v0, v8 ; needed every time type is split |
1923 | // vcpop.m a0, v8 |
1924 | // andi a0, a0, 1 |
1925 | return (LT.first - 1) * |
1926 | getRISCVInstructionCost(OpCodes: RISCV::VMXOR_MM, VT: LT.second, CostKind) + |
1927 | getRISCVInstructionCost(OpCodes: RISCV::VCPOP_M, VT: LT.second, CostKind) + 1; |
1928 | } else { |
1929 | assert(ISD == ISD::OR); |
1930 | // Example sequences: |
1931 | // vsetvli a0, zero, e8, mf8, ta, ma |
1932 | // vmor.mm v8, v9, v8 ; needed every time type is split |
1933 | // vcpop.m a0, v0 |
1934 | // snez a0, a0 |
1935 | return (LT.first - 1) * |
1936 | getRISCVInstructionCost(OpCodes: RISCV::VMOR_MM, VT: LT.second, CostKind) + |
1937 | getRISCVInstructionCost(OpCodes: RISCV::VCPOP_M, VT: LT.second, CostKind) + |
1938 | getCmpSelInstrCost(Opcode: Instruction::ICmp, ValTy: ElementTy, CondTy: ElementTy, |
1939 | VecPred: CmpInst::ICMP_NE, CostKind); |
1940 | } |
1941 | } |
1942 | |
  // An IR or/and reduction is composed of one vmv and one RVV reduction
  // instruction; the other reductions are composed of two vmv and one RVV
  // reduction instruction.
1946 | unsigned SplitOp; |
1947 | SmallVector<unsigned, 3> Opcodes; |
1948 | switch (ISD) { |
1949 | case ISD::ADD: |
1950 | SplitOp = RISCV::VADD_VV; |
1951 | Opcodes = {RISCV::VMV_S_X, RISCV::VREDSUM_VS, RISCV::VMV_X_S}; |
1952 | break; |
1953 | case ISD::OR: |
1954 | SplitOp = RISCV::VOR_VV; |
1955 | Opcodes = {RISCV::VREDOR_VS, RISCV::VMV_X_S}; |
1956 | break; |
1957 | case ISD::XOR: |
1958 | SplitOp = RISCV::VXOR_VV; |
1959 | Opcodes = {RISCV::VMV_S_X, RISCV::VREDXOR_VS, RISCV::VMV_X_S}; |
1960 | break; |
1961 | case ISD::AND: |
1962 | SplitOp = RISCV::VAND_VV; |
1963 | Opcodes = {RISCV::VREDAND_VS, RISCV::VMV_X_S}; |
1964 | break; |
1965 | case ISD::FADD: |
1966 | // We can't promote f16/bf16 fadd reductions. |
1967 | if ((LT.second.getScalarType() == MVT::f16 && !ST->hasVInstructionsF16()) || |
1968 | LT.second.getScalarType() == MVT::bf16) |
1969 | return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind); |
1970 | if (TTI::requiresOrderedReduction(FMF)) { |
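      // An ordered FP reduction cannot be split and recombined with vfadd.vv,
      // so each legalized part needs its own vfredosum.vs chained through the
      // scalar accumulator.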
1971 | Opcodes.push_back(Elt: RISCV::VFMV_S_F); |
1972 | for (unsigned i = 0; i < LT.first.getValue(); i++) |
1973 | Opcodes.push_back(Elt: RISCV::VFREDOSUM_VS); |
1974 | Opcodes.push_back(Elt: RISCV::VFMV_F_S); |
1975 | return getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
1976 | } |
1977 | SplitOp = RISCV::VFADD_VV; |
1978 | Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDUSUM_VS, RISCV::VFMV_F_S}; |
1979 | break; |
1980 | } |
1981 | // Add a cost for data larger than LMUL8 |
1982 | InstructionCost SplitCost = |
1983 | (LT.first > 1) ? (LT.first - 1) * |
1984 | getRISCVInstructionCost(OpCodes: SplitOp, VT: LT.second, CostKind) |
1985 | : 0; |
1986 | return SplitCost + getRISCVInstructionCost(OpCodes: Opcodes, VT: LT.second, CostKind); |
1987 | } |
1988 | |
1989 | InstructionCost RISCVTTIImpl::getExtendedReductionCost( |
1990 | unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy, |
1991 | std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const { |
1992 | if (isa<FixedVectorType>(Val: ValTy) && !ST->useRVVForFixedLengthVectors()) |
1993 | return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty: ValTy, |
1994 | FMF, CostKind); |
1995 | |
1996 | // Skip if scalar size of ResTy is bigger than ELEN. |
1997 | if (ResTy->getScalarSizeInBits() > ST->getELen()) |
1998 | return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty: ValTy, |
1999 | FMF, CostKind); |
2000 | |
2001 | if (Opcode != Instruction::Add && Opcode != Instruction::FAdd) |
2002 | return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty: ValTy, |
2003 | FMF, CostKind); |
2004 | |
2005 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: ValTy); |
2006 | |
2007 | if (IsUnsigned && Opcode == Instruction::Add && |
2008 | LT.second.isFixedLengthVector() && LT.second.getScalarType() == MVT::i1) { |
2009 | // Represent vector_reduce_add(ZExt(<n x i1>)) as |
    // ZExtOrTrunc(ctpop(bitcast <n x i1> to iN)).
2011 | return LT.first * |
2012 | getRISCVInstructionCost(OpCodes: RISCV::VCPOP_M, VT: LT.second, CostKind); |
2013 | } |
2014 | |
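  // Only an extension that exactly doubles the element width can be folded
  // into the reduction (matching the RVV widening reductions); other widths
  // fall back to the base implementation.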
2015 | if (ResTy->getScalarSizeInBits() != 2 * LT.second.getScalarSizeInBits()) |
2016 | return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty: ValTy, |
2017 | FMF, CostKind); |
2018 | |
2019 | return (LT.first - 1) + |
2020 | getArithmeticReductionCost(Opcode, Ty: ValTy, FMF, CostKind); |
2021 | } |
2022 | |
2023 | InstructionCost |
2024 | RISCVTTIImpl::getStoreImmCost(Type *Ty, TTI::OperandValueInfo OpInfo, |
2025 | TTI::TargetCostKind CostKind) const { |
2026 | assert(OpInfo.isConstant() && "non constant operand?" ); |
2027 | if (!isa<VectorType>(Val: Ty)) |
2028 | // FIXME: We need to account for immediate materialization here, but doing |
2029 | // a decent job requires more knowledge about the immediate than we |
2030 | // currently have here. |
2031 | return 0; |
2032 | |
2033 | if (OpInfo.isUniform()) |
2034 | // vmv.v.i, vmv.v.x, or vfmv.v.f |
2035 | // We ignore the cost of the scalar constant materialization to be consistent |
2036 | // with how we treat scalar constants themselves just above. |
2037 | return 1; |
2038 | |
2039 | return getConstantPoolLoadCost(Ty, CostKind); |
2040 | } |
2041 | |
2042 | InstructionCost RISCVTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, |
2043 | Align Alignment, |
2044 | unsigned AddressSpace, |
2045 | TTI::TargetCostKind CostKind, |
2046 | TTI::OperandValueInfo OpInfo, |
2047 | const Instruction *I) const { |
2048 | EVT VT = TLI->getValueType(DL, Ty: Src, AllowUnknown: true); |
2049 | // Type legalization can't handle structs |
2050 | if (VT == MVT::Other) |
2051 | return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, |
2052 | CostKind, OpInfo, I); |
2053 | |
2054 | InstructionCost Cost = 0; |
2055 | if (Opcode == Instruction::Store && OpInfo.isConstant()) |
2056 | Cost += getStoreImmCost(Ty: Src, OpInfo, CostKind); |
2057 | |
2058 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: Src); |
2059 | |
2060 | InstructionCost BaseCost = [&]() { |
2061 | InstructionCost Cost = LT.first; |
2062 | if (CostKind != TTI::TCK_RecipThroughput) |
2063 | return Cost; |
2064 | |
2065 | // Our actual lowering for the case where a wider legal type is available |
    // uses a VL predicated load on the wider type. This is reflected in
2067 | // the result of getTypeLegalizationCost, but BasicTTI assumes the |
2068 | // widened cases are scalarized. |
2069 | const DataLayout &DL = this->getDataLayout(); |
2070 | if (Src->isVectorTy() && LT.second.isVector() && |
2071 | TypeSize::isKnownLT(LHS: DL.getTypeStoreSizeInBits(Ty: Src), |
2072 | RHS: LT.second.getSizeInBits())) |
2073 | return Cost; |
2074 | |
2075 | return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, |
2076 | CostKind, OpInfo, I); |
2077 | }(); |
2078 | |
2079 | // Assume memory ops cost scale with the number of vector registers |
2080 | // possible accessed by the instruction. Note that BasicTTI already |
2081 | // handles the LT.first term for us. |
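  // (e.g. an m8 access is modelled as roughly eight times the cost of an m1
  // access).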
2082 | if (LT.second.isVector() && CostKind != TTI::TCK_CodeSize) |
2083 | BaseCost *= TLI->getLMULCost(VT: LT.second); |
2084 | return Cost + BaseCost; |
2085 | } |
2086 | |
2087 | InstructionCost RISCVTTIImpl::getCmpSelInstrCost( |
2088 | unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, |
2089 | TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, |
2090 | TTI::OperandValueInfo Op2Info, const Instruction *I) const { |
2091 | if (CostKind != TTI::TCK_RecipThroughput) |
2092 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, |
2093 | Op1Info, Op2Info, I); |
2094 | |
2095 | if (isa<FixedVectorType>(Val: ValTy) && !ST->useRVVForFixedLengthVectors()) |
2096 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, |
2097 | Op1Info, Op2Info, I); |
2098 | |
2099 | // Skip if scalar size of ValTy is bigger than ELEN. |
2100 | if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() > ST->getELen()) |
2101 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, |
2102 | Op1Info, Op2Info, I); |
2103 | |
2104 | auto GetConstantMatCost = |
2105 | [&](TTI::OperandValueInfo OpInfo) -> InstructionCost { |
2106 | if (OpInfo.isUniform()) |
      // We return 0 because we currently ignore the cost of materializing
      // scalar constants in GPRs.
2109 | return 0; |
2110 | |
2111 | return getConstantPoolLoadCost(Ty: ValTy, CostKind); |
2112 | }; |
2113 | |
2114 | InstructionCost ConstantMatCost; |
2115 | if (Op1Info.isConstant()) |
2116 | ConstantMatCost += GetConstantMatCost(Op1Info); |
2117 | if (Op2Info.isConstant()) |
2118 | ConstantMatCost += GetConstantMatCost(Op2Info); |
2119 | |
2120 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: ValTy); |
2121 | if (Opcode == Instruction::Select && ValTy->isVectorTy()) { |
2122 | if (CondTy->isVectorTy()) { |
2123 | if (ValTy->getScalarSizeInBits() == 1) { |
2124 | // vmandn.mm v8, v8, v9 |
2125 | // vmand.mm v9, v0, v9 |
2126 | // vmor.mm v0, v9, v8 |
2127 | return ConstantMatCost + |
2128 | LT.first * |
2129 | getRISCVInstructionCost( |
2130 | OpCodes: {RISCV::VMANDN_MM, RISCV::VMAND_MM, RISCV::VMOR_MM}, |
2131 | VT: LT.second, CostKind); |
2132 | } |
2133 | // vselect and max/min are supported natively. |
2134 | return ConstantMatCost + |
2135 | LT.first * getRISCVInstructionCost(OpCodes: RISCV::VMERGE_VVM, VT: LT.second, |
2136 | CostKind); |
2137 | } |
2138 | |
2139 | if (ValTy->getScalarSizeInBits() == 1) { |
2140 | // vmv.v.x v9, a0 |
2141 | // vmsne.vi v9, v9, 0 |
2142 | // vmandn.mm v8, v8, v9 |
2143 | // vmand.mm v9, v0, v9 |
2144 | // vmor.mm v0, v9, v8 |
2145 | MVT InterimVT = LT.second.changeVectorElementType(EltVT: MVT::i8); |
2146 | return ConstantMatCost + |
2147 | LT.first * |
2148 | getRISCVInstructionCost(OpCodes: {RISCV::VMV_V_X, RISCV::VMSNE_VI}, |
2149 | VT: InterimVT, CostKind) + |
2150 | LT.first * getRISCVInstructionCost( |
2151 | OpCodes: {RISCV::VMANDN_MM, RISCV::VMAND_MM, RISCV::VMOR_MM}, |
2152 | VT: LT.second, CostKind); |
2153 | } |
2154 | |
2155 | // vmv.v.x v10, a0 |
2156 | // vmsne.vi v0, v10, 0 |
2157 | // vmerge.vvm v8, v9, v8, v0 |
2158 | return ConstantMatCost + |
2159 | LT.first * getRISCVInstructionCost( |
2160 | OpCodes: {RISCV::VMV_V_X, RISCV::VMSNE_VI, RISCV::VMERGE_VVM}, |
2161 | VT: LT.second, CostKind); |
2162 | } |
2163 | |
2164 | if ((Opcode == Instruction::ICmp) && ValTy->isVectorTy() && |
2165 | CmpInst::isIntPredicate(P: VecPred)) { |
2166 | // Use VMSLT_VV to represent VMSEQ, VMSNE, VMSLTU, VMSLEU, VMSLT, VMSLE |
2167 | // provided they incur the same cost across all implementations |
2168 | return ConstantMatCost + LT.first * getRISCVInstructionCost(OpCodes: RISCV::VMSLT_VV, |
2169 | VT: LT.second, |
2170 | CostKind); |
2171 | } |
2172 | |
2173 | if ((Opcode == Instruction::FCmp) && ValTy->isVectorTy() && |
2174 | CmpInst::isFPPredicate(P: VecPred)) { |
2175 | |
    // Use VMXOR_MM and VMXNOR_MM to generate an all-true/all-false mask
2177 | if ((VecPred == CmpInst::FCMP_FALSE) || (VecPred == CmpInst::FCMP_TRUE)) |
2178 | return ConstantMatCost + |
2179 | getRISCVInstructionCost(OpCodes: RISCV::VMXOR_MM, VT: LT.second, CostKind); |
2180 | |
2181 | // If we do not support the input floating point vector type, use the base |
2182 | // one which will calculate as: |
2183 | // ScalarizeCost + Num * Cost for fixed vector, |
2184 | // InvalidCost for scalable vector. |
2185 | if ((ValTy->getScalarSizeInBits() == 16 && !ST->hasVInstructionsF16()) || |
2186 | (ValTy->getScalarSizeInBits() == 32 && !ST->hasVInstructionsF32()) || |
2187 | (ValTy->getScalarSizeInBits() == 64 && !ST->hasVInstructionsF64())) |
2188 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, |
2189 | Op1Info, Op2Info, I); |
2190 | |
2191 | // Assuming vector fp compare and mask instructions are all the same cost |
2192 | // until a need arises to differentiate them. |
2193 | switch (VecPred) { |
2194 | case CmpInst::FCMP_ONE: // vmflt.vv + vmflt.vv + vmor.mm |
2195 | case CmpInst::FCMP_ORD: // vmfeq.vv + vmfeq.vv + vmand.mm |
2196 | case CmpInst::FCMP_UNO: // vmfne.vv + vmfne.vv + vmor.mm |
2197 | case CmpInst::FCMP_UEQ: // vmflt.vv + vmflt.vv + vmnor.mm |
2198 | return ConstantMatCost + |
2199 | LT.first * getRISCVInstructionCost( |
2200 | OpCodes: {RISCV::VMFLT_VV, RISCV::VMFLT_VV, RISCV::VMOR_MM}, |
2201 | VT: LT.second, CostKind); |
2202 | |
2203 | case CmpInst::FCMP_UGT: // vmfle.vv + vmnot.m |
2204 | case CmpInst::FCMP_UGE: // vmflt.vv + vmnot.m |
2205 | case CmpInst::FCMP_ULT: // vmfle.vv + vmnot.m |
2206 | case CmpInst::FCMP_ULE: // vmflt.vv + vmnot.m |
2207 | return ConstantMatCost + |
2208 | LT.first * |
2209 | getRISCVInstructionCost(OpCodes: {RISCV::VMFLT_VV, RISCV::VMNAND_MM}, |
2210 | VT: LT.second, CostKind); |
2211 | |
2212 | case CmpInst::FCMP_OEQ: // vmfeq.vv |
2213 | case CmpInst::FCMP_OGT: // vmflt.vv |
2214 | case CmpInst::FCMP_OGE: // vmfle.vv |
2215 | case CmpInst::FCMP_OLT: // vmflt.vv |
2216 | case CmpInst::FCMP_OLE: // vmfle.vv |
2217 | case CmpInst::FCMP_UNE: // vmfne.vv |
2218 | return ConstantMatCost + |
2219 | LT.first * |
2220 | getRISCVInstructionCost(OpCodes: RISCV::VMFLT_VV, VT: LT.second, CostKind); |
2221 | default: |
2222 | break; |
2223 | } |
2224 | } |
2225 | |
  // With ShortForwardBranchOpt or ConditionalMoveFusion, a scalar icmp +
  // select pair will lower to SELECT_CC and then to PseudoCCMOVGPR, which
  // generates a conditional branch + mv. The cost of the scalar (icmp +
  // select) is then (0 + select instr cost).
2230 | if (ST->hasConditionalMoveFusion() && I && isa<ICmpInst>(Val: I) && |
2231 | ValTy->isIntegerTy() && !I->user_empty()) { |
2232 | if (all_of(Range: I->users(), P: [&](const User *U) { |
2233 | return match(V: U, P: m_Select(C: m_Specific(V: I), L: m_Value(), R: m_Value())) && |
2234 | U->getType()->isIntegerTy() && |
2235 | !isa<ConstantData>(Val: U->getOperand(i: 1)) && |
2236 | !isa<ConstantData>(Val: U->getOperand(i: 2)); |
2237 | })) |
2238 | return 0; |
2239 | } |
2240 | |
2241 | // TODO: Add cost for scalar type. |
2242 | |
2243 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, |
2244 | Op1Info, Op2Info, I); |
2245 | } |
2246 | |
2247 | InstructionCost RISCVTTIImpl::getCFInstrCost(unsigned Opcode, |
2248 | TTI::TargetCostKind CostKind, |
2249 | const Instruction *I) const { |
2250 | if (CostKind != TTI::TCK_RecipThroughput) |
2251 | return Opcode == Instruction::PHI ? 0 : 1; |
2252 | // Branches are assumed to be predicted. |
2253 | return 0; |
2254 | } |
2255 | |
2256 | InstructionCost RISCVTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, |
2257 | TTI::TargetCostKind CostKind, |
2258 | unsigned Index, |
2259 | const Value *Op0, |
2260 | const Value *Op1) const { |
2261 | assert(Val->isVectorTy() && "This must be a vector type" ); |
2262 | |
2263 | if (Opcode != Instruction::ExtractElement && |
2264 | Opcode != Instruction::InsertElement) |
2265 | return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1); |
2266 | |
2267 | // Legalize the type. |
2268 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty: Val); |
2269 | |
2270 | // This type is legalized to a scalar type. |
2271 | if (!LT.second.isVector()) { |
2272 | auto *FixedVecTy = cast<FixedVectorType>(Val); |
2273 | // If Index is a known constant, cost is zero. |
2274 | if (Index != -1U) |
2275 | return 0; |
2276 | // Extract/InsertElement with non-constant index is very costly when |
2277 | // scalarized; estimate cost of loads/stores sequence via the stack: |
2278 | // ExtractElement cost: store vector to stack, load scalar; |
2279 | // InsertElement cost: store vector to stack, store scalar, load vector. |
2280 | Type *ElemTy = FixedVecTy->getElementType(); |
2281 | auto NumElems = FixedVecTy->getNumElements(); |
2282 | auto Align = DL.getPrefTypeAlign(Ty: ElemTy); |
2283 | InstructionCost LoadCost = |
2284 | getMemoryOpCost(Opcode: Instruction::Load, Src: ElemTy, Alignment: Align, AddressSpace: 0, CostKind); |
2285 | InstructionCost StoreCost = |
2286 | getMemoryOpCost(Opcode: Instruction::Store, Src: ElemTy, Alignment: Align, AddressSpace: 0, CostKind); |
2287 | return Opcode == Instruction::ExtractElement |
2288 | ? StoreCost * NumElems + LoadCost |
2289 | : (StoreCost + LoadCost) * NumElems + StoreCost; |
2290 | } |
2291 | |
2292 | // For unsupported scalable vector. |
2293 | if (LT.second.isScalableVector() && !LT.first.isValid()) |
2294 | return LT.first; |
2295 | |
2296 | // Mask vector extract/insert is expanded via e8. |
2297 | if (Val->getScalarSizeInBits() == 1) { |
2298 | VectorType *WideTy = |
2299 | VectorType::get(ElementType: IntegerType::get(C&: Val->getContext(), NumBits: 8), |
2300 | EC: cast<VectorType>(Val)->getElementCount()); |
2301 | if (Opcode == Instruction::ExtractElement) { |
2302 | InstructionCost ExtendCost |
2303 | = getCastInstrCost(Opcode: Instruction::ZExt, Dst: WideTy, Src: Val, |
2304 | CCH: TTI::CastContextHint::None, CostKind); |
      InstructionCost ExtractCost
        = getVectorInstrCost(Opcode, Val: WideTy, CostKind, Index, Op0: nullptr, Op1: nullptr);
2307 | return ExtendCost + ExtractCost; |
2308 | } |
2309 | InstructionCost ExtendCost |
2310 | = getCastInstrCost(Opcode: Instruction::ZExt, Dst: WideTy, Src: Val, |
2311 | CCH: TTI::CastContextHint::None, CostKind); |
2312 | InstructionCost InsertCost |
2313 | = getVectorInstrCost(Opcode, Val: WideTy, CostKind, Index, Op0: nullptr, Op1: nullptr); |
2314 | InstructionCost TruncCost |
2315 | = getCastInstrCost(Opcode: Instruction::Trunc, Dst: Val, Src: WideTy, |
2316 | CCH: TTI::CastContextHint::None, CostKind); |
2317 | return ExtendCost + InsertCost + TruncCost; |
2318 | } |
2319 | |
2320 | |
  // In RVV, we can use vslidedown + vmv.x.s to extract an element from a
  // vector and vslideup + vmv.s.x to insert an element into a vector.
2323 | unsigned BaseCost = 1; |
  // For insertelement we additionally need to add 1 to the index as an input
  // of vslideup.
2325 | unsigned SlideCost = Opcode == Instruction::InsertElement ? 2 : 1; |
2326 | |
2327 | if (Index != -1U) { |
2328 | // The type may be split. For fixed-width vectors we can normalize the |
2329 | // index to the new type. |
2330 | if (LT.second.isFixedLengthVector()) { |
2331 | unsigned Width = LT.second.getVectorNumElements(); |
2332 | Index = Index % Width; |
2333 | } |
2334 | |
2335 | // If exact VLEN is known, we will insert/extract into the appropriate |
2336 | // subvector with no additional subvector insert/extract cost. |
2337 | if (auto VLEN = ST->getRealVLen()) { |
2338 | unsigned EltSize = LT.second.getScalarSizeInBits(); |
2339 | unsigned M1Max = *VLEN / EltSize; |
2340 | Index = Index % M1Max; |
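      // e.g. with VLEN=128 and e32 elements M1Max is 4, so element 5 becomes
      // index 1 within its m1 subregister.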
2341 | } |
2342 | |
2343 | if (Index == 0) |
2344 | // We can extract/insert the first element without vslidedown/vslideup. |
2345 | SlideCost = 0; |
2346 | else if (ST->hasVendorXRivosVisni() && isUInt<5>(x: Index) && |
2347 | Val->getScalarType()->isIntegerTy()) |
2348 | SlideCost = 0; // With ri.vinsert/ri.vextract there is no slide needed |
2349 | else if (Opcode == Instruction::InsertElement) |
2350 | SlideCost = 1; // With a constant index, we do not need to use addi. |
2351 | } |
2352 | |
  // When the vector needs to be split into multiple register groups and the
  // index exceeds a single vector register group, we need to insert/extract
  // the element via the stack.
2356 | if (LT.first > 1 && |
2357 | ((Index == -1U) || (Index >= LT.second.getVectorMinNumElements() && |
2358 | LT.second.isScalableVector()))) { |
2359 | Type *ScalarType = Val->getScalarType(); |
2360 | Align VecAlign = DL.getPrefTypeAlign(Ty: Val); |
2361 | Align SclAlign = DL.getPrefTypeAlign(Ty: ScalarType); |
2362 | // Extra addi for unknown index. |
2363 | InstructionCost IdxCost = Index == -1U ? 1 : 0; |
2364 | |
2365 | // Store all split vectors into stack and load the target element. |
2366 | if (Opcode == Instruction::ExtractElement) |
2367 | return getMemoryOpCost(Opcode: Instruction::Store, Src: Val, Alignment: VecAlign, AddressSpace: 0, CostKind) + |
2368 | getMemoryOpCost(Opcode: Instruction::Load, Src: ScalarType, Alignment: SclAlign, AddressSpace: 0, |
2369 | CostKind) + |
2370 | IdxCost; |
2371 | |
2372 | // Store all split vectors into stack and store the target element and load |
2373 | // vectors back. |
2374 | return getMemoryOpCost(Opcode: Instruction::Store, Src: Val, Alignment: VecAlign, AddressSpace: 0, CostKind) + |
2375 | getMemoryOpCost(Opcode: Instruction::Load, Src: Val, Alignment: VecAlign, AddressSpace: 0, CostKind) + |
2376 | getMemoryOpCost(Opcode: Instruction::Store, Src: ScalarType, Alignment: SclAlign, AddressSpace: 0, |
2377 | CostKind) + |
2378 | IdxCost; |
2379 | } |
2380 | |
  // Extracting or inserting an i64 element on a target with XLEN=32 needs
  // more instructions.
2382 | if (Val->getScalarType()->isIntegerTy() && |
2383 | ST->getXLen() < Val->getScalarSizeInBits()) { |
2384 | // For extractelement, we need the following instructions: |
2385 | // vsetivli zero, 1, e64, m1, ta, mu (not count) |
2386 | // vslidedown.vx v8, v8, a0 |
2387 | // vmv.x.s a0, v8 |
2388 | // li a1, 32 |
2389 | // vsrl.vx v8, v8, a1 |
2390 | // vmv.x.s a1, v8 |
2391 | |
2392 | // For insertelement, we need the following instructions: |
2393 | // vsetivli zero, 2, e32, m4, ta, mu (not count) |
2394 | // vmv.v.i v12, 0 |
2395 | // vslide1up.vx v16, v12, a1 |
2396 | // vslide1up.vx v12, v16, a0 |
2397 | // addi a0, a2, 1 |
2398 | // vsetvli zero, a0, e64, m4, tu, mu (not count) |
2399 | // vslideup.vx v8, v12, a2 |
2400 | |
2401 | // TODO: should we count these special vsetvlis? |
2402 | BaseCost = Opcode == Instruction::InsertElement ? 3 : 4; |
2403 | } |
2404 | return BaseCost + SlideCost; |
2405 | } |
2406 | |
2407 | InstructionCost RISCVTTIImpl::getArithmeticInstrCost( |
2408 | unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, |
2409 | TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, |
2410 | ArrayRef<const Value *> Args, const Instruction *CxtI) const { |
2411 | |
2412 | // TODO: Handle more cost kinds. |
2413 | if (CostKind != TTI::TCK_RecipThroughput) |
2414 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info: Op1Info, Opd2Info: Op2Info, |
2415 | Args, CxtI); |
2416 | |
2417 | if (isa<FixedVectorType>(Val: Ty) && !ST->useRVVForFixedLengthVectors()) |
2418 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info: Op1Info, Opd2Info: Op2Info, |
2419 | Args, CxtI); |
2420 | |
2421 | // Skip if scalar size of Ty is bigger than ELEN. |
2422 | if (isa<VectorType>(Val: Ty) && Ty->getScalarSizeInBits() > ST->getELen()) |
2423 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info: Op1Info, Opd2Info: Op2Info, |
2424 | Args, CxtI); |
2425 | |
2426 | // Legalize the type. |
2427 | std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty); |
2428 | |
2429 | // TODO: Handle scalar type. |
2430 | if (!LT.second.isVector()) |
2431 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info: Op1Info, Opd2Info: Op2Info, |
2432 | Args, CxtI); |
2433 | |
2434 | // f16 with zvfhmin and bf16 will be promoted to f32. |
2435 | // FIXME: nxv32[b]f16 will be custom lowered and split. |
2436 | unsigned ISDOpcode = TLI->InstructionOpcodeToISD(Opcode); |
2437 | InstructionCost CastCost = 0; |
2438 | if ((LT.second.getVectorElementType() == MVT::f16 || |
2439 | LT.second.getVectorElementType() == MVT::bf16) && |
2440 | TLI->getOperationAction(Op: ISDOpcode, VT: LT.second) == |
2441 | TargetLoweringBase::LegalizeAction::Promote) { |
2442 | MVT PromotedVT = TLI->getTypeToPromoteTo(Op: ISDOpcode, VT: LT.second); |
2443 | Type *PromotedTy = EVT(PromotedVT).getTypeForEVT(Context&: Ty->getContext()); |
2444 | Type *LegalTy = EVT(LT.second).getTypeForEVT(Context&: Ty->getContext()); |
2445 | // Add cost of extending arguments |
2446 | CastCost += LT.first * Args.size() * |
2447 | getCastInstrCost(Opcode: Instruction::FPExt, Dst: PromotedTy, Src: LegalTy, |
2448 | CCH: TTI::CastContextHint::None, CostKind); |
2449 | // Add cost of truncating result |
2450 | CastCost += |
2451 | LT.first * getCastInstrCost(Opcode: Instruction::FPTrunc, Dst: LegalTy, Src: PromotedTy, |
2452 | CCH: TTI::CastContextHint::None, CostKind); |
2453 | // Compute cost of op in promoted type |
2454 | LT.second = PromotedVT; |
2455 | } |
2456 | |
2457 | auto getConstantMatCost = |
2458 | [&](unsigned Operand, TTI::OperandValueInfo OpInfo) -> InstructionCost { |
2459 | if (OpInfo.isUniform() && canSplatOperand(Opcode, Operand)) |
2460 | // Two sub-cases: |
2461 | // * Has a 5 bit immediate operand which can be splatted. |
2462 | // * Has a larger immediate which must be materialized in scalar register |
2463 | // We return 0 for both as we currently ignore the cost of materializing |
2464 | // scalar constants in GPRs. |
2465 | return 0; |
2466 | |
2467 | return getConstantPoolLoadCost(Ty, CostKind); |
2468 | }; |
2469 | |
2470 | // Add the cost of materializing any constant vectors required. |
2471 | InstructionCost ConstantMatCost = 0; |
2472 | if (Op1Info.isConstant()) |
2473 | ConstantMatCost += getConstantMatCost(0, Op1Info); |
2474 | if (Op2Info.isConstant()) |
2475 | ConstantMatCost += getConstantMatCost(1, Op2Info); |
2476 | |
2477 | unsigned Op; |
2478 | switch (ISDOpcode) { |
2479 | case ISD::ADD: |
2480 | case ISD::SUB: |
2481 | Op = RISCV::VADD_VV; |
2482 | break; |
2483 | case ISD::SHL: |
2484 | case ISD::SRL: |
2485 | case ISD::SRA: |
2486 | Op = RISCV::VSLL_VV; |
2487 | break; |
2488 | case ISD::AND: |
2489 | case ISD::OR: |
2490 | case ISD::XOR: |
2491 | Op = (Ty->getScalarSizeInBits() == 1) ? RISCV::VMAND_MM : RISCV::VAND_VV; |
2492 | break; |
2493 | case ISD::MUL: |
2494 | case ISD::MULHS: |
2495 | case ISD::MULHU: |
2496 | Op = RISCV::VMUL_VV; |
2497 | break; |
2498 | case ISD::SDIV: |
2499 | case ISD::UDIV: |
2500 | Op = RISCV::VDIV_VV; |
2501 | break; |
2502 | case ISD::SREM: |
2503 | case ISD::UREM: |
2504 | Op = RISCV::VREM_VV; |
2505 | break; |
2506 | case ISD::FADD: |
2507 | case ISD::FSUB: |
2508 | Op = RISCV::VFADD_VV; |
2509 | break; |
2510 | case ISD::FMUL: |
2511 | Op = RISCV::VFMUL_VV; |
2512 | break; |
2513 | case ISD::FDIV: |
2514 | Op = RISCV::VFDIV_VV; |
2515 | break; |
2516 | case ISD::FNEG: |
2517 | Op = RISCV::VFSGNJN_VV; |
2518 | break; |
2519 | default: |
2520 | // Assuming all other instructions have the same cost until a need arises to |
2521 | // differentiate them. |
2522 | return CastCost + ConstantMatCost + |
2523 | BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info: Op1Info, Opd2Info: Op2Info, |
2524 | Args, CxtI); |
2525 | } |
2526 | |
2527 | InstructionCost InstrCost = getRISCVInstructionCost(OpCodes: Op, VT: LT.second, CostKind); |
2528 | // We use BasicTTIImpl to calculate scalar costs, which assumes floating point |
2529 | // ops are twice as expensive as integer ops. Do the same for vectors so |
2530 | // scalar floating point ops aren't cheaper than their vector equivalents. |
2531 | if (Ty->isFPOrFPVectorTy()) |
2532 | InstrCost *= 2; |
2533 | return CastCost + ConstantMatCost + LT.first * InstrCost; |
2534 | } |
2535 | |
2536 | // TODO: Deduplicate from TargetTransformInfoImplCRTPBase. |
2537 | InstructionCost RISCVTTIImpl::getPointersChainCost( |
2538 | ArrayRef<const Value *> Ptrs, const Value *Base, |
2539 | const TTI::PointersChainInfo &Info, Type *AccessTy, |
2540 | TTI::TargetCostKind CostKind) const { |
2541 | InstructionCost Cost = TTI::TCC_Free; |
2542 | // In the basic model we only take GEP instructions into account (although an
2543 | // alloca instruction, a plain value, constants and/or constant expressions,
2544 | // PHIs, bitcasts, or anything else allowed to be used as a pointer can also
2545 | // appear here). Typically, if Base is not a GEP instruction and all the
2546 | // pointers are relative to the same base address, the rest are either GEP
2547 | // instructions, PHIs, bitcasts or constants. When they share the same base,
2548 | // we simply cost each non-Base GEP as an ADD operation if any of its
2549 | // indices is non-constant.
2550 | // If there are no known dependencies between the pointers, the cost is the
2551 | // sum of the costs of the individual GEP instructions.
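// Illustrative example: in a unit-stride chain over a 4-byte AccessTy, the
// I-th pointer is at byte offset 4 * I from the base; as long as that offset
// fits the 12-bit signed immediate of RISC-V loads/stores,
// isLegalAddressingMode succeeds below and the GEP is treated as free.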
2552 | for (auto [I, V] : enumerate(First&: Ptrs)) { |
2553 | const auto *GEP = dyn_cast<GetElementPtrInst>(Val: V); |
2554 | if (!GEP) |
2555 | continue; |
2556 | if (Info.isSameBase() && V != Base) { |
2557 | if (GEP->hasAllConstantIndices()) |
2558 | continue; |
2559 | // If the chain is unit-stride and BaseReg + stride*i is a legal |
2560 | // addressing mode, then presume the base GEP is sitting around in a |
2561 | // register somewhere and check if we can fold the offset relative to |
2562 | // it. |
2563 | unsigned Stride = DL.getTypeStoreSize(Ty: AccessTy); |
2564 | if (Info.isUnitStride() && |
2565 | isLegalAddressingMode(Ty: AccessTy, |
2566 | /* BaseGV */ nullptr, |
2567 | /* BaseOffset */ Stride * I, |
2568 | /* HasBaseReg */ true, |
2569 | /* Scale */ 0, |
2570 | AddrSpace: GEP->getType()->getPointerAddressSpace())) |
2571 | continue; |
2572 | Cost += getArithmeticInstrCost(Opcode: Instruction::Add, Ty: GEP->getType(), CostKind, |
2573 | Op1Info: {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None}, |
2574 | Op2Info: {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None}, Args: {}); |
2575 | } else { |
2576 | SmallVector<const Value *> Indices(GEP->indices()); |
2577 | Cost += getGEPCost(PointeeType: GEP->getSourceElementType(), Ptr: GEP->getPointerOperand(), |
2578 | Operands: Indices, AccessType: AccessTy, CostKind); |
2579 | } |
2580 | } |
2581 | return Cost; |
2582 | } |
2583 | |
2584 | void RISCVTTIImpl::getUnrollingPreferences(
2585 | Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, |
2586 | OptimizationRemarkEmitter *ORE) const { |
2587 | // TODO: More tuning on benchmarks and metrics with changes as needed |
2588 | // would apply to all settings below to enable performance. |
2589 | |
2590 | |
2591 | if (ST->enableDefaultUnroll()) |
2592 | return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE); |
2593 | |
2594 | // Enable upper-bound unrolling universally; it does not depend on the
2595 | // conditions below.
2596 | UP.UpperBound = true; |
2597 | |
2598 | // Disable loop unrolling for Oz and Os. |
2599 | UP.OptSizeThreshold = 0; |
2600 | UP.PartialOptSizeThreshold = 0; |
2601 | if (L->getHeader()->getParent()->hasOptSize()) |
2602 | return; |
2603 | |
2604 | SmallVector<BasicBlock *, 4> ExitingBlocks; |
2605 | L->getExitingBlocks(ExitingBlocks); |
2606 | LLVM_DEBUG(dbgs() << "Loop has:\n" |
2607 | << "Blocks: " << L->getNumBlocks() << "\n" |
2608 | << "Exit blocks: " << ExitingBlocks.size() << "\n" ); |
2609 | |
2610 | // Only allow one exiting block other than the latch. Bailing out early here
2611 | // mirrors the profitability calculation of the runtime unroller.
2612 | if (ExitingBlocks.size() > 2) |
2613 | return; |
2614 | |
2615 | // Limit the CFG of the loop body for targets with a branch predictor. |
2616 | // Allowing 4 blocks permits if-then-else diamonds in the body. |
2617 | if (L->getNumBlocks() > 4) |
2618 | return; |
2619 | |
2620 | // Don't unroll vectorized loops, including the remainder loop |
2621 | if (getBooleanLoopAttribute(TheLoop: L, Name: "llvm.loop.isvectorized" )) |
2622 | return; |
2623 | |
2624 | // Scan the loop: don't unroll loops with calls as this could prevent |
2625 | // inlining. |
2626 | InstructionCost Cost = 0; |
2627 | for (auto *BB : L->getBlocks()) { |
2628 | for (auto &I : *BB) { |
2629 | // Initial setting - Don't unroll loops containing vectorized |
2630 | // instructions. |
2631 | if (I.getType()->isVectorTy()) |
2632 | return; |
2633 | |
2634 | if (isa<CallInst>(Val: I) || isa<InvokeInst>(Val: I)) { |
2635 | if (const Function *F = cast<CallBase>(Val&: I).getCalledFunction()) { |
2636 | if (!isLoweredToCall(F)) |
2637 | continue; |
2638 | } |
2639 | return; |
2640 | } |
2641 | |
2642 | SmallVector<const Value *> Operands(I.operand_values()); |
2643 | Cost += getInstructionCost(U: &I, Operands, |
2644 | CostKind: TargetTransformInfo::TCK_SizeAndLatency); |
2645 | } |
2646 | } |
2647 | |
2648 | LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n" ); |
2649 | |
2650 | UP.Partial = true; |
2651 | UP.Runtime = true; |
2652 | UP.UnrollRemainder = true; |
2653 | UP.UnrollAndJam = true; |
2654 | |
2655 | // Forcing the unrolling of small loops can be very useful because of the
2656 | // taken-branch cost of the backedge.
2657 | if (Cost < 12) |
2658 | UP.Force = true; |
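// Illustrative example: a loop body of about ten scalar instructions
// (size-and-latency cost below 12) gets UP.Force set, since for such small
// bodies the taken backedge branch is a comparatively large per-iteration
// cost.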
2659 | } |
2660 | |
2661 | void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE, |
2662 | TTI::PeelingPreferences &PP) const { |
2663 | BaseT::getPeelingPreferences(L, SE, PP); |
2664 | } |
2665 | |
2666 | unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) const { |
2667 | if (Ty->isVectorTy()) { |
2668 | // f16 with only zvfhmin and bf16 will be promoted to f32 |
2669 | Type *EltTy = cast<VectorType>(Val: Ty)->getElementType(); |
2670 | if ((EltTy->isHalfTy() && !ST->hasVInstructionsF16()) || |
2671 | EltTy->isBFloatTy()) |
2672 | Ty = VectorType::get(ElementType: Type::getFloatTy(C&: Ty->getContext()), |
2673 | Other: cast<VectorType>(Val: Ty)); |
2674 | |
2675 | TypeSize Size = DL.getTypeSizeInBits(Ty); |
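// Illustrative example: a <vscale x 4 x i32> value has a known minimum size of
// 128 bits, so with RVVBitsPerBlock == 64 it is reported as occupying
// divideCeil(128, 64) == 2 vector registers (an LMUL=2 register group).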
2676 | if (Size.isScalable() && ST->hasVInstructions()) |
2677 | return divideCeil(Numerator: Size.getKnownMinValue(), Denominator: RISCV::RVVBitsPerBlock); |
2678 | |
2679 | if (ST->useRVVForFixedLengthVectors()) |
2680 | return divideCeil(Numerator: Size, Denominator: ST->getRealMinVLen()); |
2681 | } |
2682 | |
2683 | return BaseT::getRegUsageForType(Ty); |
2684 | } |
2685 | |
2686 | unsigned RISCVTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { |
2687 | if (SLPMaxVF.getNumOccurrences()) |
2688 | return SLPMaxVF; |
2689 | |
2690 | // Return how many elements can fit in getRegisterBitWidth. This is the
2691 | // same routine as used in LoopVectorizer. We should probably be |
2692 | // accounting for whether we actually have instructions with the right |
2693 | // lane type, but we don't have enough information to do that without |
2694 | // some additional plumbing which hasn't been justified yet. |
2695 | TypeSize RegWidth = |
2696 | getRegisterBitWidth(K: TargetTransformInfo::RGK_FixedWidthVector); |
2697 | // If no vector registers, or absurd element widths, disable |
2698 | // vectorization by returning 1. |
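// Illustrative example: if getRegisterBitWidth reports 128 bits for
// fixed-width vectors, 32-bit elements give the SLP vectorizer a maximum VF
// of 128 / 32 = 4.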
2699 | return std::max<unsigned>(a: 1U, b: RegWidth.getFixedValue() / ElemWidth); |
2700 | } |
2701 | |
2702 | unsigned RISCVTTIImpl::getMinTripCountTailFoldingThreshold() const { |
2703 | return RVVMinTripCount; |
2704 | } |
2705 | |
2706 | TTI::AddressingModeKind |
2707 | RISCVTTIImpl::getPreferredAddressingMode(const Loop *L, |
2708 | ScalarEvolution *SE) const { |
2709 | if (ST->hasVendorXCVmem() && !ST->is64Bit()) |
2710 | return TTI::AMK_PostIndexed; |
2711 | |
2712 | return BasicTTIImplBase::getPreferredAddressingMode(L, SE); |
2713 | } |
2714 | |
2715 | bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1, |
2716 | const TargetTransformInfo::LSRCost &C2) const { |
2717 | // The RISC-V-specific part here is that instruction count has first priority.
2718 | // If we need to emit adds inside the loop to add up base registers, then |
2719 | // we need at least one extra temporary register. |
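// Illustrative example: a formula needing 4 loop instructions and 6 registers
// is preferred over one needing 5 instructions and 4 registers, because Insns
// is compared before the (adjusted) register count.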
2720 | unsigned C1NumRegs = C1.NumRegs + (C1.NumBaseAdds != 0); |
2721 | unsigned C2NumRegs = C2.NumRegs + (C2.NumBaseAdds != 0); |
2722 | return std::tie(args: C1.Insns, args&: C1NumRegs, args: C1.AddRecCost, |
2723 | args: C1.NumIVMuls, args: C1.NumBaseAdds, |
2724 | args: C1.ScaleCost, args: C1.ImmCost, args: C1.SetupCost) < |
2725 | std::tie(args: C2.Insns, args&: C2NumRegs, args: C2.AddRecCost, |
2726 | args: C2.NumIVMuls, args: C2.NumBaseAdds, |
2727 | args: C2.ScaleCost, args: C2.ImmCost, args: C2.SetupCost); |
2728 | } |
2729 | |
2730 | bool RISCVTTIImpl::isLegalMaskedExpandLoad(Type *DataTy, |
2731 | Align Alignment) const { |
2732 | auto *VTy = dyn_cast<VectorType>(Val: DataTy); |
2733 | if (!VTy || VTy->isScalableTy()) |
2734 | return false; |
2735 | |
2736 | if (!isLegalMaskedLoadStore(DataType: DataTy, Alignment)) |
2737 | return false; |
2738 | |
2739 | // FIXME: If it is an i8 vector and the element count exceeds 256, we should |
2740 | // scalarize these types with LMUL >= maximum fixed-length LMUL. |
2741 | if (VTy->getElementType()->isIntegerTy(Bitwidth: 8)) |
2742 | if (VTy->getElementCount().getFixedValue() > 256) |
2743 | return VTy->getPrimitiveSizeInBits() / ST->getRealMinVLen() < |
2744 | ST->getMaxLMULForFixedLengthVectors(); |
2745 | return true; |
2746 | } |
2747 | |
2748 | bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, |
2749 | Align Alignment) const { |
2750 | auto *VTy = dyn_cast<VectorType>(Val: DataTy); |
2751 | if (!VTy || VTy->isScalableTy()) |
2752 | return false; |
2753 | |
2754 | if (!isLegalMaskedLoadStore(DataType: DataTy, Alignment)) |
2755 | return false; |
2756 | return true; |
2757 | } |
2758 | |
2759 | /// See if \p I should be considered for address type promotion. We check if \p |
2760 | /// I is a sext with the right type that is used in memory accesses. If it is
2761 | /// used in a "complex" getelementptr, we allow it to be promoted without
2762 | /// finding other sext instructions that sign extended the same initial value.
2763 | /// A getelementptr is considered "complex" if it has more than 2 operands.
2764 | bool RISCVTTIImpl::shouldConsiderAddressTypePromotion( |
2765 | const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
2766 | bool Considerable = false; |
2767 | AllowPromotionWithoutCommonHeader = false; |
2768 | if (!isa<SExtInst>(Val: &I)) |
2769 | return false; |
2770 | Type *ConsideredSExtType = |
2771 | Type::getInt64Ty(C&: I.getParent()->getParent()->getContext()); |
2772 | if (I.getType() != ConsideredSExtType) |
2773 | return false; |
2774 | // See if the sext is the one with the right type and used in at least one |
2775 | // GetElementPtrInst. |
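// Illustrative example:
//   %idx = sext i32 %i to i64
//   %gep = getelementptr inbounds [64 x i32], ptr %A, i64 %idx, i64 %j
// The GEP has more than 2 operands, so the sext may be promoted without a
// common header.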
2776 | for (const User *U : I.users()) { |
2777 | if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(Val: U)) { |
2778 | Considerable = true; |
2779 | // A getelementptr is considered as "complex" if it has more than 2 |
2780 | // operands. We will promote a SExt used in such complex GEP as we |
2781 | // expect some computation to be merged if they are done on 64 bits. |
2782 | if (GEPInst->getNumOperands() > 2) { |
2783 | AllowPromotionWithoutCommonHeader = true; |
2784 | break; |
2785 | } |
2786 | } |
2787 | } |
2788 | return Considerable; |
2789 | } |
2790 | |
2791 | bool RISCVTTIImpl::canSplatOperand(unsigned Opcode, int Operand) const { |
2792 | switch (Opcode) { |
2793 | case Instruction::Add: |
2794 | case Instruction::Sub: |
2795 | case Instruction::Mul: |
2796 | case Instruction::And: |
2797 | case Instruction::Or: |
2798 | case Instruction::Xor: |
2799 | case Instruction::FAdd: |
2800 | case Instruction::FSub: |
2801 | case Instruction::FMul: |
2802 | case Instruction::FDiv: |
2803 | case Instruction::ICmp: |
2804 | case Instruction::FCmp: |
2805 | return true; |
2806 | case Instruction::Shl: |
2807 | case Instruction::LShr: |
2808 | case Instruction::AShr: |
2809 | case Instruction::UDiv: |
2810 | case Instruction::SDiv: |
2811 | case Instruction::URem: |
2812 | case Instruction::SRem: |
2813 | case Instruction::Select: |
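// For these only the second operand may be the splatted scalar: e.g. vsll.vx
// and vsll.vi take the shift amount as the scalar and there is no form with a
// scalar base and a vector shift amount; the same holds for the divisions and
// remainders.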
2814 | return Operand == 1; |
2815 | default: |
2816 | return false; |
2817 | } |
2818 | } |
2819 | |
2820 | bool RISCVTTIImpl::canSplatOperand(Instruction *I, int Operand) const { |
2821 | if (!I->getType()->isVectorTy() || !ST->hasVInstructions()) |
2822 | return false; |
2823 | |
2824 | if (canSplatOperand(Opcode: I->getOpcode(), Operand)) |
2825 | return true; |
2826 | |
2827 | auto *II = dyn_cast<IntrinsicInst>(Val: I); |
2828 | if (!II) |
2829 | return false; |
2830 | |
2831 | switch (II->getIntrinsicID()) { |
2832 | case Intrinsic::fma: |
2833 | case Intrinsic::vp_fma: |
2834 | case Intrinsic::fmuladd: |
2835 | case Intrinsic::vp_fmuladd: |
2836 | return Operand == 0 || Operand == 1; |
2837 | case Intrinsic::vp_shl: |
2838 | case Intrinsic::vp_lshr: |
2839 | case Intrinsic::vp_ashr: |
2840 | case Intrinsic::vp_udiv: |
2841 | case Intrinsic::vp_sdiv: |
2842 | case Intrinsic::vp_urem: |
2843 | case Intrinsic::vp_srem: |
2844 | case Intrinsic::ssub_sat: |
2845 | case Intrinsic::vp_ssub_sat: |
2846 | case Intrinsic::usub_sat: |
2847 | case Intrinsic::vp_usub_sat: |
2848 | case Intrinsic::vp_select: |
2849 | return Operand == 1; |
2850 | // These intrinsics are commutative. |
2851 | case Intrinsic::vp_add: |
2852 | case Intrinsic::vp_mul: |
2853 | case Intrinsic::vp_and: |
2854 | case Intrinsic::vp_or: |
2855 | case Intrinsic::vp_xor: |
2856 | case Intrinsic::vp_fadd: |
2857 | case Intrinsic::vp_fmul: |
2858 | case Intrinsic::vp_icmp: |
2859 | case Intrinsic::vp_fcmp: |
2860 | case Intrinsic::smin: |
2861 | case Intrinsic::vp_smin: |
2862 | case Intrinsic::umin: |
2863 | case Intrinsic::vp_umin: |
2864 | case Intrinsic::smax: |
2865 | case Intrinsic::vp_smax: |
2866 | case Intrinsic::umax: |
2867 | case Intrinsic::vp_umax: |
2868 | case Intrinsic::sadd_sat: |
2869 | case Intrinsic::vp_sadd_sat: |
2870 | case Intrinsic::uadd_sat: |
2871 | case Intrinsic::vp_uadd_sat: |
2872 | // These intrinsics have 'vr' versions. |
2873 | case Intrinsic::vp_sub: |
2874 | case Intrinsic::vp_fsub: |
2875 | case Intrinsic::vp_fdiv: |
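// e.g. vrsub.vx, vfrsub.vf and vfrdiv.vf allow the splatted scalar to be the
// first operand.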
2876 | return Operand == 0 || Operand == 1; |
2877 | default: |
2878 | return false; |
2879 | } |
2880 | } |
2881 | |
2882 | /// Check if sinking \p I's operands to I's basic block is profitable, because |
2883 | /// the operands can be folded into a target instruction, e.g. |
2884 | /// splats of scalars can fold into vector instructions. |
2885 | bool RISCVTTIImpl::isProfitableToSinkOperands( |
2886 | Instruction *I, SmallVectorImpl<Use *> &Ops) const { |
2887 | using namespace llvm::PatternMatch; |
2888 | |
2889 | if (I->isBitwiseLogicOp()) { |
2890 | if (!I->getType()->isVectorTy()) { |
2891 | if (ST->hasStdExtZbb() || ST->hasStdExtZbkb()) { |
2892 | for (auto &Op : I->operands()) { |
2893 | // (and/or/xor X, (not Y)) -> (andn/orn/xnor X, Y) |
2894 | if (match(V: Op.get(), P: m_Not(V: m_Value()))) { |
2895 | Ops.push_back(Elt: &Op); |
2896 | return true; |
2897 | } |
2898 | } |
2899 | } |
2900 | } else if (I->getOpcode() == Instruction::And && ST->hasStdExtZvkb()) { |
2901 | for (auto &Op : I->operands()) { |
2902 | // (and X, (not Y)) -> (vandn.vv X, Y) |
2903 | if (match(V: Op.get(), P: m_Not(V: m_Value()))) { |
2904 | Ops.push_back(Elt: &Op); |
2905 | return true; |
2906 | } |
2907 | // (and X, (splat (not Y))) -> (vandn.vx X, Y) |
2908 | if (match(V: Op.get(), P: m_Shuffle(v1: m_InsertElt(Val: m_Value(), Elt: m_Not(V: m_Value()), |
2909 | Idx: m_ZeroInt()), |
2910 | v2: m_Value(), mask: m_ZeroMask()))) { |
2911 | Use &InsertElt = cast<Instruction>(Val&: Op)->getOperandUse(i: 0); |
2912 | Use &Not = cast<Instruction>(Val&: InsertElt)->getOperandUse(i: 1); |
2913 | Ops.push_back(Elt: &Not); |
2914 | Ops.push_back(Elt: &InsertElt); |
2915 | Ops.push_back(Elt: &Op); |
2916 | return true; |
2917 | } |
2918 | } |
2919 | } |
2920 | } |
2921 | |
2922 | if (!I->getType()->isVectorTy() || !ST->hasVInstructions()) |
2923 | return false; |
2924 | |
2925 | // Don't sink splat operands if the target doesn't want them sunk. Some
2926 | // targets require S2V transfer buffers, and we can run out of them copying
2927 | // the same value repeatedly.
2928 | // FIXME: It could still be worth doing if it would improve vector register |
2929 | // pressure and prevent a vector spill. |
2930 | if (!ST->sinkSplatOperands()) |
2931 | return false; |
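// Illustrative example of the pattern handled below:
//   %ins   = insertelement <4 x i32> poison, i32 %x, i32 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
//                          <4 x i32> zeroinitializer
//   %add   = add <4 x i32> %v, %splat
// Sinking %ins and %splat into %add's block lets instruction selection fold
// the scalar %x directly into a vadd.vx.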
2932 | |
2933 | for (auto OpIdx : enumerate(First: I->operands())) { |
2934 | if (!canSplatOperand(I, Operand: OpIdx.index())) |
2935 | continue; |
2936 | |
2937 | Instruction *Op = dyn_cast<Instruction>(Val: OpIdx.value().get()); |
2938 | // Make sure we are not already sinking this operand |
2939 | if (!Op || any_of(Range&: Ops, P: [&](Use *U) { return U->get() == Op; })) |
2940 | continue; |
2941 | |
2942 | // We are looking for a splat/vp.splat that can be sunk. |
2943 | bool IsVPSplat = match(V: Op, P: m_Intrinsic<Intrinsic::experimental_vp_splat>( |
2944 | Op0: m_Value(), Op1: m_Value(), Op2: m_Value())); |
2945 | if (!IsVPSplat && |
2946 | !match(V: Op, P: m_Shuffle(v1: m_InsertElt(Val: m_Undef(), Elt: m_Value(), Idx: m_ZeroInt()), |
2947 | v2: m_Undef(), mask: m_ZeroMask()))) |
2948 | continue; |
2949 | |
2950 | // Don't sink i1 splats. |
2951 | if (cast<VectorType>(Val: Op->getType())->getElementType()->isIntegerTy(Bitwidth: 1)) |
2952 | continue; |
2953 | |
2954 | // All uses of the shuffle should be sunk to avoid duplicating it across GPR
2955 | // and vector registers.
2956 | for (Use &U : Op->uses()) { |
2957 | Instruction *Insn = cast<Instruction>(Val: U.getUser()); |
2958 | if (!canSplatOperand(I: Insn, Operand: U.getOperandNo())) |
2959 | return false; |
2960 | } |
2961 | |
2962 | // Sink any fpexts since they might be used in a widening fp pattern. |
2963 | if (IsVPSplat) { |
2964 | if (isa<FPExtInst>(Val: Op->getOperand(i: 0))) |
2965 | Ops.push_back(Elt: &Op->getOperandUse(i: 0)); |
2966 | } else { |
2967 | Use *InsertEltUse = &Op->getOperandUse(i: 0); |
2968 | auto *InsertElt = cast<InsertElementInst>(Val: InsertEltUse); |
2969 | if (isa<FPExtInst>(Val: InsertElt->getOperand(i_nocapture: 1))) |
2970 | Ops.push_back(Elt: &InsertElt->getOperandUse(i: 1)); |
2971 | Ops.push_back(Elt: InsertEltUse); |
2972 | } |
2973 | Ops.push_back(Elt: &OpIdx.value()); |
2974 | } |
2975 | return true; |
2976 | } |
2977 | |
2978 | RISCVTTIImpl::TTI::MemCmpExpansionOptions |
2979 | RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { |
2980 | TTI::MemCmpExpansionOptions Options; |
2981 | // TODO: Enable expansion when unaligned access is not supported after we fix |
2982 | // issues in ExpandMemcmp. |
2983 | if (!ST->enableUnalignedScalarMem()) |
2984 | return Options; |
2985 | |
2986 | if (!ST->hasStdExtZbb() && !ST->hasStdExtZbkb() && !IsZeroCmp) |
2987 | return Options; |
2988 | |
2989 | Options.AllowOverlappingLoads = true; |
2990 | Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); |
2991 | Options.NumLoadsPerBlock = Options.MaxNumLoads; |
2992 | if (ST->is64Bit()) { |
2993 | Options.LoadSizes = {8, 4, 2, 1}; |
2994 | Options.AllowedTailExpansions = {3, 5, 6}; |
2995 | } else { |
2996 | Options.LoadSizes = {4, 2, 1}; |
2997 | Options.AllowedTailExpansions = {3}; |
2998 | } |
2999 | |
3000 | if (IsZeroCmp && ST->hasVInstructions()) { |
3001 | unsigned VLenB = ST->getRealMinVLen() / 8; |
3002 | // The minimum size should be `XLen / 8 + 1`, and the maximum size should be
3003 | // `VLenB * MaxLMUL` so that it fits in a single register group. |
3004 | unsigned MinSize = ST->getXLen() / 8 + 1; |
3005 | unsigned MaxSize = VLenB * ST->getMaxLMULForFixedLengthVectors(); |
3006 | for (unsigned Size = MinSize; Size <= MaxSize; Size++) |
3007 | Options.LoadSizes.insert(I: Options.LoadSizes.begin(), Elt: Size); |
3008 | } |
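// Illustrative example (assuming RV64, VLEN=128 and a maximum fixed-length
// LMUL of 8): MinSize = 64 / 8 + 1 = 9 and MaxSize = 16 * 8 = 128, so vector
// load sizes of 9..128 bytes are added in front of the scalar sizes
// {8, 4, 2, 1}.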
3009 | return Options; |
3010 | } |
3011 | |