//===- RISCVTargetTransformInfo.h - RISC-V specific TTI ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfoImplBase conforming object specific
/// to the RISC-V target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
17#define LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
18
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
#include <optional>
25
26namespace llvm {
27
28class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
29 using BaseT = BasicTTIImplBase<RISCVTTIImpl>;
30 using TTI = TargetTransformInfo;
31
32 friend BaseT;
33
34 const RISCVSubtarget *ST;
35 const RISCVTargetLowering *TLI;
36
37 const RISCVSubtarget *getST() const { return ST; }
38 const RISCVTargetLowering *getTLI() const { return TLI; }
39
40 /// This function returns an estimate for VL to be used in VL based terms
41 /// of the cost model. For fixed length vectors, this is simply the
42 /// vector length. For scalable vectors, we return results consistent
43 /// with getVScaleForTuning under the assumption that clients are also
44 /// using that when comparing costs between scalar and vector representation.
45 /// This does unfortunately mean that we can both undershoot and overshot
46 /// the true cost significantly if getVScaleForTuning is wildly off for the
47 /// actual target hardware.
48 unsigned getEstimatedVLFor(VectorType *Ty) const;
49
50 /// This function calculates the costs for one or more RVV opcodes based
51 /// on the vtype and the cost kind.
52 /// \param Opcodes A list of opcodes of the RVV instruction to evaluate.
53 /// \param VT The MVT of vtype associated with the RVV instructions.
54 /// For widening/narrowing instructions where the result and source types
55 /// differ, it is important to check the spec to determine whether the vtype
56 /// refers to the result or source type.
57 /// \param CostKind The type of cost to compute.
58 InstructionCost getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
59 TTI::TargetCostKind CostKind) const;
60
61 // Return the cost of generating a PC relative address
62 InstructionCost
63 getStaticDataAddrGenerationCost(const TTI::TargetCostKind CostKind) const;
64
65 /// Return the cost of accessing a constant pool entry of the specified
66 /// type.
67 InstructionCost getConstantPoolLoadCost(Type *Ty,
68 TTI::TargetCostKind CostKind) const;
69
70 /// If this shuffle can be lowered as a masked slide pair (at worst),
71 /// return a cost for it.
72 InstructionCost getSlideCost(FixedVectorType *Tp, ArrayRef<int> Mask,
73 TTI::TargetCostKind CostKind) const;
74
75public:
76 explicit RISCVTTIImpl(const RISCVTargetMachine *TM, const Function &F)
77 : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
78 TLI(ST->getTargetLowering()) {}
79
80 /// Return the cost of materializing an immediate for a value operand of
81 /// a store instruction.
82 InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
83 TTI::TargetCostKind CostKind) const;
84
85 InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
86 TTI::TargetCostKind CostKind) const override;
87 InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
88 const APInt &Imm, Type *Ty,
89 TTI::TargetCostKind CostKind,
90 Instruction *Inst = nullptr) const override;
91 InstructionCost
92 getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
93 Type *Ty, TTI::TargetCostKind CostKind) const override;
94
95 /// \name EVL Support for predicated vectorization.
96 /// Whether the target supports the %evl parameter of VP intrinsic efficiently
97 /// in hardware. (see LLVM Language Reference - "Vector Predication
98 /// Intrinsics",
99 /// https://llvm.org/docs/LangRef.html#vector-predication-intrinsics and
100 /// "IR-level VP intrinsics",
101 /// https://llvm.org/docs/Proposals/VectorPredication.html#ir-level-vp-intrinsics).
102 bool hasActiveVectorLength() const override;
103
104 TargetTransformInfo::PopcntSupportKind
105 getPopcntSupport(unsigned TyWidth) const override;
106
107 InstructionCost getPartialReductionCost(
108 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
109 ElementCount VF, TTI::PartialReductionExtendKind OpAExtend,
110 TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp,
111 TTI::TargetCostKind CostKind,
112 std::optional<FastMathFlags> FMF) const override;
113
114 bool shouldExpandReduction(const IntrinsicInst *II) const override;
115 bool supportsScalableVectors() const override {
116 // VLEN=32 support is incomplete.
117 return ST->hasVInstructions() &&
118 (ST->getRealMinVLen() >= RISCV::RVVBitsPerBlock);
119 }
120 bool enableOrderedReductions() const override { return true; }
121 bool enableScalableVectorization() const override {
122 return ST->hasVInstructions();
123 }
124 bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override {
125 return ST->hasVInstructions();
126 }
127 TailFoldingStyle getPreferredTailFoldingStyle() const override {
128 return ST->hasVInstructions() ? TailFoldingStyle::DataWithEVL
129 : TailFoldingStyle::None;
130 }
131 std::optional<unsigned> getMaxVScale() const override;
132 std::optional<unsigned> getVScaleForTuning() const override;
133
134 TypeSize
135 getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override;
136
137 unsigned getRegUsageForType(Type *Ty) const override;
138
139 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override;
140
141 bool preferAlternateOpcodeVectorization() const override;
142
143 bool preferEpilogueVectorization(ElementCount Iters) const override {
144 // Epilogue vectorization is usually unprofitable - tail folding or
145 // a smaller VF would have been better. This a blunt hammer - we
146 // should re-examine this once vectorization is better tuned.
147 return false;
148 }
149
150 bool shouldConsiderVectorizationRegPressure() const override { return true; }
151
152 InstructionCost
153 getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA,
154 TTI::TargetCostKind CostKind) const override;
155
156 InstructionCost getMaskedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
157 TTI::TargetCostKind CostKind) const;
158
159 InstructionCost
160 getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
161 const TTI::PointersChainInfo &Info, Type *AccessTy,
162 TTI::TargetCostKind CostKind) const override;
163
164 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
165 TTI::UnrollingPreferences &UP,
166 OptimizationRemarkEmitter *ORE) const override;
167
168 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
169 TTI::PeelingPreferences &PP) const override;
170
171 bool getTgtMemIntrinsic(IntrinsicInst *Inst,
172 MemIntrinsicInfo &Info) const override;
173
174 unsigned getMinVectorRegisterBitWidth() const override {
175 return ST->useRVVForFixedLengthVectors() ? 16 : 0;
176 }
177
178 InstructionCost
179 getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy,
180 ArrayRef<int> Mask, TTI::TargetCostKind CostKind, int Index,
181 VectorType *SubTp, ArrayRef<const Value *> Args = {},
182 const Instruction *CxtI = nullptr) const override;
183
184 InstructionCost
185 getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
186 bool Insert, bool Extract,
187 TTI::TargetCostKind CostKind,
188 bool ForPoisonSrc = true, ArrayRef<Value *> VL = {},
189 TTI::VectorInstrContext VIC =
190 TTI::VectorInstrContext::None) const override;
191
192 InstructionCost
193 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
194 TTI::TargetCostKind CostKind) const override;
195
196 InstructionCost
197 getAddressComputationCost(Type *PTy, ScalarEvolution *SE, const SCEV *Ptr,
198 TTI::TargetCostKind CostKind) const override;
199
200 InstructionCost getInterleavedMemoryOpCost(
201 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
202 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
203 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override;
204
205 InstructionCost getGatherScatterOpCost(const MemIntrinsicCostAttributes &MICA,
206 TTI::TargetCostKind CostKind) const;
207
208 InstructionCost
209 getExpandCompressMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
210 TTI::TargetCostKind CostKind) const;
211
212 InstructionCost getStridedMemoryOpCost(const MemIntrinsicCostAttributes &MICA,
213 TTI::TargetCostKind CostKind) const;
214
215 InstructionCost
216 getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const override;
217
218 InstructionCost
219 getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
220 TTI::CastContextHint CCH, TTI::TargetCostKind CostKind,
221 const Instruction *I = nullptr) const override;
222
223 InstructionCost
224 getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
225 TTI::TargetCostKind CostKind) const override;
226
227 InstructionCost
228 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
229 std::optional<FastMathFlags> FMF,
230 TTI::TargetCostKind CostKind) const override;
231
232 InstructionCost
233 getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
234 VectorType *ValTy, std::optional<FastMathFlags> FMF,
235 TTI::TargetCostKind CostKind) const override;
236
237 InstructionCost getMemoryOpCost(
238 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
239 TTI::TargetCostKind CostKind,
240 TTI::OperandValueInfo OpdInfo = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
241 const Instruction *I = nullptr) const override;
242
243 InstructionCost getCmpSelInstrCost(
244 unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
245 TTI::TargetCostKind CostKind,
246 TTI::OperandValueInfo Op1Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
247 TTI::OperandValueInfo Op2Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
248 const Instruction *I = nullptr) const override;
249
250 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
251 const Instruction *I = nullptr) const override;
252
253 using BaseT::getVectorInstrCost;
254 InstructionCost
255 getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind,
256 unsigned Index, const Value *Op0, const Value *Op1,
257 TTI::VectorInstrContext VIC =
258 TTI::VectorInstrContext::None) const override;
259
260 InstructionCost
261 getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val,
262 TTI::TargetCostKind CostKind,
263 unsigned Index) const override;
264
265 InstructionCost getArithmeticInstrCost(
266 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
267 TTI::OperandValueInfo Op1Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
268 TTI::OperandValueInfo Op2Info = {.Kind: TTI::OK_AnyValue, .Properties: TTI::OP_None},
269 ArrayRef<const Value *> Args = {},
270 const Instruction *CxtI = nullptr) const override;
271
272 bool isElementTypeLegalForScalableVector(Type *Ty) const override {
273 return TLI->isLegalElementTypeForRVV(ScalarTy: TLI->getValueType(DL, Ty));
274 }
275
276 bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const {
277 if (!ST->hasVInstructions())
278 return false;
279
280 EVT DataTypeVT = TLI->getValueType(DL, Ty: DataType);
281
282 // Only support fixed vectors if we know the minimum vector size.
283 if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
284 return false;
285
286 EVT ElemType = DataTypeVT.getScalarType();
287 if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
288 return false;
289
290 return TLI->isLegalElementTypeForRVV(ScalarTy: ElemType);
291 }
292
293 bool isLegalMaskedLoad(Type *DataType, Align Alignment,
294 unsigned /*AddressSpace*/,
295 TTI::MaskKind /*MaskKind*/) const override {
296 return isLegalMaskedLoadStore(DataType, Alignment);
297 }
298 bool isLegalMaskedStore(Type *DataType, Align Alignment,
299 unsigned /*AddressSpace*/,
300 TTI::MaskKind /*MaskKind*/) const override {
301 return isLegalMaskedLoadStore(DataType, Alignment);
302 }
303
304 bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const {
305 if (!ST->hasVInstructions())
306 return false;
307
308 EVT DataTypeVT = TLI->getValueType(DL, Ty: DataType);
309
310 // Only support fixed vectors if we know the minimum vector size.
311 if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
312 return false;
313
314 // We also need to check if the vector of address is valid.
315 EVT PointerTypeVT = EVT(TLI->getPointerTy(DL));
316 if (DataTypeVT.isScalableVector() &&
317 !TLI->isLegalElementTypeForRVV(ScalarTy: PointerTypeVT))
318 return false;
319
320 EVT ElemType = DataTypeVT.getScalarType();
321 if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
322 return false;
323
324 return TLI->isLegalElementTypeForRVV(ScalarTy: ElemType);
325 }
326
327 bool isLegalMaskedGather(Type *DataType, Align Alignment) const override {
328 return isLegalMaskedGatherScatter(DataType, Alignment);
329 }
330 bool isLegalMaskedScatter(Type *DataType, Align Alignment) const override {
331 return isLegalMaskedGatherScatter(DataType, Alignment);
332 }
333
334 bool forceScalarizeMaskedGather(VectorType *VTy,
335 Align Alignment) const override {
336 // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
337 return ST->is64Bit() && !ST->hasVInstructionsI64();
338 }
339
340 bool forceScalarizeMaskedScatter(VectorType *VTy,
341 Align Alignment) const override {
342 // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
343 return ST->is64Bit() && !ST->hasVInstructionsI64();
344 }
345
346 bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const override {
347 EVT DataTypeVT = TLI->getValueType(DL, Ty: DataType);
348 return TLI->isLegalStridedLoadStore(DataType: DataTypeVT, Alignment);
349 }
350
351 bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
352 Align Alignment,
353 unsigned AddrSpace) const override {
354 return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
355 DL);
356 }
357
358 bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const override;
359
360 bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment) const override;
361
362 /// \returns How the target needs this vector-predicated operation to be
363 /// transformed.
364 TargetTransformInfo::VPLegalization
365 getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
366 using VPLegalization = TargetTransformInfo::VPLegalization;
367 static const Intrinsic::ID Supported[] = {
368 Intrinsic::experimental_vp_strided_load,
369 Intrinsic::experimental_vp_strided_store,
370 Intrinsic::experimental_vp_reverse,
371 Intrinsic::experimental_vp_splice,
372 Intrinsic::vp_abs,
373 Intrinsic::vp_add,
374 Intrinsic::vp_and,
375 Intrinsic::vp_ashr,
376 Intrinsic::vp_bitreverse,
377 Intrinsic::vp_bswap,
378 Intrinsic::vp_copysign,
379 Intrinsic::vp_cttz_elts,
380 Intrinsic::vp_fabs,
381 Intrinsic::vp_fadd,
382 Intrinsic::vp_fcmp,
383 Intrinsic::vp_fdiv,
384 Intrinsic::vp_fma,
385 Intrinsic::vp_fmul,
386 Intrinsic::vp_fmuladd,
387 Intrinsic::vp_fneg,
388 Intrinsic::vp_fpext,
389 Intrinsic::vp_fptosi,
390 Intrinsic::vp_fptoui,
391 Intrinsic::vp_fptrunc,
392 Intrinsic::vp_frem,
393 Intrinsic::vp_fshl,
394 Intrinsic::vp_fshr,
395 Intrinsic::vp_fsub,
396 Intrinsic::vp_gather,
397 Intrinsic::vp_icmp,
398 Intrinsic::vp_inttoptr,
399 Intrinsic::vp_is_fpclass,
400 Intrinsic::vp_load,
401 Intrinsic::vp_load_ff,
402 Intrinsic::vp_lshr,
403 Intrinsic::vp_merge,
404 Intrinsic::vp_mul,
405 Intrinsic::vp_or,
406 Intrinsic::vp_ptrtoint,
407 Intrinsic::vp_reduce_add,
408 Intrinsic::vp_reduce_and,
409 Intrinsic::vp_reduce_fadd,
410 Intrinsic::vp_reduce_fmax,
411 Intrinsic::vp_reduce_fmaximum,
412 Intrinsic::vp_reduce_fmin,
413 Intrinsic::vp_reduce_fminimum,
414 Intrinsic::vp_reduce_fmul,
415 Intrinsic::vp_reduce_mul,
416 Intrinsic::vp_reduce_or,
417 Intrinsic::vp_reduce_smax,
418 Intrinsic::vp_reduce_smin,
419 Intrinsic::vp_reduce_umax,
420 Intrinsic::vp_reduce_umin,
421 Intrinsic::vp_reduce_xor,
422 Intrinsic::vp_sadd_sat,
423 Intrinsic::vp_scatter,
424 Intrinsic::vp_sdiv,
425 Intrinsic::vp_select,
426 Intrinsic::vp_sext,
427 Intrinsic::vp_shl,
428 Intrinsic::vp_sitofp,
429 Intrinsic::vp_smax,
430 Intrinsic::vp_smin,
431 Intrinsic::vp_sqrt,
432 Intrinsic::vp_srem,
433 Intrinsic::vp_ssub_sat,
434 Intrinsic::vp_store,
435 Intrinsic::vp_sub,
436 Intrinsic::vp_trunc,
437 Intrinsic::vp_uadd_sat,
438 Intrinsic::vp_udiv,
439 Intrinsic::vp_uitofp,
440 Intrinsic::vp_umax,
441 Intrinsic::vp_umin,
442 Intrinsic::vp_urem,
443 Intrinsic::vp_usub_sat,
444 Intrinsic::vp_xor,
445 Intrinsic::vp_zext};
446 if (!ST->hasVInstructions() ||
447 (PI.getIntrinsicID() == Intrinsic::vp_reduce_mul &&
448 cast<VectorType>(Val: PI.getArgOperand(i: 1)->getType())
449 ->getElementType()
450 ->getIntegerBitWidth() != 1) ||
451 !is_contained(Range: Supported, Element: PI.getIntrinsicID()))
452 return VPLegalization(VPLegalization::Discard, VPLegalization::Convert);
453 return VPLegalization(VPLegalization::Legal, VPLegalization::Legal);
454 }
455
456 bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
457 ElementCount VF) const override {
458 if (!VF.isScalable())
459 return true;
460
461 Type *Ty = RdxDesc.getRecurrenceType();
462 if (!TLI->isLegalElementTypeForRVV(ScalarTy: TLI->getValueType(DL, Ty)))
463 return false;
464
465 switch (RdxDesc.getRecurrenceKind()) {
466 case RecurKind::Add:
467 case RecurKind::Sub:
468 case RecurKind::AddChainWithSubs:
469 case RecurKind::And:
470 case RecurKind::Or:
471 case RecurKind::Xor:
472 case RecurKind::SMin:
473 case RecurKind::SMax:
474 case RecurKind::UMin:
475 case RecurKind::UMax:
476 case RecurKind::FMin:
477 case RecurKind::FMax:
478 return true;
479 case RecurKind::AnyOf:
480 case RecurKind::FAdd:
481 case RecurKind::FMulAdd:
482 // We can't promote f16/bf16 fadd reductions and scalable vectors can't be
483 // expanded.
484 if (Ty->isBFloatTy() || (Ty->isHalfTy() && !ST->hasVInstructionsF16()))
485 return false;
486 return true;
487 default:
488 return false;
489 }
490 }
491
492 unsigned getMaxInterleaveFactor(ElementCount VF) const override {
493 // Don't interleave if the loop has been vectorized with scalable vectors.
494 if (VF.isScalable())
495 return 1;
496 // If the loop will not be vectorized, don't interleave the loop.
497 // Let regular unroll to unroll the loop.
498 return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
499 }
500
501 bool enableInterleavedAccessVectorization() const override { return true; }
502
503 bool enableMaskedInterleavedAccessVectorization() const override {
504 return ST->hasVInstructions();
505 }
506
507 unsigned getMinTripCountTailFoldingThreshold() const override;
508
509 enum RISCVRegisterClass { GPRRC, FPRRC, VRRC };
510 unsigned getNumberOfRegisters(unsigned ClassID) const override {
511 switch (ClassID) {
512 case RISCVRegisterClass::GPRRC:
513 // 31 = 32 GPR - x0 (zero register)
514 // FIXME: Should we exclude fixed registers like SP, TP or GP?
515 return 31;
516 case RISCVRegisterClass::FPRRC:
517 if (ST->hasStdExtF())
518 return 32;
519 return 0;
520 case RISCVRegisterClass::VRRC:
521 // Although there are 32 vector registers, v0 is special in that it is the
522 // only register that can be used to hold a mask.
523 // FIXME: Should we conservatively return 31 as the number of usable
524 // vector registers?
525 return ST->hasVInstructions() ? 32 : 0;
526 }
527 llvm_unreachable("unknown register class");
528 }
529
530 TTI::AddressingModeKind
531 getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const override;
532
533 unsigned getRegisterClassForType(bool Vector,
534 Type *Ty = nullptr) const override {
535 if (Vector)
536 return RISCVRegisterClass::VRRC;
537 if (!Ty)
538 return RISCVRegisterClass::GPRRC;
539
540 Type *ScalarTy = Ty->getScalarType();
541 if ((ScalarTy->isHalfTy() && ST->hasStdExtZfhmin()) ||
542 (ScalarTy->isFloatTy() && ST->hasStdExtF()) ||
543 (ScalarTy->isDoubleTy() && ST->hasStdExtD())) {
544 return RISCVRegisterClass::FPRRC;
545 }
546
547 return RISCVRegisterClass::GPRRC;
548 }
549
550 const char *getRegisterClassName(unsigned ClassID) const override {
551 switch (ClassID) {
552 case RISCVRegisterClass::GPRRC:
553 return "RISCV::GPRRC";
554 case RISCVRegisterClass::FPRRC:
555 return "RISCV::FPRRC";
556 case RISCVRegisterClass::VRRC:
557 return "RISCV::VRRC";
558 }
559 llvm_unreachable("unknown register class");
560 }
561
562 bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
563 const TargetTransformInfo::LSRCost &C2) const override;
564
565 bool shouldConsiderAddressTypePromotion(
566 const Instruction &I,
567 bool &AllowPromotionWithoutCommonHeader) const override;
568 std::optional<unsigned> getMinPageSize() const override { return 4096; }
569 /// Return true if the (vector) instruction I will be lowered to an
570 /// instruction with a scalar splat operand for the given Operand number.
571 bool canSplatOperand(Instruction *I, int Operand) const;
572 /// Return true if a vector instruction will lower to a target instruction
573 /// able to splat the given operand.
574 bool canSplatOperand(unsigned Opcode, int Operand) const;
575
576 bool isProfitableToSinkOperands(Instruction *I,
577 SmallVectorImpl<Use *> &Ops) const override;
578
579 TTI::MemCmpExpansionOptions
580 enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const override;
581
582 bool enableSelectOptimize() const override {
583 return ST->enableSelectOptimize();
584 }
585
586 bool shouldTreatInstructionLikeSelect(const Instruction *I) const override;
587
588 bool
589 shouldCopyAttributeWhenOutliningFrom(const Function *Caller,
590 const Attribute &Attr) const override;
591
592 std::optional<Instruction *>
593 instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override;
594};
595
596} // end namespace llvm
597
598#endif // LLVM_LIB_TARGET_RISCV_RISCVTARGETTRANSFORMINFO_H
599