//===-- RISCVISelLowering.h - RISC-V DAG Lowering Interface -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISC-V uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "RISCVCallingConv.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <optional>

namespace llvm {
class InstructionCost;
class RISCVSubtarget;
struct RISCVRegisterInfo;

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isTruncateFree(SDValue Val, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool signExtendConstant(const ConstantInt *CI) const override;
  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
  bool hasAndNotCompare(SDValue Y) const override;
  bool hasAndNot(SDValue Y) const override;
  bool hasBitTest(SDValue X, SDValue Y) const override;
  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;
  bool shouldScalarizeBinop(SDValue VecOp) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
  int getLegalZfaFPImm(const APFloat &Imm, EVT VT) const;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool preferScalarizeSplat(SDNode *N) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of gpr sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, for inline assembly
  unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT = std::nullopt) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of gpr sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT,
                                            unsigned SelectOpcode, SDValue X,
                                            SDValue Y) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
    // If the pair to store is a mixture of float and int values, we will
    // save two bitwise instructions and one float-to-int instruction and
    // add one store instruction. There is potentially a more significant
    // benefit because it avoids the float->int domain switch for the input
    // value. So it is more likely a win.
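    // For example, on RV64 merging an {f32, i32} pair into a single i64
    // store would need an fmv.x.w plus a shift and an or just to build the
    // value; two separate stores avoid all of that.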
    if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
        (LTy.isInteger() && HTy.isFloatingPoint()))
      return true;
    // If the pair only contains int values, we will save two bitwise
    // instructions and add one store instruction (costing one more store
    // buffer entry). Since the benefit is less clear, we leave such pairs
    // out until we have a test case proving it is a win.
    return false;
  }

  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  bool shouldExpandCttzElements(EVT VT) const override;

  /// Return the cost of LMUL for linear operations.
  InstructionCost getLMULCost(MVT VT) const;

  InstructionCost getVRGatherVVCost(MVT VT) const;
  InstructionCost getVRGatherVICost(MVT VT) const;
  InstructionCost getVSlideVXCost(MVT VT) const;
  InstructionCost getVSlideVICost(MVT VT) const;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits,
                                         const APInt &DemandedElts,
                                         KnownBits &Known,
                                         TargetLoweringOpt &TLO,
                                         unsigned Depth) const override;

  bool canCreateUndefOrPoisonForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           bool PoisonOnly, bool ConsiderFlags,
                                           unsigned Depth) const override;

  const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;

  MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const override;

  bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  InlineAsm::ConstraintCode
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
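    // i8 and i16 are not legal types; overflow ops on them would be promoted
    // to i32/i64 and would likely just add extension and masking work, so
    // only form them for legal widths.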
    if (VT == MVT::i8 || VT == MVT::i16)
      return false;

    return TargetLowering::shouldFormOverflowOp(Opcode, VT, MathUsed);
  }

  bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem,
                                    unsigned AddrSpace) const override {
    // If we can replace 4 or more scalar stores, there will be a reduction
    // in instructions even after we add a vector constant load.
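    // For example, four scalar sw instructions versus materializing the
    // constant once (e.g. with vmv.v.i) and issuing a single vector store.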
    return NumElem >= 4;
  }

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool isCtpopFast(EVT VT) const override;

  unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const override;

  bool preferZeroCompareBranch() const override { return true; }

  // Note that one specific case requires fence insertion for an
  // AtomicCmpXchgInst but is handled via the RISCVZacasABIFix pass rather
  // than this hook due to limitations in the interface here.
  bool shouldInsertFencesForAtomic(const Instruction *I) const override;

  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override;

  TargetLowering::ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return ShiftLegalizationStrategy::LowerToLibcall;
    return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
                                                              ExpansionFactor);
  }

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context, const Type *RetTy) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;

  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
      const override;

  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;

  // Return the value of VLMax for the given vector type (i.e. SEW and LMUL)
  SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const;

  static RISCVVType::VLMUL getLMUL(MVT VT);
  inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
                                      unsigned MinSize) {
    // Original equation:
    //   VLMAX = (VectorBits / EltSize) * LMUL
    //   where LMUL = MinSize / RISCV::RVVBitsPerBlock
    // The following equations have been reordered to prevent loss of precision
    // when calculating fractional LMUL.
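    //
    // For example, with VectorBits = 128, EltSize = 32, and MinSize = 32
    // (i.e. LMUL = 32/64 = 1/2):
    //   VLMAX = ((128 / 32) * 32) / 64 = 2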
    return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
  }

  // Return inclusive (low, high) bounds on the value of VLMAX for the
  // given scalable container type given known bounds on VLEN.
  static std::pair<unsigned, unsigned>
  computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget);

  /// Given a vector (either fixed or scalable), return the scalable vector
  /// corresponding to a vector register (i.e. an m1 register group).
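  /// For example, with RISCV::RVVBitsPerBlock = 64, both v8i32 and nxv8i32
  /// map to nxv2i32, the scalable type holding 64 / 32 = 2 elements per
  /// vector register.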
  static MVT getM1VT(MVT VT) {
    unsigned EltSizeInBits = VT.getVectorElementType().getSizeInBits();
    assert(EltSizeInBits <= RISCV::RVVBitsPerBlock && "Unexpected vector MVT");
    return MVT::getScalableVectorVT(VT.getVectorElementType(),
                                    RISCV::RVVBitsPerBlock / EltSizeInBits);
  }

  static unsigned getRegClassIDForLMUL(RISCVVType::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;

  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;

  bool isLegalElementTypeForRVV(EVT ScalarTy) const;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  unsigned getJumpTableEncoding() const override;

  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                          const MachineBasicBlock *MBB,
                                          unsigned uid,
                                          MCContext &Ctx) const override;

  bool isVScaleKnownToBeAPowerOfTwo() const override;

  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  bool isLegalScaleForGatherScatter(uint64_t Scale,
                                    uint64_t ElemSize) const override {
    // Scaled addressing not supported on indexed load/stores
    return Scale == 1;
  }

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  /// Returns whether or not generating an interleaved load/store intrinsic
  /// for this type will be legal.
  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                    Align Alignment, unsigned AddrSpace,
                                    const DataLayout &) const;

  /// Return true if a strided load/store of the given result type and
  /// alignment is legal.
  bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const;

  unsigned getMaxSupportedInterleaveFactor() const override { return 8; }

  bool fallBackToDAGISel(const Instruction &Inst) const override;

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;

  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool lowerDeinterleaveIntrinsicToLoad(
      LoadInst *LI, ArrayRef<Value *> DeinterleaveValues) const override;

  bool lowerInterleaveIntrinsicToStore(
      StoreInst *SI, ArrayRef<Value *> InterleaveValues) const override;

  bool lowerInterleavedVPLoad(VPIntrinsic *Load, Value *Mask,
                              ArrayRef<Value *> DeinterleaveRes) const override;

  bool lowerInterleavedVPStore(VPIntrinsic *Store, Value *Mask,
                               ArrayRef<Value *> InterleaveOps) const override;

  bool supportKCFIBundles() const override { return true; }

  SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr,
                                 int JTI, SelectionDAG &DAG) const override;

  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;

  /// True if stack clash protection is enabled for this function.
  bool hasInlineStackProbe(const MachineFunction &MF) const override;

  unsigned getStackProbeSize(const MachineFunction &MF, Align StackAlign) const;

  MachineBasicBlock *emitDynamicProbedAlloc(MachineInstr &MI,
                                            MachineBasicBlock *MBB) const;

  ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;

private:
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
  SDValue getTLSDescAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerConstantFP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
                                      bool IsVP) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorCompress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPMergeMask(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSplatExperimental(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSpliceExperimental(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPReverseExperimental(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPCttzElements(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_FPENV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_FPENV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRESET_FPENV(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerStrictFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerVectorStrictFSetcc(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPARTIAL_REDUCE_MLA(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  MVT getVPExplicitVectorLengthTy() const override;

  bool shouldExpandGetVectorLength(EVT TripCountVT, unsigned VF,
                                   bool IsScalable) const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  /// Disable normalizing
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)).
  /// RISC-V doesn't have flags so it's better to perform the and/or in a GPR.
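  /// For example, (a & b) ? x : y then stays a single and feeding one
  /// conditional branch or select, rather than a chain of two selects.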
  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
    return false;
  }

  /// Disables storing and loading vectors by default when there are function
  /// calls between the load and store, since these are more expensive than
  /// just using scalars.
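  /// (In the standard calling convention all RVV vector registers are
  /// caller-saved, so a merged vector value that lives across a call would
  /// have to be spilled and reloaded.)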
  bool shouldMergeStoreOfLoadsOverCall(EVT SrcVT, EVT MergedVT) const override {
    return !MergedVT.isVector() || SrcVT.isVector();
  }

  /// For available scheduling models FDIV + two independent FMULs are much
  /// faster than two FDIVs.
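  /// That is, a/d and b/d can be rewritten as t = 1.0/d; a*t; b*t, trading
  /// the second division for a reciprocal and two multiplies.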
  unsigned combineRepeatedFPDivisors() const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  bool shouldFoldSelectWithSingleBitTest(EVT VT,
                                         const APInt &AndMask) const override;

  unsigned getMinimumJumpTableEntries() const override;

  SDValue emitFlushICache(SelectionDAG &DAG, SDValue InChain, SDValue Start,
                          SDValue End, SDValue Flags, SDLoc DL) const;

  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override;
};

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t ScalarOperand;
  uint8_t VLOperand;
  bool hasScalarOperand() const {
    // 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td.
    return ScalarOperand != 0xF;
  }
  bool hasVLOperand() const {
    // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
    return VLOperand != 0x1F;
  }
};

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"
#undef GET_RISCVVIntrinsicsTable_DECL

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif