//===---- RISCVISelDAGToDAG.h - A dag to dag inst selector for RISC-V -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISC-V target.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELDAGTODAG_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELDAGTODAG_H

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/KnownBits.h"

// RISC-V specific code to select RISC-V machine instructions for
// SelectionDAG operations.
namespace llvm {
class RISCVDAGToDAGISel : public SelectionDAGISel {
  const RISCVSubtarget *Subtarget = nullptr;

public:
  RISCVDAGToDAGISel() = delete;

  explicit RISCVDAGToDAGISel(RISCVTargetMachine &TargetMachine,
                             CodeGenOptLevel OptLevel)
      : SelectionDAGISel(TargetMachine, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<RISCVSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void PreprocessISelDAG() override;
  void PostprocessISelDAG() override;

  void Select(SDNode *Node) override;

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    InlineAsm::ConstraintCode ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

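  // Address-operand selection helpers, referenced as ComplexPatterns from the
  // TableGen patterns. Each splits an address into the operands expected by
  // the matched instruction (base register, immediate offset, index, scale).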
  bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset,
                        bool IsINX = false);
  bool SelectAddrRegImmINX(SDValue Addr, SDValue &Base, SDValue &Offset) {
    return SelectAddrRegImm(Addr, Base, Offset, /*IsINX=*/true);
  }
  bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset);

  bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount,
                             SDValue &Base, SDValue &Index, SDValue &Scale);

  template <unsigned MaxShift>
  bool SelectAddrRegRegScale(SDValue Addr, SDValue &Base, SDValue &Index,
                             SDValue &Scale) {
    return SelectAddrRegRegScale(Addr, MaxShift, Base, Index, Scale);
  }

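  // Variant of SelectAddrRegRegScale for uses that zero-extend the index: an
  // explicit AND masking the index down to its low Bits bits is redundant in
  // that case, so it is stripped before returning the index.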
  template <unsigned MaxShift, unsigned Bits>
  bool SelectAddrRegZextRegScale(SDValue Addr, SDValue &Base, SDValue &Index,
                                 SDValue &Scale) {
    if (SelectAddrRegRegScale(Addr, MaxShift, Base, Index, Scale)) {
      if (Index.getOpcode() == ISD::AND) {
        auto *C = dyn_cast<ConstantSDNode>(Index.getOperand(1));
        if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
          Index = Index.getOperand(0);
          return true;
        }
      }
    }
    return false;
  }

  bool SelectAddrRegReg(SDValue Addr, SDValue &Base, SDValue &Offset);

  bool tryShrinkShlLogicImm(SDNode *Node);
  bool trySignedBitfieldExtract(SDNode *Node);
  bool tryIndexedLoad(SDNode *Node);

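  // Match a shift-amount operand for a shift of ShiftWidth bits, stripping
  // masking (e.g. an AND with ShiftWidth - 1) that is redundant because the
  // shift instruction only reads the low bits of the amount.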
  bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
  bool selectShiftMaskXLen(SDValue N, SDValue &ShAmt) {
    return selectShiftMask(N, Subtarget->getXLen(), ShAmt);
  }
  bool selectShiftMask32(SDValue N, SDValue &ShAmt) {
    return selectShiftMask(N, /*ShiftWidth=*/32, ShAmt);
  }

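  // Match a setcc (or an equivalent pattern) whose condition code is the
  // integer-equality code ExpectedCCVal; Val receives the value to use in its
  // place. See the selectSETNE/selectSETEQ wrappers below.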
  bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val);
  bool selectSETNE(SDValue N, SDValue &Val) {
    return selectSETCC(N, ISD::SETNE, Val);
  }
  bool selectSETEQ(SDValue N, SDValue &Val) {
    return selectSETCC(N, ISD::SETEQ, Val);
  }

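  // Match a value that is sign- or zero-extended from its low Bits bits and
  // return the value without the extension in Val.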
  bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val);
  template <unsigned Bits> bool selectSExtBits(SDValue N, SDValue &Val) {
    return selectSExtBits(N, Bits, Val);
  }
  bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val);
  template <unsigned Bits> bool selectZExtBits(SDValue N, SDValue &Val) {
    return selectZExtBits(N, Bits, Val);
  }

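  // Match operands that can be folded into the shifted operand of a Zba
  // SHXADD / SHXADD.UW instruction with shift amount ShAmt.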
  bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val);
  template <unsigned ShAmt> bool selectSHXADDOp(SDValue N, SDValue &Val) {
    return selectSHXADDOp(N, ShAmt, Val);
  }

  bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val);
  template <unsigned ShAmt> bool selectSHXADD_UWOp(SDValue N, SDValue &Val) {
    return selectSHXADD_UWOp(N, ShAmt, Val);
  }

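  // Return true if every user of Node only reads the low Bits bits of its
  // result (the B/H/W helpers check for 8, 16 and 32 bits respectively).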
  bool hasAllNBitUsers(SDNode *Node, unsigned Bits,
                       const unsigned Depth = 0) const;
  bool hasAllBUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 8); }
  bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); }
  bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }

  bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2);

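  // Select the vector-length (VL) operand of an RVV pseudo; the all-ones
  // sentinel that denotes VLMAX is mapped to its register form (X0).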
  bool selectVLOp(SDValue N, SDValue &VL);

  bool selectVSplat(SDValue N, SDValue &SplatVal);
  bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
  bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal);
  template <unsigned Bits> bool selectVSplatUimmBits(SDValue N, SDValue &Val) {
    return selectVSplatUimm(N, Bits, Val);
  }
  bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
  bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
  // Matches the splat of a value which can be extended or truncated, such that
  // only the bottom 8 bits are preserved.
  bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal);
  bool selectFPImm(SDValue N, SDValue &Imm);

  bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
  template <unsigned Width> bool selectRVVSimm5(SDValue N, SDValue &Imm) {
    return selectRVVSimm5(N, Width, Imm);
  }

  void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm,
                                  const SDLoc &DL, unsigned CurOp,
                                  bool IsMasked, bool IsStridedOrIndexed,
                                  SmallVectorImpl<SDValue> &Operands,
                                  bool IsLoad = false, MVT *IndexVT = nullptr);

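  // Selection routines for RVV segment load/store intrinsics (vlseg / vsseg
  // and their fault-only-first, strided and indexed variants).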
  void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided);
  void selectVLSEGFF(SDNode *Node, bool IsMasked);
  void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);
  void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided);
  void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered);

  void selectVSETVLI(SDNode *Node);

  void selectSF_VC_X_SE(SDNode *Node);

  // Return the RISC-V condition code that matches the given DAG integer
  // condition code. The CondCode must be one of those supported by the RISC-V
  // ISA (see translateSetCCForBranch).
  static RISCVCC::CondCode getRISCVCCForIntCC(ISD::CondCode CC) {
    switch (CC) {
    default:
      llvm_unreachable("Unsupported CondCode");
    case ISD::SETEQ:
      return RISCVCC::COND_EQ;
    case ISD::SETNE:
      return RISCVCC::COND_NE;
    case ISD::SETLT:
      return RISCVCC::COND_LT;
    case ISD::SETGE:
      return RISCVCC::COND_GE;
    case ISD::SETULT:
      return RISCVCC::COND_LTU;
    case ISD::SETUGE:
      return RISCVCC::COND_GEU;
    }
  }

// Include the pieces autogenerated from the target description.
#include "RISCVGenDAGISel.inc"

private:
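  // Post-selection peephole optimizations run from PostprocessISelDAG(); each
  // returns true if it changed the DAG.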
  bool doPeepholeSExtW(SDNode *Node);
  bool doPeepholeMaskedRVV(MachineSDNode *Node);
  bool doPeepholeMergeVVMFold();
  bool doPeepholeNoRegPassThru();
  bool performCombineVMergeAndVOps(SDNode *N);
};

class RISCVDAGToDAGISelLegacy : public SelectionDAGISelLegacy {
public:
  static char ID;
  explicit RISCVDAGToDAGISelLegacy(RISCVTargetMachine &TargetMachine,
                                   CodeGenOptLevel OptLevel);
};

namespace RISCV {
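// Entry types for the TableGen-generated searchable tables that map a
// configuration (NF, masked, strided/ordered, SEW, LMUL, ...) to the matching
// RVV pseudo-instruction opcode.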
struct VLSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t FF : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VLXSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Ordered : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t IndexLMUL : 3;
  uint16_t Pseudo;
};

struct VSSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VSXSEGPseudo {
  uint16_t NF : 4;
  uint16_t Masked : 1;
  uint16_t Ordered : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t IndexLMUL : 3;
  uint16_t Pseudo;
};

struct VLEPseudo {
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t FF : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VSEPseudo {
  uint16_t Masked : 1;
  uint16_t Strided : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t Pseudo;
};

struct VLX_VSXPseudo {
  uint16_t Masked : 1;
  uint16_t Ordered : 1;
  uint16_t Log2SEW : 3;
  uint16_t LMUL : 3;
  uint16_t IndexLMUL : 3;
  uint16_t Pseudo;
};

#define GET_RISCVVSSEGTable_DECL
#define GET_RISCVVLSEGTable_DECL
#define GET_RISCVVLXSEGTable_DECL
#define GET_RISCVVSXSEGTable_DECL
#define GET_RISCVVLETable_DECL
#define GET_RISCVVSETable_DECL
#define GET_RISCVVLXTable_DECL
#define GET_RISCVVSXTable_DECL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV

} // namespace llvm

#endif