1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
14#include "MCTargetDesc/RISCVMatInt.h"
15#include "RISCVRegisterBankInfo.h"
16#include "RISCVSubtarget.h"
17#include "RISCVTargetMachine.h"
18#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
19#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
20#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
21#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
22#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
23#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
24#include "llvm/CodeGen/MachineJumpTableInfo.h"
25#include "llvm/IR/IntrinsicsRISCV.h"
26#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
47 void setupMF(MachineFunction &MF, GISelValueTracking *VT,
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(mf&: MF, vt: VT, covinfo: CoverageInfo, psi: PSI, bfi: BFI);
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
57 const TargetRegisterClass *
58 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
59
60 static constexpr unsigned MaxRecursionDepth = 6;
61
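  // Returns true if every user of \p MI only reads the lowest \p Bits bits of
  // its result, looking through a limited set of instructions (W-form
  // arithmetic, shifts, and logic ops) up to MaxRecursionDepth.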
62 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
63 const unsigned Depth = 0) const;
64 bool hasAllHUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, Bits: 16);
66 }
67 bool hasAllWUsers(const MachineInstr &MI) const {
68 return hasAllNBitUsers(MI, Bits: 32);
69 }
70
71 bool isRegInGprb(Register Reg) const;
72 bool isRegInFprb(Register Reg) const;
73
74 // tblgen-erated 'select' implementation, used as the initial selector for
75 // the patterns that don't require complex C++.
76 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
77
  // A lowering phase that runs before any selection attempt and may rewrite
  // the instruction in place.
80 void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
81
82 bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
83
84 // Custom selection methods
85 bool selectCopy(MachineInstr &MI) const;
86 bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
87 bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
88 bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
89 bool IsExternWeak = false) const;
90 bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
91 bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
92 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
93 MachineIRBuilder &MIB) const;
94 bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
95 void addVectorLoadStoreOperands(MachineInstr &I,
96 SmallVectorImpl<SrcOp> &SrcOps,
97 unsigned &CurOp, bool IsMasked,
98 bool IsStridedOrIndexed,
99 LLT *IndexVT = nullptr) const;
100 bool selectIntrinsicWithSideEffects(MachineInstr &I,
101 MachineIRBuilder &MIB) const;
102 bool selectIntrinsic(MachineInstr &I, MachineIRBuilder &MIB) const;
103 bool selectExtractSubvector(MachineInstr &MI, MachineIRBuilder &MIB) const;
104
105 ComplexRendererFns selectShiftMask(MachineOperand &Root,
106 unsigned ShiftWidth) const;
107 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
108 return selectShiftMask(Root, ShiftWidth: STI.getXLen());
109 }
110 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
111 return selectShiftMask(Root, ShiftWidth: 32);
112 }
113 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
114
115 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
116 template <unsigned Bits>
117 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
118 return selectSExtBits(Root, Bits);
119 }
120
121 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
122 template <unsigned Bits>
123 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
124 return selectZExtBits(Root, Bits);
125 }
126
127 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
128 template <unsigned ShAmt>
129 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
130 return selectSHXADDOp(Root, ShAmt);
131 }
132
133 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
134 unsigned ShAmt) const;
135 template <unsigned ShAmt>
136 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
137 return selectSHXADD_UWOp(Root, ShAmt);
138 }
139
140 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
141
142 // Custom renderers for tablegen
143 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
144 int OpIdx) const;
145 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
146 int OpIdx) const;
147 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
148 int OpIdx) const;
149 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
150 int OpIdx) const;
151 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
152 int OpIdx) const;
153
154 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
155 int OpIdx) const;
156 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
157 const MachineInstr &MI, int OpIdx) const;
158
159 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
160 int OpIdx) const;
161 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
162 int OpIdx) const;
163
164 const RISCVSubtarget &STI;
165 const RISCVInstrInfo &TII;
166 const RISCVRegisterInfo &TRI;
167 const RISCVRegisterBankInfo &RBI;
168 const RISCVTargetMachine &TM;
169
170 MachineRegisterInfo *MRI = nullptr;
171
172 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
173 // uses "STI." in the code generated by TableGen. We need to unify the name of
  // the Subtarget variable.
175 const RISCVSubtarget *Subtarget = &STI;
176
177#define GET_GLOBALISEL_PREDICATES_DECL
178#include "RISCVGenGlobalISel.inc"
179#undef GET_GLOBALISEL_PREDICATES_DECL
180
181#define GET_GLOBALISEL_TEMPORARIES_DECL
182#include "RISCVGenGlobalISel.inc"
183#undef GET_GLOBALISEL_TEMPORARIES_DECL
184};
185
186} // end anonymous namespace
187
188#define GET_GLOBALISEL_IMPL
189#include "RISCVGenGlobalISel.inc"
190#undef GET_GLOBALISEL_IMPL
191
192RISCVInstructionSelector::RISCVInstructionSelector(
193 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
194 const RISCVRegisterBankInfo &RBI)
195 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
196 TM(TM),
197
198#define GET_GLOBALISEL_PREDICATES_INIT
199#include "RISCVGenGlobalISel.inc"
200#undef GET_GLOBALISEL_PREDICATES_INIT
201#define GET_GLOBALISEL_TEMPORARIES_INIT
202#include "RISCVGenGlobalISel.inc"
203#undef GET_GLOBALISEL_TEMPORARIES_INIT
204{
205}
206
// Mimics optimizations in SelectionDAG isel and the RISCVOptWInstrs pass.
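// For example, a G_ADD whose users only read the low 32 bits of its result
// can safely be selected to ADDW without changing any observed value.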
208bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
209 unsigned Bits,
210 const unsigned Depth) const {
211
212 assert((MI.getOpcode() == TargetOpcode::G_ADD ||
213 MI.getOpcode() == TargetOpcode::G_SUB ||
214 MI.getOpcode() == TargetOpcode::G_MUL ||
215 MI.getOpcode() == TargetOpcode::G_SHL ||
216 MI.getOpcode() == TargetOpcode::G_LSHR ||
217 MI.getOpcode() == TargetOpcode::G_AND ||
218 MI.getOpcode() == TargetOpcode::G_OR ||
219 MI.getOpcode() == TargetOpcode::G_XOR ||
220 MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
221 "Unexpected opcode");
222
223 if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
224 return false;
225
226 auto DestReg = MI.getOperand(i: 0).getReg();
227 for (auto &UserOp : MRI->use_nodbg_operands(Reg: DestReg)) {
228 assert(UserOp.getParent() && "UserOp must have a parent");
229 const MachineInstr &UserMI = *UserOp.getParent();
230 unsigned OpIdx = UserOp.getOperandNo();
231
232 switch (UserMI.getOpcode()) {
233 default:
234 return false;
235 case RISCV::ADDW:
236 case RISCV::ADDIW:
237 case RISCV::SUBW:
238 case RISCV::FCVT_D_W:
239 case RISCV::FCVT_S_W:
240 if (Bits >= 32)
241 break;
242 return false;
243 case RISCV::SLL:
244 case RISCV::SRA:
245 case RISCV::SRL:
      // Shift amount operands only use log2(XLen) bits.
247 if (OpIdx == 2 && Bits >= Log2_32(Value: Subtarget->getXLen()))
248 break;
249 return false;
250 case RISCV::SLLI:
251 // SLLI only uses the lower (XLen - ShAmt) bits.
252 if (Bits >= Subtarget->getXLen() - UserMI.getOperand(i: 2).getImm())
253 break;
254 return false;
255 case RISCV::ANDI:
256 if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
257 Value: (uint64_t)UserMI.getOperand(i: 2).getImm()))
258 break;
259 goto RecCheck;
260 case RISCV::AND:
261 case RISCV::OR:
262 case RISCV::XOR:
263 RecCheck:
264 if (hasAllNBitUsers(MI: UserMI, Bits, Depth: Depth + 1))
265 break;
266 return false;
267 case RISCV::SRLI: {
268 unsigned ShAmt = UserMI.getOperand(i: 2).getImm();
      // If the shift amount is less than Bits and the users of this SRLI only
      // demand its low (Bits - ShAmt) bits, then only the low Bits bits of the
      // original value are demanded, so it still counts as an N-bit user.
272 if (Bits > ShAmt && hasAllNBitUsers(MI: UserMI, Bits: Bits - ShAmt, Depth: Depth + 1))
273 break;
274 return false;
275 }
276 }
277 }
278
279 return true;
280}
281
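// ComplexPattern renderer for shift-amount operands. Looks through G_ZEXT and
// redundant G_AND masks (the shift only reads the low log2(ShiftWidth) bits),
// and folds additions or subtractions of multiples of ShiftWidth into the
// shift amount.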
282InstructionSelector::ComplexRendererFns
283RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
284 unsigned ShiftWidth) const {
285 if (!Root.isReg())
286 return std::nullopt;
287
288 using namespace llvm::MIPatternMatch;
289
290 Register ShAmtReg = Root.getReg();
291 // Peek through zext.
292 Register ZExtSrcReg;
293 if (mi_match(R: ShAmtReg, MRI: *MRI, P: m_GZExt(Src: m_Reg(R&: ZExtSrcReg))))
294 ShAmtReg = ZExtSrcReg;
295
296 APInt AndMask;
297 Register AndSrcReg;
  // Try to combine the following pattern (applicable to shift instructions of
  // other widths, including 32-bit ones):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to the RISC-V ISA manual, SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of rs2. For the pattern above, if the lowest
  // log2(XLEN) bits of the G_AND result and of its non-constant operand are
  // the same, the G_AND can be eliminated. Given that one G_AND operand holds
  // a constant (the mask), the G_AND can be erased in two cases:
  //
  //   1. the lowest log2(XLEN) bits of the mask are all set, or
  //   2. the bits of the other operand that the mask clears are already known
  //      to be zero.
312 if (mi_match(R: ShAmtReg, MRI: *MRI, P: m_GAnd(L: m_Reg(R&: AndSrcReg), R: m_ICst(Cst&: AndMask)))) {
313 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
314 if (ShMask.isSubsetOf(RHS: AndMask)) {
315 ShAmtReg = AndSrcReg;
316 } else {
317 // SimplifyDemandedBits may have optimized the mask so try restoring any
318 // bits that are known zero.
319 KnownBits Known = VT->getKnownBits(R: AndSrcReg);
320 if (ShMask.isSubsetOf(RHS: AndMask | Known.Zero))
321 ShAmtReg = AndSrcReg;
322 }
323 }
324
325 APInt Imm;
326 Register Reg;
327 if (mi_match(R: ShAmtReg, MRI: *MRI, P: m_GAdd(L: m_Reg(R&: Reg), R: m_ICst(Cst&: Imm)))) {
328 if (Imm != 0 && Imm.urem(RHS: ShiftWidth) == 0)
329 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
330 // to avoid the ADD.
331 ShAmtReg = Reg;
332 } else if (mi_match(R: ShAmtReg, MRI: *MRI, P: m_GSub(L: m_ICst(Cst&: Imm), R: m_Reg(R&: Reg)))) {
333 if (Imm != 0 && Imm.urem(RHS: ShiftWidth) == 0) {
334 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
335 // to generate a NEG instead of a SUB of a constant.
336 ShAmtReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
337 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
338 return {{[=](MachineInstrBuilder &MIB) {
339 MachineIRBuilder(*MIB.getInstr())
340 .buildInstr(Opc: NegOpc, DstOps: {ShAmtReg}, SrcOps: {Register(RISCV::X0), Reg});
341 MIB.addReg(RegNo: ShAmtReg);
342 }}};
343 }
344 if (Imm.urem(RHS: ShiftWidth) == ShiftWidth - 1) {
345 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
346 // to generate a NOT instead of a SUB of a constant.
347 ShAmtReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
348 return {{[=](MachineInstrBuilder &MIB) {
349 MachineIRBuilder(*MIB.getInstr())
350 .buildInstr(Opc: RISCV::XORI, DstOps: {ShAmtReg}, SrcOps: {Reg})
351 .addImm(Val: -1);
352 MIB.addReg(RegNo: ShAmtReg);
353 }}};
354 }
355 }
356
357 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: ShAmtReg); }}};
358}
359
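// Matches an operand that is already sign-extended from \p Bits bits, either
// through an explicit G_SEXT_INREG of that width or because enough sign bits
// are known to be present.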
360InstructionSelector::ComplexRendererFns
361RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
362 unsigned Bits) const {
363 if (!Root.isReg())
364 return std::nullopt;
365 Register RootReg = Root.getReg();
366 MachineInstr *RootDef = MRI->getVRegDef(Reg: RootReg);
367
368 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
369 RootDef->getOperand(i: 2).getImm() == Bits) {
370 return {
371 {[=](MachineInstrBuilder &MIB) { MIB.add(MO: RootDef->getOperand(i: 1)); }}};
372 }
373
374 unsigned Size = MRI->getType(Reg: RootReg).getScalarSizeInBits();
375 if ((Size - VT->computeNumSignBits(R: RootReg)) < Bits)
376 return {{[=](MachineInstrBuilder &MIB) { MIB.add(MO: Root); }}};
377
378 return std::nullopt;
379}
380
381InstructionSelector::ComplexRendererFns
382RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
383 unsigned Bits) const {
384 if (!Root.isReg())
385 return std::nullopt;
386 Register RootReg = Root.getReg();
387
388 Register RegX;
389 uint64_t Mask = maskTrailingOnes<uint64_t>(N: Bits);
390 if (mi_match(R: RootReg, MRI: *MRI, P: m_GAnd(L: m_Reg(R&: RegX), R: m_SpecificICst(RequestedValue: Mask)))) {
391 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: RegX); }}};
392 }
393
394 if (mi_match(R: RootReg, MRI: *MRI, P: m_GZExt(Src: m_Reg(R&: RegX))) &&
395 MRI->getType(Reg: RegX).getScalarSizeInBits() == Bits)
396 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: RegX); }}};
397
398 unsigned Size = MRI->getType(Reg: RootReg).getScalarSizeInBits();
399 if (VT->maskedValueIsZero(Val: RootReg, Mask: APInt::getBitsSetFrom(numBits: Size, loBit: Bits)))
400 return {{[=](MachineInstrBuilder &MIB) { MIB.add(MO: Root); }}};
401
402 return std::nullopt;
403}
404
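// Matches shift-and-mask combinations that can feed the shifted operand of a
// Zba SHXADD (sh1add/sh2add/sh3add) once rewritten as a single SRLI or SRLIW.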
405InstructionSelector::ComplexRendererFns
406RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
407 unsigned ShAmt) const {
408 using namespace llvm::MIPatternMatch;
409
410 if (!Root.isReg())
411 return std::nullopt;
412 Register RootReg = Root.getReg();
413
414 const unsigned XLen = STI.getXLen();
415 APInt Mask, C2;
416 Register RegY;
417 std::optional<bool> LeftShift;
418 // (and (shl y, c2), mask)
419 if (mi_match(R: RootReg, MRI: *MRI,
420 P: m_GAnd(L: m_GShl(L: m_Reg(R&: RegY), R: m_ICst(Cst&: C2)), R: m_ICst(Cst&: Mask))))
421 LeftShift = true;
422 // (and (lshr y, c2), mask)
423 else if (mi_match(R: RootReg, MRI: *MRI,
424 P: m_GAnd(L: m_GLShr(L: m_Reg(R&: RegY), R: m_ICst(Cst&: C2)), R: m_ICst(Cst&: Mask))))
425 LeftShift = false;
426
427 if (LeftShift.has_value()) {
428 if (*LeftShift)
429 Mask &= maskTrailingZeros<uint64_t>(N: C2.getLimitedValue());
430 else
431 Mask &= maskTrailingOnes<uint64_t>(N: XLen - C2.getLimitedValue());
432
433 if (Mask.isShiftedMask()) {
434 unsigned Leading = XLen - Mask.getActiveBits();
435 unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) where mask has no leading zeros and c3
      // trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
438 if (*LeftShift && Leading == 0 && C2.ult(RHS: Trailing) && Trailing == ShAmt) {
439 Register DstReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
440 return {{[=](MachineInstrBuilder &MIB) {
441 MachineIRBuilder(*MIB.getInstr())
442 .buildInstr(Opc: RISCV::SRLI, DstOps: {DstReg}, SrcOps: {RegY})
443 .addImm(Val: Trailing - C2.getLimitedValue());
444 MIB.addReg(RegNo: DstReg);
445 }}};
446 }
447
      // Given (and (lshr y, c2), mask) where mask has c2 leading zeros and c3
      // trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
450 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
451 Register DstReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
452 return {{[=](MachineInstrBuilder &MIB) {
453 MachineIRBuilder(*MIB.getInstr())
454 .buildInstr(Opc: RISCV::SRLI, DstOps: {DstReg}, SrcOps: {RegY})
455 .addImm(Val: Leading + Trailing);
456 MIB.addReg(RegNo: DstReg);
457 }}};
458 }
459 }
460 }
461
462 LeftShift.reset();
463
464 // (shl (and y, mask), c2)
465 if (mi_match(R: RootReg, MRI: *MRI,
466 P: m_GShl(L: m_OneNonDBGUse(SP: m_GAnd(L: m_Reg(R&: RegY), R: m_ICst(Cst&: Mask))),
467 R: m_ICst(Cst&: C2))))
468 LeftShift = true;
469 // (lshr (and y, mask), c2)
470 else if (mi_match(R: RootReg, MRI: *MRI,
471 P: m_GLShr(L: m_OneNonDBGUse(SP: m_GAnd(L: m_Reg(R&: RegY), R: m_ICst(Cst&: Mask))),
472 R: m_ICst(Cst&: C2))))
473 LeftShift = false;
474
475 if (LeftShift.has_value() && Mask.isShiftedMask()) {
476 unsigned Leading = XLen - Mask.getActiveBits();
477 unsigned Trailing = Mask.countr_zero();
478
    // Given (shl (and y, mask), c2) where mask has 32 leading zeros and c3
    // trailing zeros, if c2 + c3 == ShAmt we can emit SRLIW + SHXADD.
481 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
482 (Trailing + C2.getLimitedValue()) == ShAmt;
483 if (!Cond)
      // Given (lshr (and y, mask), c2) where mask has 32 leading zeros and c3
      // trailing zeros, if c3 - c2 == ShAmt we can emit SRLIW + SHXADD.
486 Cond = !*LeftShift && Leading == 32 && C2.ult(RHS: Trailing) &&
487 (Trailing - C2.getLimitedValue()) == ShAmt;
488
489 if (Cond) {
490 Register DstReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
491 return {{[=](MachineInstrBuilder &MIB) {
492 MachineIRBuilder(*MIB.getInstr())
493 .buildInstr(Opc: RISCV::SRLIW, DstOps: {DstReg}, SrcOps: {RegY})
494 .addImm(Val: Trailing);
495 MIB.addReg(RegNo: DstReg);
496 }}};
497 }
498 }
499
500 return std::nullopt;
501}
502
503InstructionSelector::ComplexRendererFns
504RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
505 unsigned ShAmt) const {
506 using namespace llvm::MIPatternMatch;
507
508 if (!Root.isReg())
509 return std::nullopt;
510 Register RootReg = Root.getReg();
511
  // Given (and (shl x, c2), mask) where mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with ShAmt for the X amount.
515 APInt Mask, C2;
516 Register RegX;
517 if (mi_match(
518 R: RootReg, MRI: *MRI,
519 P: m_OneNonDBGUse(SP: m_GAnd(L: m_OneNonDBGUse(SP: m_GShl(L: m_Reg(R&: RegX), R: m_ICst(Cst&: C2))),
520 R: m_ICst(Cst&: Mask))))) {
521 Mask &= maskTrailingZeros<uint64_t>(N: C2.getLimitedValue());
522
523 if (Mask.isShiftedMask()) {
524 unsigned Leading = Mask.countl_zero();
525 unsigned Trailing = Mask.countr_zero();
526 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
527 Register DstReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
528 return {{[=](MachineInstrBuilder &MIB) {
529 MachineIRBuilder(*MIB.getInstr())
530 .buildInstr(Opc: RISCV::SLLI, DstOps: {DstReg}, SrcOps: {RegX})
531 .addImm(Val: C2.getLimitedValue() - ShAmt);
532 MIB.addReg(RegNo: DstReg);
533 }}};
534 }
535 }
536 }
537
538 return std::nullopt;
539}
540
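// Renders the VL operand of a vector pseudo: an all-ones G_CONSTANT becomes
// the VLMaxSentinel immediate, other constants that fit in 5 bits become a
// plain immediate, and anything else is passed through as a register.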
541InstructionSelector::ComplexRendererFns
542RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
543 assert(Root.isReg() && "Expected operand to be a Register");
544 MachineInstr *RootDef = MRI->getVRegDef(Reg: Root.getReg());
545
546 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
547 auto C = RootDef->getOperand(i: 1).getCImm();
548 if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with a value of all ones, it is larger
      // than VLMAX. Convert it to an immediate with value VLMaxSentinel, which
      // is recognized specially by the vsetvli insertion pass.
552 return {{[=](MachineInstrBuilder &MIB) {
553 MIB.addImm(Val: RISCV::VLMaxSentinel);
554 }}};
555
556 if (isUInt<5>(x: C->getZExtValue())) {
557 uint64_t ZExtC = C->getZExtValue();
558 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Val: ZExtC); }}};
559 }
560 }
561 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: Root.getReg()); }}};
562}
563
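// Selects a (base register, simm12 offset) pair for reg+imm addressing,
// folding frame indices and in-range constant offsets where possible.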
564InstructionSelector::ComplexRendererFns
565RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
566 if (!Root.isReg())
567 return std::nullopt;
568
569 MachineInstr *RootDef = MRI->getVRegDef(Reg: Root.getReg());
570 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
571 return {{
572 [=](MachineInstrBuilder &MIB) { MIB.add(MO: RootDef->getOperand(i: 1)); },
573 [=](MachineInstrBuilder &MIB) { MIB.addImm(Val: 0); },
574 }};
575 }
576
577 if (isBaseWithConstantOffset(Root, MRI: *MRI)) {
578 MachineOperand &LHS = RootDef->getOperand(i: 1);
579 MachineOperand &RHS = RootDef->getOperand(i: 2);
580 MachineInstr *LHSDef = MRI->getVRegDef(Reg: LHS.getReg());
581 MachineInstr *RHSDef = MRI->getVRegDef(Reg: RHS.getReg());
582
583 int64_t RHSC = RHSDef->getOperand(i: 1).getCImm()->getSExtValue();
584 if (isInt<12>(x: RHSC)) {
585 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
586 return {{
587 [=](MachineInstrBuilder &MIB) { MIB.add(MO: LHSDef->getOperand(i: 1)); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(Val: RHSC); },
589 }};
590
591 return {{[=](MachineInstrBuilder &MIB) { MIB.add(MO: LHS); },
592 [=](MachineInstrBuilder &MIB) { MIB.addImm(Val: RHSC); }}};
593 }
594 }
595
596 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
597 // the combiner?
598 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegNo: Root.getReg()); },
599 [=](MachineInstrBuilder &MIB) { MIB.addImm(Val: 0); }}};
600}
601
/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// \p CC. \p CC must be an ICMP predicate.
604static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
605 switch (CC) {
606 default:
607 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
608 case CmpInst::Predicate::ICMP_EQ:
609 return RISCVCC::COND_EQ;
610 case CmpInst::Predicate::ICMP_NE:
611 return RISCVCC::COND_NE;
612 case CmpInst::Predicate::ICMP_ULT:
613 return RISCVCC::COND_LTU;
614 case CmpInst::Predicate::ICMP_SLT:
615 return RISCVCC::COND_LT;
616 case CmpInst::Predicate::ICMP_UGE:
617 return RISCVCC::COND_GEU;
618 case CmpInst::Predicate::ICMP_SGE:
619 return RISCVCC::COND_GE;
620 }
621}
622
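// Produces the condition code and operands for a conditional branch. Folds a
// G_ICMP feeding the condition when possible; otherwise compares the condition
// register against X0 with COND_NE.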
623static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
624 Register &LHS, Register &RHS,
625 MachineRegisterInfo &MRI) {
626 // Try to fold an ICmp. If that fails, use a NE compare with X0.
627 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
628 if (!mi_match(R: CondReg, MRI, P: m_GICmp(P: m_Pred(P&: Pred), L: m_Reg(R&: LHS), R: m_Reg(R&: RHS)))) {
629 LHS = CondReg;
630 RHS = RISCV::X0;
631 CC = RISCVCC::COND_NE;
632 return;
633 }
634
635 // We found an ICmp, do some canonicalization.
636
637 // Adjust comparisons to use comparison with 0 if possible.
638 if (auto Constant = getIConstantVRegSExtVal(VReg: RHS, MRI)) {
639 switch (Pred) {
640 case CmpInst::Predicate::ICMP_SGT:
641 // Convert X > -1 to X >= 0
642 if (*Constant == -1) {
643 CC = RISCVCC::COND_GE;
644 RHS = RISCV::X0;
645 return;
646 }
647 break;
648 case CmpInst::Predicate::ICMP_SLT:
649 // Convert X < 1 to 0 >= X
650 if (*Constant == 1) {
651 CC = RISCVCC::COND_GE;
652 RHS = LHS;
653 LHS = RISCV::X0;
654 return;
655 }
656 break;
657 default:
658 break;
659 }
660 }
661
662 switch (Pred) {
663 default:
664 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
665 case CmpInst::Predicate::ICMP_EQ:
666 case CmpInst::Predicate::ICMP_NE:
667 case CmpInst::Predicate::ICMP_ULT:
668 case CmpInst::Predicate::ICMP_SLT:
669 case CmpInst::Predicate::ICMP_UGE:
670 case CmpInst::Predicate::ICMP_SGE:
671 // These CCs are supported directly by RISC-V branches.
672 break;
673 case CmpInst::Predicate::ICMP_SGT:
674 case CmpInst::Predicate::ICMP_SLE:
675 case CmpInst::Predicate::ICMP_UGT:
676 case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches; reverse the
    // condition and swap LHS and RHS to get an equivalent condition that is.
679 Pred = CmpInst::getSwappedPredicate(pred: Pred);
680 std::swap(a&: LHS, b&: RHS);
681 break;
682 }
683
684 CC = getRISCVCCFromICmp(CC: Pred);
685}
686
687/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
688/// \p GenericOpc, appropriate for the GPR register bank and of memory access
689/// size \p OpSize.
690static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
691 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
692 switch (OpSize) {
693 default:
694 llvm_unreachable("Unexpected memory size");
695 case 8:
696 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
697 case 16:
698 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
699 case 32:
700 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
701 case 64:
702 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
703 }
704}
705
706/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
707/// \p GenericOpc, appropriate for the GPR register bank and of memory access
708/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
709static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
710 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
711 switch (OpSize) {
712 case 8:
713 // Prefer unsigned due to no c.lb in Zcb.
714 return IsStore ? RISCV::SB : RISCV::LBU;
715 case 16:
716 return IsStore ? RISCV::SH : RISCV::LH;
717 case 32:
718 return IsStore ? RISCV::SW : RISCV::LW;
719 case 64:
720 return IsStore ? RISCV::SD : RISCV::LD;
721 }
722
723 return GenericOpc;
724}
725
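// Appends the base pointer, the optional stride/index operand, and the
// optional mask operand of a vector load/store intrinsic to \p SrcOps,
// starting at operand index \p CurOp.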
726void RISCVInstructionSelector::addVectorLoadStoreOperands(
727 MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
728 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
729 // Base Pointer
730 auto PtrReg = I.getOperand(i: CurOp++).getReg();
731 SrcOps.push_back(Elt: PtrReg);
732
733 // Stride or Index
734 if (IsStridedOrIndexed) {
735 auto StrideReg = I.getOperand(i: CurOp++).getReg();
736 SrcOps.push_back(Elt: StrideReg);
737 if (IndexVT)
738 *IndexVT = MRI->getType(Reg: StrideReg);
739 }
740
741 // Mask
742 if (IsMasked) {
743 auto MaskReg = I.getOperand(i: CurOp++).getReg();
744 SrcOps.push_back(Elt: MaskReg);
745 }
746}
747
748bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
749 MachineInstr &I, MachineIRBuilder &MIB) const {
750 // Find the intrinsic ID.
751 unsigned IntrinID = cast<GIntrinsic>(Val&: I).getIntrinsicID();
752 // Select the instruction.
753 switch (IntrinID) {
754 default:
755 return false;
756 case Intrinsic::riscv_vlm:
757 case Intrinsic::riscv_vle:
758 case Intrinsic::riscv_vle_mask:
759 case Intrinsic::riscv_vlse:
760 case Intrinsic::riscv_vlse_mask: {
761 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
762 IntrinID == Intrinsic::riscv_vlse_mask;
763 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
764 IntrinID == Intrinsic::riscv_vlse_mask;
765 LLT VT = MRI->getType(Reg: I.getOperand(i: 0).getReg());
766 unsigned Log2SEW = Log2_32(Value: VT.getScalarSizeInBits());
767
768 // Result vector
769 const Register DstReg = I.getOperand(i: 0).getReg();
770
771 // Sources
772 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
773 unsigned CurOp = 2;
774 SmallVector<SrcOp, 4> SrcOps; // Source registers.
775
776 // Passthru
777 if (HasPassthruOperand) {
778 auto PassthruReg = I.getOperand(i: CurOp++).getReg();
779 SrcOps.push_back(Elt: PassthruReg);
780 } else {
781 SrcOps.push_back(Elt: Register(RISCV::NoRegister));
782 }
783
784 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStridedOrIndexed: IsStrided);
785
786 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT: getMVTForLLT(Ty: VT));
787 const RISCV::VLEPseudo *P =
788 RISCV::getVLEPseudo(Masked: IsMasked, Strided: IsStrided, /*FF*/ false, Log2SEW,
789 LMUL: static_cast<unsigned>(LMUL));
790
791 auto PseudoMI = MIB.buildInstr(Opc: P->Pseudo, DstOps: {DstReg}, SrcOps);
792
793 // Select VL
794 auto VLOpFn = renderVLOp(Root&: I.getOperand(i: CurOp++));
795 for (auto &RenderFn : *VLOpFn)
796 RenderFn(PseudoMI);
797
798 // SEW
799 PseudoMI.addImm(Val: Log2SEW);
800
801 // Policy
802 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
803 if (IsMasked)
804 Policy = I.getOperand(i: CurOp++).getImm();
805 PseudoMI.addImm(Val: Policy);
806
807 // Memref
808 PseudoMI.cloneMemRefs(OtherMI: I);
809
810 I.eraseFromParent();
811 return constrainSelectedInstRegOperands(I&: *PseudoMI, TII, TRI, RBI);
812 }
813 case Intrinsic::riscv_vloxei:
814 case Intrinsic::riscv_vloxei_mask:
815 case Intrinsic::riscv_vluxei:
816 case Intrinsic::riscv_vluxei_mask: {
817 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
818 IntrinID == Intrinsic::riscv_vluxei_mask;
819 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
820 IntrinID == Intrinsic::riscv_vloxei_mask;
821 LLT VT = MRI->getType(Reg: I.getOperand(i: 0).getReg());
822 unsigned Log2SEW = Log2_32(Value: VT.getScalarSizeInBits());
823
824 // Result vector
825 const Register DstReg = I.getOperand(i: 0).getReg();
826
827 // Sources
828 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
829 unsigned CurOp = 2;
830 SmallVector<SrcOp, 4> SrcOps; // Source registers.
831
832 // Passthru
833 if (HasPassthruOperand) {
834 auto PassthruReg = I.getOperand(i: CurOp++).getReg();
835 SrcOps.push_back(Elt: PassthruReg);
836 } else {
837 // Use NoRegister if there is no specified passthru.
838 SrcOps.push_back(Elt: Register());
839 }
840 LLT IndexVT;
841 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStridedOrIndexed: true, IndexVT: &IndexVT);
842
843 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT: getMVTForLLT(Ty: VT));
844 RISCVVType::VLMUL IndexLMUL =
845 RISCVTargetLowering::getLMUL(VT: getMVTForLLT(Ty: IndexVT));
846 unsigned IndexLog2EEW = Log2_32(Value: IndexVT.getScalarSizeInBits());
847 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
848 reportFatalUsageError(reason: "The V extension does not support EEW=64 for index "
849 "values when XLEN=32");
850 }
851 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
852 Masked: IsMasked, Ordered: IsOrdered, Log2SEW: IndexLog2EEW, LMUL: static_cast<unsigned>(LMUL),
853 IndexLMUL: static_cast<unsigned>(IndexLMUL));
854
855 auto PseudoMI = MIB.buildInstr(Opc: P->Pseudo, DstOps: {DstReg}, SrcOps);
856
857 // Select VL
858 auto VLOpFn = renderVLOp(Root&: I.getOperand(i: CurOp++));
859 for (auto &RenderFn : *VLOpFn)
860 RenderFn(PseudoMI);
861
862 // SEW
863 PseudoMI.addImm(Val: Log2SEW);
864
865 // Policy
866 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
867 if (IsMasked)
868 Policy = I.getOperand(i: CurOp++).getImm();
869 PseudoMI.addImm(Val: Policy);
870
871 // Memref
872 PseudoMI.cloneMemRefs(OtherMI: I);
873
874 I.eraseFromParent();
875 return constrainSelectedInstRegOperands(I&: *PseudoMI, TII, TRI, RBI);
876 }
877 case Intrinsic::riscv_vsm:
878 case Intrinsic::riscv_vse:
879 case Intrinsic::riscv_vse_mask:
880 case Intrinsic::riscv_vsse:
881 case Intrinsic::riscv_vsse_mask: {
882 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
883 IntrinID == Intrinsic::riscv_vsse_mask;
884 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
885 IntrinID == Intrinsic::riscv_vsse_mask;
886 LLT VT = MRI->getType(Reg: I.getOperand(i: 1).getReg());
887 unsigned Log2SEW = Log2_32(Value: VT.getScalarSizeInBits());
888
889 // Sources
890 unsigned CurOp = 1;
891 SmallVector<SrcOp, 4> SrcOps; // Source registers.
892
893 // Store value
894 auto PassthruReg = I.getOperand(i: CurOp++).getReg();
895 SrcOps.push_back(Elt: PassthruReg);
896
897 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStridedOrIndexed: IsStrided);
898
899 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT: getMVTForLLT(Ty: VT));
900 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
901 Masked: IsMasked, Strided: IsStrided, Log2SEW, LMUL: static_cast<unsigned>(LMUL));
902
903 auto PseudoMI = MIB.buildInstr(Opc: P->Pseudo, DstOps: {}, SrcOps);
904
905 // Select VL
906 auto VLOpFn = renderVLOp(Root&: I.getOperand(i: CurOp++));
907 for (auto &RenderFn : *VLOpFn)
908 RenderFn(PseudoMI);
909
910 // SEW
911 PseudoMI.addImm(Val: Log2SEW);
912
913 // Memref
914 PseudoMI.cloneMemRefs(OtherMI: I);
915
916 I.eraseFromParent();
917 return constrainSelectedInstRegOperands(I&: *PseudoMI, TII, TRI, RBI);
918 }
919 case Intrinsic::riscv_vsoxei:
920 case Intrinsic::riscv_vsoxei_mask:
921 case Intrinsic::riscv_vsuxei:
922 case Intrinsic::riscv_vsuxei_mask: {
923 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
924 IntrinID == Intrinsic::riscv_vsuxei_mask;
925 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
926 IntrinID == Intrinsic::riscv_vsoxei_mask;
927 LLT VT = MRI->getType(Reg: I.getOperand(i: 1).getReg());
928 unsigned Log2SEW = Log2_32(Value: VT.getScalarSizeInBits());
929
930 // Sources
931 unsigned CurOp = 1;
932 SmallVector<SrcOp, 4> SrcOps; // Source registers.
933
934 // Store value
935 auto PassthruReg = I.getOperand(i: CurOp++).getReg();
936 SrcOps.push_back(Elt: PassthruReg);
937
938 LLT IndexVT;
939 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStridedOrIndexed: true, IndexVT: &IndexVT);
940
941 RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT: getMVTForLLT(Ty: VT));
942 RISCVVType::VLMUL IndexLMUL =
943 RISCVTargetLowering::getLMUL(VT: getMVTForLLT(Ty: IndexVT));
944 unsigned IndexLog2EEW = Log2_32(Value: IndexVT.getScalarSizeInBits());
945 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
946 reportFatalUsageError(reason: "The V extension does not support EEW=64 for index "
947 "values when XLEN=32");
948 }
949 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
950 Masked: IsMasked, Ordered: IsOrdered, Log2SEW: IndexLog2EEW, LMUL: static_cast<unsigned>(LMUL),
951 IndexLMUL: static_cast<unsigned>(IndexLMUL));
952
953 auto PseudoMI = MIB.buildInstr(Opc: P->Pseudo, DstOps: {}, SrcOps);
954
955 // Select VL
956 auto VLOpFn = renderVLOp(Root&: I.getOperand(i: CurOp++));
957 for (auto &RenderFn : *VLOpFn)
958 RenderFn(PseudoMI);
959
960 // SEW
961 PseudoMI.addImm(Val: Log2SEW);
962
963 // Memref
964 PseudoMI.cloneMemRefs(OtherMI: I);
965
966 I.eraseFromParent();
967 return constrainSelectedInstRegOperands(I&: *PseudoMI, TII, TRI, RBI);
968 }
969 }
970}
971
972bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
973 MachineIRBuilder &MIB) const {
974 // Find the intrinsic ID.
975 unsigned IntrinID = cast<GIntrinsic>(Val&: I).getIntrinsicID();
976 // Select the instruction.
977 switch (IntrinID) {
978 default:
979 return false;
980 case Intrinsic::riscv_vsetvli:
981 case Intrinsic::riscv_vsetvlimax: {
982
983 bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
984
985 unsigned Offset = VLMax ? 2 : 3;
986 unsigned SEW = RISCVVType::decodeVSEW(VSEW: I.getOperand(i: Offset).getImm() & 0x7);
987 RISCVVType::VLMUL VLMul =
988 static_cast<RISCVVType::VLMUL>(I.getOperand(i: Offset + 1).getImm() & 0x7);
989
990 unsigned VTypeI = RISCVVType::encodeVTYPE(VLMUL: VLMul, SEW, /*TailAgnostic*/ true,
991 /*MaskAgnostic*/ true);
992
993 Register DstReg = I.getOperand(i: 0).getReg();
994
995 Register VLOperand;
996 unsigned Opcode = RISCV::PseudoVSETVLI;
997
998 // Check if AVL is a constant that equals VLMAX.
999 if (!VLMax) {
1000 Register AVLReg = I.getOperand(i: 2).getReg();
1001 if (auto AVLConst = getIConstantVRegValWithLookThrough(VReg: AVLReg, MRI: *MRI)) {
1002 uint64_t AVL = AVLConst->Value.getZExtValue();
1003 if (auto VLEN = Subtarget->getRealVLen()) {
1004 if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
1005 VLMax = true;
1006 }
1007 }
1008
1009 MachineInstr *AVLDef = MRI->getVRegDef(Reg: AVLReg);
1010 if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
1011 const auto *C = AVLDef->getOperand(i: 1).getCImm();
1012 if (C->getValue().isAllOnes())
1013 VLMax = true;
1014 }
1015 }
1016
1017 if (VLMax) {
1018 VLOperand = Register(RISCV::X0);
1019 Opcode = RISCV::PseudoVSETVLIX0;
1020 } else {
1021 Register AVLReg = I.getOperand(i: 2).getReg();
1022 VLOperand = AVLReg;
1023
1024 // Check if AVL is a small constant that can use PseudoVSETIVLI.
1025 if (auto AVLConst = getIConstantVRegValWithLookThrough(VReg: AVLReg, MRI: *MRI)) {
1026 uint64_t AVL = AVLConst->Value.getZExtValue();
1027 if (isUInt<5>(x: AVL)) {
1028 auto PseudoMI = MIB.buildInstr(Opc: RISCV::PseudoVSETIVLI, DstOps: {DstReg}, SrcOps: {})
1029 .addImm(Val: AVL)
1030 .addImm(Val: VTypeI);
1031 I.eraseFromParent();
1032 return constrainSelectedInstRegOperands(I&: *PseudoMI, TII, TRI, RBI);
1033 }
1034 }
1035 }
1036
1037 auto PseudoMI =
1038 MIB.buildInstr(Opc: Opcode, DstOps: {DstReg}, SrcOps: {VLOperand}).addImm(Val: VTypeI);
1039 I.eraseFromParent();
1040 return constrainSelectedInstRegOperands(I&: *PseudoMI, TII, TRI, RBI);
1041 }
1042 }
1043}
1044
1045bool RISCVInstructionSelector::selectExtractSubvector(
1046 MachineInstr &MI, MachineIRBuilder &MIB) const {
1047 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
1048
1049 Register DstReg = MI.getOperand(i: 0).getReg();
1050 Register SrcReg = MI.getOperand(i: 1).getReg();
1051
1052 LLT DstTy = MRI->getType(Reg: DstReg);
1053 LLT SrcTy = MRI->getType(Reg: SrcReg);
1054
1055 unsigned Idx = static_cast<unsigned>(MI.getOperand(i: 2).getImm());
1056
1057 MVT DstMVT = getMVTForLLT(Ty: DstTy);
1058 MVT SrcMVT = getMVTForLLT(Ty: SrcTy);
1059
1060 unsigned SubRegIdx;
1061 std::tie(args&: SubRegIdx, args&: Idx) =
1062 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1063 VecVT: SrcMVT, SubVecVT: DstMVT, InsertExtractIdx: Idx, TRI: &TRI);
1064
1065 if (Idx != 0)
1066 return false;
1067
1068 unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT: DstMVT);
1069 const TargetRegisterClass *DstRC = TRI.getRegClass(i: DstRegClassID);
1070 if (!RBI.constrainGenericRegister(Reg: DstReg, RC: *DstRC, MRI&: *MRI))
1071 return false;
1072
1073 unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT: SrcMVT);
1074 const TargetRegisterClass *SrcRC = TRI.getRegClass(i: SrcRegClassID);
1075 if (!RBI.constrainGenericRegister(Reg: SrcReg, RC: *SrcRC, MRI&: *MRI))
1076 return false;
1077
1078 MIB.buildInstr(Opc: TargetOpcode::COPY, DstOps: {DstReg}, SrcOps: {})
1079 .addReg(RegNo: SrcReg, Flags: {}, SubReg: SubRegIdx);
1080
1081 MI.eraseFromParent();
1082 return true;
1083}
1084
1085bool RISCVInstructionSelector::select(MachineInstr &MI) {
1086 MachineIRBuilder MIB(MI);
1087
1088 preISelLower(MI, MIB);
1089 const unsigned Opc = MI.getOpcode();
1090
1091 if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
1092 if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
1093 const Register DefReg = MI.getOperand(i: 0).getReg();
1094 const LLT DefTy = MRI->getType(Reg: DefReg);
1095
1096 const RegClassOrRegBank &RegClassOrBank =
1097 MRI->getRegClassOrRegBank(Reg: DefReg);
1098
1099 const TargetRegisterClass *DefRC =
1100 dyn_cast<const TargetRegisterClass *>(Val: RegClassOrBank);
1101 if (!DefRC) {
1102 if (!DefTy.isValid()) {
1103 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
1104 return false;
1105 }
1106
1107 const RegisterBank &RB = *cast<const RegisterBank *>(Val: RegClassOrBank);
1108 DefRC = getRegClassForTypeOnBank(Ty: DefTy, RB);
1109 if (!DefRC) {
1110 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
1111 return false;
1112 }
1113 }
1114
1115 MI.setDesc(TII.get(Opcode: TargetOpcode::PHI));
1116 return RBI.constrainGenericRegister(Reg: DefReg, RC: *DefRC, MRI&: *MRI);
1117 }
1118
1119 // Certain non-generic instructions also need some special handling.
1120 if (MI.isCopy())
1121 return selectCopy(MI);
1122
1123 return true;
1124 }
1125
1126 if (selectImpl(I&: MI, CoverageInfo&: *CoverageInfo))
1127 return true;
1128
1129 switch (Opc) {
1130 case TargetOpcode::G_ANYEXT:
1131 case TargetOpcode::G_PTRTOINT:
1132 case TargetOpcode::G_INTTOPTR:
1133 case TargetOpcode::G_TRUNC:
1134 case TargetOpcode::G_FREEZE:
1135 return selectCopy(MI);
1136 case TargetOpcode::G_CONSTANT: {
1137 Register DstReg = MI.getOperand(i: 0).getReg();
1138 int64_t Imm = MI.getOperand(i: 1).getCImm()->getSExtValue();
1139
1140 if (!materializeImm(Reg: DstReg, Imm, MIB))
1141 return false;
1142
1143 MI.eraseFromParent();
1144 return true;
1145 }
1146 case TargetOpcode::G_ZEXT:
1147 case TargetOpcode::G_SEXT: {
1148 bool IsSigned = Opc != TargetOpcode::G_ZEXT;
1149 Register DstReg = MI.getOperand(i: 0).getReg();
1150 Register SrcReg = MI.getOperand(i: 1).getReg();
1151 LLT SrcTy = MRI->getType(Reg: SrcReg);
1152 unsigned SrcSize = SrcTy.getSizeInBits();
1153
1154 if (SrcTy.isVector())
1155 return false; // Should be handled by imported patterns.
1156
1157 assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
1158 RISCV::GPRBRegBankID &&
1159 "Unexpected ext regbank");
1160
1161 // Use addiw SrcReg, 0 (sext.w) for i32.
1162 if (IsSigned && SrcSize == 32) {
1163 MI.setDesc(TII.get(Opcode: RISCV::ADDIW));
1164 MI.addOperand(Op: MachineOperand::CreateImm(Val: 0));
1165 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1166 }
1167
1168 // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
1169 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
1170 MI.setDesc(TII.get(Opcode: RISCV::ADD_UW));
1171 MI.addOperand(Op: MachineOperand::CreateReg(Reg: RISCV::X0, /*isDef=*/false));
1172 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1173 }
1174
1175 // Use sext.h/zext.h for i16 with Zbb.
1176 if (SrcSize == 16 && STI.hasStdExtZbb()) {
1177 MI.setDesc(TII.get(Opcode: IsSigned ? RISCV::SEXT_H
1178 : STI.isRV64() ? RISCV::ZEXT_H_RV64
1179 : RISCV::ZEXT_H_RV32));
1180 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1181 }
1182
1183 // Use pack(w) SrcReg, X0 for i16 zext with Zbkb.
1184 if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
1185 MI.setDesc(TII.get(Opcode: STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
1186 MI.addOperand(Op: MachineOperand::CreateReg(Reg: RISCV::X0, /*isDef=*/false));
1187 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1188 }
1189
1190 // Fall back to shift pair.
1191 auto ShiftLeft =
1192 MIB.buildInstr(Opc: RISCV::SLLI, DstOps: {&RISCV::GPRRegClass}, SrcOps: {SrcReg})
1193 .addImm(Val: STI.getXLen() - SrcSize);
1194 constrainSelectedInstRegOperands(I&: *ShiftLeft, TII, TRI, RBI);
1195 auto ShiftRight = MIB.buildInstr(Opc: IsSigned ? RISCV::SRAI : RISCV::SRLI,
1196 DstOps: {DstReg}, SrcOps: {ShiftLeft})
1197 .addImm(Val: STI.getXLen() - SrcSize);
1198 constrainSelectedInstRegOperands(I&: *ShiftRight, TII, TRI, RBI);
1199 MI.eraseFromParent();
1200 return true;
1201 }
1202 case TargetOpcode::G_FCONSTANT: {
1203 // TODO: Use constant pool for complex constants.
1204 Register DstReg = MI.getOperand(i: 0).getReg();
1205 const APFloat &FPimm = MI.getOperand(i: 1).getFPImm()->getValueAPF();
1206 unsigned Size = MRI->getType(Reg: DstReg).getSizeInBits();
1207 if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
1208 Register GPRReg;
1209 if (FPimm.isPosZero()) {
1210 GPRReg = RISCV::X0;
1211 } else {
1212 GPRReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1213 APInt Imm = FPimm.bitcastToAPInt();
1214 if (!materializeImm(Reg: GPRReg, Imm: Imm.getSExtValue(), MIB))
1215 return false;
1216 }
1217
1218 unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
1219 : Size == 32 ? RISCV::FMV_W_X
1220 : RISCV::FMV_H_X;
1221 auto FMV = MIB.buildInstr(Opc: Opcode, DstOps: {DstReg}, SrcOps: {GPRReg});
1222 if (!FMV.constrainAllUses(TII, TRI, RBI))
1223 return false;
1224 } else {
1225 // s64 on rv32
1226 assert(Size == 64 && !Subtarget->is64Bit() &&
1227 "Unexpected size or subtarget");
1228
1229 if (FPimm.isPosZero()) {
1230 // Optimize +0.0 to use fcvt.d.w
1231 MachineInstrBuilder FCVT =
1232 MIB.buildInstr(Opc: RISCV::FCVT_D_W, DstOps: {DstReg}, SrcOps: {Register(RISCV::X0)})
1233 .addImm(Val: RISCVFPRndMode::RNE);
1234 if (!FCVT.constrainAllUses(TII, TRI, RBI))
1235 return false;
1236
1237 MI.eraseFromParent();
1238 return true;
1239 }
1240
1241 // Split into two pieces and build through the stack.
1242 Register GPRRegHigh = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1243 Register GPRRegLow = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1244 APInt Imm = FPimm.bitcastToAPInt();
1245 if (!materializeImm(Reg: GPRRegHigh, Imm: Imm.extractBits(numBits: 32, bitPosition: 32).getSExtValue(),
1246 MIB))
1247 return false;
1248 if (!materializeImm(Reg: GPRRegLow, Imm: Imm.trunc(width: 32).getSExtValue(), MIB))
1249 return false;
1250 MachineInstrBuilder PairF64 = MIB.buildInstr(
1251 Opc: RISCV::BuildPairF64Pseudo, DstOps: {DstReg}, SrcOps: {GPRRegLow, GPRRegHigh});
1252 if (!PairF64.constrainAllUses(TII, TRI, RBI))
1253 return false;
1254 }
1255
1256 MI.eraseFromParent();
1257 return true;
1258 }
1259 case TargetOpcode::G_GLOBAL_VALUE: {
1260 auto *GV = MI.getOperand(i: 1).getGlobal();
1261 if (GV->isThreadLocal()) {
1262 // TODO: implement this case.
1263 return false;
1264 }
1265
1266 return selectAddr(MI, MIB, IsLocal: GV->isDSOLocal(), IsExternWeak: GV->hasExternalWeakLinkage());
1267 }
1268 case TargetOpcode::G_JUMP_TABLE:
1269 case TargetOpcode::G_CONSTANT_POOL:
1270 return selectAddr(MI, MIB, IsLocal: MRI);
1271 case TargetOpcode::G_BRCOND: {
1272 Register LHS, RHS;
1273 RISCVCC::CondCode CC;
1274 getOperandsForBranch(CondReg: MI.getOperand(i: 0).getReg(), CC, LHS, RHS, MRI&: *MRI);
1275
1276 auto Bcc = MIB.buildInstr(Opc: RISCVCC::getBrCond(CC), DstOps: {}, SrcOps: {LHS, RHS})
1277 .addMBB(MBB: MI.getOperand(i: 1).getMBB());
1278 MI.eraseFromParent();
1279 return constrainSelectedInstRegOperands(I&: *Bcc, TII, TRI, RBI);
1280 }
1281 case TargetOpcode::G_BRINDIRECT:
1282 MI.setDesc(TII.get(Opcode: RISCV::PseudoBRIND));
1283 MI.addOperand(Op: MachineOperand::CreateImm(Val: 0));
1284 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1285 case TargetOpcode::G_SELECT:
1286 return selectSelect(MI, MIB);
1287 case TargetOpcode::G_FCMP:
1288 return selectFPCompare(MI, MIB);
1289 case TargetOpcode::G_FENCE: {
1290 AtomicOrdering FenceOrdering =
1291 static_cast<AtomicOrdering>(MI.getOperand(i: 0).getImm());
1292 SyncScope::ID FenceSSID =
1293 static_cast<SyncScope::ID>(MI.getOperand(i: 1).getImm());
1294 emitFence(FenceOrdering, FenceSSID, MIB);
1295 MI.eraseFromParent();
1296 return true;
1297 }
1298 case TargetOpcode::G_IMPLICIT_DEF:
1299 return selectImplicitDef(MI, MIB);
1300 case TargetOpcode::G_UNMERGE_VALUES:
1301 return selectUnmergeValues(MI, MIB);
1302 case TargetOpcode::G_LOAD:
1303 case TargetOpcode::G_STORE: {
1304 GLoadStore &LdSt = cast<GLoadStore>(Val&: MI);
1305 const Register ValReg = LdSt.getReg(Idx: 0);
1306 const Register PtrReg = LdSt.getPointerReg();
1307 LLT PtrTy = MRI->getType(Reg: PtrReg);
1308
1309 const RegisterBank &RB = *RBI.getRegBank(Reg: ValReg, MRI: *MRI, TRI);
1310 if (RB.getID() != RISCV::GPRBRegBankID)
1311 return false;
1312
1313#ifndef NDEBUG
1314 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
1315 // Check that the pointer register is valid.
1316 assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
1317 "Load/Store pointer operand isn't a GPR");
1318 assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
1319#endif
1320
1321 // Can only handle AddressSpace 0.
1322 if (PtrTy.getAddressSpace() != 0)
1323 return false;
1324
1325 unsigned MemSize = LdSt.getMemSizeInBits().getValue();
1326 AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
1327
1328 if (isStrongerThanMonotonic(AO: Order)) {
1329 MI.setDesc(TII.get(Opcode: selectZalasrLoadStoreOp(GenericOpc: Opc, OpSize: MemSize)));
1330 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1331 }
1332
1333 const unsigned NewOpc = selectRegImmLoadStoreOp(GenericOpc: MI.getOpcode(), OpSize: MemSize);
1334 if (NewOpc == MI.getOpcode())
1335 return false;
1336
1337 // Check if we can fold anything into the addressing mode.
1338 auto AddrModeFns = selectAddrRegImm(Root&: MI.getOperand(i: 1));
1339 if (!AddrModeFns)
1340 return false;
1341
1342 // Folded something. Create a new instruction and return it.
1343 auto NewInst = MIB.buildInstr(Opc: NewOpc, DstOps: {}, SrcOps: {}, Flags: MI.getFlags());
1344 if (isa<GStore>(Val: MI))
1345 NewInst.addUse(RegNo: ValReg);
1346 else
1347 NewInst.addDef(RegNo: ValReg);
1348 NewInst.cloneMemRefs(OtherMI: MI);
1349 for (auto &Fn : *AddrModeFns)
1350 Fn(NewInst);
1351 MI.eraseFromParent();
1352
1353 return constrainSelectedInstRegOperands(I&: *NewInst, TII, TRI, RBI);
1354 }
1355 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1356 return selectIntrinsicWithSideEffects(I&: MI, MIB);
1357 case TargetOpcode::G_INTRINSIC:
1358 return selectIntrinsic(I&: MI, MIB);
1359 case TargetOpcode::G_EXTRACT_SUBVECTOR:
1360 return selectExtractSubvector(MI, MIB);
1361 default:
1362 return false;
1363 }
1364}
1365
1366bool RISCVInstructionSelector::selectUnmergeValues(
1367 MachineInstr &MI, MachineIRBuilder &MIB) const {
1368 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1369
1370 if (!Subtarget->hasStdExtZfa())
1371 return false;
1372
1373 // Split F64 Src into two s32 parts
1374 if (MI.getNumOperands() != 3)
1375 return false;
1376 Register Src = MI.getOperand(i: 2).getReg();
1377 Register Lo = MI.getOperand(i: 0).getReg();
1378 Register Hi = MI.getOperand(i: 1).getReg();
1379 if (!isRegInFprb(Reg: Src) || !isRegInGprb(Reg: Lo) || !isRegInGprb(Reg: Hi))
1380 return false;
1381
1382 MachineInstr *ExtractLo = MIB.buildInstr(Opc: RISCV::FMV_X_W_FPR64, DstOps: {Lo}, SrcOps: {Src});
1383 if (!constrainSelectedInstRegOperands(I&: *ExtractLo, TII, TRI, RBI))
1384 return false;
1385
1386 MachineInstr *ExtractHi = MIB.buildInstr(Opc: RISCV::FMVH_X_D, DstOps: {Hi}, SrcOps: {Src});
1387 if (!constrainSelectedInstRegOperands(I&: *ExtractHi, TII, TRI, RBI))
1388 return false;
1389
1390 MI.eraseFromParent();
1391 return true;
1392}
1393
1394bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
1395 MachineIRBuilder &MIB) {
1396 Register PtrReg = Op.getReg();
1397 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
1398
1399 const LLT sXLen = LLT::scalar(SizeInBits: STI.getXLen());
1400 auto PtrToInt = MIB.buildPtrToInt(Dst: sXLen, Src: PtrReg);
1401 MRI->setRegBank(Reg: PtrToInt.getReg(Idx: 0), RegBank: RBI.getRegBank(ID: RISCV::GPRBRegBankID));
1402 Op.setReg(PtrToInt.getReg(Idx: 0));
1403 return select(MI&: *PtrToInt);
1404}
1405
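// Rewrites pointer-typed operations the selector cannot handle directly:
// G_PTR_ADD becomes G_ADD and G_PTRMASK becomes G_AND, with the pointer
// operand converted to an sXLen integer.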
1406void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
1407 MachineIRBuilder &MIB) {
1408 switch (MI.getOpcode()) {
1409 case TargetOpcode::G_PTR_ADD: {
1410 Register DstReg = MI.getOperand(i: 0).getReg();
1411 const LLT sXLen = LLT::scalar(SizeInBits: STI.getXLen());
1412
1413 replacePtrWithInt(Op&: MI.getOperand(i: 1), MIB);
1414 MI.setDesc(TII.get(Opcode: TargetOpcode::G_ADD));
1415 MRI->setType(VReg: DstReg, Ty: sXLen);
1416 break;
1417 }
1418 case TargetOpcode::G_PTRMASK: {
1419 Register DstReg = MI.getOperand(i: 0).getReg();
1420 const LLT sXLen = LLT::scalar(SizeInBits: STI.getXLen());
1421 replacePtrWithInt(Op&: MI.getOperand(i: 1), MIB);
1422 MI.setDesc(TII.get(Opcode: TargetOpcode::G_AND));
1423 MRI->setType(VReg: DstReg, Ty: sXLen);
1424 break;
1425 }
1426 }
1427}
1428
1429void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1430 const MachineInstr &MI,
1431 int OpIdx) const {
1432 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1433 "Expected G_CONSTANT");
1434 int64_t CstVal = MI.getOperand(i: 1).getCImm()->getSExtValue();
1435 MIB.addImm(Val: -CstVal);
1436}
1437
1438void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1439 const MachineInstr &MI,
1440 int OpIdx) const {
1441 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1442 "Expected G_CONSTANT");
1443 uint64_t CstVal = MI.getOperand(i: 1).getCImm()->getZExtValue();
1444 MIB.addImm(Val: STI.getXLen() - CstVal);
1445}
1446
1447void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1448 const MachineInstr &MI,
1449 int OpIdx) const {
1450 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1451 "Expected G_CONSTANT");
1452 uint64_t CstVal = MI.getOperand(i: 1).getCImm()->getZExtValue();
1453 MIB.addImm(Val: 32 - CstVal);
1454}
1455
1456void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1457 const MachineInstr &MI,
1458 int OpIdx) const {
1459 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1460 "Expected G_CONSTANT");
1461 int64_t CstVal = MI.getOperand(i: 1).getCImm()->getSExtValue();
1462 MIB.addImm(Val: CstVal + 1);
1463}
1464
1465void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1466 const MachineInstr &MI,
1467 int OpIdx) const {
1468 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
1469 "Expected G_FRAME_INDEX");
1470 MIB.add(MO: MI.getOperand(i: 1));
1471}
1472
1473void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
1474 const MachineInstr &MI,
1475 int OpIdx) const {
1476 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1477 "Expected G_CONSTANT");
1478 uint64_t C = MI.getOperand(i: 1).getCImm()->getZExtValue();
1479 MIB.addImm(Val: llvm::countr_zero(Val: C));
1480}
1481
1482void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1483 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
1484 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1485 "Expected G_CONSTANT");
1486 uint64_t C = MI.getOperand(i: 1).getCImm()->getZExtValue();
1487 MIB.addImm(Val: Subtarget->getXLen() - llvm::countr_one(Value: C));
1488}
1489
1490void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1491 const MachineInstr &MI,
1492 int OpIdx) const {
1493 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1494 "Expected G_CONSTANT");
1495 int64_t Imm = MI.getOperand(i: 1).getCImm()->getSExtValue();
1496 int64_t Adj = Imm < 0 ? -2048 : 2047;
1497 MIB.addImm(Val: Imm - Adj);
1498}
1499
1500void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1501 const MachineInstr &MI,
1502 int OpIdx) const {
1503 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1504 "Expected G_CONSTANT");
1505 int64_t Imm = MI.getOperand(i: 1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1506 MIB.addImm(Val: Imm);
1507}
1508
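// Maps an LLT/register-bank pair to the register class used for selection:
// GPR for scalars up to XLen, FPR16/32/64 for floating-point sizes, and
// VR/VRM2/VRM4/VRM8 for vectors by LMUL-scaled size. Returns nullptr for
// unsupported combinations.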
1509const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
1510 LLT Ty, const RegisterBank &RB) const {
1511 if (RB.getID() == RISCV::GPRBRegBankID) {
1512 if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
1513 return &RISCV::GPRRegClass;
1514 }
1515
1516 if (RB.getID() == RISCV::FPRBRegBankID) {
1517 if (Ty.getSizeInBits() == 16)
1518 return &RISCV::FPR16RegClass;
1519 if (Ty.getSizeInBits() == 32)
1520 return &RISCV::FPR32RegClass;
1521 if (Ty.getSizeInBits() == 64)
1522 return &RISCV::FPR64RegClass;
1523 }
1524
1525 if (RB.getID() == RISCV::VRBRegBankID) {
1526 if (Ty.getSizeInBits().getKnownMinValue() <= 64)
1527 return &RISCV::VRRegClass;
1528
1529 if (Ty.getSizeInBits().getKnownMinValue() == 128)
1530 return &RISCV::VRM2RegClass;
1531
1532 if (Ty.getSizeInBits().getKnownMinValue() == 256)
1533 return &RISCV::VRM4RegClass;
1534
1535 if (Ty.getSizeInBits().getKnownMinValue() == 512)
1536 return &RISCV::VRM8RegClass;
1537 }
1538
1539 return nullptr;
1540}
1541
1542bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1543 return RBI.getRegBank(Reg, MRI: *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1544}
1545
1546bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1547 return RBI.getRegBank(Reg, MRI: *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1548}
1549
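// Select a COPY with a virtual destination register by constraining the
// destination to the class implied by its LLT and register bank. Copies to
// physical registers are left as they are.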
1550bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1551 Register DstReg = MI.getOperand(i: 0).getReg();
1552
1553 if (DstReg.isPhysical())
1554 return true;
1555
1556 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1557 Ty: MRI->getType(Reg: DstReg), RB: *RBI.getRegBank(Reg: DstReg, MRI: *MRI, TRI));
1558 assert(DstRC &&
1559 "Register class not available for LLT, register bank combination");
1560
1561 // No need to constrain SrcReg; it will get constrained when we reach
1562 // another of its uses or its defs.
1563 // Copies themselves do not impose register class constraints.
1564 if (!RBI.constrainGenericRegister(Reg: DstReg, RC: *DstRC, MRI&: *MRI)) {
1565 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1566 << " operand\n");
1567 return false;
1568 }
1569
1570 MI.setDesc(TII.get(Opcode: RISCV::COPY));
1571 return true;
1572}
1573
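// Select G_IMPLICIT_DEF by constraining the destination register and rewriting
// the instruction in place to the target-independent IMPLICIT_DEF.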
1574bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
1575 MachineIRBuilder &MIB) const {
1576 assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1577
1578 const Register DstReg = MI.getOperand(i: 0).getReg();
1579 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1580 Ty: MRI->getType(Reg: DstReg), RB: *RBI.getRegBank(Reg: DstReg, MRI: *MRI, TRI));
1581
1582 assert(DstRC &&
1583 "Register class not available for LLT, register bank combination");
1584
1585 if (!RBI.constrainGenericRegister(Reg: DstReg, RC: *DstRC, MRI&: *MRI)) {
1586 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1587 << " operand\n");
1588 }
1589 MI.setDesc(TII.get(Opcode: TargetOpcode::IMPLICIT_DEF));
1590 return true;
1591}
1592
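// Materialize Imm into DstReg using the instruction sequence computed by
// RISCVMatInt (LUI/ADDI(W)/SLLI/...). Intermediate results go into fresh GPR
// vregs and the last instruction of the sequence defines DstReg. For example,
// on RV64 a 32-bit constant such as 0x12345678 is typically built as LUI +
// ADDIW.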
1593bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
1594 MachineIRBuilder &MIB) const {
1595 if (Imm == 0) {
1596 MIB.buildCopy(Res: DstReg, Op: Register(RISCV::X0));
1597 RBI.constrainGenericRegister(Reg: DstReg, RC: RISCV::GPRRegClass, MRI&: *MRI);
1598 return true;
1599 }
1600
1601 RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val: Imm, STI: *Subtarget);
1602 unsigned NumInsts = Seq.size();
1603 Register SrcReg = RISCV::X0;
1604
1605 for (unsigned i = 0; i < NumInsts; i++) {
1606 Register TmpReg = i < NumInsts - 1
1607 ? MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass)
1608 : DstReg;
1609 const RISCVMatInt::Inst &I = Seq[i];
1610 MachineInstr *Result;
1611
1612 switch (I.getOpndKind()) {
1613 case RISCVMatInt::Imm:
1614 // clang-format off
1615 Result = MIB.buildInstr(Opc: I.getOpcode(), DstOps: {TmpReg}, SrcOps: {})
1616 .addImm(Val: I.getImm());
1617 // clang-format on
1618 break;
1619 case RISCVMatInt::RegX0:
1620 Result = MIB.buildInstr(Opc: I.getOpcode(), DstOps: {TmpReg},
1621 SrcOps: {SrcReg, Register(RISCV::X0)});
1622 break;
1623 case RISCVMatInt::RegReg:
1624 Result = MIB.buildInstr(Opc: I.getOpcode(), DstOps: {TmpReg}, SrcOps: {SrcReg, SrcReg});
1625 break;
1626 case RISCVMatInt::RegImm:
1627 Result =
1628 MIB.buildInstr(Opc: I.getOpcode(), DstOps: {TmpReg}, SrcOps: {SrcReg}).addImm(Val: I.getImm());
1629 break;
1630 }
1631
1632 if (!constrainSelectedInstRegOperands(I&: *Result, TII, TRI, RBI))
1633 return false;
1634
1635 SrcReg = TmpReg;
1636 }
1637
1638 return true;
1639}
1640
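// Select G_GLOBAL_VALUE, G_JUMP_TABLE and G_CONSTANT_POOL into an addressing
// sequence appropriate for the relocation and code model: PC-relative
// PseudoLLA, GOT-indirect PseudoLGA, or an absolute LUI+ADDI pair.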
1641bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
1642 MachineIRBuilder &MIB, bool IsLocal,
1643 bool IsExternWeak) const {
1644 assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1645 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1646 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1647 "Unexpected opcode");
1648
1649 const MachineOperand &DispMO = MI.getOperand(i: 1);
1650
1651 Register DefReg = MI.getOperand(i: 0).getReg();
1652 const LLT DefTy = MRI->getType(Reg: DefReg);
1653
1654 // When HWASAN is used and tagging of global variables is enabled, they
1655 // should be accessed via the GOT, since the tagged address of a global is
1656 // incompatible with the existing code models. This also applies to non-PIC
1657 // mode.
1658 if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
1659 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1660 // Use PC-relative addressing to access the symbol. This generates the
1661 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1662 // %pcrel_lo(auipc)).
1663 MI.setDesc(TII.get(Opcode: RISCV::PseudoLLA));
1664 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1665 }
1666
1667 // Use PC-relative addressing to access the GOT for this symbol, then
1668 // load the address from the GOT. This generates the pattern (PseudoLGA
1669 // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
1670 // %pcrel_lo(auipc))).
1671 MachineFunction &MF = *MI.getParent()->getParent();
1672 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1673 PtrInfo: MachinePointerInfo::getGOT(MF),
1674 f: MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1675 MachineMemOperand::MOInvariant,
1676 MemTy: DefTy, base_alignment: Align(DefTy.getSizeInBits() / 8));
1677
1678 auto Result = MIB.buildInstr(Opc: RISCV::PseudoLGA, DstOps: {DefReg}, SrcOps: {})
1679 .addDisp(Disp: DispMO, off: 0)
1680 .addMemOperand(MMO: MemOp);
1681
1682 if (!constrainSelectedInstRegOperands(I&: *Result, TII, TRI, RBI))
1683 return false;
1684
1685 MI.eraseFromParent();
1686 return true;
1687 }
1688
1689 switch (TM.getCodeModel()) {
1690 default: {
1691 reportGISelFailure(MF&: *MF, MORE&: *MORE, PassName: getName(),
1692 Msg: "Unsupported code model for lowering", MI);
1693 return false;
1694 }
1695 case CodeModel::Small: {
1696 // The address must lie within a single 2 GiB range and between absolute
1697 // addresses -2 GiB and +2 GiB. This generates the pattern (addi
1698 // (lui %hi(sym)) %lo(sym)).
1699 Register AddrHiDest = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1700 MachineInstr *AddrHi = MIB.buildInstr(Opc: RISCV::LUI, DstOps: {AddrHiDest}, SrcOps: {})
1701 .addDisp(Disp: DispMO, off: 0, TargetFlags: RISCVII::MO_HI);
1702
1703 if (!constrainSelectedInstRegOperands(I&: *AddrHi, TII, TRI, RBI))
1704 return false;
1705
1706 auto Result = MIB.buildInstr(Opc: RISCV::ADDI, DstOps: {DefReg}, SrcOps: {AddrHiDest})
1707 .addDisp(Disp: DispMO, off: 0, TargetFlags: RISCVII::MO_LO);
1708
1709 if (!constrainSelectedInstRegOperands(I&: *Result, TII, TRI, RBI))
1710 return false;
1711
1712 MI.eraseFromParent();
1713 return true;
1714 }
1715 case CodeModel::Medium:
1716 // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1717 // relocation needs to reference a label that points to the auipc
1718 // instruction itself, not the global. This cannot be done inside the
1719 // instruction selector.
1720 if (IsExternWeak) {
1721 // An extern weak symbol may be undefined, i.e. have value 0, which may
1722 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1723 // symbol. This generates the pattern (PseudoLGA sym), which expands to
1724 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1725 MachineFunction &MF = *MI.getParent()->getParent();
1726 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1727 PtrInfo: MachinePointerInfo::getGOT(MF),
1728 f: MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1729 MachineMemOperand::MOInvariant,
1730 MemTy: DefTy, base_alignment: Align(DefTy.getSizeInBits() / 8));
1731
1732 auto Result = MIB.buildInstr(Opc: RISCV::PseudoLGA, DstOps: {DefReg}, SrcOps: {})
1733 .addDisp(Disp: DispMO, off: 0)
1734 .addMemOperand(MMO: MemOp);
1735
1736 if (!constrainSelectedInstRegOperands(I&: *Result, TII, TRI, RBI))
1737 return false;
1738
1739 MI.eraseFromParent();
1740 return true;
1741 }
1742
1743 // Generate a sequence for accessing addresses within any 2 GiB range of
1744 // the address space. This generates the pattern (PseudoLLA sym), which
1745 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1746 MI.setDesc(TII.get(Opcode: RISCV::PseudoLLA));
1747 return constrainSelectedInstRegOperands(I&: MI, TII, TRI, RBI);
1748 }
1749
1750 return false;
1751}
1752
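// Select G_SELECT into a Select_*_Using_CC_GPR pseudo. The boolean condition is
// first decomposed into a RISCVCC condition code with GPR operands; the pseudo
// is expanded into a compare-and-branch sequence after selection.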
1753bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1754 MachineIRBuilder &MIB) const {
1755 auto &SelectMI = cast<GSelect>(Val&: MI);
1756
1757 Register LHS, RHS;
1758 RISCVCC::CondCode CC;
1759 getOperandsForBranch(CondReg: SelectMI.getCondReg(), CC, LHS, RHS, MRI&: *MRI);
1760
1761 Register DstReg = SelectMI.getReg(Idx: 0);
1762
1763 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1764 if (RBI.getRegBank(Reg: DstReg, MRI: *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1765 unsigned Size = MRI->getType(Reg: DstReg).getSizeInBits();
1766 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1767 : RISCV::Select_FPR64_Using_CC_GPR;
1768 }
1769
1770 MachineInstr *Result = MIB.buildInstr(Opcode: Opc)
1771 .addDef(RegNo: DstReg)
1772 .addReg(RegNo: LHS)
1773 .addReg(RegNo: RHS)
1774 .addImm(Val: CC)
1775 .addReg(RegNo: SelectMI.getTrueReg())
1776 .addReg(RegNo: SelectMI.getFalseReg());
1777 MI.eraseFromParent();
1778 return constrainSelectedInstRegOperands(I&: *Result, TII, TRI, RBI);
1779}
1780
1781// Convert an FCMP predicate to one of the supported H, F, or D instructions.
1782static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1783 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1784 switch (Pred) {
1785 default:
1786 llvm_unreachable("Unsupported predicate");
1787 case CmpInst::FCMP_OLT:
1788 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1789 case CmpInst::FCMP_OLE:
1790 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1791 case CmpInst::FCMP_OEQ:
1792 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1793 }
1794}
1795
1796// Try legalizing an FCMP by swapping or inverting the predicate to one that
1797// is supported.
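// For example, FCMP_OGT can be handled by swapping the operands and using FLT,
// while FCMP_UGE is inverted to FCMP_OLT and the caller XORs the result with 1.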
1798static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
1799 CmpInst::Predicate &Pred, bool &NeedInvert) {
1800 auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1801 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1802 Pred == CmpInst::FCMP_OEQ;
1803 };
1804
1805 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1806
1807 CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(pred: Pred);
1808 if (isLegalFCmpPredicate(InvPred)) {
1809 Pred = InvPred;
1810 std::swap(a&: LHS, b&: RHS);
1811 return true;
1812 }
1813
1814 InvPred = CmpInst::getInversePredicate(pred: Pred);
1815 NeedInvert = true;
1816 if (isLegalFCmpPredicate(InvPred)) {
1817 Pred = InvPred;
1818 return true;
1819 }
1820 InvPred = CmpInst::getSwappedPredicate(pred: InvPred);
1821 if (isLegalFCmpPredicate(InvPred)) {
1822 Pred = InvPred;
1823 std::swap(a&: LHS, b&: RHS);
1824 return true;
1825 }
1826
1827 return false;
1828}
1829
1830// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1831// the result in DstReg.
1832// FIXME: Maybe we should expand this earlier.
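// Unordered predicates (e.g. ueq, uno, uge) are handled by emitting the
// complementary ordered compare and inverting the result with XORI 1.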
1833bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1834 MachineIRBuilder &MIB) const {
1835 auto &CmpMI = cast<GFCmp>(Val&: MI);
1836 CmpInst::Predicate Pred = CmpMI.getCond();
1837
1838 Register DstReg = CmpMI.getReg(Idx: 0);
1839 Register LHS = CmpMI.getLHSReg();
1840 Register RHS = CmpMI.getRHSReg();
1841
1842 unsigned Size = MRI->getType(Reg: LHS).getSizeInBits();
1843 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1844
1845 Register TmpReg = DstReg;
1846
1847 bool NeedInvert = false;
1848 // First try swapping operands or inverting.
1849 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1850 if (NeedInvert)
1851 TmpReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1852 auto Cmp = MIB.buildInstr(Opc: getFCmpOpcode(Pred, Size), DstOps: {TmpReg}, SrcOps: {LHS, RHS});
1853 if (!Cmp.constrainAllUses(TII, TRI, RBI))
1854 return false;
1855 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1856 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
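// fcmp ueq LHS, RHS => (XORI (OR (FLT LHS, RHS), (FLT RHS, LHS)), 1)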
1857 NeedInvert = Pred == CmpInst::FCMP_UEQ;
1858 auto Cmp1 = MIB.buildInstr(Opc: getFCmpOpcode(Pred: CmpInst::FCMP_OLT, Size),
1859 DstOps: {&RISCV::GPRRegClass}, SrcOps: {LHS, RHS});
1860 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1861 return false;
1862 auto Cmp2 = MIB.buildInstr(Opc: getFCmpOpcode(Pred: CmpInst::FCMP_OLT, Size),
1863 DstOps: {&RISCV::GPRRegClass}, SrcOps: {RHS, LHS});
1864 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1865 return false;
1866 if (NeedInvert)
1867 TmpReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1868 auto Or =
1869 MIB.buildInstr(Opc: RISCV::OR, DstOps: {TmpReg}, SrcOps: {Cmp1.getReg(Idx: 0), Cmp2.getReg(Idx: 0)});
1870 if (!Or.constrainAllUses(TII, TRI, RBI))
1871 return false;
1872 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1873 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
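// fcmp uno LHS, RHS => (XORI (AND (FEQ LHS, LHS), (FEQ RHS, RHS)), 1)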
1874 // FIXME: If LHS and RHS are the same we can use a single FEQ.
1875 NeedInvert = Pred == CmpInst::FCMP_UNO;
1876 auto Cmp1 = MIB.buildInstr(Opc: getFCmpOpcode(Pred: CmpInst::FCMP_OEQ, Size),
1877 DstOps: {&RISCV::GPRRegClass}, SrcOps: {LHS, LHS});
1878 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1879 return false;
1880 auto Cmp2 = MIB.buildInstr(Opc: getFCmpOpcode(Pred: CmpInst::FCMP_OEQ, Size),
1881 DstOps: {&RISCV::GPRRegClass}, SrcOps: {RHS, RHS});
1882 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1883 return false;
1884 if (NeedInvert)
1885 TmpReg = MRI->createVirtualRegister(RegClass: &RISCV::GPRRegClass);
1886 auto And =
1887 MIB.buildInstr(Opc: RISCV::AND, DstOps: {TmpReg}, SrcOps: {Cmp1.getReg(Idx: 0), Cmp2.getReg(Idx: 0)});
1888 if (!And.constrainAllUses(TII, TRI, RBI))
1889 return false;
1890 } else
1891 llvm_unreachable("Unhandled predicate");
1892
1893 // Emit an XORI to invert the result if needed.
1894 if (NeedInvert) {
1895 auto Xor = MIB.buildInstr(Opc: RISCV::XORI, DstOps: {DstReg}, SrcOps: {TmpReg}).addImm(Val: 1);
1896 if (!Xor.constrainAllUses(TII, TRI, RBI))
1897 return false;
1898 }
1899
1900 MI.eraseFromParent();
1901 return true;
1902}
1903
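// Lower G_FENCE. With the Ztso extension only a sequentially consistent
// cross-thread fence needs a real FENCE instruction; everything else becomes a
// compiler-only MEMBARRIER. Without Ztso, the ordering is mapped onto FENCE
// predecessor/successor sets, or FENCE.TSO for acquire-release.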
1904void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1905 SyncScope::ID FenceSSID,
1906 MachineIRBuilder &MIB) const {
1907 if (STI.hasStdExtZtso()) {
1908 // The only fence that needs an instruction is a sequentially-consistent
1909 // cross-thread fence.
1910 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1911 FenceSSID == SyncScope::System) {
1912 // fence rw, rw
1913 MIB.buildInstr(Opc: RISCV::FENCE, DstOps: {}, SrcOps: {})
1914 .addImm(Val: RISCVFenceField::R | RISCVFenceField::W)
1915 .addImm(Val: RISCVFenceField::R | RISCVFenceField::W);
1916 return;
1917 }
1918
1919 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
1920 MIB.buildInstr(Opc: TargetOpcode::MEMBARRIER, DstOps: {}, SrcOps: {});
1921 return;
1922 }
1923
1924 // singlethread fences only synchronize with signal handlers on the same
1925 // thread and thus only need to preserve instruction order, not actually
1926 // enforce memory ordering.
1927 if (FenceSSID == SyncScope::SingleThread) {
1928 MIB.buildInstr(Opc: TargetOpcode::MEMBARRIER, DstOps: {}, SrcOps: {});
1929 return;
1930 }
1931
1932 // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
1933 // Manual: Volume I.
1934 unsigned Pred, Succ;
1935 switch (FenceOrdering) {
1936 default:
1937 llvm_unreachable("Unexpected ordering");
1938 case AtomicOrdering::AcquireRelease:
1939 // fence acq_rel -> fence.tso
1940 MIB.buildInstr(Opc: RISCV::FENCE_TSO, DstOps: {}, SrcOps: {});
1941 return;
1942 case AtomicOrdering::Acquire:
1943 // fence acquire -> fence r, rw
1944 Pred = RISCVFenceField::R;
1945 Succ = RISCVFenceField::R | RISCVFenceField::W;
1946 break;
1947 case AtomicOrdering::Release:
1948 // fence release -> fence rw, w
1949 Pred = RISCVFenceField::R | RISCVFenceField::W;
1950 Succ = RISCVFenceField::W;
1951 break;
1952 case AtomicOrdering::SequentiallyConsistent:
1953 // fence seq_cst -> fence rw, rw
1954 Pred = RISCVFenceField::R | RISCVFenceField::W;
1955 Succ = RISCVFenceField::R | RISCVFenceField::W;
1956 break;
1957 }
1958 MIB.buildInstr(Opc: RISCV::FENCE, DstOps: {}, SrcOps: {}).addImm(Val: Pred).addImm(Val: Succ);
1959}
1960
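// Factory entry point; the RISC-V subtarget is expected to call this when it
// sets up its GlobalISel instruction selector.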
1961namespace llvm {
1962InstructionSelector *
1963createRISCVInstructionSelector(const RISCVTargetMachine &TM,
1964 const RISCVSubtarget &Subtarget,
1965 const RISCVRegisterBankInfo &RBI) {
1966 return new RISCVInstructionSelector(TM, Subtarget, RBI);
1967}
1968} // end namespace llvm
1969